Compare commits
939 Commits
Author | SHA1 | Date | |
---|---|---|---|
5a2e6ccf77 | |||
f31e32a006 | |||
2ad96e1635 | |||
7bc2e240b1 | |||
20a04cf07c | |||
a40ffb92ac | |||
e2aeff40eb | |||
d20137e5a9 | |||
6a35698796 | |||
2981cdd4c0 | |||
8c9c6c0755 | |||
2c69b69108 | |||
0bed1f2956 | |||
4ef6b7d1f0 | |||
87d8aa4278 | |||
51d900d187 | |||
519ca9d010 | |||
615a50c108 | |||
f418f4e48b | |||
c66fa32c08 | |||
2515ff35c2 | |||
33a1ef7aae | |||
9c12e82006 | |||
9f19057036 | |||
c7f7236b88 | |||
fdefe192ac | |||
1ed8698b7e | |||
0bd9c87010 | |||
fbfb64a6b2 | |||
c39852abdc | |||
1ec167ee8c | |||
11ca834317 | |||
68a6e970d4 | |||
4e851c26a2 | |||
ceb815d295 | |||
14433718fb | |||
3dc8783af7 | |||
6d89534929 | |||
aa19d5b917 | |||
a8d3f1943b | |||
3cf12ffac9 | |||
2017a47eec | |||
21185350fb | |||
17b079918e | |||
fbfc439372 | |||
27d3a232d0 | |||
1fa6083bc8 | |||
aa32a46171 | |||
6283d7d13a | |||
d4dd7ac842 | |||
451da4923b | |||
f15e094408 | |||
134779664e | |||
9ce2f903fb | |||
6802a68356 | |||
c69884a459 | |||
93205cbe92 | |||
434dd3cc84 | |||
dba37e212b | |||
db4b8683cf | |||
5557af0efb | |||
8721b42e2f | |||
5408e30ab1 | |||
70493f1823 | |||
069720f510 | |||
a93c96823c | |||
2393943fbb | |||
49d604aec1 | |||
246275e203 | |||
c9fb0f3887 | |||
84de101272 | |||
de77a20d3d | |||
997c96d6a3 | |||
513da8ed10 | |||
e87e4499fd | |||
a19b8c2e24 | |||
b8858d5186 | |||
bc001e12e2 | |||
abd8248520 | |||
974a3e521a | |||
ea2e91e52f | |||
77bd14f68a | |||
d1fba4de1d | |||
3e4994a54f | |||
75b377219d | |||
c8dc51e41f | |||
7d0dbaa013 | |||
efa62d44d4 | |||
210ded9803 | |||
99e1399729 | |||
0aa5815fb6 | |||
bb5c77fffa | |||
ebfcf75e14 | |||
83e3000349 | |||
4a4dd66c26 | |||
b9b2d635fe | |||
9f8aa8c5e2 | |||
b11693b2f7 | |||
53435bc4d5 | |||
8bec3ff691 | |||
789e22d905 | |||
a1c30e0194 | |||
d4c6e68bf0 | |||
e642344f98 | |||
d4574bb138 | |||
26b40687b3 | |||
e3c26aea31 | |||
65aba79a9b | |||
4b6a653a0f | |||
3c41d86010 | |||
93821e87e6 | |||
f12f408e91 | |||
71cad8cac0 | |||
49bea6b5d9 | |||
f7247e2b84 | |||
5664b41c30 | |||
33612525e1 | |||
a502bc5617 | |||
8772ca727c | |||
7f3b4a94e6 | |||
327d14b3d1 | |||
0f8fd71093 | |||
8d3b84e719 | |||
d1d328d582 | |||
72e344a1b4 | |||
f71a4ce6d6 | |||
a3b1026753 | |||
5e1b17018b | |||
9615d9a6b6 | |||
3ae4dab4b9 | |||
d74172bfc1 | |||
2e9a9f94a4 | |||
ed9797d67e | |||
56f0ce27ac | |||
67d4131158 | |||
acbb19498a | |||
187ec50488 | |||
b3116e5680 | |||
77baca66eb | |||
e7ddae292a | |||
c15b058db7 | |||
8af1fa5477 | |||
f61d822efa | |||
d0e3f5dd5c | |||
16e605583f | |||
62e5cf1e8c | |||
76bc66b9bd | |||
bd21a63bd2 | |||
d14512c82d | |||
e5cf0e3eda | |||
14f140d1c5 | |||
1d592668ac | |||
2c0fae66b3 | |||
cbd7db1d7f | |||
6189b956b6 | |||
c2add820a4 | |||
e1dc2d2210 | |||
bffc923420 | |||
c760a67278 | |||
0181b0f1f7 | |||
d22363ad08 | |||
7784698948 | |||
3697161800 | |||
e13303fca6 | |||
eefa297aa0 | |||
5ae393af15 | |||
f2fe00f1e2 | |||
1afce610c7 | |||
f15601f1c9 | |||
90915ab629 | |||
ebab1e5ed9 | |||
6da6bafeac | |||
067c77329b | |||
8c4131708a | |||
a7646fe42a | |||
dadaa9e2f0 | |||
6a7b673872 | |||
0606432e9b | |||
7ebd97e8ea | |||
44df558d66 | |||
9c75e2f3e1 | |||
4adb574d74 | |||
fb840eda4d | |||
007388f053 | |||
1d9ba1cc8b | |||
63e98028cc | |||
e3ea577011 | |||
6bfc94ea19 | |||
1f2126fd7c | |||
456456483e | |||
3eb15257b9 | |||
597398cb48 | |||
3d2baf4170 | |||
3aafa61362 | |||
60b9676fa2 | |||
e5824cd61f | |||
508d644e87 | |||
b0166d4e8d | |||
ca3f8757ba | |||
118e984996 | |||
45ba884d0d | |||
d1f9cceada | |||
4ac8ec11fb | |||
66ad63bac2 | |||
9ec82aefb4 | |||
e3cda36ba5 | |||
5d05f334f1 | |||
a3d61f3fba | |||
ed289736cf | |||
dc193e8197 | |||
1f71e44172 | |||
7da520ae46 | |||
cbde538c0c | |||
cf1b029b3f | |||
4f897c8cf9 | |||
2e63a46414 | |||
9f4d9abbf6 | |||
8122eaadaa | |||
22cfad134f | |||
de27ebc6b6 | |||
74391d1c32 | |||
fca84a4b94 | |||
602319f9fc | |||
8d2a9b2904 | |||
c8e93b31ff | |||
cf99333b83 | |||
b70a12e723 | |||
f6b09e83cb | |||
6f836d3ffa | |||
80df7caded | |||
12d334615b | |||
1e37156a6b | |||
e49bd1e98f | |||
707c48ad46 | |||
07ffb86451 | |||
fc99c2791b | |||
6b61d319c5 | |||
be97e0a55b | |||
999293bbca | |||
9c65e6ab4a | |||
1e4e1514d3 | |||
05b7175a56 | |||
bc21ade293 | |||
f07e660153 | |||
addcb7803e | |||
1ddfae5499 | |||
54d315c951 | |||
9dde8cd625 | |||
87be232d1c | |||
e40c7fb906 | |||
66abc4cb7d | |||
11567dfbad | |||
7a3e777ded | |||
b9310489cf | |||
d9aad37f2f | |||
2a088b9975 | |||
78e1ee5230 | |||
c7d42dac97 | |||
8ca7cccf5f | |||
e30a2e9058 | |||
08982a3746 | |||
42fb291c7c | |||
e9b9f33aee | |||
f4d246072d | |||
15808a9023 | |||
e22ad28302 | |||
0408f60b58 | |||
d4d730e589 | |||
5b460ef525 | |||
03d4f43d5a | |||
5225817de6 | |||
0ae5f76277 | |||
09d903034f | |||
9eb804006c | |||
6e3391c85b | |||
71139be203 | |||
fbca018229 | |||
7e8b24bd8c | |||
fe79687c59 | |||
9ccf933be5 | |||
87cdc327b9 | |||
5566099849 | |||
92b9cc1554 | |||
0e3de42aa7 | |||
8c29bca57c | |||
d895b26bb9 | |||
c06c1b4bd7 | |||
31aa38b684 | |||
9d8090626c | |||
6b4d057370 | |||
53d073ec1a | |||
30ccc3003e | |||
bc4af01559 | |||
d83ce0d0c7 | |||
ad7741a294 | |||
a327f918af | |||
0b1edf297b | |||
59229bd7f1 | |||
8e82cc807c | |||
d4037525a8 | |||
40d495de6d | |||
d3a570eb79 | |||
9f8fb928f1 | |||
226a4e68da | |||
473063e9ec | |||
93b0659ff2 | |||
e8112eb37b | |||
6f5753cfa3 | |||
3c09413a0a | |||
028346e42c | |||
bc06c7b4e9 | |||
7a404dc53d | |||
c939698414 | |||
1909ece229 | |||
2bc2435a96 | |||
a724f5fd47 | |||
133d718fe4 | |||
1baf9030ad | |||
2f5417f845 | |||
a7f5e64154 | |||
55ffd4a946 | |||
94135ccca2 | |||
968270ae3d | |||
d45506d4a4 | |||
cabda57f0a | |||
7d6fc15b20 | |||
18934ae56b | |||
15a9272495 | |||
08aa5fe7aa | |||
e687d1b8ee | |||
c12a075b83 | |||
c5648f1920 | |||
dc3d716bdb | |||
6dd8a2ced0 | |||
be5b3ebfdd | |||
c18d481fd7 | |||
68857aecb3 | |||
352e13db9d | |||
220b66077c | |||
2772159692 | |||
89ae3c3255 | |||
13d6de3787 | |||
33f2c2a1bf | |||
4c7cc5b39e | |||
90e3869690 | |||
02ec2ae9b8 | |||
c2425132c4 | |||
11ffd737e3 | |||
8c74349b08 | |||
42103c467d | |||
b68bd900c1 | |||
77337b3b4c | |||
b6c8717cc2 | |||
dfea916ca7 | |||
d49025064c | |||
dd612daab0 | |||
8915c1e74a | |||
c94d2867c1 | |||
0b232f2edc | |||
2c64201e64 | |||
41c1a17999 | |||
aefbaa4dc6 | |||
60ed7aeae6 | |||
29c56859b0 | |||
aa07391764 | |||
df768ebea9 | |||
20814a3986 | |||
8550de7403 | |||
0f198b82f5 | |||
a0781d7b9e | |||
f732942089 | |||
1b7479c968 | |||
fbd6f54f39 | |||
adf5dcba8d | |||
e022d13cf3 | |||
dd09432a90 | |||
6ddd69c5ce | |||
25be1fa0d7 | |||
8eaa46ffea | |||
4d76ab91e4 | |||
436a48d611 | |||
274ac755a1 | |||
579362f743 | |||
f3b02a9b86 | |||
684a402931 | |||
1eef52c206 | |||
f03649b8f3 | |||
5c9c23b6b2 | |||
b298e9f16e | |||
cc295e2c7a | |||
4b77d300a2 | |||
df5c6a11cd | |||
07a683d266 | |||
7098f5d885 | |||
f37d8540e1 | |||
eb1cd24e21 | |||
6da20161f0 | |||
bb628c295a | |||
2c88dc97fd | |||
6b0c6492f7 | |||
10a0059602 | |||
5203cfcff9 | |||
cf320b6ba1 | |||
5116453b6d | |||
db87d93efc | |||
38aa71fcc8 | |||
1f6a45c938 | |||
b444eb68af | |||
2d5c20c8f5 | |||
c4b2d26cdb | |||
fe94c9962e | |||
24cb5c7a81 | |||
988d575dbb | |||
33eb23d57e | |||
249dde8b63 | |||
7b125de3e1 | |||
de015ce7e1 | |||
72f8154571 | |||
693f3285eb | |||
7d9cb8c458 | |||
c90dbb5c7b | |||
bdfa637058 | |||
f9a5beaa15 | |||
00ae34dfda | |||
9531d2c570 | |||
ee0ea73500 | |||
dc7a5b3491 | |||
35f151e010 | |||
42c2b5bec9 | |||
fb3c007f8a | |||
ff7568f1d9 | |||
1fd46218ea | |||
ede9dc0d1a | |||
ae60eed310 | |||
e3746a329e | |||
7546e9c997 | |||
0bb4036f25 | |||
84d3af3a0e | |||
055eab54ff | |||
984ddb2ff2 | |||
23af572d3f | |||
99f09fd3c1 | |||
da7a71115c | |||
ebb85c1ca3 | |||
fb6e48f402 | |||
b7c3eaa981 | |||
32e2b5abe6 | |||
2ef2c0fe0c | |||
9c3b29bd8f | |||
3c8f240712 | |||
6353e22c00 | |||
38774184a9 | |||
845baef61b | |||
73ce2ae1c7 | |||
556eda0537 | |||
5fd823c3f2 | |||
758c6ed588 | |||
4bc84a6549 | |||
e9d2fc9362 | |||
2a05c75ff1 | |||
66b88dadba | |||
9ee2ef2e55 | |||
12558e0dde | |||
b22d785c18 | |||
4ad118c613 | |||
6082d75966 | |||
4de1c42c20 | |||
429bc9d0a2 | |||
a22d338831 | |||
1e724828b4 | |||
40853461d1 | |||
416194d799 | |||
eb419c5267 | |||
baefc29544 | |||
b23adfd4ee | |||
a527b54f84 | |||
b2df21bb02 | |||
2b323a359d | |||
48fcee6a50 | |||
c650378a39 | |||
40ea990c05 | |||
aaaa10894d | |||
41583796b1 | |||
b300e6fbc2 | |||
085ae87380 | |||
938a1f137c | |||
5525ec246f | |||
b676dbce78 | |||
7c22932c64 | |||
2b422b82fb | |||
9e2b423e27 | |||
39ffb75d91 | |||
762f7d15dc | |||
80ab05e40c | |||
e099bd0717 | |||
171a00ca97 | |||
c8322f8a33 | |||
eb080f361a | |||
bd0300917e | |||
787c6550d4 | |||
c6140c62ab | |||
9735f5de84 | |||
a07ace0d1e | |||
904ce33d9f | |||
6dd5944772 | |||
dcd1518e10 | |||
8d6425aa24 | |||
4042eedf18 | |||
1c8efc0062 | |||
a9a15a9ab4 | |||
bd4562e4b1 | |||
e1f9553f2d | |||
118deb4db8 | |||
9c96e5368a | |||
83b5076dce | |||
fef61684b4 | |||
118f8589a9 | |||
c2f84841b6 | |||
b0728103b6 | |||
00d41438b9 | |||
50654b22df | |||
d96c7da31f | |||
9c890d72b9 | |||
229c1788c1 | |||
f26d7ca5c5 | |||
b066586a47 | |||
0d7873cf09 | |||
667476f19d | |||
a1b800c232 | |||
4b8395ee0e | |||
dcd9c17fff | |||
f30757df50 | |||
ac20cb1f65 | |||
c19af51ecb | |||
d6644e29fe | |||
260147bd73 | |||
e705b3057f | |||
192ece47fb | |||
7739004815 | |||
11363a6a69 | |||
41adda1c64 | |||
77d6d7a22c | |||
5b93835744 | |||
fd7f760304 | |||
af6fdb9d0d | |||
a1c906cb02 | |||
dcf5a0f62d | |||
bb9e503964 | |||
b2fc573a62 | |||
415da09826 | |||
5ffa68d2c4 | |||
e7668a3eea | |||
21898bb831 | |||
7b944ff11a | |||
fce49eab30 | |||
af35bc8b9c | |||
e5e48b01ad | |||
5d74f79643 | |||
b0427dda76 | |||
70ba718ce9 | |||
68811af9f9 | |||
163629e62e | |||
1993d98695 | |||
127c5ac3a9 | |||
7a1a5d206d | |||
7a524f1048 | |||
1d3b253721 | |||
1f8b29f578 | |||
48ce3d00a4 | |||
d91a0f9fc9 | |||
3af17d8919 | |||
98983a9dab | |||
e92df23806 | |||
5ee8dd784f | |||
f37167aeff | |||
2eba3967b2 | |||
1d552d2dd5 | |||
1ec7f7e6f2 | |||
8ad9eb779e | |||
18ba1b2249 | |||
e2e587e3c7 | |||
c10a6755f0 | |||
d43aca148f | |||
5dfe3b66ab | |||
50c0840146 | |||
2e02a859cf | |||
64c075b6c2 | |||
f27b6086b1 | |||
7c069e82d1 | |||
b44483a853 | |||
ba857cbe68 | |||
c772a4a683 | |||
e466526137 | |||
62222ed068 | |||
f06b5283b0 | |||
645d52308b | |||
7f6c169b25 | |||
9987872382 | |||
6f1c26b083 | |||
3afecb8409 | |||
540fca5c9e | |||
8ff886773f | |||
aa174e8e8a | |||
0a7f902e2a | |||
9a37bd6c84 | |||
a0c69902c8 | |||
f30ada6bbe | |||
c3b8e74fdf | |||
9fa3026a08 | |||
821aa8eae6 | |||
0b50c18ed0 | |||
25e41aa802 | |||
ff6b6cd74d | |||
48910f9b0a | |||
dfe5c4c494 | |||
beb1d6f362 | |||
323ad7ddc0 | |||
1f53f6128f | |||
4912d5f0e3 | |||
d4877712f8 | |||
7549114c9f | |||
c72f8784a5 | |||
6a5a60ebfd | |||
f49cd6c135 | |||
b3c7567e3c | |||
d0d970f70b | |||
f66d814792 | |||
b25e07b3ce | |||
0e994eb938 | |||
1a211f0d96 | |||
f7fde5c81b | |||
af5a55509d | |||
68b6c1202c | |||
ad72fda1d6 | |||
705f4b0d95 | |||
65bd918ac3 | |||
7d4d8f47c9 | |||
73fba2edea | |||
e25982f24e | |||
368daf13fd | |||
e6e2927e72 | |||
0fee4ff2a4 | |||
3dcea3ce33 | |||
726b9d4469 | |||
577095e2f7 | |||
f35e187f16 | |||
e2b12ce988 | |||
92ef0b56d8 | |||
8a8a1850d0 | |||
fddb9bcc3e | |||
0df179c2b4 | |||
689ed51397 | |||
3c56335d7b | |||
9eb58647c1 | |||
0ff214bedd | |||
25877d05ac | |||
bd00ff10e4 | |||
149b969d9a | |||
56d3b59c71 | |||
c1e6efa8e1 | |||
3b5473a682 | |||
4954d3130b | |||
064497756e | |||
ce3c7a1bda | |||
50a39bbc1f | |||
154d01b042 | |||
1f3352018b | |||
b721783c48 | |||
76ee3085a4 | |||
5d5a53059f | |||
77d8c593b3 | |||
c450a3cafd | |||
f8f4d7cab4 | |||
91abfef049 | |||
963b7ec51b | |||
16aab0c137 | |||
bf8b8be976 | |||
e201104d0b | |||
d63db0863d | |||
7a36833103 | |||
ca6e66aa5a | |||
94a6b33680 | |||
2d5287fbbc | |||
6eb756bcab | |||
5647219049 | |||
b810972823 | |||
3a07cdf574 | |||
193ec30c2b | |||
c94723062c | |||
0eadfdf670 | |||
ebf8ce20bc | |||
e7acdde758 | |||
f2c9da2349 | |||
ff344655e2 | |||
3490d9460c | |||
fdf9373f9e | |||
ba80611324 | |||
07a579c632 | |||
188a37fbed | |||
f251367c33 | |||
ac4e399a10 | |||
4fe77c36df | |||
118515dbd0 | |||
42ba4cd399 | |||
ab1c07a622 | |||
930a71460f | |||
a58a5cf795 | |||
92a8f0bc82 | |||
bf298a16ef | |||
9a1b24b6b1 | |||
ea67cd70c9 | |||
281a5dd1fc | |||
df3b3d1798 | |||
f5e2b4726d | |||
daaeea8b4b | |||
6f6df501a0 | |||
d5790a9f27 | |||
860eaec58f | |||
26e949d5fe | |||
ac7dbba458 | |||
7c2431d42c | |||
25c1420a12 | |||
c1a1e1ae8f | |||
a9df9df25d | |||
10beed1199 | |||
df32530750 | |||
062edce27f | |||
efd2713aa8 | |||
8a21566c8a | |||
c8c5c7f571 | |||
91357c2034 | |||
097ccfe1d5 | |||
61ef4ae8cb | |||
01ae7bfaf2 | |||
1b52122a1f | |||
1d9bc184f5 | |||
5f83d3f636 | |||
71e534631f | |||
6e9e6c7a54 | |||
e2e7560d5e | |||
0ceb97538a | |||
3e276f6fb6 | |||
2b00c5abca | |||
15cc41b6cb | |||
729bd1fd16 | |||
9a7431e2e0 | |||
52fbc86fc9 | |||
afe6c79ce3 | |||
9407810fe1 | |||
c42a54795d | |||
96ec3801a9 | |||
c4707d0c1d | |||
24f9af9e0f | |||
a0172d766b | |||
09f999337a | |||
e3eb062c09 | |||
de21d4efdc | |||
d5f58006d3 | |||
cb80ffc1de | |||
1859a0eb8b | |||
9e7132c0b3 | |||
bf013be1c4 | |||
b935209584 | |||
efd4ddc17b | |||
e511e0e553 | |||
610150a4b4 | |||
485b2438ac | |||
bfd12e871f | |||
0c136bfab1 | |||
245e2aea23 | |||
b9d588ffde | |||
e4bc3e0e8d | |||
2419dc0de9 | |||
68fd9ca6d6 | |||
4beb7d2dbe | |||
2bc1250c28 | |||
9b1e2ae83c | |||
9b5ecbe2ff | |||
342ed4aea0 | |||
d4e9d5470e | |||
5c1cabdea1 | |||
38517ca053 | |||
e33758d1b8 | |||
aba6189c4f | |||
adcc21716b | |||
87e17fb4d1 | |||
8292d3d20e | |||
5cc7d89139 | |||
343392613d | |||
de91418b79 | |||
fe9c47ab4f | |||
02db72678f | |||
db4b469285 | |||
92c5cf42d1 | |||
e9558f290a | |||
572e6594d2 | |||
88691284d8 | |||
85c622807e | |||
9d42e0475b | |||
181a335bfa | |||
0a33951e9e | |||
7a356a748a | |||
1c402740a2 | |||
e0a19d3313 | |||
6b8329ee34 | |||
1d4448998a | |||
d6473f5359 | |||
f5f9ec81d2 | |||
fea950155f | |||
ef2944bc24 | |||
934c8724e2 | |||
98eb435d90 | |||
bd10af6eda | |||
7f381a6246 | |||
c17fbbbc07 | |||
ac2ca6c341 | |||
d26865c52c | |||
2b05008a11 | |||
45700e2ecf | |||
f84304235b | |||
0ca41155b2 | |||
a291ab59ba | |||
fce7cd0d36 | |||
658357c5a8 | |||
7484fce24d | |||
f28a713e2b | |||
a9017805b7 | |||
2e3f94e12f | |||
d531c7ae61 | |||
7df1580fa6 | |||
58f70bccbb | |||
fae4f6c509 | |||
ddafb28572 | |||
642c7b9915 | |||
5a8726e6d2 | |||
b3f279e2d9 | |||
82f5ad18f0 | |||
bacc99c7f8 | |||
6728d0977b | |||
bff7c027c9 | |||
79b3113361 | |||
5885767b91 | |||
ec08247e5c | |||
400f081487 | |||
03664514ab | |||
c68fa58a59 | |||
426dda0730 | |||
eb37d4ece2 | |||
1198f8d4e6 | |||
4b709ade68 | |||
fa49d0fde9 | |||
1d44f175c6 | |||
890b88cbef | |||
27709b49d5 | |||
7ccbce03d3 | |||
5fb852afed | |||
60589e6066 | |||
717ce40612 | |||
75442e813e | |||
853c55a049 | |||
6ef1b649d9 | |||
e3f3359c86 | |||
0e1edf19b1 | |||
de55fff226 | |||
b3a67f1f14 | |||
3cc23ca6cc | |||
3def6bfc64 | |||
18e8bc17e4 | |||
f66d66aafe | |||
7380c48dff | |||
0191759316 | |||
dbc42e6f75 | |||
d1c3bc5350 | |||
a97301350f | |||
09340f28f5 | |||
20497c6346 | |||
d0f7d0d9c1 | |||
608806e884 | |||
48176b0a77 | |||
3483a3b3a1 | |||
347e0d4c57 | |||
ae9b5c077a | |||
747446eb50 | |||
e1c8c27f47 | |||
63cec1622a | |||
31142ef291 | |||
058b4b9708 | |||
9a1330c72e | |||
0a6df20986 | |||
6680878b5c | |||
593043ed53 | |||
038f385089 | |||
b914b94773 | |||
2194bc59c8 | |||
a98a288e2d | |||
49e25688f1 | |||
d7eedbd24b | |||
5b17a02da4 | |||
8735247f29 | |||
0d5d15c9d1 | |||
2e44983a37 | |||
c76ff4b472 | |||
aaf4f40285 | |||
e64f77b716 | |||
fd1b65cc3c | |||
11148dce43 | |||
38da8ca1bc | |||
a0ffd4a413 | |||
450105b0c3 | |||
b62edce929 | |||
67678ec39c | |||
bf95fba72e | |||
d265420025 | |||
01a080215d | |||
8cf445ecc4 | |||
20def38e96 | |||
be5b43cb87 | |||
6f0565fa60 | |||
99940358e3 | |||
53daae8e89 | |||
8a23ea4656 | |||
c95c1c83b0 | |||
b446fa14c5 | |||
6d5d305d9d | |||
af2eb422d5 | |||
bbd57396d7 | |||
0fd55b08d9 | |||
619cd5cbcb | |||
1ec0d70d09 | |||
c8449217dc | |||
f7348a23cd | |||
ae18c436dd | |||
b0e20a71e2 | |||
b9700a9fe5 | |||
81867f0539 | |||
0a33fba49c | |||
049a22a3a3 | |||
4d4f94dedf | |||
a844fa0ba0 |
71
Cargo.toml
@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "proxmox-backup"
|
name = "proxmox-backup"
|
||||||
version = "2.0.10"
|
version = "2.2.3"
|
||||||
authors = [
|
authors = [
|
||||||
"Dietmar Maurer <dietmar@proxmox.com>",
|
"Dietmar Maurer <dietmar@proxmox.com>",
|
||||||
"Dominik Csapak <d.csapak@proxmox.com>",
|
"Dominik Csapak <d.csapak@proxmox.com>",
|
||||||
@ -25,9 +25,8 @@ members = [
|
|||||||
"pbs-config",
|
"pbs-config",
|
||||||
"pbs-datastore",
|
"pbs-datastore",
|
||||||
"pbs-fuse-loop",
|
"pbs-fuse-loop",
|
||||||
"pbs-runtime",
|
|
||||||
"proxmox-rest-server",
|
"proxmox-rest-server",
|
||||||
"proxmox-systemd",
|
"proxmox-rrd",
|
||||||
"pbs-tape",
|
"pbs-tape",
|
||||||
"pbs-tools",
|
"pbs-tools",
|
||||||
|
|
||||||
@ -44,32 +43,33 @@ path = "src/lib.rs"
|
|||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
apt-pkg-native = "0.3.2"
|
apt-pkg-native = "0.3.2"
|
||||||
base64 = "0.12"
|
base64 = "0.13"
|
||||||
bitflags = "1.2.1"
|
bitflags = "1.2.1"
|
||||||
bytes = "1.0"
|
bytes = "1.0"
|
||||||
|
cidr = "0.2.1"
|
||||||
crc32fast = "1"
|
crc32fast = "1"
|
||||||
endian_trait = { version = "0.6", features = ["arrays"] }
|
endian_trait = { version = "0.6", features = ["arrays"] }
|
||||||
env_logger = "0.7"
|
|
||||||
flate2 = "1.0"
|
flate2 = "1.0"
|
||||||
anyhow = "1.0"
|
anyhow = "1.0"
|
||||||
thiserror = "1.0"
|
thiserror = "1.0"
|
||||||
futures = "0.3"
|
futures = "0.3"
|
||||||
h2 = { version = "0.3", features = [ "stream" ] }
|
h2 = { version = "0.3", features = [ "stream" ] }
|
||||||
handlebars = "3.0"
|
handlebars = "3.0"
|
||||||
|
hex = "0.4.3"
|
||||||
http = "0.2"
|
http = "0.2"
|
||||||
hyper = { version = "0.14", features = [ "full" ] }
|
hyper = { version = "0.14", features = [ "full" ] }
|
||||||
lazy_static = "1.4"
|
lazy_static = "1.4"
|
||||||
libc = "0.2"
|
libc = "0.2"
|
||||||
log = "0.4"
|
log = "0.4.17"
|
||||||
nix = "0.19.1"
|
nix = "0.24"
|
||||||
num-traits = "0.2"
|
num-traits = "0.2"
|
||||||
once_cell = "1.3.1"
|
once_cell = "1.3.1"
|
||||||
openssl = "0.10"
|
openssl = "0.10.38" # currently patched!
|
||||||
pam = "0.7"
|
pam = "0.7"
|
||||||
pam-sys = "0.5"
|
pam-sys = "0.5"
|
||||||
percent-encoding = "2.1"
|
percent-encoding = "2.1"
|
||||||
regex = "1.2"
|
regex = "1.5.5"
|
||||||
rustyline = "7"
|
rustyline = "9"
|
||||||
serde = { version = "1.0", features = ["derive"] }
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
serde_json = "1.0"
|
serde_json = "1.0"
|
||||||
siphasher = "0.3"
|
siphasher = "0.3"
|
||||||
@ -77,13 +77,12 @@ syslog = "4.0"
|
|||||||
tokio = { version = "1.6", features = [ "fs", "io-util", "io-std", "macros", "net", "parking_lot", "process", "rt", "rt-multi-thread", "signal", "time" ] }
|
tokio = { version = "1.6", features = [ "fs", "io-util", "io-std", "macros", "net", "parking_lot", "process", "rt", "rt-multi-thread", "signal", "time" ] }
|
||||||
tokio-openssl = "0.6.1"
|
tokio-openssl = "0.6.1"
|
||||||
tokio-stream = "0.1.0"
|
tokio-stream = "0.1.0"
|
||||||
tokio-util = { version = "0.6", features = [ "codec", "io" ] }
|
tokio-util = { version = "0.7", features = [ "codec", "io" ] }
|
||||||
tower-service = "0.3.0"
|
tower-service = "0.3.0"
|
||||||
udev = ">= 0.3, <0.5"
|
udev = "0.4"
|
||||||
url = "2.1"
|
url = "2.1"
|
||||||
#valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
|
#valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
|
||||||
walkdir = "2"
|
walkdir = "2"
|
||||||
webauthn-rs = "0.2.5"
|
|
||||||
xdg = "2.2"
|
xdg = "2.2"
|
||||||
nom = "5.1"
|
nom = "5.1"
|
||||||
crossbeam-channel = "0.5"
|
crossbeam-channel = "0.5"
|
||||||
@ -94,28 +93,58 @@ zstd = { version = "0.6", features = [ "bindgen" ] }
|
|||||||
pathpatterns = "0.1.2"
|
pathpatterns = "0.1.2"
|
||||||
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
|
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
|
||||||
|
|
||||||
proxmox = { version = "0.13.3", features = [ "sortable-macro", "api-macro", "cli", "router", "tfa" ] }
|
proxmox-http = { version = "0.6.1", features = [ "client", "http-helpers", "websocket" ] }
|
||||||
proxmox-acme-rs = "0.2.1"
|
proxmox-io = "1"
|
||||||
proxmox-apt = "0.7.0"
|
proxmox-lang = "1.1"
|
||||||
proxmox-http = { version = "0.4.0", features = [ "client", "http-helpers", "websocket" ] }
|
proxmox-router = { version = "1.2.2", features = [ "cli" ] }
|
||||||
proxmox-openid = "0.7.0"
|
proxmox-schema = { version = "1.3.1", features = [ "api-macro" ] }
|
||||||
|
proxmox-section-config = "1"
|
||||||
|
proxmox-tfa = { version = "2", features = [ "api", "api-types" ] }
|
||||||
|
proxmox-time = "1.1.2"
|
||||||
|
proxmox-uuid = "1"
|
||||||
|
proxmox-serde = "0.1"
|
||||||
|
proxmox-shared-memory = "0.2"
|
||||||
|
proxmox-sys = { version = "0.3", features = [ "sortable-macro" ] }
|
||||||
|
proxmox-compression = "0.1"
|
||||||
|
|
||||||
|
|
||||||
|
proxmox-acme-rs = "0.4"
|
||||||
|
proxmox-apt = "0.8.0"
|
||||||
|
proxmox-async = "0.4"
|
||||||
|
proxmox-openid = "0.9.0"
|
||||||
|
|
||||||
pbs-api-types = { path = "pbs-api-types" }
|
pbs-api-types = { path = "pbs-api-types" }
|
||||||
pbs-buildcfg = { path = "pbs-buildcfg" }
|
pbs-buildcfg = { path = "pbs-buildcfg" }
|
||||||
pbs-client = { path = "pbs-client" }
|
pbs-client = { path = "pbs-client" }
|
||||||
pbs-config = { path = "pbs-config" }
|
pbs-config = { path = "pbs-config" }
|
||||||
pbs-datastore = { path = "pbs-datastore" }
|
pbs-datastore = { path = "pbs-datastore" }
|
||||||
pbs-runtime = { path = "pbs-runtime" }
|
|
||||||
proxmox-rest-server = { path = "proxmox-rest-server" }
|
proxmox-rest-server = { path = "proxmox-rest-server" }
|
||||||
proxmox-systemd = { path = "proxmox-systemd" }
|
proxmox-rrd = { path = "proxmox-rrd" }
|
||||||
pbs-tools = { path = "pbs-tools" }
|
pbs-tools = { path = "pbs-tools" }
|
||||||
pbs-tape = { path = "pbs-tape" }
|
pbs-tape = { path = "pbs-tape" }
|
||||||
|
|
||||||
# Local path overrides
|
# Local path overrides
|
||||||
# NOTE: You must run `cargo update` after changing this for it to take effect!
|
# NOTE: You must run `cargo update` after changing this for it to take effect!
|
||||||
[patch.crates-io]
|
[patch.crates-io]
|
||||||
#proxmox = { path = "../proxmox/proxmox" }
|
#proxmox-acme-rs = { path = "../proxmox-acme-rs" }
|
||||||
|
#proxmox-apt = { path = "../proxmox-apt" }
|
||||||
|
#proxmox-async = { path = "../proxmox/proxmox-async" }
|
||||||
|
#proxmox-compression = { path = "../proxmox/proxmox-compression" }
|
||||||
|
#proxmox-borrow = { path = "../proxmox/proxmox-borrow" }
|
||||||
|
#proxmox-fuse = { path = "../proxmox-fuse" }
|
||||||
#proxmox-http = { path = "../proxmox/proxmox-http" }
|
#proxmox-http = { path = "../proxmox/proxmox-http" }
|
||||||
|
#proxmox-io = { path = "../proxmox/proxmox-io" }
|
||||||
|
#proxmox-lang = { path = "../proxmox/proxmox-lang" }
|
||||||
|
#proxmox-openid = { path = "../proxmox-openid-rs" }
|
||||||
|
#proxmox-router = { path = "../proxmox/proxmox-router" }
|
||||||
|
#proxmox-schema = { path = "../proxmox/proxmox-schema" }
|
||||||
|
#proxmox-section-config = { path = "../proxmox/proxmox-section-config" }
|
||||||
|
#proxmox-shared-memory = { path = "../proxmox/proxmox-shared-memory" }
|
||||||
|
#proxmox-sys = { path = "../proxmox/proxmox-sys" }
|
||||||
|
#proxmox-serde = { path = "../proxmox/proxmox-serde" }
|
||||||
|
#proxmox-tfa = { path = "../proxmox/proxmox-tfa" }
|
||||||
|
#proxmox-time = { path = "../proxmox/proxmox-time" }
|
||||||
|
#proxmox-uuid = { path = "../proxmox/proxmox-uuid" }
|
||||||
#pxar = { path = "../pxar" }
|
#pxar = { path = "../pxar" }
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
|
16
Makefile
@ -38,9 +38,8 @@ SUBCRATES := \
|
|||||||
pbs-config \
|
pbs-config \
|
||||||
pbs-datastore \
|
pbs-datastore \
|
||||||
pbs-fuse-loop \
|
pbs-fuse-loop \
|
||||||
pbs-runtime \
|
|
||||||
proxmox-rest-server \
|
proxmox-rest-server \
|
||||||
proxmox-systemd \
|
proxmox-rrd \
|
||||||
pbs-tape \
|
pbs-tape \
|
||||||
pbs-tools \
|
pbs-tools \
|
||||||
proxmox-backup-banner \
|
proxmox-backup-banner \
|
||||||
@ -171,14 +170,11 @@ cargo-build:
|
|||||||
$(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-cargo-build
|
$(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-cargo-build
|
||||||
.do-cargo-build:
|
.do-cargo-build:
|
||||||
$(CARGO) build $(CARGO_BUILD_ARGS) \
|
$(CARGO) build $(CARGO_BUILD_ARGS) \
|
||||||
--bin proxmox-backup-api \
|
|
||||||
--bin proxmox-backup-proxy \
|
|
||||||
--bin proxmox-backup-manager \
|
|
||||||
--bin docgen \
|
|
||||||
--package proxmox-backup-banner \
|
--package proxmox-backup-banner \
|
||||||
--bin proxmox-backup-banner \
|
--bin proxmox-backup-banner \
|
||||||
--package proxmox-backup-client \
|
--package proxmox-backup-client \
|
||||||
--bin proxmox-backup-client \
|
--bin proxmox-backup-client \
|
||||||
|
--bin dump-catalog-shell-cli \
|
||||||
--bin proxmox-backup-debug \
|
--bin proxmox-backup-debug \
|
||||||
--package proxmox-file-restore \
|
--package proxmox-file-restore \
|
||||||
--bin proxmox-file-restore \
|
--bin proxmox-file-restore \
|
||||||
@ -190,7 +186,10 @@ $(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-
|
|||||||
--package proxmox-restore-daemon \
|
--package proxmox-restore-daemon \
|
||||||
--bin proxmox-restore-daemon \
|
--bin proxmox-restore-daemon \
|
||||||
--package proxmox-backup \
|
--package proxmox-backup \
|
||||||
--bin dump-catalog-shell-cli \
|
--bin docgen \
|
||||||
|
--bin proxmox-backup-api \
|
||||||
|
--bin proxmox-backup-manager \
|
||||||
|
--bin proxmox-backup-proxy \
|
||||||
--bin proxmox-daily-update \
|
--bin proxmox-daily-update \
|
||||||
--bin proxmox-file-restore \
|
--bin proxmox-file-restore \
|
||||||
--bin proxmox-tape \
|
--bin proxmox-tape \
|
||||||
@ -222,9 +221,6 @@ install: $(COMPILED_BINS)
|
|||||||
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
|
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
|
||||||
$(MAKE) -C www install
|
$(MAKE) -C www install
|
||||||
$(MAKE) -C docs install
|
$(MAKE) -C docs install
|
||||||
ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
|
|
||||||
$(MAKE) test # HACK, only test now to avoid clobbering build files with wrong config
|
|
||||||
endif
|
|
||||||
|
|
||||||
.PHONY: upload
|
.PHONY: upload
|
||||||
upload: ${SERVER_DEB} ${CLIENT_DEB} ${RESTORE_DEB} ${DOC_DEB} ${DEBUG_DEB}
|
upload: ${SERVER_DEB} ${CLIENT_DEB} ${RESTORE_DEB} ${DOC_DEB} ${DEBUG_DEB}
|
||||||
|
37
README.rst
@ -1,3 +1,7 @@
|
|||||||
|
|
||||||
|
Build & Release Notes
|
||||||
|
*********************
|
||||||
|
|
||||||
``rustup`` Toolchain
|
``rustup`` Toolchain
|
||||||
====================
|
====================
|
||||||
|
|
||||||
@ -40,41 +44,44 @@ example for proxmox crate above).
|
|||||||
|
|
||||||
Build
|
Build
|
||||||
=====
|
=====
|
||||||
on Debian Buster
|
on Debian 11 Bullseye
|
||||||
|
|
||||||
Setup:
|
Setup:
|
||||||
1. # echo 'deb http://download.proxmox.com/debian/devel/ buster main' >> /etc/apt/sources.list.d/proxmox-devel.list
|
1. # echo 'deb http://download.proxmox.com/debian/devel/ bullseye main' | sudo tee /etc/apt/sources.list.d/proxmox-devel.list
|
||||||
2. # sudo wget http://download.proxmox.com/debian/proxmox-ve-release-6.x.gpg -O /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
|
2. # sudo wget https://enterprise.proxmox.com/debian/proxmox-release-bullseye.gpg -O /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
|
||||||
3. # sudo apt update
|
3. # sudo apt update
|
||||||
4. # sudo apt install devscripts debcargo clang
|
4. # sudo apt install devscripts debcargo clang
|
||||||
5. # git clone git://git.proxmox.com/git/proxmox-backup.git
|
5. # git clone git://git.proxmox.com/git/proxmox-backup.git
|
||||||
6. # sudo mk-build-deps -ir
|
6. # cd proxmox-backup; sudo mk-build-deps -ir
|
||||||
|
|
||||||
Note: 2. may be skipped if you already added the PVE or PBS package repository
|
Note: 2. may be skipped if you already added the PVE or PBS package repository
|
||||||
|
|
||||||
You are now able to build using the Makefile or cargo itself.
|
You are now able to build using the Makefile or cargo itself, e.g.::
|
||||||
|
|
||||||
|
# make deb-all
|
||||||
|
# # or for a non-package build
|
||||||
|
# cargo build --all --release
|
||||||
|
|
||||||
Design Notes
|
Design Notes
|
||||||
============
|
************
|
||||||
|
|
||||||
Here are some random thought about the software design (unless I find a better place).
|
Here are some random thought about the software design (unless I find a better place).
|
||||||
|
|
||||||
|
|
||||||
Large chunk sizes
|
Large chunk sizes
|
||||||
-----------------
|
=================
|
||||||
|
|
||||||
It is important to notice that large chunk sizes are crucial for
|
It is important to notice that large chunk sizes are crucial for performance.
|
||||||
performance. We have a multi-user system, where different people can do
|
We have a multi-user system, where different people can do different operations
|
||||||
different operations on a datastore at the same time, and most operation
|
on a datastore at the same time, and most operation involves reading a series
|
||||||
involves reading a series of chunks.
|
of chunks.
|
||||||
|
|
||||||
So what is the maximal theoretical speed we can get when reading a
|
So what is the maximal theoretical speed we can get when reading a series of
|
||||||
series of chunks? Reading a chunk sequence need the following steps:
|
chunks? Reading a chunk sequence need the following steps:
|
||||||
|
|
||||||
- seek to the first chunk start location
|
- seek to the first chunk's start location
|
||||||
- read the chunk data
|
- read the chunk data
|
||||||
- seek to the first chunk start location
|
- seek to the next chunk's start location
|
||||||
- read the chunk data
|
- read the chunk data
|
||||||
- ...
|
- ...
|
||||||
|
|
||||||
|
552
debian/changelog
vendored
@ -1,4 +1,554 @@
|
|||||||
rust-proxmox-backup (2.0.10-1) UNRELEASED; urgency=medium
|
rust-proxmox-backup (2.2.3-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* datastore: swap dirtying the datastore cache every 60s by just using the
|
||||||
|
available config digest to detect any changes accuratly when the actually
|
||||||
|
happen
|
||||||
|
|
||||||
|
* api: datastore list and datastore status: avoid opening datastore and
|
||||||
|
possibly iterating over namespace (for lesser privileged users), but
|
||||||
|
rather use the in-memory ACL tree directly to check if there's access to
|
||||||
|
any namespace below.
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Sat, 04 Jun 2022 16:30:05 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.2.2-3) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* datastore: lookup: reuse ChunkStore on stale datastore re-open
|
||||||
|
|
||||||
|
* bump tokio (async framework) dependency
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 02 Jun 2022 17:25:01 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.2.2-2) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* improvement of error handling when removing status files and locks from
|
||||||
|
jobs that were never executed.
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 01 Jun 2022 16:22:22 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.2.2-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* Revert "verify: allow '0' days for reverification", was already possible
|
||||||
|
by setting "ignore-verified" to false
|
||||||
|
|
||||||
|
* ui: datastore permissions: allow ACL path edit & query namespaces
|
||||||
|
|
||||||
|
* accessible group iter: allow NS descending with DATASTORE_READ privilege
|
||||||
|
|
||||||
|
* prune datastore: rework worker tak log
|
||||||
|
|
||||||
|
* prune datastore: support max-depth and improve priv checks
|
||||||
|
|
||||||
|
* ui: prune input: support opt-in recursive/max-depth field
|
||||||
|
|
||||||
|
* add prune job config and api, allowing one to setup a scheduled pruning
|
||||||
|
for a specific namespace only
|
||||||
|
|
||||||
|
* ui: add ui for prune jobs
|
||||||
|
|
||||||
|
* api: disable setting prune options in datastore.cfg and transform any
|
||||||
|
existing prune tasks from datastore config to new prune job config in a
|
||||||
|
post installation hook
|
||||||
|
|
||||||
|
* proxmox-tape: use correct api call for 'load-media-from-slot'
|
||||||
|
|
||||||
|
* avoid overly strict privilege restrictions for some API endpoints and
|
||||||
|
actions when using namespaces. Better support navigating the user
|
||||||
|
interface when only having Datastore.Admin on a (sub) namespace.
|
||||||
|
|
||||||
|
* include required privilege names in some permission errors
|
||||||
|
|
||||||
|
* docs: fix some typos
|
||||||
|
|
||||||
|
* api: status: include empty entry for stores with ns-only privs
|
||||||
|
|
||||||
|
* ui: datastore options: avoid breakage if rrd store ore active-ops cannot
|
||||||
|
be queried
|
||||||
|
|
||||||
|
* ui: datastore content: only mask the inner treeview, not the top bar on
|
||||||
|
error to allow a user to trigger a manual reload
|
||||||
|
|
||||||
|
* ui: system config: improve bottom margins and scroll behavior
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 01 Jun 2022 15:09:36 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.2.1-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* docs: update some screenshots and add new ones
|
||||||
|
|
||||||
|
* docs: port over certificate management chapters from Proxmox VE
|
||||||
|
|
||||||
|
* ui: datastore/Summary: correctly show the io-delay chart
|
||||||
|
|
||||||
|
* ui: sync/verify jobs: use pmxDisplayEditField to fix editing
|
||||||
|
|
||||||
|
* ui: server status: use power of two base for memory and swap
|
||||||
|
|
||||||
|
* ui: use base 10 (SI) for all storage related displays
|
||||||
|
|
||||||
|
* ui: datastore selector: show maintenance mode in selector
|
||||||
|
|
||||||
|
* docs: basic maintenance mode section
|
||||||
|
|
||||||
|
* docs: storage: refer to options
|
||||||
|
|
||||||
|
* storage: add some initial namespace docs
|
||||||
|
|
||||||
|
* ui: tape restore: fix form validation for datastore mapping
|
||||||
|
|
||||||
|
* ui: namespace selector: show picker empty text if no namespace
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 17 May 2022 13:56:50 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.2.0-2) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* client: add CLI auto-completion callbacks for ns parameters
|
||||||
|
|
||||||
|
* ui: fix setting protection in namespace
|
||||||
|
|
||||||
|
* ui: switch summary repo status to widget toolkit one
|
||||||
|
|
||||||
|
* ui: verify outdated: disallow blank and drop wrong empty text
|
||||||
|
|
||||||
|
* docs: add namespace section to sync documentation
|
||||||
|
|
||||||
|
* ui: datastore summary: add maintenance mask for offline entries
|
||||||
|
|
||||||
|
* ui: verify/sync: allow to optionally override ID again
|
||||||
|
|
||||||
|
* prune: fix workerid issues
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Mon, 16 May 2022 19:01:13 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.2.0-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* cli: improve namespace integration in proxmox-backup-client and
|
||||||
|
proxmox-tape
|
||||||
|
|
||||||
|
* docs: tape: add information about namespaces
|
||||||
|
|
||||||
|
* api: datastore status: make counts for groups and snapshots iterate over
|
||||||
|
all accessible namespaces recursively
|
||||||
|
|
||||||
|
* ui: fix storeId casing to register store correctly, so that we can query
|
||||||
|
it again for the ACL permission path selector
|
||||||
|
|
||||||
|
* ui: trigger datastore update after maintenance mode edit
|
||||||
|
|
||||||
|
* ui: namespace selector: set queryMode to local to avoid bogus background
|
||||||
|
requests on typing
|
||||||
|
|
||||||
|
* ui: sync job: fix clearing value of remote target-namespace by mistake on
|
||||||
|
edit
|
||||||
|
|
||||||
|
* ui: remote target ns selector: add clear trigger
|
||||||
|
|
||||||
|
* ui: prune group: add namespace info to title
|
||||||
|
|
||||||
|
* fix #4001: ui: add prefix to files downloaded through the pxar browser
|
||||||
|
|
||||||
|
* ui: datastore: reload content tree on successful datastore add
|
||||||
|
|
||||||
|
* ui: datastore: allow deleting currently shown namespace
|
||||||
|
|
||||||
|
* docs: rework access control, list available privileges
|
||||||
|
|
||||||
|
* docs: access control: add "Objects and Paths" section and fix
|
||||||
|
add-permission screenshot
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Mon, 16 May 2022 11:06:05 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.1.10-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* datastore: drop bogus chunk size check, can cause trouble
|
||||||
|
|
||||||
|
* pull/sync: detect remote lack of namespace support
|
||||||
|
|
||||||
|
* pull/sync: correctly query with remote-ns as parent
|
||||||
|
|
||||||
|
* ui: sync: add reduced max-depth selector
|
||||||
|
|
||||||
|
* ui: group filter: make also local filter NS aware
|
||||||
|
|
||||||
|
* api types: set NS_MAX_DEPTH schema default to MAX_NAMESPACE_DEPTH instead
|
||||||
|
of 0
|
||||||
|
|
||||||
|
* tape: notify when arriving at end of media
|
||||||
|
|
||||||
|
* tree-wide: rename 'backup-ns' API parameters to 'ns'
|
||||||
|
|
||||||
|
* tape: add namespaces/recursion depth to tape backup jobs
|
||||||
|
|
||||||
|
* api: tape/restore: add namespace mapping
|
||||||
|
|
||||||
|
* tape: bump catalog/snapshot archive magic
|
||||||
|
|
||||||
|
* ui: tape: backup overview: show namespaces as their own level above groups
|
||||||
|
|
||||||
|
* ui: tape restore: allow simple namespace mapping
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Fri, 13 May 2022 14:26:32 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.1.9-2) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* api: tape restore: lock the target datastore, not the source one
|
||||||
|
|
||||||
|
* chunk store: force write chunk again if it exists but its metadata length
|
||||||
|
is zero
|
||||||
|
|
||||||
|
* completion: fix 'group-filter' parameter name
|
||||||
|
|
||||||
|
* implement backup namespaces for datastores, allowing one to reuse a single
|
||||||
|
chunkstore deduplication domain for multiple sources without naming
|
||||||
|
conflicts and with fine-grained access control.
|
||||||
|
|
||||||
|
* make various datastore related API calls backup namespace aware
|
||||||
|
|
||||||
|
* make sync and pull backup namespace aware
|
||||||
|
|
||||||
|
* ui: datastore content: show namespaces but only one level at a time
|
||||||
|
|
||||||
|
* ui: make various datastore related UI components namespace aware
|
||||||
|
|
||||||
|
* fix various bugs, add namespace support to file-restore
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 12 May 2022 14:25:53 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.1.8-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* api: status: return gc-status again
|
||||||
|
|
||||||
|
* proxmox-backup-proxy: stop accept() loop on daemon shutdown to avoid that
|
||||||
|
new requests get accepted while the REST stack is already stopped, for
|
||||||
|
example on the reload triggered by a package upgrade.
|
||||||
|
|
||||||
|
* pull: improve filtering local removal candidates
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Mon, 02 May 2022 17:36:11 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.1.7-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* pbs-tape: sgutils2: check sense data when status is 'CHECK_CONDITION'
|
||||||
|
|
||||||
|
* rework & refactor datastore implementation for a more hierarchical access
|
||||||
|
structure
|
||||||
|
|
||||||
|
* datastore: implement Iterator for backup group and snapshot listing to
|
||||||
|
allow more efficient access for cases where we do not need the whole list
|
||||||
|
in memory
|
||||||
|
|
||||||
|
* pbs-client: extract: rewrite create_zip with sequential decoder
|
||||||
|
|
||||||
|
* pbs-client: extract: add top-level dir in tar.zst
|
||||||
|
|
||||||
|
* fix #3067: ui: add a separate notes view for longer markdown notes and
|
||||||
|
copy the markdown primer from Proxmox VE to Proxmox Backup Server docs
|
||||||
|
|
||||||
|
* restore-daemon: start disk initialization in parallel to the api
|
||||||
|
|
||||||
|
* restore-daemon: put blocking code into 'block_in_place'
|
||||||
|
|
||||||
|
* restore-daemon: avoid auto-pre-mounting zpools completely, the upfront
|
||||||
|
(time) cost can be too big to pay up initially, e.g., if there are many
|
||||||
|
subvolumes present, so only mount on demand.
|
||||||
|
|
||||||
|
* file-restore: add 'timeout' and 'json-error' parameter
|
||||||
|
|
||||||
|
* ui: add summary mask when in maintenance mode
|
||||||
|
|
||||||
|
* ui: update datastore's navigation icon and tooltip if it is in maintenance
|
||||||
|
mode
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 27 Apr 2022 19:53:53 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.1.6-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* api: verify: allow passing '0 days' for immediate re-verification
|
||||||
|
|
||||||
|
* fix #3103: node configuration: allow configuring the default UI language
|
||||||
|
|
||||||
|
* fix #3856: tape: encryption key's password hint parameter is not optional
|
||||||
|
|
||||||
|
* re-use PROXMOX_DEBUG environment variable to control log level filter
|
||||||
|
|
||||||
|
* ui: WebAuthn: fix stopping store upgrades on destroy and decrease interval
|
||||||
|
|
||||||
|
* report: add tape, traffic control and disk infos and tune output order
|
||||||
|
|
||||||
|
* fix #3853: cli/api: add force option to tape key change-passphrase
|
||||||
|
|
||||||
|
* fix #3323: cli client: add dry-run option for backup command
|
||||||
|
|
||||||
|
* tape: make iterating over chunks to backup smarter to avoid some work
|
||||||
|
|
||||||
|
* bin: daily-update: make single checks/updates fail gracefully and log
|
||||||
|
to syslog directly instead of going through stdout indirectly.
|
||||||
|
|
||||||
|
* datastore: allow turning off inode-sorting for chunk iteration. While inode
|
||||||
|
sorting benefits read-performance on block devices with higher latency
|
||||||
|
(e.g., spinning disks), it's also some extra work to get the metadata
|
||||||
|
required for sorting, so it's a trade-off. For setups that have either very
|
||||||
|
slow or very fast metadata IO the benefits may turn into a net cost.
|
||||||
|
|
||||||
|
* docs: explain retention time for event allocation policy in more detail
|
||||||
|
|
||||||
|
* docs: add tape schedule examples
|
||||||
|
|
||||||
|
* proxmox-backup-debug api: parse parameters before sending to api
|
||||||
|
|
||||||
|
* ui: fix panel height in the dashboard for three-column view mode
|
||||||
|
|
||||||
|
* fix #3934 tape owner-selector to auth-id (user OR token)
|
||||||
|
|
||||||
|
* fix #3067: api: add support for multi-line comments in the node
|
||||||
|
configuration
|
||||||
|
|
||||||
|
* pbs-client: print error when we couldn't download previous FIDX/DIDX for
|
||||||
|
incremental change tracking
|
||||||
|
|
||||||
|
* fix #3854 add command to import a key from a file (json or paper-key
|
||||||
|
format) to proxmox-tape
|
||||||
|
|
||||||
|
* improve IO access pattern for some scenarios like TFA with high user and
|
||||||
|
login count or the file-restore-for-block-backup VM's internal driver.
|
||||||
|
|
||||||
|
* pxar create: fix anchored path pattern matching when adding entries
|
||||||
|
|
||||||
|
* docs: client: file exclusion: add note about leading slash
|
||||||
|
|
||||||
|
* rest-server: add option to rotate task logs by 'max_days' instead of
|
||||||
|
'max_files'
|
||||||
|
|
||||||
|
* pbs-datastore: add active operations tracking and use it to implement a
|
||||||
|
graceful transition into the also newly added maintenance mode (read-only
|
||||||
|
or offline) for datastores. Note that the UI implementation may still show
|
||||||
|
some rough edges if a datastore is in offline mode for maintenance.
|
||||||
|
|
||||||
|
* add new streaming-response type for API call responses and enable it for
|
||||||
|
the snapshot and task-log list, which can both get rather big. This avoids
|
||||||
|
allocation of a potentially big intermediate memory buffer and thus
|
||||||
|
overall memory usage.
|
||||||
|
|
||||||
|
* pxar: accompany existing .zip download support with a tar.zst(d) one. The
|
||||||
|
tar archive supports more file types (e.g., hard links or device nodes)
|
||||||
|
and zstd allows for an efficient but still effective compression.
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 13 Apr 2022 17:00:53 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.1.5-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* tell system allocator to always use mmap for allocations >= 128 KiB to
|
||||||
|
improve reclaimability of free'd memory to the OS and reduce peak and avg.
|
||||||
|
RSS consumption
|
||||||
|
|
||||||
|
* file restore: always wait up to 25s for the file-restore-VM to have
|
||||||
|
scanned all possible filesystems in a backup. While theoretically there
|
||||||
|
are some edge cases where the tool waits less now, most common ones should
|
||||||
|
be waiting more compared to the 12s "worst" case previously.
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 26 Jan 2022 16:23:09 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.1.4-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* config: add tls ciphers to NodeConfig
|
||||||
|
|
||||||
|
* pbs-tools: improve memory foot print of LRU Cache
|
||||||
|
|
||||||
|
* update dependencies to avoid a ref-count leak in async helpers
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Fri, 21 Jan 2022 10:48:14 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.1.3-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* fix #3618: proxmox-async: zip: add conditional EFS flag to zip files to
|
||||||
|
improve non-ascii code point extraction under windows.
|
||||||
|
|
||||||
|
* OpenID Connect login: improve error message for disabled users
|
||||||
|
|
||||||
|
* ui: tape: backup job: add second tab for group-filters to add/edit window
|
||||||
|
|
||||||
|
* ui: sync job: add second tab for group-filters to add/edit window
|
||||||
|
|
||||||
|
* ui: calendar event: add once daily example and clarify the workday one
|
||||||
|
|
||||||
|
* fix #3794: api types: set backup time (since the UNIX epoch) lower limit
|
||||||
|
to 1
|
||||||
|
|
||||||
|
* ui: fix opening settings window in datastore panel
|
||||||
|
|
||||||
|
* api: zfs: create zpool with `relatime=on` flag set
|
||||||
|
|
||||||
|
* fix #3763: disable SSL/TLS renegotiation
|
||||||
|
|
||||||
|
* node config: add email-from parameter to control notification sender
|
||||||
|
address
|
||||||
|
|
||||||
|
* ui: configuration: rename the "Authentication" tab to "Other" and add a
|
||||||
|
"General" section with HTTP-proxy and email-from settings
|
||||||
|
|
||||||
|
* datastore stats: not include the unavailable `io_ticks` for ZFS
|
||||||
|
datastores
|
||||||
|
|
||||||
|
* ui: hide RRD chart for IO delay if no `io_ticks` are returned
|
||||||
|
|
||||||
|
* fix #3058: ui: improve remote edit UX by clarifying ID vs host fields
|
||||||
|
|
||||||
|
* docs: fix some minor typos
|
||||||
|
|
||||||
|
* api-types: relax nodename API schema, make it a simple regex check like in
|
||||||
|
Proxmox VE
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 12 Jan 2022 16:49:13 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.1.2-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* docs: backup-client: fix wrong reference
|
||||||
|
|
||||||
|
* docs: remotes: note that protected flags will not be synced
|
||||||
|
|
||||||
|
* sync job: correctly apply rate limit
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 23 Nov 2021 13:56:15 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.1.1-2) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* docs: update and add traffic control related screenshots
|
||||||
|
|
||||||
|
* docs: mention traffic control (bandwidth limits) for sync jobs
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Mon, 22 Nov 2021 16:07:39 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.1.1-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* fix proxmox-backup-manager sync-job list
|
||||||
|
|
||||||
|
* ui, api: sync-job: allow one to configure a rate limit
|
||||||
|
|
||||||
|
* api: snapshot list: set default for 'protected' flag
|
||||||
|
|
||||||
|
* ui: datastore content: rework rendering protection state
|
||||||
|
|
||||||
|
* docs: update traffic control docs (use HumanBytes)
|
||||||
|
|
||||||
|
* ui: traffic-control: include ipv6 in 'all' networks
|
||||||
|
|
||||||
|
* ui: traffic-control edit: add spaces between networks for more
|
||||||
|
readability
|
||||||
|
|
||||||
|
* tape: fix passing-through key-fingerprint
|
||||||
|
|
||||||
|
* avoid a bogus error regarding logrotate-path due to a reversed check
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Mon, 22 Nov 2021 12:24:31 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.1.0-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* rest server: make successful-ticket auth log a debug one to avoid
|
||||||
|
syslog spam
|
||||||
|
|
||||||
|
* traffic-controls: add API/CLI to show current traffic
|
||||||
|
|
||||||
|
* docs: add traffic control section
|
||||||
|
|
||||||
|
* ui: use TFA widgets from widget toolkit
|
||||||
|
|
||||||
|
* sync: allow pulling groups selectively
|
||||||
|
|
||||||
|
* fix #3533: tape backup: filter groups according to config
|
||||||
|
|
||||||
|
* proxmox-tape: add missing notify-user option to backup command
|
||||||
|
|
||||||
|
* openid: allow arbitrary username-claims
|
||||||
|
|
||||||
|
* openid: support configuring the prompt, scopes and ACR values
|
||||||
|
|
||||||
|
* use human-byte for traffic-control rate-in/out and burst-in/out config
|
||||||
|
|
||||||
|
* ui: add traffic control view and editor
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Sat, 20 Nov 2021 22:44:07 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.14-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* fix directory permission problems
|
||||||
|
|
||||||
|
* add traffic control configuration config with API
|
||||||
|
|
||||||
|
* proxmox-backup-proxy: implement traffic control
|
||||||
|
|
||||||
|
* proxmox-backup-client: add rate/burst parameter to backup/restore CLI
|
||||||
|
|
||||||
|
* openid_login: verify that firstname, lastname and email fit our
|
||||||
|
schema definitions
|
||||||
|
|
||||||
|
* docs: add info about protection flag to client docs
|
||||||
|
|
||||||
|
* fix #3602: ui: datastore/Content: add action to set protection status
|
||||||
|
|
||||||
|
* ui: add protected icon to snapshot (if they are protected)
|
||||||
|
|
||||||
|
* ui: PruneInputPanel: add keepReason 'protected' for protected backups
|
||||||
|
|
||||||
|
* proxmox-backup-client: add 'protected' commands
|
||||||
|
|
||||||
|
* acme: interpret no TOS as accepted
|
||||||
|
|
||||||
|
* acme: new_account: prevent replacing existing accounts
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Fri, 12 Nov 2021 08:04:55 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.13-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* tape: simplify export_media_set for pool writer
|
||||||
|
|
||||||
|
* tape: improve export_media error message for not found tape
|
||||||
|
|
||||||
|
* rest-server: use hashmap for parameter errors
|
||||||
|
|
||||||
|
* proxmox-rrd: use new file format with higher resolution
|
||||||
|
|
||||||
|
* proxmox-rrd: use a journal to reduce amount of bytes written
|
||||||
|
|
||||||
|
* use new fsync parameter to replace_file and atomic_open_or_create
|
||||||
|
|
||||||
|
* docs: language and formatting fixup
|
||||||
|
|
||||||
|
* docs: Update for new features/functionality
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 21 Oct 2021 08:17:00 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.12-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* proxmox-backup-proxy: clean up old tasks when their reference was rotated
|
||||||
|
out of the task-log index
|
||||||
|
|
||||||
|
* api daemons: fix sending log-reopen command
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 19 Oct 2021 10:48:28 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.11-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* drop artificial limits for task-UPID length
|
||||||
|
|
||||||
|
* tools: smart: only throw error for the fatal usage errors of smartctl
|
||||||
|
|
||||||
|
* api: improve returning errors for extjs formatter
|
||||||
|
|
||||||
|
* proxmox-rest-server: improve logging
|
||||||
|
|
||||||
|
* subscription: switch verification domain over to shop.proxmox.com
|
||||||
|
|
||||||
|
* rest-server/daemon: use new sd_notify_barrier helper for handling
|
||||||
|
synchronization with systemd on service reloading
|
||||||
|
|
||||||
|
* ui: datastore/Content: add empty text for no snapshots
|
||||||
|
|
||||||
|
* ui: datastore/Content: move first store-load into activate listener to
|
||||||
|
ensure we've a proper loading mask for better UX
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 05 Oct 2021 16:34:14 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.10-1) bullseye; urgency=medium
|
||||||
|
|
||||||
* ui: fix order of prune keep reasons
|
* ui: fix order of prune keep reasons
|
||||||
|
|
||||||
|
79
debian/control
vendored
@ -8,56 +8,77 @@ Build-Depends: debhelper (>= 12),
|
|||||||
libstd-rust-dev,
|
libstd-rust-dev,
|
||||||
librust-anyhow-1+default-dev,
|
librust-anyhow-1+default-dev,
|
||||||
librust-apt-pkg-native-0.3+default-dev (>= 0.3.2-~~),
|
librust-apt-pkg-native-0.3+default-dev (>= 0.3.2-~~),
|
||||||
librust-base64-0.12+default-dev,
|
librust-base64-0.13+default-dev,
|
||||||
librust-bitflags-1+default-dev (>= 1.2.1-~~),
|
librust-bitflags-1+default-dev (>= 1.2.1-~~),
|
||||||
librust-bytes-1+default-dev,
|
librust-bytes-1+default-dev,
|
||||||
|
librust-cidr-0.2+default-dev (>= 0.2.1-~~),
|
||||||
librust-crc32fast-1+default-dev,
|
librust-crc32fast-1+default-dev,
|
||||||
librust-crossbeam-channel-0.5+default-dev,
|
librust-crossbeam-channel-0.5+default-dev,
|
||||||
librust-endian-trait-0.6+arrays-dev,
|
librust-endian-trait-0.6+arrays-dev,
|
||||||
librust-endian-trait-0.6+default-dev,
|
librust-endian-trait-0.6+default-dev,
|
||||||
librust-env-logger-0.7+default-dev,
|
librust-env-logger-0.9+default-dev,
|
||||||
librust-flate2-1+default-dev,
|
librust-flate2-1+default-dev,
|
||||||
librust-foreign-types-0.3+default-dev,
|
librust-foreign-types-0.3+default-dev,
|
||||||
librust-futures-0.3+default-dev,
|
librust-futures-0.3+default-dev,
|
||||||
librust-h2-0.3+default-dev,
|
librust-h2-0.3+default-dev,
|
||||||
librust-h2-0.3+stream-dev,
|
librust-h2-0.3+stream-dev,
|
||||||
librust-handlebars-3+default-dev,
|
librust-handlebars-3+default-dev,
|
||||||
|
librust-hex-0.4+default-dev (>= 0.4.3-~~),
|
||||||
|
librust-hex-0.4+serde-dev (>= 0.4.3-~~),
|
||||||
librust-http-0.2+default-dev,
|
librust-http-0.2+default-dev,
|
||||||
librust-hyper-0.14+default-dev,
|
librust-hyper-0.14+default-dev (>= 0.14.5-~~),
|
||||||
librust-hyper-0.14+full-dev,
|
librust-hyper-0.14+full-dev (>= 0.14.5-~~),
|
||||||
librust-lazy-static-1+default-dev (>= 1.4-~~),
|
librust-lazy-static-1+default-dev (>= 1.4-~~),
|
||||||
librust-libc-0.2+default-dev,
|
librust-libc-0.2+default-dev,
|
||||||
librust-log-0.4+default-dev,
|
librust-log-0.4+default-dev (>= 0.4.17-~~) <!nocheck>,
|
||||||
librust-nix-0.19+default-dev (>= 0.19.1-~~),
|
librust-nix-0.24+default-dev,
|
||||||
librust-nom-5+default-dev (>= 5.1-~~),
|
librust-nom-5+default-dev (>= 5.1-~~),
|
||||||
librust-num-traits-0.2+default-dev,
|
librust-num-traits-0.2+default-dev,
|
||||||
librust-once-cell-1+default-dev (>= 1.3.1-~~),
|
librust-once-cell-1+default-dev (>= 1.3.1-~~),
|
||||||
librust-openssl-0.10+default-dev,
|
librust-openssl-0.10+default-dev (>= 0.10.38-~~),
|
||||||
librust-pam-0.7+default-dev,
|
librust-pam-0.7+default-dev,
|
||||||
librust-pam-sys-0.5+default-dev,
|
librust-pam-sys-0.5+default-dev,
|
||||||
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
|
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
|
||||||
librust-percent-encoding-2+default-dev (>= 2.1-~~),
|
librust-percent-encoding-2+default-dev (>= 2.1-~~),
|
||||||
librust-pin-project-lite-0.2+default-dev,
|
librust-pin-project-lite-0.2+default-dev,
|
||||||
librust-proxmox-0.13+api-macro-dev,
|
librust-proxmox-acme-rs-0.4+default-dev,
|
||||||
librust-proxmox-0.13+cli-dev,
|
librust-proxmox-apt-0.8+default-dev,
|
||||||
librust-proxmox-0.13+default-dev,
|
librust-proxmox-async-0.4+default-dev,
|
||||||
librust-proxmox-0.13+router-dev,
|
librust-proxmox-borrow-1+default-dev,
|
||||||
librust-proxmox-0.13+sortable-macro-dev,
|
librust-proxmox-compression-0.1+default-dev (>= 0.1.1-~~),
|
||||||
librust-proxmox-0.13+tfa-dev,
|
|
||||||
librust-proxmox-acme-rs-0.2+default-dev (>= 0.2.1-~~),
|
|
||||||
librust-proxmox-apt-0.7+default-dev,
|
|
||||||
librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
|
librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
|
||||||
librust-proxmox-http-0.4+client-dev,
|
librust-proxmox-http-0.6+client-dev (>= 0.6.1-~~),
|
||||||
librust-proxmox-http-0.4+default-dev ,
|
librust-proxmox-http-0.6+default-dev (>= 0.6.1-~~),
|
||||||
librust-proxmox-http-0.4+http-helpers-dev,
|
librust-proxmox-http-0.6+http-helpers-dev (>= 0.6.1-~~),
|
||||||
librust-proxmox-http-0.4+websocket-dev,
|
librust-proxmox-http-0.6+websocket-dev (>= 0.6.1-~~),
|
||||||
librust-proxmox-openid-0.7+default-dev,
|
librust-proxmox-io-1+default-dev (>= 1.0.1-~~),
|
||||||
|
librust-proxmox-io-1+tokio-dev (>= 1.0.1-~~),
|
||||||
|
librust-proxmox-lang-1+default-dev (>= 1.1-~~),
|
||||||
|
librust-proxmox-openid-0.9+default-dev,
|
||||||
|
librust-proxmox-router-1+cli-dev (>= 1.2-~~),
|
||||||
|
librust-proxmox-router-1+default-dev (>= 1.2.2-~~),
|
||||||
|
librust-proxmox-schema-1+api-macro-dev (>= 1.3.1-~~),
|
||||||
|
librust-proxmox-schema-1+default-dev (>= 1.3.1-~~),
|
||||||
|
librust-proxmox-schema-1+upid-api-impl-dev (>= 1.3.1-~~),
|
||||||
|
librust-proxmox-section-config-1+default-dev,
|
||||||
|
librust-proxmox-serde-0.1+default-dev,
|
||||||
|
librust-proxmox-shared-memory-0.2+default-dev,
|
||||||
|
librust-proxmox-sys-0.3+default-dev,
|
||||||
|
librust-proxmox-sys-0.3+logrotate-dev,
|
||||||
|
librust-proxmox-sys-0.3+sortable-macro-dev,
|
||||||
|
librust-proxmox-tfa-2+api-dev,
|
||||||
|
librust-proxmox-tfa-2+api-types-dev,
|
||||||
|
librust-proxmox-tfa-2+default-dev,
|
||||||
|
librust-proxmox-time-1+default-dev (>= 1.1.2-~~),
|
||||||
|
librust-proxmox-uuid-1+default-dev,
|
||||||
|
librust-proxmox-uuid-1+serde-dev,
|
||||||
librust-pxar-0.10+default-dev (>= 0.10.1-~~),
|
librust-pxar-0.10+default-dev (>= 0.10.1-~~),
|
||||||
librust-pxar-0.10+tokio-io-dev (>= 0.10.1-~~),
|
librust-pxar-0.10+tokio-io-dev (>= 0.10.1-~~),
|
||||||
librust-regex-1+default-dev (>= 1.2-~~),
|
librust-regex-1+default-dev (>= 1.5.5-~~),
|
||||||
librust-rustyline-7+default-dev,
|
librust-rustyline-9+default-dev,
|
||||||
librust-serde-1+default-dev,
|
librust-serde-1+default-dev,
|
||||||
librust-serde-1+derive-dev,
|
librust-serde-1+derive-dev,
|
||||||
|
librust-serde-cbor-0.11+default-dev (>= 0.11.1-~~),
|
||||||
librust-serde-json-1+default-dev,
|
librust-serde-json-1+default-dev,
|
||||||
librust-siphasher-0.3+default-dev,
|
librust-siphasher-0.3+default-dev,
|
||||||
librust-syslog-4+default-dev,
|
librust-syslog-4+default-dev,
|
||||||
@ -73,23 +94,23 @@ Build-Depends: debhelper (>= 12),
|
|||||||
librust-tokio-1+rt-dev (>= 1.6-~~),
|
librust-tokio-1+rt-dev (>= 1.6-~~),
|
||||||
librust-tokio-1+rt-multi-thread-dev (>= 1.6-~~),
|
librust-tokio-1+rt-multi-thread-dev (>= 1.6-~~),
|
||||||
librust-tokio-1+signal-dev (>= 1.6-~~),
|
librust-tokio-1+signal-dev (>= 1.6-~~),
|
||||||
|
librust-tokio-1+sync-dev (>= 1.6-~~),
|
||||||
librust-tokio-1+time-dev (>= 1.6-~~),
|
librust-tokio-1+time-dev (>= 1.6-~~),
|
||||||
librust-tokio-openssl-0.6+default-dev (>= 0.6.1-~~),
|
librust-tokio-openssl-0.6+default-dev (>= 0.6.1-~~),
|
||||||
librust-tokio-stream-0.1+default-dev,
|
librust-tokio-stream-0.1+default-dev,
|
||||||
librust-tokio-util-0.6+codec-dev,
|
librust-tokio-util-0.7+codec-dev,
|
||||||
librust-tokio-util-0.6+default-dev,
|
librust-tokio-util-0.7+default-dev,
|
||||||
librust-tokio-util-0.6+io-dev,
|
librust-tokio-util-0.7+io-dev,
|
||||||
librust-tower-service-0.3+default-dev,
|
librust-tower-service-0.3+default-dev,
|
||||||
librust-udev-0.4+default-dev | librust-udev-0.3+default-dev,
|
librust-udev-0.4+default-dev,
|
||||||
librust-url-2+default-dev (>= 2.1-~~),
|
librust-url-2+default-dev (>= 2.1-~~),
|
||||||
librust-walkdir-2+default-dev,
|
librust-walkdir-2+default-dev,
|
||||||
librust-webauthn-rs-0.2+default-dev (>= 0.2.5-~~),
|
|
||||||
librust-xdg-2+default-dev (>= 2.2-~~),
|
librust-xdg-2+default-dev (>= 2.2-~~),
|
||||||
librust-zstd-0.6+bindgen-dev,
|
librust-zstd-0.6+bindgen-dev,
|
||||||
librust-zstd-0.6+default-dev,
|
librust-zstd-0.6+default-dev,
|
||||||
libacl1-dev,
|
libacl1-dev,
|
||||||
libfuse3-dev,
|
libfuse3-dev,
|
||||||
libsystemd-dev,
|
libsystemd-dev (>= 246-~~),
|
||||||
uuid-dev,
|
uuid-dev,
|
||||||
libsgutils2-dev,
|
libsgutils2-dev,
|
||||||
bash-completion,
|
bash-completion,
|
||||||
@ -131,7 +152,7 @@ Depends: fonts-font-awesome,
|
|||||||
postfix | mail-transport-agent,
|
postfix | mail-transport-agent,
|
||||||
proxmox-backup-docs,
|
proxmox-backup-docs,
|
||||||
proxmox-mini-journalreader,
|
proxmox-mini-journalreader,
|
||||||
proxmox-widget-toolkit (>= 3.3-2),
|
proxmox-widget-toolkit (>= 3.4-3),
|
||||||
pve-xtermjs (>= 4.7.0-1),
|
pve-xtermjs (>= 4.7.0-1),
|
||||||
sg3-utils,
|
sg3-utils,
|
||||||
smartmontools,
|
smartmontools,
|
||||||
|
38
debian/postinst
vendored
@ -4,6 +4,14 @@ set -e
|
|||||||
|
|
||||||
#DEBHELPER#
|
#DEBHELPER#
|
||||||
|
|
||||||
|
update_sync_job() {
|
||||||
|
job="$1"
|
||||||
|
|
||||||
|
echo "Updating sync job '$job' to make old 'remove-vanished' default explicit.."
|
||||||
|
proxmox-backup-manager sync-job update "$job" --remove-vanished true \
|
||||||
|
|| echo "Failed, please check sync.cfg manually!"
|
||||||
|
}
|
||||||
|
|
||||||
case "$1" in
|
case "$1" in
|
||||||
configure)
|
configure)
|
||||||
# need to have user backup in the tape group
|
# need to have user backup in the tape group
|
||||||
@ -32,6 +40,36 @@ case "$1" in
|
|||||||
echo "Fixing up termproxy user id in task log..."
|
echo "Fixing up termproxy user id in task log..."
|
||||||
flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active || true
|
flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active || true
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if dpkg --compare-versions "$2" 'lt' '2.2.2~'; then
|
||||||
|
echo "moving prune schedule from datacenter config to new prune job config"
|
||||||
|
proxmox-backup-manager update-to-prune-jobs-config \
|
||||||
|
|| echo "Failed to move prune jobs, please check manually"
|
||||||
|
true
|
||||||
|
fi
|
||||||
|
|
||||||
|
if dpkg --compare-versions "$2" 'lt' '2.1.3~' && test -e /etc/proxmox-backup/sync.cfg; then
|
||||||
|
prev_job=""
|
||||||
|
|
||||||
|
# read from HERE doc because POSIX sh limitations
|
||||||
|
while read -r key value; do
|
||||||
|
if test "$key" = "sync:"; then
|
||||||
|
if test -n "$prev_job"; then
|
||||||
|
# previous job doesn't have an explicit value
|
||||||
|
update_sync_job "$prev_job"
|
||||||
|
fi
|
||||||
|
prev_job=$value
|
||||||
|
else
|
||||||
|
prev_job=""
|
||||||
|
fi
|
||||||
|
done <<EOF
|
||||||
|
$(grep -e '^sync:' -e 'remove-vanished' /etc/proxmox-backup/sync.cfg)
|
||||||
|
EOF
|
||||||
|
if test -n "$prev_job"; then
|
||||||
|
# last job doesn't have an explicit value
|
||||||
|
update_sync_job "$prev_job"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
;;
|
;;
|
||||||
|
|
||||||
|
3
debian/rules
vendored
@ -32,9 +32,6 @@ override_dh_auto_build:
|
|||||||
override_dh_missing:
|
override_dh_missing:
|
||||||
dh_missing --fail-missing
|
dh_missing --fail-missing
|
||||||
|
|
||||||
override_dh_auto_test:
|
|
||||||
# ignore here to avoid rebuilding the binaries with the wrong target
|
|
||||||
|
|
||||||
override_dh_auto_install:
|
override_dh_auto_install:
|
||||||
dh_auto_install -- \
|
dh_auto_install -- \
|
||||||
PROXY_USER=backup \
|
PROXY_USER=backup \
|
||||||
|
@ -1,31 +1,33 @@
|
|||||||
Backup Client Usage
|
Backup Client Usage
|
||||||
===================
|
===================
|
||||||
|
|
||||||
The command line client is called :command:`proxmox-backup-client`.
|
The command line client for Proxmox Backup Server is called
|
||||||
|
:command:`proxmox-backup-client`.
|
||||||
|
|
||||||
.. _client_repository:
|
.. _client_repository:
|
||||||
|
|
||||||
Backup Repository Locations
|
Backup Repository Locations
|
||||||
---------------------------
|
---------------------------
|
||||||
|
|
||||||
The client uses the following notation to specify a datastore repository
|
The client uses the following format to specify a datastore repository
|
||||||
on the backup server.
|
on the backup server (where username is specified in the form of user@realm):
|
||||||
|
|
||||||
[[username@]server[:port]:]datastore
|
[[username@]server[:port]:]datastore
|
||||||
|
|
||||||
The default value for ``username`` is ``root@pam``. If no server is specified,
|
The default value for ``username`` is ``root@pam``. If no server is specified,
|
||||||
the default is the local host (``localhost``).
|
the default is the local host (``localhost``).
|
||||||
|
|
||||||
You can specify a port if your backup server is only reachable on a different
|
You can specify a port if your backup server is only reachable on a non-default
|
||||||
port (e.g. with NAT and port forwarding).
|
port (for example, with NAT and port forwarding configurations).
|
||||||
|
|
||||||
Note that if the server is an IPv6 address, you have to write it with square
|
Note that if the server uses an IPv6 address, you have to write it with square
|
||||||
brackets (for example, `[fe80::01]`).
|
brackets (for example, `[fe80::01]`).
|
||||||
|
|
||||||
You can pass the repository with the ``--repository`` command line option, or
|
You can pass the repository with the ``--repository`` command line option, or
|
||||||
by setting the ``PBS_REPOSITORY`` environment variable.
|
by setting the ``PBS_REPOSITORY`` environment variable.
|
||||||
|
|
||||||
Here some examples of valid repositories and the real values
|
Below are some examples of valid repositories and their corresponding real
|
||||||
|
values:
|
||||||
|
|
||||||
================================ ================== ================== ===========
|
================================ ================== ================== ===========
|
||||||
Example User Host:Port Datastore
|
Example User Host:Port Datastore
|
||||||
@ -46,8 +48,8 @@ Environment Variables
|
|||||||
The default backup repository.
|
The default backup repository.
|
||||||
|
|
||||||
``PBS_PASSWORD``
|
``PBS_PASSWORD``
|
||||||
When set, this value is used for the password required for the backup server.
|
When set, this value is used as the password for the backup server.
|
||||||
You can also set this to a API token secret.
|
You can also set this to an API token secret.
|
||||||
|
|
||||||
``PBS_PASSWORD_FD``, ``PBS_PASSWORD_FILE``, ``PBS_PASSWORD_CMD``
|
``PBS_PASSWORD_FD``, ``PBS_PASSWORD_FILE``, ``PBS_PASSWORD_CMD``
|
||||||
Like ``PBS_PASSWORD``, but read data from an open file descriptor, a file
|
Like ``PBS_PASSWORD``, but read data from an open file descriptor, a file
|
||||||
@ -63,15 +65,14 @@ Environment Variables
|
|||||||
a file name or from the `stdout` of a command, respectively. The first
|
a file name or from the `stdout` of a command, respectively. The first
|
||||||
defined environment variable from the order above is preferred.
|
defined environment variable from the order above is preferred.
|
||||||
|
|
||||||
``PBS_FINGERPRINT`` When set, this value is used to verify the server
|
``PBS_FINGERPRINT``
|
||||||
certificate (only used if the system CA certificates cannot validate the
|
When set, this value is used to verify the server certificate (only used if
|
||||||
certificate).
|
the system CA certificates cannot validate the certificate).
|
||||||
|
|
||||||
|
|
||||||
.. Note:: Passwords must be valid UTF8 an may not contain
|
.. Note:: Passwords must be valid UTF-8 and may not contain newlines. For your
|
||||||
newlines. For your convienience, we just use the first line as
|
convenience, Proxmox Backup Server only uses the first line as password, so
|
||||||
password, so you can add arbitrary comments after the
|
you can add arbitrary comments after the first newline.
|
||||||
first newline.
|
|
||||||
|
|
||||||
|
|
||||||
Output Format
|
Output Format
|
||||||
@ -86,14 +87,15 @@ Creating Backups
|
|||||||
----------------
|
----------------
|
||||||
|
|
||||||
This section explains how to create a backup from within the machine. This can
|
This section explains how to create a backup from within the machine. This can
|
||||||
be a physical host, a virtual machine, or a container. Such backups may contain file
|
be a physical host, a virtual machine, or a container. Such backups may contain
|
||||||
and image archives. There are no restrictions in this case.
|
file and image archives. There are no restrictions in this case.
|
||||||
|
|
||||||
.. note:: If you want to backup virtual machines or containers on Proxmox VE, see :ref:`pve-integration`.
|
.. Note:: If you want to backup virtual machines or containers on Proxmox VE,
|
||||||
|
see :ref:`pve-integration`.
|
||||||
|
|
||||||
For the following example you need to have a backup server set up, working
|
For the following example, you need to have a backup server set up, have working
|
||||||
credentials and need to know the repository name.
|
credentials, and know the repository name.
|
||||||
In the following examples we use ``backup-server:store1``.
|
In the following examples, we use ``backup-server:store1``.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -107,32 +109,32 @@ In the following examples we use ``backup-server:store1``.
|
|||||||
Uploaded 12129 chunks in 87 seconds (564 MB/s).
|
Uploaded 12129 chunks in 87 seconds (564 MB/s).
|
||||||
End Time: 2019-12-03T10:36:29+01:00
|
End Time: 2019-12-03T10:36:29+01:00
|
||||||
|
|
||||||
This will prompt you for a password and then uploads a file archive named
|
This will prompt you for a password, then upload a file archive named
|
||||||
``root.pxar`` containing all the files in the ``/`` directory.
|
``root.pxar`` containing all the files in the ``/`` directory.
|
||||||
|
|
||||||
.. Caution:: Please note that the proxmox-backup-client does not
|
.. Caution:: Please note that proxmox-backup-client does not
|
||||||
automatically include mount points. Instead, you will see a short
|
automatically include mount points. Instead, you will see a short
|
||||||
``skip mount point`` notice for each of them. The idea is to
|
``skip mount point`` message for each of them. The idea is to
|
||||||
create a separate file archive for each mounted disk. You can
|
create a separate file archive for each mounted disk. You can
|
||||||
explicitly include them using the ``--include-dev`` option
|
explicitly include them using the ``--include-dev`` option
|
||||||
(i.e. ``--include-dev /boot/efi``). You can use this option
|
(i.e. ``--include-dev /boot/efi``). You can use this option
|
||||||
multiple times for each mount point that should be included.
|
multiple times for each mount point that should be included.
|
||||||
|
|
||||||
The ``--repository`` option can get quite long and is used by all
|
The ``--repository`` option can get quite long and is used by all commands. You
|
||||||
commands. You can avoid having to enter this value by setting the
|
can avoid having to enter this value by setting the environment variable
|
||||||
environment variable ``PBS_REPOSITORY``. Note that if you would like this to remain set
|
``PBS_REPOSITORY``. Note that if you would like this to remain set over
|
||||||
over multiple sessions, you should instead add the below line to your
|
multiple sessions, you should instead add the below line to your ``.bashrc``
|
||||||
``.bashrc`` file.
|
file.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# export PBS_REPOSITORY=backup-server:store1
|
# export PBS_REPOSITORY=backup-server:store1
|
||||||
|
|
||||||
After this you can execute all commands without specifying the ``--repository``
|
After this, you can execute all commands without having to specify the
|
||||||
option.
|
``--repository`` option.
|
||||||
|
|
||||||
One single backup is allowed to contain more than one archive. For example, if
|
A single backup is allowed to contain more than one archive. For example, if
|
||||||
you want to backup two disks mounted at ``/mnt/disk1`` and ``/mnt/disk2``:
|
you want to back up two disks mounted at ``/mnt/disk1`` and ``/mnt/disk2``:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -140,59 +142,71 @@ you want to backup two disks mounted at ``/mnt/disk1`` and ``/mnt/disk2``:
|
|||||||
|
|
||||||
This creates a backup of both disks.
|
This creates a backup of both disks.
|
||||||
|
|
||||||
The backup command takes a list of backup specifications, which
|
If you want to use a namespace for the backup target you can add the `--ns`
|
||||||
include the archive name on the server, the type of the archive, and the
|
parameter:
|
||||||
archive source at the client. The format is:
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client backup disk1.pxar:/mnt/disk1 disk2.pxar:/mnt/disk2 --ns a/b/c
|
||||||
|
|
||||||
|
The backup command takes a list of backup specifications, which include the
|
||||||
|
archive name on the server, the type of the archive, and the archive source at
|
||||||
|
the client. The format is:
|
||||||
|
|
||||||
<archive-name>.<type>:<source-path>
|
<archive-name>.<type>:<source-path>
|
||||||
|
|
||||||
Common types are ``.pxar`` for file archives, and ``.img`` for block
|
Common types are ``.pxar`` for file archives and ``.img`` for block
|
||||||
device images. To create a backup of a block device run the following command:
|
device images. To create a backup of a block device, run the following command:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-client backup mydata.img:/dev/mylvm/mydata
|
# proxmox-backup-client backup mydata.img:/dev/mylvm/mydata
|
||||||
|
|
||||||
|
|
||||||
Excluding files/folders from a backup
|
Excluding Files/Directories from a Backup
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Sometimes it is desired to exclude certain files or folders from a backup archive.
|
Sometimes it is desired to exclude certain files or directories from a backup
|
||||||
To tell the Proxmox Backup client when and how to ignore files and directories,
|
archive. To tell the Proxmox Backup client when and how to ignore files and
|
||||||
place a text file called ``.pxarexclude`` in the filesystem hierarchy.
|
directories, place a text file named ``.pxarexclude`` in the filesystem
|
||||||
Whenever the backup client encounters such a file in a directory, it interprets
|
hierarchy. Whenever the backup client encounters such a file in a directory,
|
||||||
each line as glob match patterns for files and directories that are to be excluded
|
it interprets each line as a glob match pattern for files and directories that
|
||||||
from the backup.
|
are to be excluded from the backup.
|
||||||
|
|
||||||
The file must contain a single glob pattern per line. Empty lines are ignored.
|
The file must contain a single glob pattern per line. Empty lines and lines
|
||||||
The same is true for lines starting with ``#``, which indicates a comment.
|
starting with ``#`` (indicating a comment) are ignored.
|
||||||
A ``!`` at the beginning of a line reverses the glob match pattern from an exclusion
|
A ``!`` at the beginning of a line reverses the glob match pattern from an
|
||||||
to an explicit inclusion. This makes it possible to exclude all entries in a
|
exclusion to an explicit inclusion. This makes it possible to exclude all
|
||||||
directory except for a few single files/subdirectories.
|
entries in a directory except for a few single files/subdirectories.
|
||||||
Lines ending in ``/`` match only on directories.
|
Lines ending in ``/`` match only on directories.
|
||||||
The directory containing the ``.pxarexclude`` file is considered to be the root of
|
The directory containing the ``.pxarexclude`` file is considered to be the root
|
||||||
the given patterns. It is only possible to match files in this directory and its subdirectories.
|
of the given patterns. It is only possible to match files in this directory and
|
||||||
|
its subdirectories.
|
||||||
|
|
||||||
|
.. Note:: Patterns without a leading ``/`` will also match in subdirectories,
|
||||||
|
while patterns with a leading ``/`` will only match in the current directory.
|
||||||
|
|
||||||
``\`` is used to escape special glob characters.
|
``\`` is used to escape special glob characters.
|
||||||
``?`` matches any single character.
|
``?`` matches any single character.
|
||||||
``*`` matches any character, including an empty string.
|
``*`` matches any character, including an empty string.
|
||||||
``**`` is used to match subdirectories. It can be used to, for example, exclude
|
``**`` is used to match current directory and subdirectories. For example, with
|
||||||
all files ending in ``.tmp`` within the directory or subdirectories with the
|
the pattern ``**/*.tmp``, it would exclude all files ending in ``.tmp`` within
|
||||||
following pattern ``**/*.tmp``.
|
a directory and its subdirectories.
|
||||||
``[...]`` matches a single character from any of the provided characters within
|
``[...]`` matches a single character from any of the provided characters within
|
||||||
the brackets. ``[!...]`` does the complementary and matches any single character
|
the brackets. ``[!...]`` does the complementary and matches any single
|
||||||
not contained within the brackets. It is also possible to specify ranges with two
|
character not contained within the brackets. It is also possible to specify
|
||||||
characters separated by ``-``. For example, ``[a-z]`` matches any lowercase
|
ranges with two characters separated by ``-``. For example, ``[a-z]`` matches
|
||||||
alphabetic character and ``[0-9]`` matches any one single digit.
|
any lowercase alphabetic character, and ``[0-9]`` matches any single digit.
|
||||||
|
|
||||||
The order of the glob match patterns defines whether a file is included or
|
The order of the glob match patterns defines whether a file is included or
|
||||||
excluded, that is to say later entries override previous ones.
|
excluded, that is to say, later entries override earlier ones.
|
||||||
This is also true for match patterns encountered deeper down the directory tree,
|
This is also true for match patterns encountered deeper down the directory
|
||||||
which can override a previous exclusion.
|
tree, which can override a previous exclusion.
|
||||||
Be aware that excluded directories will **not** be read by the backup client.
|
|
||||||
Thus, a ``.pxarexclude`` file in an excluded subdirectory will have no effect.
|
.. Note:: Excluded directories will **not** be read by the backup client. Thus,
|
||||||
``.pxarexclude`` files are treated as regular files and will be included in the
|
a ``.pxarexclude`` file in an excluded subdirectory will have no effect.
|
||||||
backup archive.
|
``.pxarexclude`` files are treated as regular files and will be included in
|
||||||
|
the backup archive.
|
||||||
|
|
||||||
For example, consider the following directory structure:
|
For example, consider the following directory structure:
|
||||||
|
|
||||||
@ -280,7 +294,7 @@ You can avoid entering the passwords by setting the environment
|
|||||||
variables ``PBS_PASSWORD`` and ``PBS_ENCRYPTION_PASSWORD``.
|
variables ``PBS_PASSWORD`` and ``PBS_ENCRYPTION_PASSWORD``.
|
||||||
|
|
||||||
|
|
||||||
Using a master key to store and recover encryption keys
|
Using a Master Key to Store and Recover Encryption Keys
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
You can also use ``proxmox-backup-client key`` to create an RSA public/private
|
You can also use ``proxmox-backup-client key`` to create an RSA public/private
|
||||||
@ -360,7 +374,7 @@ To set up a master key:
|
|||||||
keep keys ordered and in a place that is separate from the contents being
|
keep keys ordered and in a place that is separate from the contents being
|
||||||
backed up. It can happen, for example, that you back up an entire system, using
|
backed up. It can happen, for example, that you back up an entire system, using
|
||||||
a key on that system. If the system then becomes inaccessible for any reason
|
a key on that system. If the system then becomes inaccessible for any reason
|
||||||
and needs to be restored, this will not be possible as the encryption key will be
|
and needs to be restored, this will not be possible, as the encryption key will be
|
||||||
lost along with the broken system.
|
lost along with the broken system.
|
||||||
|
|
||||||
It is recommended that you keep your master key safe, but easily accessible, in
|
It is recommended that you keep your master key safe, but easily accessible, in
|
||||||
@ -382,10 +396,10 @@ version of your master key. The following command sends the output of the
|
|||||||
Restoring Data
|
Restoring Data
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
The regular creation of backups is a necessary step to avoiding data
|
The regular creation of backups is a necessary step in avoiding data loss. More
|
||||||
loss. More importantly, however, is the restoration. It is good practice to perform
|
importantly, however, is the restoration. It is good practice to perform
|
||||||
periodic recovery tests to ensure that you can access the data in
|
periodic recovery tests to ensure that you can access the data in case of
|
||||||
case of problems.
|
disaster.
|
||||||
|
|
||||||
First, you need to find the snapshot which you want to restore. The snapshot
|
First, you need to find the snapshot which you want to restore. The snapshot
|
||||||
list command provides a list of all the snapshots on the server:
|
list command provides a list of all the snapshots on the server:
|
||||||
@ -402,6 +416,11 @@ list command provides a list of all the snapshots on the server:
|
|||||||
├────────────────────────────────┼─────────────┼────────────────────────────────────┤
|
├────────────────────────────────┼─────────────┼────────────────────────────────────┤
|
||||||
...
|
...
|
||||||
|
|
||||||
|
|
||||||
|
.. tip:: List will by default only output the backup snapshots of the root
|
||||||
|
namespace itself. To list backups from another namespace use the ``--ns
|
||||||
|
<ns>`` option
|
||||||
|
|
||||||
You can inspect the catalog to find specific files.
|
You can inspect the catalog to find specific files.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -444,23 +463,22 @@ to use the interactive recovery shell.
|
|||||||
|
|
||||||
The interactive recovery shell is a minimal command line interface that
|
The interactive recovery shell is a minimal command line interface that
|
||||||
utilizes the metadata stored in the catalog to quickly list, navigate and
|
utilizes the metadata stored in the catalog to quickly list, navigate and
|
||||||
search files in a file archive.
|
search for files in a file archive.
|
||||||
To restore files, you can select them individually or match them with a glob
|
To restore files, you can select them individually or match them with a glob
|
||||||
pattern.
|
pattern.
|
||||||
|
|
||||||
Using the catalog for navigation reduces the overhead considerably because only
|
Using the catalog for navigation reduces the overhead considerably because only
|
||||||
the catalog needs to be downloaded and, optionally, decrypted.
|
the catalog needs to be downloaded and, optionally, decrypted.
|
||||||
The actual chunks are only accessed if the metadata in the catalog is not enough
|
The actual chunks are only accessed if the metadata in the catalog is
|
||||||
or for the actual restore.
|
insufficient or for the actual restore.
|
||||||
|
|
||||||
Similar to common UNIX shells ``cd`` and ``ls`` are the commands used to change
|
Similar to common UNIX shells, ``cd`` and ``ls`` are the commands used to change
|
||||||
working directory and list directory contents in the archive.
|
working directory and list directory contents in the archive.
|
||||||
``pwd`` shows the full path of the current working directory with respect to the
|
``pwd`` shows the full path of the current working directory with respect to the
|
||||||
archive root.
|
archive root.
|
||||||
|
|
||||||
Being able to quickly search the contents of the archive is a commonly needed feature.
|
The ability to quickly search the contents of the archive is a commonly required
|
||||||
That's where the catalog is most valuable.
|
feature. That's where the catalog is most valuable. For example:
|
||||||
For example:
|
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -471,8 +489,8 @@ For example:
|
|||||||
pxar:/ > restore-selected /target/path
|
pxar:/ > restore-selected /target/path
|
||||||
...
|
...
|
||||||
|
|
||||||
This will find and print all files ending in ``.txt`` located in ``etc/`` or a
|
This will find and print all files ending in ``.txt`` located in ``etc/`` or its
|
||||||
subdirectory and add the corresponding pattern to the list for subsequent restores.
|
subdirectories, and add the corresponding pattern to the list for subsequent restores.
|
||||||
``list-selected`` shows these patterns and ``restore-selected`` finally restores
|
``list-selected`` shows these patterns and ``restore-selected`` finally restores
|
||||||
all files in the archive matching the patterns to ``/target/path`` on the local
|
all files in the archive matching the patterns to ``/target/path`` on the local
|
||||||
host. This will scan the whole archive.
|
host. This will scan the whole archive.
|
||||||
@ -497,7 +515,7 @@ Mounting of Archives via FUSE
|
|||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
The :term:`FUSE` implementation for the pxar archive allows you to mount a
|
The :term:`FUSE` implementation for the pxar archive allows you to mount a
|
||||||
file archive as a read-only filesystem to a mountpoint on your host.
|
file archive as a read-only filesystem to a mount point on your host.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -513,7 +531,7 @@ This allows you to access the full contents of the archive in a seamless manner.
|
|||||||
load on your host, depending on the operations you perform on the mounted
|
load on your host, depending on the operations you perform on the mounted
|
||||||
filesystem.
|
filesystem.
|
||||||
|
|
||||||
To unmount the filesystem use the ``umount`` command on the mountpoint:
|
To unmount the filesystem, use the ``umount`` command on the mount point:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -522,7 +540,7 @@ To unmount the filesystem use the ``umount`` command on the mountpoint:
|
|||||||
Login and Logout
|
Login and Logout
|
||||||
----------------
|
----------------
|
||||||
|
|
||||||
The client tool prompts you to enter the logon password as soon as you
|
The client tool prompts you to enter the login password as soon as you
|
||||||
want to access the backup server. The server checks your credentials
|
want to access the backup server. The server checks your credentials
|
||||||
and responds with a ticket that is valid for two hours. The client
|
and responds with a ticket that is valid for two hours. The client
|
||||||
tool automatically stores that ticket and uses it for further requests
|
tool automatically stores that ticket and uses it for further requests
|
||||||
@ -551,7 +569,7 @@ Changing the Owner of a Backup Group
|
|||||||
By default, the owner of a backup group is the user which was used to originally
|
By default, the owner of a backup group is the user which was used to originally
|
||||||
create that backup group (or in the case of sync jobs, ``root@pam``). This
|
create that backup group (or in the case of sync jobs, ``root@pam``). This
|
||||||
means that if a user ``mike@pbs`` created a backup, another user ``john@pbs``
|
means that if a user ``mike@pbs`` created a backup, another user ``john@pbs``
|
||||||
can not be used to create backups in that same backup group. In case you want
|
can not be used to create backups in that same backup group. In case you want
|
||||||
to change the owner of a backup, you can do so with the below command, using a
|
to change the owner of a backup, you can do so with the below command, using a
|
||||||
user that has ``Datastore.Modify`` privileges on the datastore.
|
user that has ``Datastore.Modify`` privileges on the datastore.
|
||||||
|
|
||||||
@ -560,10 +578,10 @@ user that has ``Datastore.Modify`` privileges on the datastore.
|
|||||||
# proxmox-backup-client change-owner vm/103 john@pbs
|
# proxmox-backup-client change-owner vm/103 john@pbs
|
||||||
|
|
||||||
This can also be done from within the web interface, by navigating to the
|
This can also be done from within the web interface, by navigating to the
|
||||||
`Content` section of the datastore that contains the backup group and
|
`Content` section of the datastore that contains the backup group and selecting
|
||||||
selecting the user icon under the `Actions` column. Common cases for this could
|
the user icon under the `Actions` column. Common cases for this could be to
|
||||||
be to change the owner of a sync job from ``root@pam``, or to repurpose a
|
change the owner of a sync job from ``root@pam``, or to repurpose a backup
|
||||||
backup group.
|
group.
|
||||||
|
|
||||||
|
|
||||||
.. _backup-pruning:
|
.. _backup-pruning:
|
||||||
@ -571,16 +589,24 @@ backup group.
|
|||||||
Pruning and Removing Backups
|
Pruning and Removing Backups
|
||||||
----------------------------
|
----------------------------
|
||||||
|
|
||||||
You can manually delete a backup snapshot using the ``forget``
|
You can manually delete a backup snapshot using the ``forget`` command:
|
||||||
command:
|
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-client snapshot forget <snapshot>
|
# proxmox-backup-client snapshot forget <snapshot>
|
||||||
|
|
||||||
|
|
||||||
.. caution:: This command removes all archives in this backup
|
.. caution:: This command removes all archives in this backup snapshot. They
|
||||||
snapshot. They will be inaccessible and unrecoverable.
|
will be inaccessible and *unrecoverable*.
|
||||||
|
|
||||||
|
Don't forget to add the namespace ``--ns`` parameter if you want to forget a
|
||||||
|
snapshot that is not contained in the root namespace:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client snapshot forget <snapshot> --ns <ns>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Although manual removal is sometimes required, the ``prune``
|
Although manual removal is sometimes required, the ``prune``
|
||||||
@ -652,6 +678,25 @@ shows the list of existing snapshots and what actions prune would take.
|
|||||||
in the chunk-store. The chunk-store still contains the data blocks. To free
|
in the chunk-store. The chunk-store still contains the data blocks. To free
|
||||||
space you need to perform :ref:`client_garbage-collection`.
|
space you need to perform :ref:`client_garbage-collection`.
|
||||||
|
|
||||||
|
It is also possible to protect single snapshots from being pruned or deleted:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client snapshot protected update <snapshot> true
|
||||||
|
|
||||||
|
This will set the protected flag on the snapshot and prevent pruning or manual
|
||||||
|
deletion of this snapshot until the flag is removed again with:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client snapshot protected update <snapshot> false
|
||||||
|
|
||||||
|
When a group with a protected snapshot is deleted, only the non-protected
|
||||||
|
ones are removed and the group will remain.
|
||||||
|
|
||||||
|
.. note:: This flag will not be synced when using pull or sync jobs. If you
|
||||||
|
want to protect a synced snapshot, you have to manually do this again on
|
||||||
|
the target backup server.
|
||||||
|
|
||||||
.. _client_garbage-collection:
|
.. _client_garbage-collection:
|
||||||
|
|
||||||
@ -677,7 +722,7 @@ unused data blocks are removed.
|
|||||||
(access time) property. Filesystems are mounted with the ``relatime`` option
|
(access time) property. Filesystems are mounted with the ``relatime`` option
|
||||||
by default. This results in a better performance by only updating the
|
by default. This results in a better performance by only updating the
|
||||||
``atime`` property if the last access has been at least 24 hours ago. The
|
``atime`` property if the last access has been at least 24 hours ago. The
|
||||||
downside is, that touching a chunk within these 24 hours will not always
|
downside is that touching a chunk within these 24 hours will not always
|
||||||
update its ``atime`` property.
|
update its ``atime`` property.
|
||||||
|
|
||||||
Chunks in the grace period will be logged at the end of the garbage
|
Chunks in the grace period will be logged at the end of the garbage
|
||||||
@ -701,8 +746,8 @@ unused data blocks are removed.
|
|||||||
Average chunk size: 2486565
|
Average chunk size: 2486565
|
||||||
TASK OK
|
TASK OK
|
||||||
|
|
||||||
|
Garbage collection can also be scheduled using ``proxmox-backup-manager`` or
|
||||||
.. todo:: howto run garbage-collection at regular intervals (cron)
|
from the Proxmox Backup Server's web interface.
|
||||||
|
|
||||||
Benchmarking
|
Benchmarking
|
||||||
------------
|
------------
|
||||||
|
@ -1,10 +1,10 @@
|
|||||||
Backup Protocol
|
Backup Protocol
|
||||||
===============
|
===============
|
||||||
|
|
||||||
Proxmox Backup Server uses a REST based API. While the management
|
Proxmox Backup Server uses a REST-based API. While the management
|
||||||
interface use normal HTTP, the actual backup and restore interface use
|
interface uses normal HTTP, the actual backup and restore interface uses
|
||||||
HTTP/2 for improved performance. Both HTTP and HTTP/2 are well known
|
HTTP/2 for improved performance. Both HTTP and HTTP/2 are well known
|
||||||
standards, so the following section assumes that you are familiar on
|
standards, so the following section assumes that you are familiar with
|
||||||
how to use them.
|
how to use them.
|
||||||
|
|
||||||
|
|
||||||
@ -13,35 +13,35 @@ Backup Protocol API
|
|||||||
|
|
||||||
To start a new backup, the API call ``GET /api2/json/backup`` needs to
|
To start a new backup, the API call ``GET /api2/json/backup`` needs to
|
||||||
be upgraded to a HTTP/2 connection using
|
be upgraded to an HTTP/2 connection using
|
||||||
``proxmox-backup-protocol-v1`` as protocol name::
|
``proxmox-backup-protocol-v1`` as the protocol name::
|
||||||
|
|
||||||
GET /api2/json/backup HTTP/1.1
|
GET /api2/json/backup HTTP/1.1
|
||||||
UPGRADE: proxmox-backup-protocol-v1
|
UPGRADE: proxmox-backup-protocol-v1
|
||||||
|
|
||||||
The server replies with HTTP 101 Switching Protocol status code,
|
The server replies with the ``HTTP 101 Switching Protocol`` status code,
|
||||||
and you can then issue REST commands on that updated HTTP/2 connection.
|
and you can then issue REST commands on the updated HTTP/2 connection.
|
||||||
|
|
||||||
The backup protocol allows you to upload three different kind of files:
|
The backup protocol allows you to upload three different kinds of files:
|
||||||
|
|
||||||
- Chunks and blobs (binary data)
|
- Chunks and blobs (binary data)
|
||||||
|
|
||||||
- Fixed Indexes (List of chunks with fixed size)
|
- Fixed indexes (list of chunks with fixed size)
|
||||||
|
|
||||||
- Dynamic Indexes (List of chunk with variable size)
|
- Dynamic indexes (list of chunks with variable size)
|
||||||
|
|
||||||
The following section gives a short introduction how to upload such
|
The following section provides a short introduction on how to upload such
|
||||||
files. Please use the `API Viewer <api-viewer/index.html>`_ for
|
files. Please use the `API Viewer <api-viewer/index.html>`_ for
|
||||||
details about available REST commands.
|
details about the available REST commands.
|
||||||
|
|
||||||
|
|
||||||
Upload Blobs
|
Upload Blobs
|
||||||
~~~~~~~~~~~~
|
~~~~~~~~~~~~
|
||||||
|
|
||||||
Uploading blobs is done using ``POST /blob``. The HTTP body contains the
|
Blobs are uploaded using ``POST /blob``. The HTTP body contains the
|
||||||
data encoded as :ref:`Data Blob <data-blob-format>`).
|
data encoded as :ref:`Data Blob <data-blob-format>`.
|
||||||
|
|
||||||
The file name needs to end with ``.blob``, and is automatically added
|
The file name must end with ``.blob``, and is automatically added
|
||||||
to the backup manifest.
|
to the backup manifest, following the call to ``POST /finish``.
|
||||||
|
|
||||||
|
|
||||||
Upload Chunks
|
Upload Chunks
|
||||||
@ -56,40 +56,41 @@ encoded as :ref:`Data Blob <data-blob-format>`).
|
|||||||
Upload Fixed Indexes
|
Upload Fixed Indexes
|
||||||
~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Fixed indexes are use to store VM image data. The VM image is split
|
Fixed indexes are used to store VM image data. The VM image is split
|
||||||
into equally sized chunks, which are uploaded individually. The index
|
into equally sized chunks, which are uploaded individually. The index
|
||||||
file simply contains a list to chunk digests.
|
file simply contains a list of chunk digests.
|
||||||
|
|
||||||
You create a fixed index with ``POST /fixed_index``. Then upload
|
You create a fixed index with ``POST /fixed_index``. Then, upload
|
||||||
chunks with ``POST /fixed_chunk``, and append them to the index with
|
chunks with ``POST /fixed_chunk``, and append them to the index with
|
||||||
``PUT /fixed_index``. When finished, you need to close the index using
|
``PUT /fixed_index``. When finished, you need to close the index using
|
||||||
``POST /fixed_close``.
|
``POST /fixed_close``.
|
||||||
|
|
||||||
The file name needs to end with ``.fidx``, and is automatically added
|
The file name needs to end with ``.fidx``, and is automatically added
|
||||||
to the backup manifest.
|
to the backup manifest, following the call to ``POST /finish``.
|
||||||
|
|
||||||
|
|
||||||
Upload Dynamic Indexes
|
Upload Dynamic Indexes
|
||||||
~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Dynamic indexes are use to store file archive data. The archive data
|
Dynamic indexes are used to store file archive data. The archive data
|
||||||
is split into dynamically sized chunks, which are uploaded
|
is split into dynamically sized chunks, which are uploaded
|
||||||
individually. The index file simply contains a list to chunk digests
|
individually. The index file simply contains a list of chunk digests
|
||||||
and offsets.
|
and offsets.
|
||||||
|
|
||||||
You create a dynamic sized index with ``POST /dynamic_index``. Then
|
You can create a dynamically sized index with ``POST /dynamic_index``. Then,
|
||||||
upload chunks with ``POST /dynamic_chunk``, and append them to the index with
|
upload chunks with ``POST /dynamic_chunk``, and append them to the index with
|
||||||
``PUT /dynamic_index``. When finished, you need to close the index using
|
``PUT /dynamic_index``. When finished, you need to close the index using
|
||||||
``POST /dynamic_close``.
|
``POST /dynamic_close``.
|
||||||
|
|
||||||
The file name needs to end with ``.didx``, and is automatically added
|
The file name needs to end with ``.didx``, and is automatically added
|
||||||
to the backup manifest.
|
to the backup manifest, following the call to ``POST /finish``.
|
||||||
|
|
||||||
|
|
||||||
Finish Backup
|
Finish Backup
|
||||||
~~~~~~~~~~~~~
|
~~~~~~~~~~~~~
|
||||||
|
|
||||||
Once you have uploaded all data, you need to call ``POST
|
Once you have uploaded all data, you need to call ``POST /finish``. This
|
||||||
/finish``. This commits all data and ends the backup protocol.
|
commits all data and ends the backup protocol.
|
||||||
|
|
||||||
|
|
||||||
Restore/Reader Protocol API
|
Restore/Reader Protocol API
|
||||||
@ -102,39 +103,39 @@ be upgraded to a HTTP/2 connection using
|
|||||||
GET /api2/json/reader HTTP/1.1
|
GET /api2/json/reader HTTP/1.1
|
||||||
UPGRADE: proxmox-backup-reader-protocol-v1
|
UPGRADE: proxmox-backup-reader-protocol-v1
|
||||||
|
|
||||||
The server replies with HTTP 101 Switching Protocol status code,
|
The server replies with the ``HTTP 101 Switching Protocol`` status code,
|
||||||
and you can then issue REST commands on that updated HTTP/2 connection.
|
and you can then issue REST commands on that updated HTTP/2 connection.
|
||||||
|
|
||||||
The reader protocol allows you to download three different kind of files:
|
The reader protocol allows you to download three different kinds of files:
|
||||||
|
|
||||||
- Chunks and blobs (binary data)
|
- Chunks and blobs (binary data)
|
||||||
|
|
||||||
- Fixed Indexes (List of chunks with fixed size)
|
- Fixed indexes (list of chunks with fixed size)
|
||||||
|
|
||||||
- Dynamic Indexes (List of chunk with variable size)
|
- Dynamic indexes (list of chunks with variable size)
|
||||||
|
|
||||||
The following section gives a short introduction how to download such
|
The following section provides a short introduction on how to download such
|
||||||
files. Please use the `API Viewer <api-viewer/index.html>`_ for details about
|
files. Please use the `API Viewer <api-viewer/index.html>`_ for details about
|
||||||
available REST commands.
|
the available REST commands.
|
||||||
|
|
||||||
|
|
||||||
Download Blobs
|
Download Blobs
|
||||||
~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Downloading blobs is done using ``GET /download``. The HTTP body contains the
|
Blobs are downloaded using ``GET /download``. The HTTP body contains the
|
||||||
data encoded as :ref:`Data Blob <data-blob-format>`.
|
data encoded as :ref:`Data Blob <data-blob-format>`.
|
||||||
|
|
||||||
|
|
||||||
Download Chunks
|
Download Chunks
|
||||||
~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Downloading chunks is done using ``GET /chunk``. The HTTP body contains the
|
Chunks are downloaded using ``GET /chunk``. The HTTP body contains the
|
||||||
data encoded as :ref:`Data Blob <data-blob-format>`).
|
data encoded as :ref:`Data Blob <data-blob-format>`.
|
||||||
|
|
||||||
|
|
||||||
Download Index Files
|
Download Index Files
|
||||||
~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Downloading index files is done using ``GET /download``. The HTTP body
|
Index files are downloaded using ``GET /download``. The HTTP body
|
||||||
contains the data encoded as :ref:`Fixed Index <fixed-index-format>`
|
contains the data encoded as :ref:`Fixed Index <fixed-index-format>`
|
||||||
or :ref:`Dynamic Index <dynamic-index-format>`.
|
or :ref:`Dynamic Index <dynamic-index-format>`.
|
||||||
|
@ -37,7 +37,7 @@ Each field can contain multiple values in the following formats:
|
|||||||
* and a combination of the above: e.g., 01,05..10,12/02
|
* and a combination of the above: e.g., 01,05..10,12/02
|
||||||
* or a `*` for every possible value: e.g., \*:00
|
* or a `*` for every possible value: e.g., \*:00
|
||||||
|
|
||||||
There are some special values that have specific meaning:
|
There are some special values that have a specific meaning:
|
||||||
|
|
||||||
================================= ==============================
|
================================= ==============================
|
||||||
Value Syntax
|
Value Syntax
|
||||||
@ -81,19 +81,19 @@ Not all features of systemd calendar events are implemented:
|
|||||||
|
|
||||||
* no Unix timestamps (e.g. `@12345`): instead use date and time to specify
|
* no Unix timestamps (e.g. `@12345`): instead use date and time to specify
|
||||||
a specific point in time
|
a specific point in time
|
||||||
* no timezone: all schedules use the set timezone on the server
|
* no timezone: all schedules use the timezone of the server
|
||||||
* no sub-second resolution
|
* no sub-second resolution
|
||||||
* no reverse day syntax (e.g. 2020-03~01)
|
* no reverse day syntax (e.g. 2020-03~01)
|
||||||
* no repetition of ranges (e.g. 1..10/2)
|
* no repetition of ranges (e.g. 1..10/2)
|
||||||
|
|
||||||
Notes on scheduling
|
Notes on Scheduling
|
||||||
-------------------
|
-------------------
|
||||||
|
|
||||||
In `Proxmox Backup`_ scheduling for most tasks is done in the
|
In `Proxmox Backup`_, scheduling for most tasks is done in the
|
||||||
`proxmox-backup-proxy`. This daemon checks all job schedules
|
`proxmox-backup-proxy`. This daemon checks all job schedules
|
||||||
if they are due every minute. This means that even if
|
every minute, to see if any are due. This means that even though
|
||||||
`calendar events` can contain seconds, it will only be checked
|
`calendar events` can contain seconds, it will only be checked
|
||||||
once a minute.
|
once per minute.
|
||||||
|
|
||||||
Also, all schedules will be checked against the timezone set
|
Also, all schedules will be checked against the timezone set
|
||||||
in the `Proxmox Backup`_ server.
|
in the `Proxmox Backup`_ server.
|
||||||
|
333
docs/certificate-management.rst
Normal file
@ -0,0 +1,333 @@
|
|||||||
|
.. _sysadmin_certificate_management:
|
||||||
|
|
||||||
|
Certificate Management
|
||||||
|
----------------------
|
||||||
|
|
||||||
|
Access to the API and thus the web-based administration interface is always
|
||||||
|
encrypted through ``https``. Each `Proxmox Backup`_ host creates by default its
|
||||||
|
own (self-signed) certificate. This certificate is used for encrypted
|
||||||
|
communication with the host’s ``proxmox-backup-proxy`` service, for any API
|
||||||
|
call between a user or backup-client and the web-interface.
|
||||||
|
|
||||||
|
Certificate verification when sending backups to a `Proxmox Backup`_ server
|
||||||
|
is either done based on pinning the certificate fingerprints in the storage/remote
|
||||||
|
configuration, or by using certificates signed by a trusted certificate authority.
|
||||||
|
|
||||||
|
.. _sysadmin_certs_api_gui:
|
||||||
|
|
||||||
|
Certificates for the API and SMTP
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
`Proxmox Backup`_ stores its certificate and key in:
|
||||||
|
|
||||||
|
- ``/etc/proxmox-backup/proxy.pem``
|
||||||
|
|
||||||
|
- ``/etc/proxmox-backup/proxy.key``
|
||||||
|
|
||||||
|
You have the following options for the certificate:
|
||||||
|
|
||||||
|
1. Keep using the default self-signed certificate in
|
||||||
|
``/etc/proxmox-backup/proxy.pem``.
|
||||||
|
|
||||||
|
2. Use an externally provided certificate (for example, signed by a
|
||||||
|
commercial Certificate Authority (CA)).
|
||||||
|
|
||||||
|
3. Use an ACME provider like Let’s Encrypt to get a trusted certificate
|
||||||
|
with automatic renewal; this is also integrated in the `Proxmox Backup`_
|
||||||
|
API and web interface.
|
||||||
|
|
||||||
|
Certificates are managed through the `Proxmox Backup`_
|
||||||
|
web-interface/API or using the ``proxmox-backup-manager`` CLI tool.
|
||||||
|
|
||||||
|
.. _sysadmin_certs_upload_custom:
|
||||||
|
|
||||||
|
Upload Custom Certificate
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
If you already have a certificate which you want to use for a `Proxmox
|
||||||
|
Backup`_ host, you can simply upload that certificate over the web
|
||||||
|
interface.
|
||||||
|
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-certs-upload-custom.png
|
||||||
|
:align: right
|
||||||
|
:alt: Upload a custom certificate
|
||||||
|
|
||||||
|
Note that any certificate key files must not be password protected.
|
||||||
|
|
||||||
|
.. _sysadmin_certs_get_trusted_acme_cert:
|
||||||
|
|
||||||
|
Trusted certificates via Let’s Encrypt (ACME)
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
`Proxmox Backup`_ includes an implementation of the **A**\ utomatic
|
||||||
|
**C**\ ertificate **M**\ anagement **E**\ nvironment (**ACME**)
|
||||||
|
protocol, allowing `Proxmox Backup`_ admins to use an ACME provider
|
||||||
|
like Let’s Encrypt for easy setup of TLS certificates, which are
|
||||||
|
accepted and trusted by modern operating systems and web browsers out of
|
||||||
|
the box.
|
||||||
|
|
||||||
|
Currently, the two ACME endpoints implemented are the `Let’s Encrypt
|
||||||
|
(LE) <https://letsencrypt.org>`_ production and staging environments.
|
||||||
|
Our ACME client supports validation of ``http-01`` challenges using a
|
||||||
|
built-in web server and validation of ``dns-01`` challenges using a DNS
|
||||||
|
plugin supporting all the DNS API endpoints
|
||||||
|
`acme.sh <https://acme.sh>`_ does.
|
||||||
|
|
||||||
|
.. _sysadmin_certs_acme_account:
|
||||||
|
|
||||||
|
ACME Account
|
||||||
|
^^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-acme-create-account.png
|
||||||
|
:align: right
|
||||||
|
:alt: Create ACME Account
|
||||||
|
|
||||||
|
You need to register an ACME account per cluster, with the endpoint you
|
||||||
|
want to use. The email address used for that account will serve as the
|
||||||
|
contact point for renewal-due or similar notifications from the ACME
|
||||||
|
endpoint.
|
||||||
|
|
||||||
|
You can register or deactivate ACME accounts over the web interface
|
||||||
|
``Certificates -> ACME Accounts`` or using the ``proxmox-backup-manager`` command
|
||||||
|
line tool.
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
proxmox-backup-manager acme account register <account-name> <mail@example.com>
|
||||||
|
|
||||||
|
.. tip::
|
||||||
|
|
||||||
|
Because of
|
||||||
|
`rate-limits <https://letsencrypt.org/docs/rate-limits/>`_ you
|
||||||
|
should use LE ``staging`` for experiments or if you use ACME for the
|
||||||
|
very first time until all is working there, and only then switch over
|
||||||
|
to the production directory.
|
||||||
|
|
||||||
|
.. _sysadmin_certs_acme_plugins:
|
||||||
|
|
||||||
|
ACME Plugins
|
||||||
|
^^^^^^^^^^^^
|
||||||
|
|
||||||
|
The ACME plugin’s role is to provide automatic verification that you,
|
||||||
|
and thus the `Proxmox Backup`_ server under your operation, are the
|
||||||
|
real owner of a domain. This is the basic building block of automatic
|
||||||
|
certificate management.
|
||||||
|
|
||||||
|
The ACME protocol specifies different types of challenges, for example
|
||||||
|
the ``http-01``, where a web server provides a file with a specific
|
||||||
|
token to prove that it controls a domain. Sometimes this isn’t possible,
|
||||||
|
either because of technical limitations or if the address of a record is
|
||||||
|
not reachable from the public internet. The ``dns-01`` challenge can be
|
||||||
|
used in such cases. This challenge is fulfilled by creating a certain
|
||||||
|
DNS record in the domain’s zone.
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-acme-create-challenge-plugin.png
|
||||||
|
:align: right
|
||||||
|
:alt: Create ACME Account
|
||||||
|
|
||||||
|
`Proxmox Backup`_ supports both of those challenge types out of the
|
||||||
|
box. You can configure plugins either over the web interface under
|
||||||
|
``Certificates -> ACME Challenges``, or using the
|
||||||
|
``proxmox-backup-manager acme plugin add`` command.
|
||||||
|
|
||||||
|
ACME Plugin configurations are stored in ``/etc/proxmox-backup/acme/plugins.cfg``.
|
||||||
|
|
||||||
|
.. _domains:
|
||||||
|
|
||||||
|
Domains
|
||||||
|
^^^^^^^
|
||||||
|
|
||||||
|
You can add new or manage existing domain entries under
|
||||||
|
``Certificates``, or using the ``proxmox-backup-manager`` command.
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-acme-add-domain.png
|
||||||
|
:align: right
|
||||||
|
:alt: Add a Domain for ACME verification
|
||||||
|
|
||||||
|
After configuring the desired domain(s) for a node and ensuring that the
|
||||||
|
desired ACME account is selected, you can order your new certificate
|
||||||
|
over the web-interface. On success, the interface will reload after
|
||||||
|
roughly 10 seconds.
|
||||||
|
|
||||||
|
Renewal will happen `automatically <#sysadmin-certs-acme-automatic-renewal>`_.
|
||||||
|
|
||||||
|
.. _sysadmin_certs_acme_http_challenge:
|
||||||
|
|
||||||
|
ACME HTTP Challenge Plugin
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
There is always an implicitly configured ``standalone`` plugin for
|
||||||
|
validating ``http-01`` challenges via the built-in web server spawned on
|
||||||
|
port 80.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
|
||||||
|
The name ``standalone`` means that it can provide the validation on
|
||||||
|
its own, without any third party service.
|
||||||
|
|
||||||
|
There are a few prerequisites to use this for certificate management
|
||||||
|
with Let’s Encrypt’s ACME.
|
||||||
|
|
||||||
|
- You have to accept the ToS of Let’s Encrypt to register an account.
|
||||||
|
|
||||||
|
- **Port 80** of the node needs to be reachable from the internet.
|
||||||
|
|
||||||
|
- There **must** be no other listener on port 80.
|
||||||
|
|
||||||
|
- The requested (sub)domain needs to resolve to a public IP of the
|
||||||
|
`Proxmox Backup`_ host.
|
||||||
|
|
||||||
|
.. _sysadmin_certs_acme_dns_challenge:
|
||||||
|
|
||||||
|
ACME DNS API Challenge Plugin
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
On systems where external access for validation via the ``http-01``
|
||||||
|
method is not possible or desired, it is possible to use the ``dns-01``
|
||||||
|
validation method. This validation method requires a DNS server that
|
||||||
|
allows provisioning of ``TXT`` records via an API.
|
||||||
|
|
||||||
|
.. _sysadmin_certs_acme_dns_api_config:
|
||||||
|
|
||||||
|
Configuring ACME DNS APIs for validation
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
`Proxmox Backup`_ re-uses the DNS plugins developed for the
|
||||||
|
``acme.sh`` [1]_ project. Please refer to its documentation for details
|
||||||
|
on configuration of specific APIs.
|
||||||
|
|
||||||
|
The easiest way to configure a new plugin with the DNS API is using the
|
||||||
|
web interface (``Certificates -> ACME Accounts/Challenges``).
|
||||||
|
|
||||||
|
Here you can add a new challenge plugin by selecting your API provider
|
||||||
|
and entering the credential data to access your account over their API.
|
||||||
|
|
||||||
|
.. tip::
|
||||||
|
|
||||||
|
See the acme.sh `How to use DNS
|
||||||
|
API <https://github.com/acmesh-official/acme.sh/wiki/dnsapi#how-to-use-dns-api>`_
|
||||||
|
wiki for more detailed information about getting API credentials for
|
||||||
|
your provider. Configuration values do not need to be quoted with
|
||||||
|
single or double quotes; for some plugins that is even an error.
|
||||||
|
|
||||||
|
As there are many DNS providers and API endpoints, `Proxmox Backup`_
|
||||||
|
automatically generates the form for the credentials, but not all
|
||||||
|
providers are annotated yet. For those you will see a bigger text area,
|
||||||
|
into which you simply need to copy all the credential’s
|
||||||
|
``KEY``\ =\ ``VALUE`` pairs.
|
||||||
|
|
||||||
|
.. _dns_validation_through_cname_alias:
|
||||||
|
|
||||||
|
DNS Validation through CNAME Alias
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
A special ``alias`` mode can be used to handle validation on a different
|
||||||
|
domain/DNS server, in case your primary/real DNS does not support
|
||||||
|
provisioning via an API. Manually set up a permanent ``CNAME`` record
|
||||||
|
for ``_acme-challenge.domain1.example`` pointing to
|
||||||
|
``_acme-challenge.domain2.example``, and set the ``alias`` property in
|
||||||
|
the `Proxmox Backup`_ node configuration file ``/etc/proxmox-backup/node.cfg``
|
||||||
|
to ``domain2.example`` to allow the DNS server of ``domain2.example`` to
|
||||||
|
validate all challenges for ``domain1.example``.
|
||||||
|
|
||||||
|
.. _sysadmin_certs_acme_dns_wildcard:
|
||||||
|
|
||||||
|
Wildcard Certificates
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Wildcard DNS names start with a ``*.`` prefix and are considered valid
|
||||||
|
for all (one-level) subdomain names of the verified domain. So a
|
||||||
|
certificate for ``*.domain.example`` is valid for ``foo.domain.example``
|
||||||
|
and ``bar.domain.example``, but not for ``baz.foo.domain.example``.
|
||||||
|
|
||||||
|
Currently, you can only create wildcard certificates with the `DNS
|
||||||
|
challenge
|
||||||
|
type <https://letsencrypt.org/docs/challenge-types/#dns-01-challenge>`_.
|
||||||
|
|
||||||
|
.. _combination_of_plugins:
|
||||||
|
|
||||||
|
Combination of Plugins
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Combining ``http-01`` and ``dns-01`` validation is possible in case your
|
||||||
|
node is reachable via multiple domains with different requirements / DNS
|
||||||
|
provisioning capabilities. Mixing DNS APIs from multiple providers or
|
||||||
|
instances is also possible by specifying different plugin instances per
|
||||||
|
domain.
|
||||||
|
|
||||||
|
.. tip::
|
||||||
|
|
||||||
|
Accessing the same service over multiple domains increases complexity
|
||||||
|
and should be avoided if possible.
|
||||||
|
|
||||||
|
.. _sysadmin_certs_acme_automatic_renewal:
|
||||||
|
|
||||||
|
Automatic renewal of ACME certificates
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
If a node has been successfully configured with an ACME-provided
|
||||||
|
certificate (either via ``proxmox-backup-manager`` or via the web-interface/API), the
|
||||||
|
certificate will be renewed automatically by the ``proxmox-backup-daily-update.service``.
|
||||||
|
Currently, renewal is triggered if the certificate either has already
|
||||||
|
expired or if it will expire in the next 30 days.
|
||||||
|
|
||||||
|
.. _manually_change_certificate_over_command_line:
|
||||||
|
|
||||||
|
Manually Change Certificate over Command-Line
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
If you want to get rid of certificate verification warnings, you have to
|
||||||
|
generate a valid certificate for your server.
|
||||||
|
|
||||||
|
Log in to your `Proxmox Backup`_ via ssh or use the console:
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
openssl req -newkey rsa:2048 -nodes -keyout key.pem -out req.pem
|
||||||
|
|
||||||
|
Follow the instructions on the screen, for example:
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
Country Name (2 letter code) [AU]: AT
|
||||||
|
State or Province Name (full name) [Some-State]:Vienna
|
||||||
|
Locality Name (eg, city) []:Vienna
|
||||||
|
Organization Name (eg, company) [Internet Widgits Pty Ltd]: Proxmox GmbH
|
||||||
|
Organizational Unit Name (eg, section) []:Proxmox Backup
|
||||||
|
Common Name (eg, YOUR name) []: yourproxmox.yourdomain.com
|
||||||
|
Email Address []:support@yourdomain.com
|
||||||
|
|
||||||
|
Please enter the following 'extra' attributes to be sent with your certificate request
|
||||||
|
A challenge password []: not necessary
|
||||||
|
An optional company name []: not necessary
|
||||||
|
|
||||||
|
After you have finished the certificate request, you have to send the
|
||||||
|
file ``req.pem`` to your Certification Authority (CA). The CA will issue
|
||||||
|
the certificate (BASE64 encoded), based on your request – save this file
|
||||||
|
as ``cert.pem`` to your `Proxmox Backup`_.
|
||||||
|
|
||||||
|
To activate the new certificate, do the following on your `Proxmox Backup`_
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
cp key.pem /etc/proxmox-backup/proxy.key
|
||||||
|
cp cert.pem /etc/proxmox-backup/proxy.pem
|
||||||
|
|
||||||
|
Then restart the API servers:
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
systemctl restart proxmox-backup-proxy
|
||||||
|
|
||||||
|
Test your new certificate, using your browser.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
|
||||||
|
To transfer files to and from your `Proxmox Backup`_, you can use
|
||||||
|
secure copy: If your desktop runs Linux, you can use the ``scp``
|
||||||
|
command line tool. If your desktop PC runs Windows, please use an scp
|
||||||
|
client like WinSCP (see https://winscp.net/).
|
||||||
|
|
||||||
|
.. [1]
|
||||||
|
acme.sh https://github.com/acmesh-official/acme.sh
|
@ -6,22 +6,37 @@ Command Line Tools
|
|||||||
|
|
||||||
.. include:: proxmox-backup-client/description.rst
|
.. include:: proxmox-backup-client/description.rst
|
||||||
|
|
||||||
``proxmox-file-restore``
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
.. include:: proxmox-file-restore/description.rst
|
|
||||||
|
|
||||||
``proxmox-backup-manager``
|
``proxmox-backup-manager``
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
.. include:: proxmox-backup-manager/description.rst
|
.. include:: proxmox-backup-manager/description.rst
|
||||||
|
|
||||||
|
``proxmox-tape``
|
||||||
|
~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
.. include:: proxmox-tape/description.rst
|
||||||
|
|
||||||
|
``pmt``
|
||||||
|
~~~~~~~
|
||||||
|
|
||||||
|
.. include:: pmt/description.rst
|
||||||
|
|
||||||
|
``pmtx``
|
||||||
|
~~~~~~~~
|
||||||
|
|
||||||
|
.. include:: pmtx/description.rst
|
||||||
|
|
||||||
``pxar``
|
``pxar``
|
||||||
~~~~~~~~
|
~~~~~~~~
|
||||||
|
|
||||||
.. include:: pxar/description.rst
|
.. include:: pxar/description.rst
|
||||||
|
|
||||||
|
``proxmox-file-restore``
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
.. include:: proxmox-file-restore/description.rst
|
||||||
|
|
||||||
``proxmox-backup-debug``
|
``proxmox-backup-debug``
|
||||||
~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
.. include:: proxmox-backup-debug/description.rst
|
.. include:: proxmox-backup-debug/description.rst
|
||||||
|
@ -10,7 +10,7 @@ Command Syntax
|
|||||||
Catalog Shell Commands
|
Catalog Shell Commands
|
||||||
~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Those command are available when you start an interactive restore shell:
|
The following commands are available in an interactive restore shell:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -51,3 +51,13 @@ Those command are available when you start an interactive restore shell:
|
|||||||
--------
|
--------
|
||||||
|
|
||||||
.. include:: pxar/synopsis.rst
|
.. include:: pxar/synopsis.rst
|
||||||
|
|
||||||
|
|
||||||
|
``proxmox-file-restore``
|
||||||
|
------------------------
|
||||||
|
.. include:: proxmox-file-restore/synopsis.rst
|
||||||
|
|
||||||
|
|
||||||
|
``proxmox-backup-debug``
|
||||||
|
------------------------
|
||||||
|
.. include:: proxmox-backup-debug/synopsis.rst
|
||||||
|
@ -77,7 +77,7 @@ project = 'Proxmox Backup'
|
|||||||
copyright = '2019-2021, Proxmox Server Solutions GmbH'
|
copyright = '2019-2021, Proxmox Server Solutions GmbH'
|
||||||
author = 'Proxmox Support Team'
|
author = 'Proxmox Support Team'
|
||||||
|
|
||||||
# The version info for the project you're documenting, acts as replacement for
|
# The version info for the project you're documenting, acts as a replacement for
|
||||||
# |version| and |release|, also used in various other places throughout the
|
# |version| and |release|, also used in various other places throughout the
|
||||||
# built documents.
|
# built documents.
|
||||||
#
|
#
|
||||||
@ -108,11 +108,14 @@ today_fmt = '%A, %d %B %Y'
|
|||||||
exclude_patterns = [
|
exclude_patterns = [
|
||||||
'_build', 'Thumbs.db', '.DS_Store',
|
'_build', 'Thumbs.db', '.DS_Store',
|
||||||
'*/man1.rst',
|
'*/man1.rst',
|
||||||
|
'certificate-management.rst',
|
||||||
'config/*/man5.rst',
|
'config/*/man5.rst',
|
||||||
'epilog.rst',
|
'epilog.rst',
|
||||||
'pbs-copyright.rst',
|
'pbs-copyright.rst',
|
||||||
'local-zfs.rst'
|
'local-zfs.rst',
|
||||||
'package-repositories.rst',
|
'package-repositories.rst',
|
||||||
|
'system-booting.rst',
|
||||||
|
'traffic-control.rst',
|
||||||
]
|
]
|
||||||
|
|
||||||
# The reST default role (used for this markup: `text`) to use for all
|
# The reST default role (used for this markup: `text`) to use for all
|
||||||
|
@ -2,13 +2,13 @@ This file contains the access control list for the Proxmox Backup
|
|||||||
Server API.
|
Server API.
|
||||||
|
|
||||||
Each line starts with ``acl:``, followed by 4 additional values
|
Each line starts with ``acl:``, followed by 4 additional values
|
||||||
separated by collon.
|
separated by colons.
|
||||||
|
|
||||||
:propagate: Propagate permissions down the hierachrchy
|
:propagate: Propagate permissions down the hierarchy
|
||||||
|
|
||||||
:path: The object path
|
:path: The object path
|
||||||
|
|
||||||
:User/Token: List of users and token
|
:User/Token: List of users and tokens
|
||||||
|
|
||||||
:Role: List of assigned roles
|
:Role: List of assigned roles
|
||||||
|
|
||||||
|
@ -1,9 +1,9 @@
|
|||||||
The file contains a list of datastore configuration sections. Each
|
This file contains a list of datastore configuration sections. Each
|
||||||
section starts with a header ``datastore: <name>``, followed by the
|
section starts with the header ``datastore: <name>``, followed by the
|
||||||
datastore configuration options.
|
datastore configuration options.
|
||||||
|
|
||||||
::
|
::
|
||||||
|
|
||||||
datastore: <name1>
|
datastore: <name1>
|
||||||
path <path1>
|
path <path1>
|
||||||
<option1> <value1>
|
<option1> <value1>
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
Each entry starts with a header ``pool: <name>``, followed by the
|
Each entry starts with the header ``pool: <name>``, followed by the
|
||||||
media pool configuration options.
|
media pool configuration options.
|
||||||
|
|
||||||
::
|
::
|
||||||
@ -8,6 +8,6 @@ media pool configuration options.
|
|||||||
retention overwrite
|
retention overwrite
|
||||||
|
|
||||||
pool: ...
|
pool: ...
|
||||||
|
|
||||||
|
|
||||||
You can use the ``proxmox-tape pool`` command to manipulate this file.
|
You can use the ``proxmox-tape pool`` command to manipulate this file.
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
This file contains information used to access remote servers.
|
This file contains information used to access remote servers.
|
||||||
|
|
||||||
Each entry starts with a header ``remote: <name>``, followed by the
|
Each entry starts with the header ``remote: <name>``, followed by the
|
||||||
remote configuration options.
|
remote configuration options.
|
||||||
|
|
||||||
::
|
::
|
||||||
@ -11,7 +11,7 @@ remote configuration options.
|
|||||||
...
|
...
|
||||||
|
|
||||||
remote: ...
|
remote: ...
|
||||||
|
|
||||||
|
|
||||||
You can use the ``proxmox-backup-manager remote`` command to manipulate
|
You can use the ``proxmox-backup-manager remote`` command to manipulate
|
||||||
this file.
|
this file.
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
Each entry starts with a header ``sync: <name>``, followed by the
|
Each entry starts with the header ``sync: <name>``, followed by the
|
||||||
job configuration options.
|
job configuration options.
|
||||||
|
|
||||||
::
|
::
|
||||||
@ -9,7 +9,7 @@ job configuration options.
|
|||||||
remote lina
|
remote lina
|
||||||
|
|
||||||
sync: ...
|
sync: ...
|
||||||
|
|
||||||
|
|
||||||
You can use the ``proxmox-backup-manager sync-job`` command to manipulate
|
You can use the ``proxmox-backup-manager sync-job`` command to manipulate
|
||||||
this file.
|
this file.
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
Each entry starts with a header ``backup: <name>``, followed by the
|
Each entry starts with the header ``backup: <name>``, followed by the
|
||||||
job configuration options.
|
job configuration options.
|
||||||
|
|
||||||
::
|
::
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
Each LTO drive configuration section starts with a header ``lto: <name>``,
|
Each LTO drive configuration section starts with the header ``lto: <name>``,
|
||||||
followed by the drive configuration options.
|
followed by the drive configuration options.
|
||||||
|
|
||||||
Tape changer configurations starts with ``changer: <name>``,
|
Tape changer configurations start with the header ``changer: <name>``,
|
||||||
followed by the changer configuration options.
|
followed by the changer configuration options.
|
||||||
|
|
||||||
::
|
::
|
||||||
@ -18,5 +18,5 @@ followed by the changer configuration options.
|
|||||||
You can use the ``proxmox-tape drive`` and ``proxmox-tape changer``
|
You can use the ``proxmox-tape drive`` and ``proxmox-tape changer``
|
||||||
commands to manipulate this file.
|
commands to manipulate this file.
|
||||||
|
|
||||||
.. NOTE:: The ``virtual:`` drive type is experimental and onyl used
|
.. NOTE:: The ``virtual:`` drive type is experimental and should only be used
|
||||||
for debugging.
|
for debugging.
|
||||||
|
@ -1,9 +1,9 @@
|
|||||||
This file contains the list of API users and API tokens.
|
This file contains the list of API users and API tokens.
|
||||||
|
|
||||||
Each user configuration section starts with a header ``user: <name>``,
|
Each user configuration section starts with the header ``user: <name>``,
|
||||||
followed by the user configuration options.
|
followed by the user configuration options.
|
||||||
|
|
||||||
API token configuration starts with a header ``token:
|
API token configuration starts with the header ``token:
|
||||||
<userid!token_name>``, followed by the token configuration. The data
|
<userid!token_name>``, followed by the token configuration. The data
|
||||||
used to authenticate tokens is stored in a separate file
|
used to authenticate tokens is stored in a separate file
|
||||||
(``token.shadow``).
|
(``token.shadow``).
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
Each entry starts with a header ``verification: <name>``, followed by the
|
Each entry starts with the header ``verification: <name>``, followed by the
|
||||||
job configuration options.
|
job configuration options.
|
||||||
|
|
||||||
::
|
::
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
Configuration Files
|
Configuration Files
|
||||||
===================
|
===================
|
||||||
|
|
||||||
All Proxmox Backup Server configuration files resides inside directory
|
All Proxmox Backup Server configuration files reside in the directory
|
||||||
``/etc/proxmox-backup/``.
|
``/etc/proxmox-backup/``.
|
||||||
|
|
||||||
|
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
.. Epilog (included at top of each file)
|
.. Epilog (included at top of each file)
|
||||||
|
|
||||||
We use this file to define external links and common replacement
|
We use this file to define external links and common replacement
|
||||||
patterns.
|
patterns.
|
||||||
|
|
||||||
@ -13,7 +13,6 @@
|
|||||||
.. _Proxmox: https://www.proxmox.com
|
.. _Proxmox: https://www.proxmox.com
|
||||||
.. _Proxmox Community Forum: https://forum.proxmox.com
|
.. _Proxmox Community Forum: https://forum.proxmox.com
|
||||||
.. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
|
.. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
|
||||||
.. FIXME
|
|
||||||
.. _Proxmox Backup: https://pbs.proxmox.com/wiki/index.php/Main_Page
|
.. _Proxmox Backup: https://pbs.proxmox.com/wiki/index.php/Main_Page
|
||||||
.. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
|
.. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
|
||||||
.. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
|
.. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
|
||||||
@ -23,6 +22,7 @@
|
|||||||
.. _Virtual machine: https://en.wikipedia.org/wiki/Virtual_machine
|
.. _Virtual machine: https://en.wikipedia.org/wiki/Virtual_machine
|
||||||
.. _APT: http://en.wikipedia.org/wiki/Advanced_Packaging_Tool
|
.. _APT: http://en.wikipedia.org/wiki/Advanced_Packaging_Tool
|
||||||
.. _QEMU: https://www.qemu.org/
|
.. _QEMU: https://www.qemu.org/
|
||||||
|
.. _LXC: https://linuxcontainers.org/lxc/introduction/
|
||||||
|
|
||||||
.. _Client-server model: https://en.wikipedia.org/wiki/Client-server_model
|
.. _Client-server model: https://en.wikipedia.org/wiki/Client-server_model
|
||||||
.. _AE: https://en.wikipedia.org/wiki/Authenticated_encryption
|
.. _AE: https://en.wikipedia.org/wiki/Authenticated_encryption
|
||||||
@ -35,7 +35,7 @@
|
|||||||
.. _ZFS: https://en.wikipedia.org/wiki/ZFS
|
.. _ZFS: https://en.wikipedia.org/wiki/ZFS
|
||||||
.. _Proxmox VE: https://pve.proxmox.com
|
.. _Proxmox VE: https://pve.proxmox.com
|
||||||
|
|
||||||
.. _RFC3399: https://tools.ietf.org/html/rfc3339
|
.. _RFC3339: https://tools.ietf.org/html/rfc3339
|
||||||
.. _UTC: https://en.wikipedia.org/wiki/Coordinated_Universal_Time
|
.. _UTC: https://en.wikipedia.org/wiki/Coordinated_Universal_Time
|
||||||
.. _ISO Week date: https://en.wikipedia.org/wiki/ISO_week_date
|
.. _ISO Week date: https://en.wikipedia.org/wiki/ISO_week_date
|
||||||
|
|
||||||
|
@ -29,7 +29,7 @@ How long will my Proxmox Backup Server version be supported?
|
|||||||
+=======================+======================+===============+============+====================+
|
+=======================+======================+===============+============+====================+
|
||||||
|Proxmox Backup 2.x | Debian 11 (Bullseye) | 2021-07 | tba | tba |
|
|Proxmox Backup 2.x | Debian 11 (Bullseye) | 2021-07 | tba | tba |
|
||||||
+-----------------------+----------------------+---------------+------------+--------------------+
|
+-----------------------+----------------------+---------------+------------+--------------------+
|
||||||
|Proxmox Backup 1.x | Debian 10 (Buster) | 2020-11 | ~Q2/2022 | Q2-Q3/2022 |
|
|Proxmox Backup 1.x | Debian 10 (Buster) | 2020-11 | 2022-08 | 2022-07 |
|
||||||
+-----------------------+----------------------+---------------+------------+--------------------+
|
+-----------------------+----------------------+---------------+------------+--------------------+
|
||||||
|
|
||||||
|
|
||||||
@ -69,6 +69,6 @@ be able to read the data.
|
|||||||
Is the backup incremental/deduplicated?
|
Is the backup incremental/deduplicated?
|
||||||
---------------------------------------
|
---------------------------------------
|
||||||
|
|
||||||
With Proxmox Backup Server, backups are sent incremental and data is
|
With Proxmox Backup Server, backups are sent incrementally to the server, and
|
||||||
deduplicated on the server.
|
data is then deduplicated on the server. This minimizes both the storage
|
||||||
This minimizes both the storage consumed and the network impact.
|
consumed and the impact on the network.
|
||||||
|
@ -14,7 +14,8 @@ Proxmox File Archive Format (``.pxar``)
|
|||||||
Data Blob Format (``.blob``)
|
Data Blob Format (``.blob``)
|
||||||
----------------------------
|
----------------------------
|
||||||
|
|
||||||
The data blob format is used to store small binary data. The magic number decides the exact format:
|
The data blob format is used to store small binary data. The magic number
|
||||||
|
decides the exact format:
|
||||||
|
|
||||||
.. list-table::
|
.. list-table::
|
||||||
:widths: auto
|
:widths: auto
|
||||||
@ -32,7 +33,8 @@ The data blob format is used to store small binary data. The magic number decide
|
|||||||
- encrypted
|
- encrypted
|
||||||
- compressed
|
- compressed
|
||||||
|
|
||||||
Compression algorithm is ``zstd``. Encryption cipher is ``AES_256_GCM``.
|
The compression algorithm used is ``zstd``. The encryption cipher is
|
||||||
|
``AES_256_GCM``.
|
||||||
|
|
||||||
Unencrypted blobs use the following format:
|
Unencrypted blobs use the following format:
|
||||||
|
|
||||||
@ -43,9 +45,9 @@ Unencrypted blobs use the following format:
|
|||||||
* - ``CRC32: [u8; 4]``
|
* - ``CRC32: [u8; 4]``
|
||||||
* - ``Data: (max 16MiB)``
|
* - ``Data: (max 16MiB)``
|
||||||
|
|
||||||
Encrypted blobs additionally contains a 16 byte IV, followed by a 16
|
Encrypted blobs additionally contain a 16 byte initialization vector (IV),
|
||||||
byte Authenticated Encyryption (AE) tag, followed by the encrypted
|
followed by a 16 byte authenticated encryption (AE) tag, followed by the
|
||||||
data:
|
encrypted data:
|
||||||
|
|
||||||
.. list-table::
|
.. list-table::
|
||||||
|
|
||||||
@ -72,19 +74,19 @@ All numbers are stored as little-endian.
|
|||||||
* - ``ctime: i64``,
|
* - ``ctime: i64``,
|
||||||
- Creation Time (epoch)
|
- Creation Time (epoch)
|
||||||
* - ``index_csum: [u8; 32]``,
|
* - ``index_csum: [u8; 32]``,
|
||||||
- Sha256 over the index (without header) ``SHA256(digest1||digest2||...)``
|
- SHA-256 over the index (without header) ``SHA256(digest1||digest2||...)``
|
||||||
* - ``size: u64``,
|
* - ``size: u64``,
|
||||||
- Image size
|
- Image size
|
||||||
* - ``chunk_size: u64``,
|
* - ``chunk_size: u64``,
|
||||||
- Chunk size
|
- Chunk size
|
||||||
* - ``reserved: [u8; 4016]``,
|
* - ``reserved: [u8; 4016]``,
|
||||||
- overall header size is one page (4096 bytes)
|
- Overall header size is one page (4096 bytes)
|
||||||
* - ``digest1: [u8; 32]``
|
* - ``digest1: [u8; 32]``
|
||||||
- first chunk digest
|
- First chunk digest
|
||||||
* - ``digest2: [u8; 32]``
|
* - ``digest2: [u8; 32]``
|
||||||
- next chunk
|
- Second chunk digest
|
||||||
* - ...
|
* - ...
|
||||||
- next chunk ...
|
- Next chunk digest ...
|
||||||
|
|
||||||
|
|
||||||
.. _dynamic-index-format:
|
.. _dynamic-index-format:
|
||||||
@ -103,16 +105,16 @@ All numbers are stored as little-endian.
|
|||||||
* - ``ctime: i64``,
|
* - ``ctime: i64``,
|
||||||
- Creation Time (epoch)
|
- Creation Time (epoch)
|
||||||
* - ``index_csum: [u8; 32]``,
|
* - ``index_csum: [u8; 32]``,
|
||||||
- Sha256 over the index (without header) ``SHA256(offset1||digest1||offset2||digest2||...)``
|
- SHA-256 over the index (without header) ``SHA256(offset1||digest1||offset2||digest2||...)``
|
||||||
* - ``reserved: [u8; 4032]``,
|
* - ``reserved: [u8; 4032]``,
|
||||||
- Overall header size is one page (4096 bytes)
|
- Overall header size is one page (4096 bytes)
|
||||||
* - ``offset1: u64``
|
* - ``offset1: u64``
|
||||||
- End of first chunk
|
- End of first chunk
|
||||||
* - ``digest1: [u8; 32]``
|
* - ``digest1: [u8; 32]``
|
||||||
- first chunk digest
|
- First chunk digest
|
||||||
* - ``offset2: u64``
|
* - ``offset2: u64``
|
||||||
- End of second chunk
|
- End of second chunk
|
||||||
* - ``digest2: [u8; 32]``
|
* - ``digest2: [u8; 32]``
|
||||||
- second chunk digest
|
- Second chunk digest
|
||||||
* - ...
|
* - ...
|
||||||
- next chunk offset/digest
|
- Next chunk offset/digest
|
||||||
|
@ -11,7 +11,7 @@ Glossary
|
|||||||
`Container`_
|
`Container`_
|
||||||
|
|
||||||
A container is an isolated user space. Programs run directly on
|
A container is an isolated user space. Programs run directly on
|
||||||
the host's kernel, but with limited access to the host resources.
|
the host's kernel, but with limited access to the host's resources.
|
||||||
|
|
||||||
Datastore
|
Datastore
|
||||||
|
|
||||||
@ -23,19 +23,19 @@ Glossary
|
|||||||
Rust is a new, fast and memory-efficient system programming
|
Rust is a new, fast and memory-efficient system programming
|
||||||
language. It has no runtime or garbage collector. Rust’s rich type
|
language. It has no runtime or garbage collector. Rust’s rich type
|
||||||
system and ownership model guarantee memory-safety and
|
system and ownership model guarantee memory-safety and
|
||||||
thread-safety. I can eliminate many classes of bugs
|
thread-safety. This can eliminate many classes of bugs
|
||||||
at compile-time.
|
at compile-time.
|
||||||
|
|
||||||
`Sphinx`_
|
`Sphinx`_
|
||||||
|
|
||||||
Is a tool that makes it easy to create intelligent and
|
Is a tool that makes it easy to create intelligent and nicely formatted
|
||||||
beautiful documentation. It was originally created for the
|
documentation. It was originally created for the documentation of the
|
||||||
documentation of the Python programming language. It has excellent facilities for the
|
Python programming language. It has excellent facilities for the
|
||||||
documentation of software projects in a range of languages.
|
documentation of software projects in a range of languages.
|
||||||
|
|
||||||
`reStructuredText`_
|
`reStructuredText`_
|
||||||
|
|
||||||
Is an easy-to-read, what-you-see-is-what-you-get plaintext
|
Is an easy-to-read, what-you-see-is-what-you-get, plaintext
|
||||||
markup syntax and parser system.
|
markup syntax and parser system.
|
||||||
|
|
||||||
`FUSE`
|
`FUSE`
|
||||||
|
54
docs/gui.rst
@ -8,8 +8,9 @@ tools. The web interface also provides a built-in console, so if you prefer the
|
|||||||
command line or need some extra control, you have this option.
|
command line or need some extra control, you have this option.
|
||||||
|
|
||||||
The web interface can be accessed via https://youripaddress:8007. The default
|
The web interface can be accessed via https://youripaddress:8007. The default
|
||||||
login is `root`, and the password is the one specified during the installation
|
login is `root`, and the password is either the one specified during the
|
||||||
process.
|
installation process or the password of the root user, in case of installation
|
||||||
|
on top of Debian.
|
||||||
|
|
||||||
|
|
||||||
Features
|
Features
|
||||||
@ -48,12 +49,13 @@ GUI Overview
|
|||||||
|
|
||||||
The Proxmox Backup Server web interface consists of 3 main sections:
|
The Proxmox Backup Server web interface consists of 3 main sections:
|
||||||
|
|
||||||
* **Header**: At the top. This shows version information, and contains buttons to view
|
* **Header**: At the top. This shows version information and contains buttons to
|
||||||
documentation, monitor running tasks, set the language and logout.
|
view documentation, monitor running tasks, set the language, configure various
|
||||||
* **Sidebar**: On the left. This contains the configuration options for
|
display settings, and logout.
|
||||||
|
* **Sidebar**: On the left. This contains the administration options for
|
||||||
the server.
|
the server.
|
||||||
* **Configuration Panel**: In the center. This contains the control interface for the
|
* **Configuration Panel**: In the center. This contains the respective control
|
||||||
configuration options in the *Sidebar*.
|
interfaces for the administration options in the *Sidebar*.
|
||||||
|
|
||||||
|
|
||||||
Sidebar
|
Sidebar
|
||||||
@ -74,12 +76,14 @@ previous and currently running tasks, and subscription information.
|
|||||||
Configuration
|
Configuration
|
||||||
^^^^^^^^^^^^^
|
^^^^^^^^^^^^^
|
||||||
|
|
||||||
The Configuration section contains some system configuration options, such as
|
The Configuration section contains some system options, such as time, network,
|
||||||
time and network configuration. It also contains the following subsections:
|
WebAuthn, and HTTP proxy configuration. It also contains the following
|
||||||
|
subsections:
|
||||||
|
|
||||||
* **Access Control**: Add and manage users, API tokens, and the permissions
|
* **Access Control**: Add and manage users, API tokens, and the permissions
|
||||||
associated with these items
|
associated with these items
|
||||||
* **Remotes**: Add, edit and remove remotes (see :term:`Remote`)
|
* **Remotes**: Add, edit and remove remotes (see :term:`Remote`)
|
||||||
|
* **Certificates**: Manage ACME accounts and create SSL certificates.
|
||||||
* **Subscription**: Upload a subscription key, view subscription status and
|
* **Subscription**: Upload a subscription key, view subscription status and
|
||||||
access a text-based system report.
|
access a text-based system report.
|
||||||
|
|
||||||
@ -98,6 +102,7 @@ tasks and information. These are:
|
|||||||
resource usage statistics
|
resource usage statistics
|
||||||
* **Services**: Manage and monitor system services
|
* **Services**: Manage and monitor system services
|
||||||
* **Updates**: An interface for upgrading packages
|
* **Updates**: An interface for upgrading packages
|
||||||
|
* **Repositories**: An interface for configuring APT repositories
|
||||||
* **Syslog**: View log messages from the server
|
* **Syslog**: View log messages from the server
|
||||||
* **Tasks**: Task history with multiple filter options
|
* **Tasks**: Task history with multiple filter options
|
||||||
|
|
||||||
@ -110,7 +115,7 @@ The administration menu item also contains a disk management subsection:
|
|||||||
* **Disks**: View information on available disks
|
* **Disks**: View information on available disks
|
||||||
|
|
||||||
* **Directory**: Create and view information on *ext4* and *xfs* disks
|
* **Directory**: Create and view information on *ext4* and *xfs* disks
|
||||||
* **ZFS**: Create and view information on *ZFS* disks
|
* **ZFS**: Create and view information on *ZFS* disks
|
||||||
|
|
||||||
Tape Backup
|
Tape Backup
|
||||||
^^^^^^^^^^^
|
^^^^^^^^^^^
|
||||||
@ -119,11 +124,20 @@ Tape Backup
|
|||||||
:align: right
|
:align: right
|
||||||
:alt: Tape Backup: Tape changer overview
|
:alt: Tape Backup: Tape changer overview
|
||||||
|
|
||||||
The `Tape Backup`_ section contains a top panel, managing tape media sets,
|
The `Tape Backup`_ section contains a top panel, with options for managing tape
|
||||||
inventories, drives, changers and the tape backup jobs itself.
|
media sets, inventories, drives, changers, encryption keys, and the tape backup
|
||||||
|
jobs itself. The tabs are as follows:
|
||||||
|
|
||||||
It also contains a subsection per standalone drive and per changer, with a
|
* **Content**: Information on the contents of the tape backup
|
||||||
status and management view for those devices.
|
* **Inventory**: Manage the tapes attached to the system
|
||||||
|
* **Changers**: Manage tape loading devices
|
||||||
|
* **Drives**: Manage drives used for reading and writing to tapes
|
||||||
|
* **Media Pools**: Manage logical pools of tapes
|
||||||
|
* **Encryption Keys**: Manage tape backup encryption keys
|
||||||
|
* **Backup Jobs**: Manage tape backup jobs
|
||||||
|
|
||||||
|
The section also contains a subsection per standalone drive and per changer,
|
||||||
|
with a status and management view for those devices.
|
||||||
|
|
||||||
Datastore
|
Datastore
|
||||||
^^^^^^^^^
|
^^^^^^^^^
|
||||||
@ -133,9 +147,9 @@ Datastore
|
|||||||
:alt: Datastore Configuration
|
:alt: Datastore Configuration
|
||||||
|
|
||||||
The Datastore section contains interfaces for creating and managing
|
The Datastore section contains interfaces for creating and managing
|
||||||
datastores. It contains a button to create a new datastore on the server, as
|
datastores. It also contains a button for creating a new datastore on the
|
||||||
well as a subsection for each datastore on the system, in which you can use the
|
server, as well as a subsection for each datastore on the system, in which you
|
||||||
top panel to view:
|
can use the top panel to view:
|
||||||
|
|
||||||
* **Summary**: Access a range of datastore usage statistics
|
* **Summary**: Access a range of datastore usage statistics
|
||||||
* **Content**: Information on the datastore's backup groups and their respective
|
* **Content**: Information on the datastore's backup groups and their respective
|
||||||
@ -144,5 +158,7 @@ top panel to view:
|
|||||||
collection <client_garbage-collection>` operations, and run garbage collection
|
collection <client_garbage-collection>` operations, and run garbage collection
|
||||||
manually
|
manually
|
||||||
* **Sync Jobs**: Create, manage and run :ref:`syncjobs` from remote servers
|
* **Sync Jobs**: Create, manage and run :ref:`syncjobs` from remote servers
|
||||||
* **Verify Jobs**: Create, manage and run :ref:`maintenance_verification` jobs on the
|
* **Verify Jobs**: Create, manage and run :ref:`maintenance_verification` jobs
|
||||||
datastore
|
on the datastore
|
||||||
|
* **Options**: Configure notification and verification settings
|
||||||
|
* **Permissions**: Manage permissions on the datastore
|
||||||
|
BIN
docs/images/screenshots/boot-grub.png
Normal file
After Width: | Height: | Size: 9.9 KiB |
BIN
docs/images/screenshots/boot-systemdboot.png
Normal file
After Width: | Height: | Size: 4.5 KiB |
BIN
docs/images/screenshots/pbs-gui-acme-add-domain.png
Normal file
After Width: | Height: | Size: 11 KiB |
BIN
docs/images/screenshots/pbs-gui-acme-create-account.png
Normal file
After Width: | Height: | Size: 21 KiB |
BIN
docs/images/screenshots/pbs-gui-acme-create-challenge-plugin.png
Normal file
After Width: | Height: | Size: 26 KiB |
BIN
docs/images/screenshots/pbs-gui-administration-apt-repos.png
Normal file
After Width: | Height: | Size: 149 KiB |
BIN
docs/images/screenshots/pbs-gui-administration-logs.png
Normal file
After Width: | Height: | Size: 438 KiB |
Before Width: | Height: | Size: 140 KiB After Width: | Height: | Size: 197 KiB |
BIN
docs/images/screenshots/pbs-gui-administration-services.png
Normal file
After Width: | Height: | Size: 104 KiB |
BIN
docs/images/screenshots/pbs-gui-administration-tasks.png
Normal file
After Width: | Height: | Size: 367 KiB |
BIN
docs/images/screenshots/pbs-gui-administration-updates.png
Normal file
After Width: | Height: | Size: 83 KiB |
BIN
docs/images/screenshots/pbs-gui-certs-upload-custom.png
Normal file
After Width: | Height: | Size: 36 KiB |
Before Width: | Height: | Size: 90 KiB After Width: | Height: | Size: 59 KiB |
Before Width: | Height: | Size: 18 KiB |
BIN
docs/images/screenshots/pbs-gui-datastore-create.png
Normal file
After Width: | Height: | Size: 18 KiB |
BIN
docs/images/screenshots/pbs-gui-datastore-options.png
Normal file
After Width: | Height: | Size: 35 KiB |
Before Width: | Height: | Size: 130 KiB After Width: | Height: | Size: 131 KiB |
Before Width: | Height: | Size: 79 KiB After Width: | Height: | Size: 139 KiB |
Before Width: | Height: | Size: 14 KiB After Width: | Height: | Size: 14 KiB |
BIN
docs/images/screenshots/pbs-gui-permissions.png
Normal file
After Width: | Height: | Size: 174 KiB |
BIN
docs/images/screenshots/pbs-gui-remote.png
Normal file
After Width: | Height: | Size: 84 KiB |
Before Width: | Height: | Size: 21 KiB After Width: | Height: | Size: 33 KiB |
BIN
docs/images/screenshots/pbs-gui-syncjob.png
Normal file
After Width: | Height: | Size: 94 KiB |
BIN
docs/images/screenshots/pbs-gui-system-config.png
Normal file
After Width: | Height: | Size: 107 KiB |
Before Width: | Height: | Size: 36 KiB After Width: | Height: | Size: 37 KiB |
Before Width: | Height: | Size: 31 KiB After Width: | Height: | Size: 26 KiB |
BIN
docs/images/screenshots/pbs-gui-traffic-control-add.png
Normal file
After Width: | Height: | Size: 32 KiB |
Before Width: | Height: | Size: 19 KiB After Width: | Height: | Size: 20 KiB |
Before Width: | Height: | Size: 62 KiB After Width: | Height: | Size: 132 KiB |
@ -50,6 +50,7 @@ in the section entitled "GNU Free Documentation License".
|
|||||||
file-formats.rst
|
file-formats.rst
|
||||||
backup-protocol.rst
|
backup-protocol.rst
|
||||||
calendarevents.rst
|
calendarevents.rst
|
||||||
|
markdown-primer.rst
|
||||||
glossary.rst
|
glossary.rst
|
||||||
GFDL.rst
|
GFDL.rst
|
||||||
|
|
||||||
|
@ -19,24 +19,24 @@ for various management tasks such as disk management.
|
|||||||
`Proxmox Backup`_ without the server part.
|
`Proxmox Backup`_ without the server part.
|
||||||
|
|
||||||
The disk image (ISO file) provided by Proxmox includes a complete Debian system
|
The disk image (ISO file) provided by Proxmox includes a complete Debian system
|
||||||
as well as all necessary packages for the `Proxmox Backup`_ server.
|
as well as all necessary packages for the `Proxmox Backup`_ Server.
|
||||||
|
|
||||||
The installer will guide you through the setup process and allow
|
The installer will guide you through the setup process and allow
|
||||||
you to partition the local disk(s), apply basic system configurations
|
you to partition the local disk(s), apply basic system configuration
|
||||||
(e.g. timezone, language, network), and install all required packages.
|
(for example timezone, language, network), and install all required packages.
|
||||||
The provided ISO will get you started in just a few minutes, and is the
|
The provided ISO will get you started in just a few minutes, and is the
|
||||||
recommended method for new and existing users.
|
recommended method for new and existing users.
|
||||||
|
|
||||||
Alternatively, `Proxmox Backup`_ server can be installed on top of an
|
Alternatively, `Proxmox Backup`_ Server can be installed on top of an
|
||||||
existing Debian system.
|
existing Debian system.
|
||||||
|
|
||||||
Install `Proxmox Backup`_ with the Installer
|
Install `Proxmox Backup`_ Server using the Installer
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Download the ISO from |DOWNLOADS|.
|
Download the ISO from |DOWNLOADS|.
|
||||||
It includes the following:
|
It includes the following:
|
||||||
|
|
||||||
* The `Proxmox Backup`_ server installer, which partitions the local
|
* The `Proxmox Backup`_ Server installer, which partitions the local
|
||||||
disk(s) with ext4, xfs or ZFS, and installs the operating system
|
disk(s) with ext4, xfs or ZFS, and installs the operating system
|
||||||
|
|
||||||
* Complete operating system (Debian Linux, 64-bit)
|
* Complete operating system (Debian Linux, 64-bit)
|
||||||
@ -63,7 +63,7 @@ standard Debian installation. After configuring the
|
|||||||
# apt-get update
|
# apt-get update
|
||||||
# apt-get install proxmox-backup-server
|
# apt-get install proxmox-backup-server
|
||||||
|
|
||||||
The commands above keep the current (Debian) kernel and install a minimal
|
The above commands keep the current (Debian) kernel and install a minimal
|
||||||
set of required packages.
|
set of required packages.
|
||||||
|
|
||||||
If you want to install the same set of packages as the installer
|
If you want to install the same set of packages as the installer
|
||||||
|
@ -4,15 +4,16 @@ Introduction
|
|||||||
What is Proxmox Backup Server?
|
What is Proxmox Backup Server?
|
||||||
------------------------------
|
------------------------------
|
||||||
|
|
||||||
Proxmox Backup Server is an enterprise-class, client-server backup software
|
Proxmox Backup Server is an enterprise-class, client-server backup solution that
|
||||||
package that backs up :term:`virtual machine`\ s, :term:`container`\ s, and
|
is capable of backing up :term:`virtual machine<Virtual machine>`\ s,
|
||||||
physical hosts. It is specially optimized for the `Proxmox Virtual Environment`_
|
:term:`container<Container>`\ s, and physical hosts. It is specially optimized
|
||||||
platform and allows you to back up your data securely, even between remote
|
for the `Proxmox Virtual Environment`_ platform and allows you to back up your
|
||||||
sites, providing easy management with a web-based user interface.
|
data securely, even between remote sites, providing easy management through a
|
||||||
|
web-based user interface.
|
||||||
|
|
||||||
It supports deduplication, compression, and authenticated
|
It supports deduplication, compression, and authenticated
|
||||||
encryption (AE_). Using :term:`Rust` as the implementation language guarantees high
|
encryption (AE_). Using :term:`Rust` as the implementation language guarantees
|
||||||
performance, low resource usage, and a safe, high-quality codebase.
|
high performance, low resource usage, and a safe, high-quality codebase.
|
||||||
|
|
||||||
Proxmox Backup uses state of the art cryptography for both client-server
|
Proxmox Backup uses state of the art cryptography for both client-server
|
||||||
communication and backup content :ref:`encryption <client_encryption>`. All
|
communication and backup content :ref:`encryption <client_encryption>`. All
|
||||||
@ -28,23 +29,24 @@ Proxmox Backup Server uses a `client-server model`_. The server stores the
|
|||||||
backup data and provides an API to create and manage datastores. With the
|
backup data and provides an API to create and manage datastores. With the
|
||||||
API, it's also possible to manage disks and other server-side resources.
|
API, it's also possible to manage disks and other server-side resources.
|
||||||
|
|
||||||
The backup client uses this API to access the backed up data. With the command
|
The backup client uses this API to access the backed up data. You can use the
|
||||||
line tool ``proxmox-backup-client`` you can create backups and restore data.
|
``proxmox-backup-client`` command line tool to create and restore file backups.
|
||||||
For QEMU_ with `Proxmox Virtual Environment`_ we deliver an integrated client.
|
For QEMU_ and LXC_ within `Proxmox Virtual Environment`_, we deliver an
|
||||||
|
integrated client.
|
||||||
|
|
||||||
A single backup is allowed to contain several archives. For example, when you
|
A single backup is allowed to contain several archives. For example, when you
|
||||||
backup a :term:`virtual machine`, each disk is stored as a separate archive
|
backup a :term:`virtual machine<Virtual machine>`, each disk is stored as a
|
||||||
inside that backup. The VM configuration itself is stored as an extra file.
|
separate archive inside that backup. The VM configuration itself is stored as
|
||||||
This way, it's easy to access and restore only important parts of the backup,
|
an extra file. This way, it's easy to access and restore only the important
|
||||||
without the need to scan the whole backup.
|
parts of the backup, without the need to scan the whole backup.
|
||||||
|
|
||||||
|
|
||||||
Main Features
|
Main Features
|
||||||
-------------
|
-------------
|
||||||
|
|
||||||
:Support for Proxmox VE: The `Proxmox Virtual Environment`_ is fully
|
:Support for Proxmox VE: The `Proxmox Virtual Environment`_ is fully
|
||||||
supported and you can easily backup :term:`virtual machine`\ s and
|
supported, and you can easily backup :term:`virtual machine<Virtual machine>`\ s and
|
||||||
:term:`container`\ s.
|
:term:`container<Container>`\ s.
|
||||||
|
|
||||||
:Performance: The whole software stack is written in :term:`Rust`,
|
:Performance: The whole software stack is written in :term:`Rust`,
|
||||||
in order to provide high speed and memory efficiency.
|
in order to provide high speed and memory efficiency.
|
||||||
@ -70,6 +72,10 @@ Main Features
|
|||||||
modern hardware. In addition to client-side encryption, all data is
|
modern hardware. In addition to client-side encryption, all data is
|
||||||
transferred via a secure TLS connection.
|
transferred via a secure TLS connection.
|
||||||
|
|
||||||
|
:Tape backup: For long-term archiving of data, Proxmox Backup Server also
|
||||||
|
provides extensive support for backing up to tape and managing tape
|
||||||
|
libraries.
|
||||||
|
|
||||||
:Web interface: Manage the Proxmox Backup Server with the integrated, web-based
|
:Web interface: Manage the Proxmox Backup Server with the integrated, web-based
|
||||||
user interface.
|
user interface.
|
||||||
|
|
||||||
@ -80,7 +86,7 @@ Main Features
|
|||||||
backup-clients.
|
backup-clients.
|
||||||
|
|
||||||
:Enterprise Support: Proxmox Server Solutions GmbH offers enterprise support in
|
:Enterprise Support: Proxmox Server Solutions GmbH offers enterprise support in
|
||||||
form of `Proxmox Backup Server Subscription Plans
|
the form of `Proxmox Backup Server Subscription Plans
|
||||||
<https://www.proxmox.com/en/proxmox-backup-server/pricing>`_. Users at every
|
<https://www.proxmox.com/en/proxmox-backup-server/pricing>`_. Users at every
|
||||||
subscription level get access to the Proxmox Backup :ref:`Enterprise
|
subscription level get access to the Proxmox Backup :ref:`Enterprise
|
||||||
Repository <sysadmin_package_repos_enterprise>`. In addition, with a Basic,
|
Repository <sysadmin_package_repos_enterprise>`. In addition, with a Basic,
|
||||||
@ -173,7 +179,7 @@ Bug Tracker
|
|||||||
~~~~~~~~~~~
|
~~~~~~~~~~~
|
||||||
|
|
||||||
Proxmox runs a public bug tracker at `<https://bugzilla.proxmox.com>`_. If an
|
Proxmox runs a public bug tracker at `<https://bugzilla.proxmox.com>`_. If an
|
||||||
issue appears, file your report there. An issue can be a bug as well as a
|
issue appears, file your report there. An issue can be a bug, as well as a
|
||||||
request for a new feature or enhancement. The bug tracker helps to keep track
|
request for a new feature or enhancement. The bug tracker helps to keep track
|
||||||
of the issue and will send a notification once it has been solved.
|
of the issue and will send a notification once it has been solved.
|
||||||
|
|
||||||
@ -224,5 +230,6 @@ requirements.
|
|||||||
|
|
||||||
In July 2020, we released the first beta version of Proxmox Backup
|
In July 2020, we released the first beta version of Proxmox Backup
|
||||||
Server, followed by the first stable version in November 2020. With support for
|
Server, followed by the first stable version in November 2020. With support for
|
||||||
incremental, fully deduplicated backups, Proxmox Backup significantly reduces
|
encryption and incremental, fully deduplicated backups, Proxmox Backup offers a
|
||||||
network load and saves valuable storage space.
|
secure environment, which significantly reduces network load and saves valuable
|
||||||
|
storage space.
|
||||||
|
@ -4,17 +4,17 @@
|
|||||||
ZFS on Linux
|
ZFS on Linux
|
||||||
------------
|
------------
|
||||||
|
|
||||||
ZFS is a combined file system and logical volume manager designed by
|
ZFS is a combined file system and logical volume manager, designed by
|
||||||
Sun Microsystems. There is no need to manually compile ZFS modules - all
|
Sun Microsystems. There is no need to manually compile ZFS modules - all
|
||||||
packages are included.
|
packages are included.
|
||||||
|
|
||||||
By using ZFS, it's possible to achieve maximum enterprise features with
|
By using ZFS, it's possible to achieve maximum enterprise features with
|
||||||
low budget hardware, but also high performance systems by leveraging
|
low budget hardware, and also high performance systems by leveraging
|
||||||
SSD caching or even SSD only setups. ZFS can replace cost intense
|
SSD caching or even SSD only setups. ZFS can replace expensive
|
||||||
hardware raid cards by moderate CPU and memory load combined with easy
|
hardware raid cards with moderate CPU and memory load, combined with easy
|
||||||
management.
|
management.
|
||||||
|
|
||||||
General ZFS advantages
|
General advantages of ZFS:
|
||||||
|
|
||||||
* Easy configuration and management with GUI and CLI.
|
* Easy configuration and management with GUI and CLI.
|
||||||
* Reliable
|
* Reliable
|
||||||
@ -34,18 +34,18 @@ General ZFS advantages
|
|||||||
Hardware
|
Hardware
|
||||||
~~~~~~~~~
|
~~~~~~~~~
|
||||||
|
|
||||||
ZFS depends heavily on memory, so you need at least 8GB to start. In
|
ZFS depends heavily on memory, so it's recommended to have at least 8GB to
|
||||||
practice, use as much you can get for your hardware/budget. To prevent
|
start. In practice, use as much you can get for your hardware/budget. To prevent
|
||||||
data corruption, we recommend the use of high quality ECC RAM.
|
data corruption, we recommend the use of high quality ECC RAM.
|
||||||
|
|
||||||
If you use a dedicated cache and/or log disk, you should use an
|
If you use a dedicated cache and/or log disk, you should use an
|
||||||
enterprise class SSD (e.g. Intel SSD DC S3700 Series). This can
|
enterprise class SSD (for example, Intel SSD DC S3700 Series). This can
|
||||||
increase the overall performance significantly.
|
increase the overall performance significantly.
|
||||||
|
|
||||||
IMPORTANT: Do not use ZFS on top of hardware controller which has its
|
IMPORTANT: Do not use ZFS on top of a hardware controller which has its
|
||||||
own cache management. ZFS needs to directly communicate with disks. An
|
own cache management. ZFS needs to directly communicate with disks. An
|
||||||
HBA adapter is the way to go, or something like LSI controller flashed
|
HBA adapter or something like an LSI controller flashed in ``IT`` mode is
|
||||||
in ``IT`` mode.
|
recommended.
|
||||||
|
|
||||||
|
|
||||||
ZFS Administration
|
ZFS Administration
|
||||||
@ -53,7 +53,7 @@ ZFS Administration
|
|||||||
|
|
||||||
This section gives you some usage examples for common tasks. ZFS
|
This section gives you some usage examples for common tasks. ZFS
|
||||||
itself is really powerful and provides many options. The main commands
|
itself is really powerful and provides many options. The main commands
|
||||||
to manage ZFS are `zfs` and `zpool`. Both commands come with great
|
to manage ZFS are `zfs` and `zpool`. Both commands come with extensive
|
||||||
manual pages, which can be read with:
|
manual pages, which can be read with:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -123,7 +123,7 @@ Create a new pool with cache (L2ARC)
|
|||||||
It is possible to use a dedicated cache drive partition to increase
|
It is possible to use a dedicated cache drive partition to increase
|
||||||
the performance (use SSD).
|
the performance (use SSD).
|
||||||
|
|
||||||
As `<device>` it is possible to use more devices, like it's shown in
|
For `<device>`, you can use multiple devices, as is shown in
|
||||||
"Create a new pool with RAID*".
|
"Create a new pool with RAID*".
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -136,7 +136,7 @@ Create a new pool with log (ZIL)
|
|||||||
It is possible to use a dedicated cache drive partition to increase
|
It is possible to use a dedicated cache drive partition to increase
|
||||||
the performance (SSD).
|
the performance (SSD).
|
||||||
|
|
||||||
As `<device>` it is possible to use more devices, like it's shown in
|
For `<device>`, you can use multiple devices, as is shown in
|
||||||
"Create a new pool with RAID*".
|
"Create a new pool with RAID*".
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -146,8 +146,9 @@ As `<device>` it is possible to use more devices, like it's shown in
|
|||||||
Add cache and log to an existing pool
|
Add cache and log to an existing pool
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
If you have a pool without cache and log. First partition the SSD in
|
You can add cache and log devices to a pool after its creation. In this example,
|
||||||
2 partition with `parted` or `gdisk`
|
we will use a single drive for both cache and log. First, you need to create
|
||||||
|
2 partitions on the SSD with `parted` or `gdisk`
|
||||||
|
|
||||||
.. important:: Always use GPT partition tables.
|
.. important:: Always use GPT partition tables.
|
||||||
|
|
||||||
@ -171,12 +172,12 @@ Changing a failed device
|
|||||||
Changing a failed bootable device
|
Changing a failed bootable device
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
Depending on how Proxmox Backup was installed it is either using `grub` or `systemd-boot`
|
Depending on how Proxmox Backup was installed, it is either using `grub` or
|
||||||
as bootloader.
|
`systemd-boot` as a bootloader.
|
||||||
|
|
||||||
The first steps of copying the partition table, reissuing GUIDs and replacing
|
In either case, the first steps of copying the partition table, reissuing GUIDs
|
||||||
the ZFS partition are the same. To make the system bootable from the new disk,
|
and replacing the ZFS partition are the same. To make the system bootable from
|
||||||
different steps are needed which depend on the bootloader in use.
|
the new disk, different steps are needed which depend on the bootloader in use.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -190,12 +191,12 @@ With `systemd-boot`:
|
|||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# pve-efiboot-tool format <new disk's ESP>
|
# proxmox-boot-tool format <new ESP>
|
||||||
# pve-efiboot-tool init <new disk's ESP>
|
# proxmox-boot-tool init <new ESP>
|
||||||
|
|
||||||
.. NOTE:: `ESP` stands for EFI System Partition, which is setup as partition #2 on
|
.. NOTE:: `ESP` stands for EFI System Partition, which is setup as partition #2 on
|
||||||
bootable disks setup by the {pve} installer since version 5.4. For details, see
|
bootable disks setup by the `Proxmox Backup`_ installer. For details, see
|
||||||
xref:sysboot_systemd_boot_setup[Setting up a new partition for use as synced ESP].
|
:ref:`Setting up a new partition for use as synced ESP <systembooting-proxmox-boot-setup>`.
|
||||||
|
|
||||||
With `grub`:
|
With `grub`:
|
||||||
|
|
||||||
@ -207,36 +208,31 @@ Usually `grub.cfg` is located in `/boot/grub/grub.cfg`
|
|||||||
# grub-mkconfig -o /path/to/grub.cfg
|
# grub-mkconfig -o /path/to/grub.cfg
|
||||||
|
|
||||||
|
|
||||||
Activate E-Mail Notification
|
Activate e-mail notification
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
ZFS comes with an event daemon, which monitors events generated by the
|
ZFS comes with an event daemon ``ZED``, which monitors events generated by the
|
||||||
ZFS kernel module. The daemon can also send emails on ZFS events like
|
ZFS kernel module. The daemon can also send emails on ZFS events like pool
|
||||||
pool errors. Newer ZFS packages ship the daemon in a separate package,
|
errors. Newer ZFS packages ship the daemon in a separate package ``zfs-zed``,
|
||||||
and you can install it using `apt-get`:
|
which should already be installed by default in `Proxmox Backup`_.
|
||||||
|
|
||||||
.. code-block:: console
|
You can configure the daemon via the file ``/etc/zfs/zed.d/zed.rc`` with your
|
||||||
|
favorite editor. The required setting for email notification is
|
||||||
# apt-get install zfs-zed
|
``ZED_EMAIL_ADDR``, which is set to ``root`` by default.
|
||||||
|
|
||||||
To activate the daemon it is necessary to edit `/etc/zfs/zed.d/zed.rc` with your
|
|
||||||
favorite editor, and uncomment the `ZED_EMAIL_ADDR` setting:
|
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
ZED_EMAIL_ADDR="root"
|
ZED_EMAIL_ADDR="root"
|
||||||
|
|
||||||
Please note Proxmox Backup forwards mails to `root` to the email address
|
Please note that `Proxmox Backup`_ forwards mails to `root` to the email address
|
||||||
configured for the root user.
|
configured for the root user.
|
||||||
|
|
||||||
IMPORTANT: The only setting that is required is `ZED_EMAIL_ADDR`. All
|
|
||||||
other settings are optional.
|
|
||||||
|
|
||||||
Limit ZFS Memory Usage
|
Limit ZFS memory usage
|
||||||
^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
It is good to use at most 50 percent (which is the default) of the
|
It is good to use at most 50 percent (which is the default) of the
|
||||||
system memory for ZFS ARC to prevent performance shortage of the
|
system memory for ZFS ARC, to prevent performance degradation of the
|
||||||
host. Use your preferred editor to change the configuration in
|
host. Use your preferred editor to change the configuration in
|
||||||
`/etc/modprobe.d/zfs.conf` and insert:
|
`/etc/modprobe.d/zfs.conf` and insert:
|
||||||
|
|
||||||
@ -244,27 +240,42 @@ host. Use your preferred editor to change the configuration in
|
|||||||
|
|
||||||
options zfs zfs_arc_max=8589934592
|
options zfs zfs_arc_max=8589934592
|
||||||
|
|
||||||
This example setting limits the usage to 8GB.
|
The above example limits the usage to 8 GiB ('8 * 2^30^').
|
||||||
|
|
||||||
.. IMPORTANT:: If your root file system is ZFS you must update your initramfs every time this value changes:
|
.. IMPORTANT:: In case your desired `zfs_arc_max` value is lower than or equal
|
||||||
|
to `zfs_arc_min` (which defaults to 1/32 of the system memory), `zfs_arc_max`
|
||||||
|
will be ignored. Thus, for it to work in this case, you must set
|
||||||
|
`zfs_arc_min` to at most `zfs_arc_max - 1`. This would require updating the
|
||||||
|
configuration in `/etc/modprobe.d/zfs.conf`, with:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
options zfs zfs_arc_min=8589934591
|
||||||
|
options zfs zfs_arc_max=8589934592
|
||||||
|
|
||||||
|
This example setting limits the usage to 8 GiB ('8 * 2^30^') on
|
||||||
|
systems with more than 256 GiB of total memory, where simply setting
|
||||||
|
`zfs_arc_max` alone would not work.
|
||||||
|
|
||||||
|
.. IMPORTANT:: If your root file system is ZFS, you must update your initramfs
|
||||||
|
every time this value changes.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# update-initramfs -u
|
# update-initramfs -u
|
||||||
|
|
||||||
|
|
||||||
SWAP on ZFS
|
Swap on ZFS
|
||||||
^^^^^^^^^^^
|
^^^^^^^^^^^
|
||||||
|
|
||||||
Swap-space created on a zvol may generate some troubles, like blocking the
|
Swap-space created on a zvol may cause some issues, such as blocking the
|
||||||
server or generating a high IO load, often seen when starting a Backup
|
server or generating a high IO load.
|
||||||
to an external Storage.
|
|
||||||
|
|
||||||
We strongly recommend to use enough memory, so that you normally do not
|
We strongly recommend using enough memory, so that you normally do not
|
||||||
run into low memory situations. Should you need or want to add swap, it is
|
run into low memory situations. Should you need or want to add swap, it is
|
||||||
preferred to create a partition on a physical disk and use it as swap device.
|
preferred to create a partition on a physical disk and use it as a swap device.
|
||||||
You can leave some space free for this purpose in the advanced options of the
|
You can leave some space free for this purpose in the advanced options of the
|
||||||
installer. Additionally, you can lower the `swappiness` value.
|
installer. Additionally, you can lower the `swappiness` value.
|
||||||
A good value for servers is 10:
|
A good value for servers is 10:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -291,21 +302,24 @@ an editor of your choice and add the following line:
|
|||||||
vm.swappiness = 100 The kernel will swap aggressively.
|
vm.swappiness = 100 The kernel will swap aggressively.
|
||||||
==================== ===============================================================
|
==================== ===============================================================
|
||||||
|
|
||||||
ZFS Compression
|
ZFS compression
|
||||||
^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
To activate compression:
|
To activate compression:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# zpool set compression=lz4 <pool>
|
# zpool set compression=lz4 <pool>
|
||||||
|
|
||||||
We recommend using the `lz4` algorithm, since it adds very little CPU overhead.
|
We recommend using the `lz4` algorithm, since it adds very little CPU overhead.
|
||||||
Other algorithms such as `lzjb` and `gzip-N` (where `N` is an integer `1-9` representing
|
Other algorithms such as `lzjb`, `zstd` and `gzip-N` (where `N` is an integer from `1-9`
|
||||||
the compression ratio, 1 is fastest and 9 is best compression) are also available.
|
representing the compression ratio, where 1 is fastest and 9 is best
|
||||||
Depending on the algorithm and how compressible the data is, having compression enabled can even increase
|
compression) are also available. Depending on the algorithm and how
|
||||||
I/O performance.
|
compressible the data is, having compression enabled can even increase I/O
|
||||||
|
performance.
|
||||||
|
|
||||||
You can disable compression at any time with:
|
You can disable compression at any time with:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# zfs set compression=off <dataset>
|
# zfs set compression=off <dataset>
|
||||||
@ -314,26 +328,26 @@ Only new blocks will be affected by this change.
|
|||||||
|
|
||||||
.. _local_zfs_special_device:
|
.. _local_zfs_special_device:
|
||||||
|
|
||||||
ZFS Special Device
|
ZFS special device
|
||||||
^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
Since version 0.8.0 ZFS supports `special` devices. A `special` device in a
|
Since version 0.8.0, ZFS supports `special` devices. A `special` device in a
|
||||||
pool is used to store metadata, deduplication tables, and optionally small
|
pool is used to store metadata, deduplication tables, and optionally small
|
||||||
file blocks.
|
file blocks.
|
||||||
|
|
||||||
A `special` device can improve the speed of a pool consisting of slow spinning
|
A `special` device can improve the speed of a pool consisting of slow spinning
|
||||||
hard disks with a lot of metadata changes. For example workloads that involve
|
hard disks with a lot of metadata changes. For example, workloads that involve
|
||||||
creating, updating or deleting a large number of files will benefit from the
|
creating, updating or deleting a large number of files will benefit from the
|
||||||
presence of a `special` device. ZFS datasets can also be configured to store
|
presence of a `special` device. ZFS datasets can also be configured to store
|
||||||
whole small files on the `special` device which can further improve the
|
small files on the `special` device, which can further improve the
|
||||||
performance. Use fast SSDs for the `special` device.
|
performance. Use fast SSDs for the `special` device.
|
||||||
|
|
||||||
.. IMPORTANT:: The redundancy of the `special` device should match the one of the
|
.. IMPORTANT:: The redundancy of the `special` device should match the one of the
|
||||||
pool, since the `special` device is a point of failure for the whole pool.
|
pool, since the `special` device is a point of failure for the entire pool.
|
||||||
|
|
||||||
.. WARNING:: Adding a `special` device to a pool cannot be undone!
|
.. WARNING:: Adding a `special` device to a pool cannot be undone!
|
||||||
|
|
||||||
Create a pool with `special` device and RAID-1:
|
To create a pool with `special` device and RAID-1:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -346,8 +360,8 @@ Adding a `special` device to an existing pool with RAID-1:
|
|||||||
# zpool add <pool> special mirror <device1> <device2>
|
# zpool add <pool> special mirror <device1> <device2>
|
||||||
|
|
||||||
ZFS datasets expose the `special_small_blocks=<size>` property. `size` can be
|
ZFS datasets expose the `special_small_blocks=<size>` property. `size` can be
|
||||||
`0` to disable storing small file blocks on the `special` device or a power of
|
`0` to disable storing small file blocks on the `special` device, or a power of
|
||||||
two in the range between `512B` to `128K`. After setting the property new file
|
two in the range between `512B` to `128K`. After setting this property, new file
|
||||||
blocks smaller than `size` will be allocated on the `special` device.
|
blocks smaller than `size` will be allocated on the `special` device.
|
||||||
|
|
||||||
.. IMPORTANT:: If the value for `special_small_blocks` is greater than or equal to
|
.. IMPORTANT:: If the value for `special_small_blocks` is greater than or equal to
|
||||||
@ -355,10 +369,10 @@ blocks smaller than `size` will be allocated on the `special` device.
|
|||||||
the `special` device, so be careful!
|
the `special` device, so be careful!
|
||||||
|
|
||||||
Setting the `special_small_blocks` property on a pool will change the default
|
Setting the `special_small_blocks` property on a pool will change the default
|
||||||
value of that property for all child ZFS datasets (for example all containers
|
value of that property for all child ZFS datasets (for example, all containers
|
||||||
in the pool will opt in for small file blocks).
|
in the pool will opt in for small file blocks).
|
||||||
|
|
||||||
Opt in for all file smaller than 4K-blocks pool-wide:
|
Opt in for all files smaller than 4K-blocks pool-wide:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -379,10 +393,15 @@ Opt out from small file blocks for a single dataset:
|
|||||||
Troubleshooting
|
Troubleshooting
|
||||||
^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
Corrupted cachefile
|
Corrupt cache file
|
||||||
|
""""""""""""""""""
|
||||||
|
|
||||||
In case of a corrupted ZFS cachefile, some volumes may not be mounted during
|
`zfs-import-cache.service` imports ZFS pools using the ZFS cache file. If this
|
||||||
boot until mounted manually later.
|
file becomes corrupted, the service won't be able to import the pools that it's
|
||||||
|
unable to read from it.
|
||||||
|
|
||||||
|
As a result, in case of a corrupted ZFS cache file, some volumes may not be
|
||||||
|
mounted during boot and must be mounted manually later.
|
||||||
|
|
||||||
For each pool, run:
|
For each pool, run:
|
||||||
|
|
||||||
@ -390,16 +409,13 @@ For each pool, run:
|
|||||||
|
|
||||||
# zpool set cachefile=/etc/zfs/zpool.cache POOLNAME
|
# zpool set cachefile=/etc/zfs/zpool.cache POOLNAME
|
||||||
|
|
||||||
and afterwards update the `initramfs` by running:
|
then, update the `initramfs` by running:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# update-initramfs -u -k all
|
# update-initramfs -u -k all
|
||||||
|
|
||||||
and finally reboot your node.
|
and finally, reboot the node.
|
||||||
|
|
||||||
Sometimes the ZFS cachefile can get corrupted, and `zfs-import-cache.service`
|
|
||||||
doesn't import the pools that aren't present in the cachefile.
|
|
||||||
|
|
||||||
Another workaround to this problem is enabling the `zfs-import-scan.service`,
|
Another workaround to this problem is enabling the `zfs-import-scan.service`,
|
||||||
which searches and imports pools via device scanning (usually slower).
|
which searches and imports pools via device scanning (usually slower).
|
||||||
|
@ -14,15 +14,15 @@ following retention options are available:
|
|||||||
|
|
||||||
``keep-hourly <N>``
|
``keep-hourly <N>``
|
||||||
Keep backups for the last ``<N>`` hours. If there is more than one
|
Keep backups for the last ``<N>`` hours. If there is more than one
|
||||||
backup for a single hour, only the latest is kept.
|
backup for a single hour, only the latest is retained.
|
||||||
|
|
||||||
``keep-daily <N>``
|
``keep-daily <N>``
|
||||||
Keep backups for the last ``<N>`` days. If there is more than one
|
Keep backups for the last ``<N>`` days. If there is more than one
|
||||||
backup for a single day, only the latest is kept.
|
backup for a single day, only the latest is retained.
|
||||||
|
|
||||||
``keep-weekly <N>``
|
``keep-weekly <N>``
|
||||||
Keep backups for the last ``<N>`` weeks. If there is more than one
|
Keep backups for the last ``<N>`` weeks. If there is more than one
|
||||||
backup for a single week, only the latest is kept.
|
backup for a single week, only the latest is retained.
|
||||||
|
|
||||||
.. note:: Weeks start on Monday and end on Sunday. The software
|
.. note:: Weeks start on Monday and end on Sunday. The software
|
||||||
uses the `ISO week date`_ system and handles weeks at
|
uses the `ISO week date`_ system and handles weeks at
|
||||||
@ -30,17 +30,17 @@ following retention options are available:
|
|||||||
|
|
||||||
``keep-monthly <N>``
|
``keep-monthly <N>``
|
||||||
Keep backups for the last ``<N>`` months. If there is more than one
|
Keep backups for the last ``<N>`` months. If there is more than one
|
||||||
backup for a single month, only the latest is kept.
|
backup for a single month, only the latest is retained.
|
||||||
|
|
||||||
``keep-yearly <N>``
|
``keep-yearly <N>``
|
||||||
Keep backups for the last ``<N>`` years. If there is more than one
|
Keep backups for the last ``<N>`` years. If there is more than one
|
||||||
backup for a single year, only the latest is kept.
|
backup for a single year, only the latest is retained.
|
||||||
|
|
||||||
The retention options are processed in the order given above. Each option
|
The retention options are processed in the order given above. Each option
|
||||||
only covers backups within its time period. The next option does not take care
|
only covers backups within its time period. The next option does not take care
|
||||||
of already covered backups. It will only consider older backups.
|
of already covered backups. It will only consider older backups.
|
||||||
|
|
||||||
Unfinished and incomplete backups will be removed by the prune command unless
|
Unfinished and incomplete backups will be removed by the prune command, unless
|
||||||
they are newer than the last successful backup. In this case, the last failed
|
they are newer than the last successful backup. In this case, the last failed
|
||||||
backup is retained.
|
backup is retained.
|
||||||
|
|
||||||
@ -48,7 +48,7 @@ Prune Simulator
|
|||||||
^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
You can use the built-in `prune simulator <prune-simulator/index.html>`_
|
You can use the built-in `prune simulator <prune-simulator/index.html>`_
|
||||||
to explore the effect of different retetion options with various backup
|
to explore the effect of different retention options with various backup
|
||||||
schedules.
|
schedules.
|
||||||
|
|
||||||
Manual Pruning
|
Manual Pruning
|
||||||
@ -59,10 +59,10 @@ Manual Pruning
|
|||||||
:align: right
|
:align: right
|
||||||
:alt: Prune and garbage collection options
|
:alt: Prune and garbage collection options
|
||||||
|
|
||||||
To access pruning functionality for a specific backup group, you can use the
|
To manually prune a specific backup group, you can use
|
||||||
prune command line option discussed in :ref:`backup-pruning`, or navigate to
|
``proxmox-backup-client``'s ``prune`` subcommand, discussed in
|
||||||
the **Content** tab of the datastore and click the scissors icon in the
|
:ref:`backup-pruning`, or navigate to the **Content** tab of the datastore and
|
||||||
**Actions** column of the relevant backup group.
|
click the scissors icon in the **Actions** column of the relevant backup group.
|
||||||
|
|
||||||
Prune Schedules
|
Prune Schedules
|
||||||
^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^
|
||||||
@ -81,7 +81,7 @@ Retention Settings Example
|
|||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
The backup frequency and retention of old backups may depend on how often data
|
The backup frequency and retention of old backups may depend on how often data
|
||||||
changes, and how important an older state may be, in a specific work load.
|
changes and how important an older state may be in a specific workload.
|
||||||
When backups act as a company's document archive, there may also be legal
|
When backups act as a company's document archive, there may also be legal
|
||||||
requirements for how long backup snapshots must be kept.
|
requirements for how long backup snapshots must be kept.
|
||||||
|
|
||||||
@ -125,8 +125,8 @@ start garbage collection on an entire datastore and the ``status`` subcommand to
|
|||||||
see attributes relating to the :ref:`garbage collection <client_garbage-collection>`.
|
see attributes relating to the :ref:`garbage collection <client_garbage-collection>`.
|
||||||
|
|
||||||
This functionality can also be accessed in the GUI, by navigating to **Prune &
|
This functionality can also be accessed in the GUI, by navigating to **Prune &
|
||||||
GC** from the top panel. From here, you can edit the schedule at which garbage
|
GC** from the top panel of a datastore. From here, you can edit the schedule at
|
||||||
collection runs and manually start the operation.
|
which garbage collection runs and manually start the operation.
|
||||||
|
|
||||||
|
|
||||||
.. _maintenance_verification:
|
.. _maintenance_verification:
|
||||||
@ -139,13 +139,13 @@ Verification
|
|||||||
:align: right
|
:align: right
|
||||||
:alt: Adding a verify job
|
:alt: Adding a verify job
|
||||||
|
|
||||||
Proxmox Backup offers various verification options to ensure that backup data is
|
Proxmox Backup Server offers various verification options to ensure that backup
|
||||||
intact. Verification is generally carried out through the creation of verify
|
data is intact. Verification is generally carried out through the creation of
|
||||||
jobs. These are scheduled tasks that run verification at a given interval (see
|
verify jobs. These are scheduled tasks that run verification at a given interval
|
||||||
:ref:`calendar-event-scheduling`). With these, you can set whether already verified
|
(see :ref:`calendar-event-scheduling`). With these, you can also set whether
|
||||||
snapshots are ignored, as well as set a time period, after which verified jobs
|
already verified snapshots are ignored, as well as set a time period, after
|
||||||
are checked again. The interface for creating verify jobs can be found under the
|
which snapshots are checked again. The interface for creating verify jobs can be
|
||||||
**Verify Jobs** tab of the datastore.
|
found under the **Verify Jobs** tab of the datastore.
|
||||||
|
|
||||||
.. Note:: It is recommended that you reverify all backups at least monthly, even
|
.. Note:: It is recommended that you reverify all backups at least monthly, even
|
||||||
if a previous verification was successful. This is because physical drives
|
if a previous verification was successful. This is because physical drives
|
||||||
@ -158,9 +158,9 @@ are checked again. The interface for creating verify jobs can be found under the
|
|||||||
data.
|
data.
|
||||||
|
|
||||||
Aside from using verify jobs, you can also run verification manually on entire
|
Aside from using verify jobs, you can also run verification manually on entire
|
||||||
datastores, backup groups, or snapshots. To do this, navigate to the **Content**
|
datastores, backup groups or snapshots. To do this, navigate to the **Content**
|
||||||
tab of the datastore and either click *Verify All*, or select the *V.* icon from
|
tab of the datastore and either click *Verify All* or select the *V.* icon from
|
||||||
the *Actions* column in the table.
|
the **Actions** column in the table.
|
||||||
|
|
||||||
.. _maintenance_notification:
|
.. _maintenance_notification:
|
||||||
|
|
||||||
@ -170,8 +170,12 @@ Notifications
|
|||||||
Proxmox Backup Server can send you notification emails about automatically
|
Proxmox Backup Server can send you notification emails about automatically
|
||||||
scheduled verification, garbage-collection and synchronization tasks results.
|
scheduled verification, garbage-collection and synchronization task results.
|
||||||
|
|
||||||
By default, notifications are send to the email address configured for the
|
By default, notifications are sent to the email address configured for the
|
||||||
`root@pam` user. You can set that user for each datastore.
|
`root@pam` user. You can instead set this user for each datastore.
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-datastore-options.png
|
||||||
|
:align: right
|
||||||
|
:alt: Datastore Options
|
||||||
|
|
||||||
You can also change the level of notification received per task type, the
|
You can also change the level of notification received per task type. The
|
||||||
following options are available:
|
following options are available:
|
||||||
@ -179,6 +183,23 @@ following options are available:
|
|||||||
* Always: send a notification for any scheduled task, independent of the
|
* Always: send a notification for any scheduled task, independent of the
|
||||||
outcome
|
outcome
|
||||||
|
|
||||||
* Errors: send a notification for any scheduled task resulting in an error
|
* Errors: send a notification for any scheduled task that results in an error
|
||||||
|
|
||||||
* Never: do not send any notification at all
|
* Never: do not send any notification at all
|
||||||
|
|
||||||
|
.. _maintenance_mode:
|
||||||
|
|
||||||
|
Maintenance Mode
|
||||||
|
----------------
|
||||||
|
|
||||||
|
Proxmox Backup Server implements setting the `read-only` and `offline`
|
||||||
|
maintenance modes for a datastore.
|
||||||
|
|
||||||
|
Once enabled, depending on the mode, new reads and/or writes to the datastore
|
||||||
|
are blocked, allowing an administrator to safely execute maintenance tasks, for
|
||||||
|
example, on the underlying storage.
|
||||||
|
|
||||||
|
Internally Proxmox Backup Server tracks whether each datastore access is a
|
||||||
|
write or read operation, so that it can gracefully enter the respective mode,
|
||||||
|
by allowing conflicting operations that started before enabling the maintenance
|
||||||
|
mode to finish.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
Managing Remotes
|
Managing Remotes & Sync
|
||||||
================
|
=======================
|
||||||
|
|
||||||
.. _backup_remote:
|
.. _backup_remote:
|
||||||
|
|
||||||
@ -17,8 +17,8 @@ configuration information for remotes is stored in the file
|
|||||||
:align: right
|
:align: right
|
||||||
:alt: Add a remote
|
:alt: Add a remote
|
||||||
|
|
||||||
To add a remote, you need its hostname or IP, a userid and password on the
|
To add a remote, you need its hostname or IP address, a userid and password on
|
||||||
remote, and its certificate fingerprint. To get the fingerprint, use the
|
the remote, and its certificate fingerprint. To get the fingerprint, use the
|
||||||
``proxmox-backup-manager cert info`` command on the remote, or navigate to
|
``proxmox-backup-manager cert info`` command on the remote, or navigate to
|
||||||
**Dashboard** in the remote's web interface and select **Show Fingerprint**.
|
**Dashboard** in the remote's web interface and select **Show Fingerprint**.
|
||||||
|
|
||||||
@ -60,12 +60,13 @@ Sync Jobs
|
|||||||
|
|
||||||
Sync jobs are configured to pull the contents of a datastore on a **Remote** to
|
Sync jobs are configured to pull the contents of a datastore on a **Remote** to
|
||||||
a local datastore. You can manage sync jobs in the web interface, from the
|
a local datastore. You can manage sync jobs in the web interface, from the
|
||||||
**Sync Jobs** tab of the datastore which you'd like to set one up for, or using
|
**Sync Jobs** tab of the **Datastore** panel or from that of the Datastore
|
||||||
the ``proxmox-backup-manager sync-job`` command. The configuration information
|
itself. Alternatively, you can manage them with the ``proxmox-backup-manager
|
||||||
for sync jobs is stored at ``/etc/proxmox-backup/sync.cfg``. To create a new
|
sync-job`` command. The configuration information for sync jobs is stored at
|
||||||
sync job, click the add button in the GUI, or use the ``create`` subcommand.
|
``/etc/proxmox-backup/sync.cfg``. To create a new sync job, click the add button
|
||||||
After creating a sync job, you can either start it manually from the GUI or
|
in the GUI, or use the ``create`` subcommand. After creating a sync job, you can
|
||||||
provide it with a schedule (see :ref:`calendar-event-scheduling`) to run regularly.
|
either start it manually from the GUI or provide it with a schedule (see
|
||||||
|
:ref:`calendar-event-scheduling`) to run regularly.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -79,17 +80,130 @@ provide it with a schedule (see :ref:`calendar-event-scheduling`) to run regular
|
|||||||
└────────────┴───────┴────────┴──────────────┴───────────┴─────────┘
|
└────────────┴───────┴────────┴──────────────┴───────────┴─────────┘
|
||||||
# proxmox-backup-manager sync-job remove pbs2-local
|
# proxmox-backup-manager sync-job remove pbs2-local
|
||||||
|
|
||||||
For setting up sync jobs, the configuring user needs the following permissions:
|
To set up sync jobs, the configuring user needs the following permissions:
|
||||||
|
|
||||||
#. ``Remote.Read`` on the ``/remote/{remote}/{remote-store}`` path
|
#. ``Remote.Read`` on the ``/remote/{remote}/{remote-store}`` path
|
||||||
#. at least ``Datastore.Backup`` on the local target datastore (``/datastore/{store}``)
|
#. At least ``Datastore.Backup`` on the local target datastore (``/datastore/{store}``)
|
||||||
|
|
||||||
If the ``remove-vanished`` option is set, ``Datastore.Prune`` is required on
|
|
||||||
the local datastore as well. If the ``owner`` option is not set (defaulting to
|
|
||||||
``root@pam``) or set to something other than the configuring user,
|
|
||||||
``Datastore.Modify`` is required as well.
|
|
||||||
|
|
||||||
.. note:: A sync job can only sync backup groups that the configured remote's
|
.. note:: A sync job can only sync backup groups that the configured remote's
|
||||||
user/API token can read. If a remote is configured with a user/API token that
|
user/API token can read. If a remote is configured with a user/API token that
|
||||||
only has ``Datastore.Backup`` privileges, only the limited set of accessible
|
only has ``Datastore.Backup`` privileges, only the limited set of accessible
|
||||||
snapshots owned by that user/API token can be synced.
|
snapshots owned by that user/API token can be synced.
|
||||||
|
|
||||||
|
If the ``remove-vanished`` option is set, ``Datastore.Prune`` is required on
|
||||||
|
the local datastore as well. If the ``owner`` option is not set (defaulting to
|
||||||
|
``root@pam``) or is set to something other than the configuring user,
|
||||||
|
``Datastore.Modify`` is required as well.
|
||||||
|
|
||||||
|
If the ``group-filter`` option is set, only backup groups matching at least one
|
||||||
|
of the specified criteria are synced. The available criteria are:
|
||||||
|
|
||||||
|
* backup type, for example to only sync groups of the `ct` (Container) type:
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager sync-job update ID --group-filter type:ct
|
||||||
|
* full group identifier
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager sync-job update ID --group-filter group:vm/100
|
||||||
|
* regular expression matched against the full group identifier
|
||||||
|
|
||||||
|
.. todo:: add example for regex
|
||||||
|
|
||||||
|
The same filter is applied to local groups for handling of the
|
||||||
|
``remove-vanished`` option.
|
||||||
|
|
||||||
|
.. note:: The ``protected`` flag of remote backup snapshots will not be synced.
|
||||||
|
|
||||||
|
Namespace Support
|
||||||
|
^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Sync jobs can be configured to not only sync datastores, but also sub-sets of
|
||||||
|
datastores in the form of namespaces or namespace sub-trees. The following
|
||||||
|
parameters influence how namespaces are treated as part of a sync job
|
||||||
|
execution:
|
||||||
|
|
||||||
|
- ``remote-ns``: the remote namespace anchor (default: the root namespace)
|
||||||
|
|
||||||
|
- ``ns``: the local namespace anchor (default: the root namespace)
|
||||||
|
|
||||||
|
- ``max-depth``: whether to recursively iterate over sub-namespaces of the remote
|
||||||
|
namespace anchor (default: `None`)
|
||||||
|
|
||||||
|
If ``max-depth`` is set to `0`, groups are synced from ``remote-ns`` into
|
||||||
|
``ns``, without any recursion. If it is set to `None` (left empty), recursion
|
||||||
|
depth will depend on the value of ``remote-ns`` and the remote side's
|
||||||
|
availability of namespace support:
|
||||||
|
|
||||||
|
- ``remote-ns`` set to something other than the root namespace: remote *must*
|
||||||
|
support namespaces, full recursion starting at ``remote-ns``.
|
||||||
|
|
||||||
|
- ``remote-ns`` set to root namespace and remote *supports* namespaces: full
|
||||||
|
recursion starting at root namespace.
|
||||||
|
|
||||||
|
- ``remote-ns`` set to root namespace and remote *does not support* namespaces:
|
||||||
|
backwards-compat mode, only root namespace will be synced into ``ns``, no
|
||||||
|
recursion.
|
||||||
|
|
||||||
|
Any other value of ``max-depth`` will limit recursion to at most ``max-depth``
|
||||||
|
levels, for example: ``remote-ns`` set to `location_a/department_b` and
|
||||||
|
``max-depth`` set to `1` will result in `location_a/department_b` and at most
|
||||||
|
one more level of sub-namespaces being synced.
|
||||||
|
|
||||||
|
The namespace tree starting at ``remote-ns`` will be mapped into ``ns`` up to a
|
||||||
|
depth of ``max-depth``.
|
||||||
|
|
||||||
|
For example, with the following namespaces at the remote side:
|
||||||
|
|
||||||
|
- `location_a`
|
||||||
|
|
||||||
|
- `location_a/department_x`
|
||||||
|
|
||||||
|
- `location_a/department_x/team_one`
|
||||||
|
|
||||||
|
- `location_a/department_x/team_two`
|
||||||
|
|
||||||
|
- `location_a/department_y`
|
||||||
|
|
||||||
|
- `location_a/department_y/team_one`
|
||||||
|
|
||||||
|
- `location_a/department_y/team_two`
|
||||||
|
|
||||||
|
- `location_b`
|
||||||
|
|
||||||
|
and ``remote-ns`` being set to `location_a/department_x` and ``ns`` set to
|
||||||
|
`location_a_dep_x` resulting in the following namespace tree on the sync
|
||||||
|
target:
|
||||||
|
|
||||||
|
- `location_a_dep_x` (containing the remote's `location_a/department_x`)
|
||||||
|
|
||||||
|
- `location_a_dep_x/team_one` (containing the remote's `location_a/department_x/team_one`)
|
||||||
|
|
||||||
|
- `location_a_dep_x/team_two` (containing the remote's `location_a/department_x/team_two`)
|
||||||
|
|
||||||
|
with the rest of the remote namespaces and groups not being synced (by this
|
||||||
|
sync job).
|
||||||
|
|
||||||
|
If a remote namespace is included in the sync job scope, but does not exist
|
||||||
|
locally, it will be created (provided the sync job owner has sufficient
|
||||||
|
privileges).
|
||||||
|
|
||||||
|
If the ``remove-vanished`` option is set, namespaces that are included in the
|
||||||
|
sync job scope but only exist locally are treated as vanished and removed
|
||||||
|
(provided the sync job owner has sufficient privileges).
|
||||||
|
|
||||||
|
.. note:: All other limitations on sync scope (such as remote user/API token
|
||||||
|
privileges, group filters) also apply for sync jobs involving one or
|
||||||
|
multiple namespaces.
|
||||||
|
|
||||||
|
Bandwidth Limit
|
||||||
|
^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Syncing a datastore to an archive can produce lots of traffic and impact other
|
||||||
|
users of the network. So, to avoid network or storage congestion you can limit
|
||||||
|
the bandwidth of the sync job by setting the ``rate-in`` option either in the
|
||||||
|
web interface or using the ``proxmox-backup-manager`` command-line tool:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager sync-job update ID --rate-in 20MiB
|
||||||
|
178
docs/markdown-primer.rst
Normal file
@ -0,0 +1,178 @@
|
|||||||
|
.. _markdown-primer:
|
||||||
|
|
||||||
|
Markdown Primer
|
||||||
|
===============
|
||||||
|
|
||||||
|
"Markdown is a text-to-HTML conversion tool for web writers. Markdown allows
|
||||||
|
you to write using an easy-to-read, easy-to-write plain text format, then
|
||||||
|
convert it to structurally valid XHTML (or HTML)."
|
||||||
|
|
||||||
|
-- John Gruber, https://daringfireball.net/projects/markdown/
|
||||||
|
|
||||||
|
|
||||||
|
The Proxmox Backup Server (PBS) web-interface has support for using Markdown to
|
||||||
|
render rich text formatting in node and virtual guest notes.
|
||||||
|
|
||||||
|
PBS supports CommonMark with most extensions of GFM (GitHub Flavoured Markdown),
|
||||||
|
like tables or task-lists.
|
||||||
|
|
||||||
|
.. _markdown_basics:
|
||||||
|
|
||||||
|
Markdown Basics
|
||||||
|
---------------
|
||||||
|
|
||||||
|
Note that we only describe the basics here, please search the web for more
|
||||||
|
extensive resources, for example on https://www.markdownguide.org/
|
||||||
|
|
||||||
|
Headings
|
||||||
|
~~~~~~~~
|
||||||
|
|
||||||
|
.. code-block:: md
|
||||||
|
|
||||||
|
# This is a Heading h1
|
||||||
|
## This is a Heading h2
|
||||||
|
##### This is a Heading h5
|
||||||
|
|
||||||
|
|
||||||
|
Emphasis
|
||||||
|
~~~~~~~~
|
||||||
|
|
||||||
|
Use ``*text*`` or ``_text_`` for emphasis.
|
||||||
|
|
||||||
|
Use ``**text**`` or ``__text__`` for bold, heavy-weight text.
|
||||||
|
|
||||||
|
Combinations are also possible, for example:
|
||||||
|
|
||||||
|
.. code-block:: md
|
||||||
|
|
||||||
|
_You **can** combine them_
|
||||||
|
|
||||||
|
|
||||||
|
Links
|
||||||
|
~~~~~
|
||||||
|
|
||||||
|
You can use automatic detection of links, for example,
|
||||||
|
``https://forum.proxmox.com/`` will be transformed into a clickable link.
|
||||||
|
|
||||||
|
You can also control the link text, for example:
|
||||||
|
|
||||||
|
.. code-block:: md
|
||||||
|
|
||||||
|
Now, [the part in brackets will be the link text](https://forum.proxmox.com/).
|
||||||
|
|
||||||
|
Lists
|
||||||
|
~~~~~
|
||||||
|
|
||||||
|
Unordered Lists
|
||||||
|
^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Use ``*`` or ``-`` for unordered lists, for example:
|
||||||
|
|
||||||
|
.. code-block:: md
|
||||||
|
|
||||||
|
* Item 1
|
||||||
|
* Item 2
|
||||||
|
* Item 2a
|
||||||
|
* Item 2b
|
||||||
|
|
||||||
|
|
||||||
|
Adding indentation can be used to create nested lists.
|
||||||
|
|
||||||
|
Ordered Lists
|
||||||
|
^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. code-block:: md
|
||||||
|
|
||||||
|
1. Item 1
|
||||||
|
1. Item 2
|
||||||
|
1. Item 3
|
||||||
|
1. Item 3a
|
||||||
|
1. Item 3b
|
||||||
|
|
||||||
|
NOTE: The integers in ordered lists do not need to be correct; the items will be numbered automatically.
|
||||||
|
|
||||||
|
Task Lists
|
||||||
|
^^^^^^^^^^
|
||||||
|
|
||||||
|
Task lists use an empty box ``[ ]`` for unfinished tasks and a box with an `X` for finished tasks.
|
||||||
|
|
||||||
|
For example:
|
||||||
|
|
||||||
|
|
||||||
|
.. code-block:: md
|
||||||
|
|
||||||
|
- [X] First task already done!
|
||||||
|
- [X] Second one too
|
||||||
|
- [ ] This one is still to-do
|
||||||
|
- [ ] So is this one
|
||||||
|
|
||||||
|
Tables
|
||||||
|
~~~~~~
|
||||||
|
|
||||||
|
Tables use the pipe symbol ``|`` to separate columns, and ``-`` to separate the
|
||||||
|
table header from the table body. In that separation, one can also set the text
|
||||||
|
alignment, making one column left-, center-, or right-aligned.
|
||||||
|
|
||||||
|
|
||||||
|
.. code-block:: md
|
||||||
|
|
||||||
|
| Left columns | Right columns | Some | More | Cols.| Centering Works Too
|
||||||
|
| ------------- |--------------:|--------|------|------|:------------------:|
|
||||||
|
| left foo | right foo | First | Row | Here | >center< |
|
||||||
|
| left bar | right bar | Second | Row | Here | 12345 |
|
||||||
|
| left baz | right baz | Third | Row | Here | Test |
|
||||||
|
| left zab | right zab | Fourth | Row | Here | ☁️☁️☁️ |
|
||||||
|
| left rab | right rab | And | Last | Here | The End |
|
||||||
|
|
||||||
|
Note that you do not need to align the columns nicely with white space, but doing so makes
|
||||||
|
editing tables easier.
|
||||||
|
|
||||||
|
Block Quotes
|
||||||
|
~~~~~~~~~~~~
|
||||||
|
|
||||||
|
You can enter block quotes by prefixing a line with ``>``, similar to plain-text emails.
|
||||||
|
|
||||||
|
.. code-block:: md
|
||||||
|
|
||||||
|
> Markdown is a lightweight markup language with plain-text-formatting syntax,
|
||||||
|
> created in 2004 by John Gruber with Aaron Swartz.
|
||||||
|
>
|
||||||
|
>> Markdown is often used to format readme files, for writing messages in online discussion forums,
|
||||||
|
>> and to create rich text using a plain text editor.
|
||||||
|
|
||||||
|
Code and Snippets
|
||||||
|
~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
You can use backticks to avoid processing for a few words or paragraphs. That is useful for
|
||||||
|
preventing a code or configuration hunk from being mistakenly interpreted as markdown.
|
||||||
|
|
||||||
|
Inline code
|
||||||
|
^^^^^^^^^^^
|
||||||
|
|
||||||
|
Surrounding part of a line with single backticks allows you to write code inline,
|
||||||
|
for example:
|
||||||
|
|
||||||
|
.. code-block:: md
|
||||||
|
|
||||||
|
This host's IP address is `10.0.0.1`.
|
||||||
|
|
||||||
|
Whole blocks of code
|
||||||
|
^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
For code blocks spanning several lines you can use triple-backticks to start
|
||||||
|
and end such a block, for example:
|
||||||
|
|
||||||
|
.. code-block:: md
|
||||||
|
|
||||||
|
```
|
||||||
|
# This is the network config I want to remember here
|
||||||
|
auto vmbr2
|
||||||
|
iface vmbr2 inet static
|
||||||
|
address 10.0.0.1/24
|
||||||
|
bridge-ports ens20
|
||||||
|
bridge-stp off
|
||||||
|
bridge-fd 0
|
||||||
|
bridge-vlan-aware yes
|
||||||
|
bridge-vids 2-4094
|
||||||
|
|
||||||
|
```
|
@ -3,6 +3,10 @@
|
|||||||
Network Management
|
Network Management
|
||||||
==================
|
==================
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-system-config.png
|
||||||
|
:align: right
|
||||||
|
:alt: System and Network Configuration Overview
|
||||||
|
|
||||||
Proxmox Backup Server provides both a web interface and a command line tool for
|
Proxmox Backup Server provides both a web interface and a command line tool for
|
||||||
network configuration. You can find the configuration options in the web
|
network configuration. You can find the configuration options in the web
|
||||||
interface under the **Network Interfaces** section of the **Configuration** menu
|
interface under the **Network Interfaces** section of the **Configuration** menu
|
||||||
@ -31,10 +35,6 @@ To get a list of available interfaces, use the following command:
|
|||||||
│ ens19 │ eth │ 1 │ manual │ │ │ │
|
│ ens19 │ eth │ 1 │ manual │ │ │ │
|
||||||
└───────┴────────┴───────────┴────────┴─────────────┴──────────────┴──────────────┘
|
└───────┴────────┴───────────┴────────┴─────────────┴──────────────┴──────────────┘
|
||||||
|
|
||||||
.. image:: images/screenshots/pbs-gui-network-create-bond.png
|
|
||||||
:align: right
|
|
||||||
:alt: Add a network interface
|
|
||||||
|
|
||||||
To add a new network interface, use the ``create`` subcommand with the relevant
|
To add a new network interface, use the ``create`` subcommand with the relevant
|
||||||
parameters. For example, you may want to set up a bond, for the purpose of
|
parameters. For example, you may want to set up a bond, for the purpose of
|
||||||
network redundancy. The following command shows a template for creating the bond shown
|
network redundancy. The following command shows a template for creating the bond shown
|
||||||
@ -44,6 +44,10 @@ in the list above:
|
|||||||
|
|
||||||
# proxmox-backup-manager network create bond0 --type bond --bond_mode active-backup --slaves ens18,ens19 --autostart true --cidr x.x.x.x/x --gateway x.x.x.x
|
# proxmox-backup-manager network create bond0 --type bond --bond_mode active-backup --slaves ens18,ens19 --autostart true --cidr x.x.x.x/x --gateway x.x.x.x
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-network-create-bond.png
|
||||||
|
:align: right
|
||||||
|
:alt: Add a network interface
|
||||||
|
|
||||||
You can make changes to the configuration of a network interface with the
|
You can make changes to the configuration of a network interface with the
|
||||||
``update`` subcommand:
|
``update`` subcommand:
|
||||||
|
|
||||||
@ -82,9 +86,12 @@ is:
|
|||||||
.. note:: This command and corresponding GUI button rely on the ``ifreload``
|
.. note:: This command and corresponding GUI button rely on the ``ifreload``
|
||||||
command, from the package ``ifupdown2``. This package is included within the
|
command, from the package ``ifupdown2``. This package is included within the
|
||||||
Proxmox Backup Server installation, however, you may have to install it yourself,
|
Proxmox Backup Server installation, however, you may have to install it yourself,
|
||||||
if you have installed Proxmox Backup Server on top of Debian or Proxmox VE.
|
if you have installed Proxmox Backup Server on top of Debian or a Proxmox VE
|
||||||
|
version prior to version 7.
|
||||||
|
|
||||||
You can also configure DNS settings, from the **DNS** section
|
You can also configure DNS settings, from the **DNS** section
|
||||||
of **Configuration** or by using the ``dns`` subcommand of
|
of **Configuration** or by using the ``dns`` subcommand of
|
||||||
``proxmox-backup-manager``.
|
``proxmox-backup-manager``.
|
||||||
|
|
||||||
|
|
||||||
|
.. include:: traffic-control.rst
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
Most commands producing output supports the ``--output-format``
|
Most commands that produce output support the ``--output-format``
|
||||||
parameter. It accepts the following values:
|
parameter. This accepts the following values:
|
||||||
|
|
||||||
:``text``: Text format (default). Structured data is rendered as a table.
|
:``text``: Text format (default). Structured data is rendered as a table.
|
||||||
|
|
||||||
|
@ -27,6 +27,10 @@ update``.
|
|||||||
In addition, you need a package repository from Proxmox to get Proxmox Backup
|
In addition, you need a package repository from Proxmox to get Proxmox Backup
|
||||||
updates.
|
updates.
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-administration-apt-repos.png
|
||||||
|
:align: right
|
||||||
|
:alt: APT Repository Management in the Web Interface
|
||||||
|
|
||||||
.. _package_repos_secure_apt:
|
.. _package_repos_secure_apt:
|
||||||
|
|
||||||
SecureApt
|
SecureApt
|
||||||
|
@ -51,7 +51,7 @@ ENVIRONMENT
|
|||||||
:CHANGER: If set, replaces the `--device` option
|
:CHANGER: If set, replaces the `--device` option
|
||||||
|
|
||||||
:PROXMOX_TAPE_DRIVE: If set, use the Proxmox Backup Server
|
:PROXMOX_TAPE_DRIVE: If set, use the Proxmox Backup Server
|
||||||
configuration to find the associcated changer device.
|
configuration to find the associated changer device.
|
||||||
|
|
||||||
|
|
||||||
.. include:: ../pbs-copyright.rst
|
.. include:: ../pbs-copyright.rst
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
This daemon exposes the whole Proxmox Backup Server API on TCP port
|
This daemon exposes the whole Proxmox Backup Server API on TCP port
|
||||||
8007 using HTTPS. It runs as user ``backup`` and has very limited
|
8007 using HTTPS. It runs as user ``backup`` and has very limited
|
||||||
permissions. Operation requiring more permissions are forwarded to
|
permissions. Operations requiring more permissions are forwarded to
|
||||||
the local ``proxmox-backup`` service.
|
the local ``proxmox-backup`` service.
|
||||||
|
|
||||||
|
@ -3,8 +3,8 @@
|
|||||||
`Proxmox VE`_ Integration
|
`Proxmox VE`_ Integration
|
||||||
-------------------------
|
-------------------------
|
||||||
|
|
||||||
A Proxmox Backup Server can be integrated into a Proxmox VE setup by adding the
|
Proxmox Backup Server can be integrated into a Proxmox VE standalone or cluster
|
||||||
former as a storage in a Proxmox VE standalone or cluster setup.
|
setup, by adding it as a storage in Proxmox VE.
|
||||||
|
|
||||||
See also the `Proxmox VE Storage - Proxmox Backup Server
|
See also the `Proxmox VE Storage - Proxmox Backup Server
|
||||||
<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#storage_pbs>`_ section
|
<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#storage_pbs>`_ section
|
||||||
@ -14,8 +14,8 @@ of the Proxmox VE Administration Guide for Proxmox VE specific documentation.
|
|||||||
Using the Proxmox VE Web-Interface
|
Using the Proxmox VE Web-Interface
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Proxmox VE has native API and web-interface integration of Proxmox Backup
|
Proxmox VE has native API and web interface integration of Proxmox Backup
|
||||||
Server since the `Proxmox VE 6.3 release
|
Server as of `Proxmox VE 6.3
|
||||||
<https://pve.proxmox.com/wiki/Roadmap#Proxmox_VE_6.3>`_.
|
<https://pve.proxmox.com/wiki/Roadmap#Proxmox_VE_6.3>`_.
|
||||||
|
|
||||||
A Proxmox Backup Server can be added under ``Datacenter -> Storage``.
|
A Proxmox Backup Server can be added under ``Datacenter -> Storage``.
|
||||||
@ -24,8 +24,8 @@ Using the Proxmox VE Command-Line
|
|||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
You need to define a new storage with type 'pbs' on your `Proxmox VE`_
|
You need to define a new storage with type 'pbs' on your `Proxmox VE`_
|
||||||
node. The following example uses ``store2`` as storage name, and
|
node. The following example uses ``store2`` as the storage's name, and
|
||||||
assumes the server address is ``localhost``, and you want to connect
|
assumes the server address is ``localhost`` and you want to connect
|
||||||
as ``user1@pbs``.
|
as ``user1@pbs``.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -33,7 +33,7 @@ as ``user1@pbs``.
|
|||||||
# pvesm add pbs store2 --server localhost --datastore store2
|
# pvesm add pbs store2 --server localhost --datastore store2
|
||||||
# pvesm set store2 --username user1@pbs --password <secret>
|
# pvesm set store2 --username user1@pbs --password <secret>
|
||||||
|
|
||||||
.. note:: If you would rather not pass your password as plain text, you can pass
|
.. note:: If you would rather not enter your password as plain text, you can pass
|
||||||
the ``--password`` parameter, without any arguments. This will cause the
|
the ``--password`` parameter, without any arguments. This will cause the
|
||||||
program to prompt you for a password upon entering the command.
|
program to prompt you for a password upon entering the command.
|
||||||
|
|
||||||
@ -53,7 +53,7 @@ relationship:
|
|||||||
|
|
||||||
# pvesm set store2 --fingerprint 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
|
# pvesm set store2 --fingerprint 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
|
||||||
|
|
||||||
After that you should be able to see storage status with:
|
After that, you should be able to view storage status with:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
|
@ -1,12 +1,12 @@
|
|||||||
``pxar`` is a command line utility to create and manipulate archives in the
|
``pxar`` is a command line utility for creating and manipulating archives in the
|
||||||
:ref:`pxar-format`.
|
:ref:`pxar-format`.
|
||||||
It is inspired by `casync file archive format
|
It is inspired by `casync file archive format
|
||||||
<http://0pointer.net/blog/casync-a-tool-for-distributing-file-system-images.html>`_,
|
<http://0pointer.net/blog/casync-a-tool-for-distributing-file-system-images.html>`_,
|
||||||
which caters to a similar use-case.
|
which caters to a similar use-case.
|
||||||
The ``.pxar`` format is adapted to fulfill the specific needs of the Proxmox
|
The ``.pxar`` format is adapted to fulfill the specific needs of the Proxmox
|
||||||
Backup Server, for example, efficient storage of hardlinks.
|
Backup Server, for example, efficient storage of hard links.
|
||||||
The format is designed to reduce storage space needed on the server by achieving
|
The format is designed to reduce the required storage on the server by
|
||||||
a high level of deduplication.
|
achieving a high level of deduplication.
|
||||||
|
|
||||||
Creating an Archive
|
Creating an Archive
|
||||||
^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^
|
||||||
@ -24,10 +24,10 @@ This will create a new archive called ``archive.pxar`` with the contents of the
|
|||||||
the same name is already present in the target folder, the creation will
|
the same name is already present in the target folder, the creation will
|
||||||
fail.
|
fail.
|
||||||
|
|
||||||
By default, ``pxar`` will skip certain mountpoints and will not follow device
|
By default, ``pxar`` will skip certain mount points and will not follow device
|
||||||
boundaries. This design decision is based on the primary use case of creating
|
boundaries. This design decision is based on the primary use case of creating
|
||||||
archives for backups. It makes sense to not back up the contents of certain
|
archives for backups. It makes sense to ignore the contents of certain
|
||||||
temporary or system specific files.
|
temporary or system specific files in a backup.
|
||||||
To alter this behavior and follow device boundaries, use the
|
To alter this behavior and follow device boundaries, use the
|
||||||
``--all-file-systems`` flag.
|
``--all-file-systems`` flag.
|
||||||
|
|
||||||
@ -41,40 +41,38 @@ by running:
|
|||||||
|
|
||||||
# pxar create archive.pxar /path/to/source --exclude '**/*.txt'
|
# pxar create archive.pxar /path/to/source --exclude '**/*.txt'
|
||||||
|
|
||||||
Be aware that the shell itself will try to expand all of the glob patterns before
|
Be aware that the shell itself will try to expand glob patterns before invoking
|
||||||
invoking ``pxar``.
|
``pxar``. In order to avoid this, all globs have to be quoted correctly.
|
||||||
In order to avoid this, all globs have to be quoted correctly.
|
|
||||||
|
|
||||||
It is possible to pass the ``--exclude`` parameter multiple times, in order to
|
It is possible to pass the ``--exclude`` parameter multiple times, in order to
|
||||||
match more than one pattern. This allows you to use more complex
|
match more than one pattern. This allows you to use more complex
|
||||||
file exclusion/inclusion behavior. However, it is recommended to use
|
file inclusion/exclusion behavior. However, it is recommended to use
|
||||||
``.pxarexclude`` files instead for such cases.
|
``.pxarexclude`` files instead for such cases.
|
||||||
|
|
||||||
For example you might want to exclude all ``.txt`` files except for a specific
|
For example you might want to exclude all ``.txt`` files except a specific
|
||||||
one from the archive. This is achieved via the negated match pattern, prefixed
|
one from the archive. This would be achieved via the negated match pattern,
|
||||||
by ``!``.
|
prefixed by ``!``. All the glob patterns are relative to the ``source``
|
||||||
All the glob patterns are relative to the ``source`` directory.
|
directory.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# pxar create archive.pxar /path/to/source --exclude '**/*.txt' --exclude '!/folder/file.txt'
|
# pxar create archive.pxar /path/to/source --exclude '**/*.txt' --exclude '!/folder/file.txt'
|
||||||
|
|
||||||
.. NOTE:: The order of the glob match patterns matters as later ones override
|
.. NOTE:: The order of the glob match patterns matters, as later ones override
|
||||||
previous ones. Permutations of the same patterns lead to different results.
|
earlier ones. Permutations of the same patterns lead to different results.
|
||||||
|
|
||||||
``pxar`` will store the list of glob match patterns passed as parameters via the
|
``pxar`` will store the list of glob match patterns passed as parameters via the
|
||||||
command line, in a file called ``.pxarexclude-cli`` at the root of
|
command line, in a file called ``.pxarexclude-cli``, at the root of the archive.
|
||||||
the archive.
|
|
||||||
If a file with this name is already present in the source folder during archive
|
If a file with this name is already present in the source folder during archive
|
||||||
creation, this file is not included in the archive and the file containing the
|
creation, this file is not included in the archive, and the file containing the
|
||||||
new patterns is added to the archive instead, the original file is not altered.
|
new patterns is added to the archive instead. The original file is not altered.
|
||||||
|
|
||||||
A more convenient and persistent way to exclude files from the archive is by
|
A more convenient and persistent way to exclude files from the archive is by
|
||||||
placing the glob match patterns in ``.pxarexclude`` files.
|
placing the glob match patterns in ``.pxarexclude`` files.
|
||||||
It is possible to create and place these files in any directory of the filesystem
|
It is possible to create and place these files in any directory of the filesystem
|
||||||
tree.
|
tree.
|
||||||
These files must contain one pattern per line, again later patterns win over
|
These files must contain one pattern per line, and later patterns override
|
||||||
previous ones.
|
earlier ones.
|
||||||
The patterns control file exclusions of files present within the given directory
|
The patterns control file exclusions of files present within the given directory
|
||||||
or further below it in the tree.
|
or further below it in the tree.
|
||||||
The behavior is the same as described in :ref:`client_creating_backups`.
|
The behavior is the same as described in :ref:`client_creating_backups`.
|
||||||
@ -89,7 +87,7 @@ with the following command:
|
|||||||
|
|
||||||
# pxar extract archive.pxar /path/to/target
|
# pxar extract archive.pxar /path/to/target
|
||||||
|
|
||||||
If no target is provided, the content of the archive is extracted to the current
|
If no target is provided, the contents of the archive are extracted to the current
|
||||||
working directory.
|
working directory.
|
||||||
|
|
||||||
In order to restore only parts of an archive, single files, and/or folders,
|
In order to restore only parts of an archive, single files, and/or folders,
|
||||||
@ -116,13 +114,13 @@ run the following command:
|
|||||||
# pxar list archive.pxar
|
# pxar list archive.pxar
|
||||||
|
|
||||||
This displays the full path of each file or directory with respect to the
|
This displays the full path of each file or directory with respect to the
|
||||||
archives root.
|
archive's root.
|
||||||
|
|
||||||
Mounting an Archive
|
Mounting an Archive
|
||||||
^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
``pxar`` allows you to mount and inspect the contents of an archive via _`FUSE`.
|
``pxar`` allows you to mount and inspect the contents of an archive via _`FUSE`.
|
||||||
In order to mount an archive named ``archive.pxar`` to the mountpoint ``/mnt``,
|
In order to mount an archive named ``archive.pxar`` to the mount point ``/mnt``,
|
||||||
run the command:
|
run the command:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -130,7 +128,7 @@ run the command:
|
|||||||
# pxar mount archive.pxar /mnt
|
# pxar mount archive.pxar /mnt
|
||||||
|
|
||||||
Once the archive is mounted, you can access its content under the given
|
Once the archive is mounted, you can access its content under the given
|
||||||
mountpoint.
|
mount point.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
|
100
docs/storage.rst
@ -11,11 +11,16 @@ Disk Management
|
|||||||
:alt: List of disks
|
:alt: List of disks
|
||||||
|
|
||||||
Proxmox Backup Server comes with a set of disk utilities, which are
|
Proxmox Backup Server comes with a set of disk utilities, which are
|
||||||
accessed using the ``disk`` subcommand. This subcommand allows you to initialize
|
accessed using the ``disk`` subcommand or the web interface. This subcommand
|
||||||
disks, create various filesystems, and get information about the disks.
|
allows you to initialize disks, create various filesystems, and get information
|
||||||
|
about the disks.
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-disks.png
|
||||||
|
:align: right
|
||||||
|
:alt: Web Interface Administration: Disks
|
||||||
|
|
||||||
To view the disks connected to the system, navigate to **Administration ->
|
To view the disks connected to the system, navigate to **Administration ->
|
||||||
Disks** in the web interface or use the ``list`` subcommand of
|
Storage/Disks** in the web interface or use the ``list`` subcommand of
|
||||||
``disk``:
|
``disk``:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -42,9 +47,9 @@ To initialize a disk with a new GPT, use the ``initialize`` subcommand:
|
|||||||
:alt: Create a directory
|
:alt: Create a directory
|
||||||
|
|
||||||
You can create an ``ext4`` or ``xfs`` filesystem on a disk using ``fs
|
You can create an ``ext4`` or ``xfs`` filesystem on a disk using ``fs
|
||||||
create``, or by navigating to **Administration -> Disks -> Directory** in the
|
create``, or by navigating to **Administration -> Storage/Disks -> Directory**
|
||||||
web interface and creating one from there. The following command creates an
|
in the web interface and creating one from there. The following command creates
|
||||||
``ext4`` filesystem and passes the ``--add-datastore`` parameter, in order to
|
an ``ext4`` filesystem and passes the ``--add-datastore`` parameter, in order to
|
||||||
automatically create a datastore on the disk (in this case ``sdd``). This will
|
automatically create a datastore on the disk (in this case ``sdd``). This will
|
||||||
create a datastore at the location ``/mnt/datastore/store1``:
|
create a datastore at the location ``/mnt/datastore/store1``:
|
||||||
|
|
||||||
@ -57,7 +62,7 @@ create a datastore at the location ``/mnt/datastore/store1``:
|
|||||||
:alt: Create ZFS
|
:alt: Create ZFS
|
||||||
|
|
||||||
You can also create a ``zpool`` with various raid levels from **Administration
|
You can also create a ``zpool`` with various raid levels from **Administration
|
||||||
-> Disks -> Zpool** in the web interface, or by using ``zpool create``. The command
|
-> Storage/Disks -> ZFS** in the web interface, or by using ``zpool create``. The command
|
||||||
below creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and
|
below creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and
|
||||||
mounts it under ``/mnt/datastore/zpool1``:
|
mounts it under ``/mnt/datastore/zpool1``:
|
||||||
|
|
||||||
@ -90,6 +95,10 @@ display S.M.A.R.T. attributes from the web interface or by using the command:
|
|||||||
:term:`Datastore`
|
:term:`Datastore`
|
||||||
-----------------
|
-----------------
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-datastore-summary.png
|
||||||
|
:align: right
|
||||||
|
:alt: Datastore Usage Overview
|
||||||
|
|
||||||
A datastore refers to a location at which backups are stored. The current
|
A datastore refers to a location at which backups are stored. The current
|
||||||
implementation uses a directory inside a standard Unix file system (``ext4``,
|
implementation uses a directory inside a standard Unix file system (``ext4``,
|
||||||
``xfs`` or ``zfs``) to store the backup data.
|
``xfs`` or ``zfs``) to store the backup data.
|
||||||
@ -102,7 +111,7 @@ is stored in the file ``/etc/proxmox-backup/datastore.cfg``.
|
|||||||
subdirectories per directory. That number comes from the 2\ :sup:`16`
|
subdirectories per directory. That number comes from the 2\ :sup:`16`
|
||||||
pre-created chunk namespace directories, and the ``.`` and ``..`` default
|
pre-created chunk namespace directories, and the ``.`` and ``..`` default
|
||||||
directory entries. This requirement excludes certain filesystems and
|
directory entries. This requirement excludes certain filesystems and
|
||||||
filesystem configuration from being supported for a datastore. For example,
|
filesystem configurations from being supported for a datastore. For example,
|
||||||
``ext3`` as a whole or ``ext4`` with the ``dir_nlink`` feature manually disabled.
|
``ext3`` as a whole or ``ext4`` with the ``dir_nlink`` feature manually disabled.
|
||||||
|
|
||||||
|
|
||||||
@ -111,23 +120,24 @@ Datastore Configuration
|
|||||||
|
|
||||||
.. image:: images/screenshots/pbs-gui-datastore-content.png
|
.. image:: images/screenshots/pbs-gui-datastore-content.png
|
||||||
:align: right
|
:align: right
|
||||||
:alt: Datastore Overview
|
:alt: Datastore Content Overview
|
||||||
|
|
||||||
You can configure multiple datastores. Minimum one datastore needs to be
|
You can configure multiple datastores. A minimum of one datastore needs to be
|
||||||
configured. The datastore is identified by a simple *name* and points to a
|
configured. The datastore is identified by a simple *name* and points to a
|
||||||
directory on the filesystem. Each datastore also has associated retention
|
directory on the filesystem. Each datastore also has associated retention
|
||||||
settings of how many backup snapshots for each interval of ``hourly``,
|
settings of how many backup snapshots for each interval of ``hourly``,
|
||||||
``daily``, ``weekly``, ``monthly``, ``yearly`` as well as a time-independent
|
``daily``, ``weekly``, ``monthly``, ``yearly`` as well as a time-independent
|
||||||
number of backups to keep in that store. :ref:`backup-pruning` and
|
number of backups to keep in that store. :ref:`backup-pruning` and
|
||||||
:ref:`garbage collection <client_garbage-collection>` can also be configured to run
|
:ref:`garbage collection <client_garbage-collection>` can also be configured to
|
||||||
periodically based on a configured schedule (see :ref:`calendar-event-scheduling`) per datastore.
|
run periodically, based on a configured schedule (see
|
||||||
|
:ref:`calendar-event-scheduling`) per datastore.
|
||||||
|
|
||||||
|
|
||||||
.. _storage_datastore_create:
|
.. _storage_datastore_create:
|
||||||
|
|
||||||
Creating a Datastore
|
Creating a Datastore
|
||||||
^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^
|
||||||
.. image:: images/screenshots/pbs-gui-datastore-create-general.png
|
.. image:: images/screenshots/pbs-gui-datastore-create.png
|
||||||
:align: right
|
:align: right
|
||||||
:alt: Create a datastore
|
:alt: Create a datastore
|
||||||
|
|
||||||
@ -146,7 +156,8 @@ window:
|
|||||||
* *Comment* can be used to add some contextual information to the datastore.
|
* *Comment* can be used to add some contextual information to the datastore.
|
||||||
|
|
||||||
Alternatively you can create a new datastore from the command line. The
|
Alternatively you can create a new datastore from the command line. The
|
||||||
following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
|
following command creates a new datastore called ``store1`` on
|
||||||
|
:file:`/backup/disk1/store1`
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -156,7 +167,7 @@ following command creates a new datastore called ``store1`` on :file:`/backup/di
|
|||||||
Managing Datastores
|
Managing Datastores
|
||||||
^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
To list existing datastores from the command line run:
|
To list existing datastores from the command line, run:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -216,8 +227,9 @@ After creating a datastore, the following default layout will appear:
|
|||||||
|
|
||||||
`.lock` is an empty file used for process locking.
|
`.lock` is an empty file used for process locking.
|
||||||
|
|
||||||
The `.chunks` directory contains folders, starting from `0000` and taking hexadecimal values until `ffff`. These
|
The `.chunks` directory contains folders, starting from `0000` and increasing in
|
||||||
directories will store the chunked data after a backup operation has been executed.
|
hexadecimal values until `ffff`. These directories will store the chunked data,
|
||||||
|
categorized by checksum, after a backup operation has been executed.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -249,3 +261,57 @@ directories will store the chunked data after a backup operation has been execut
|
|||||||
276490 drwxr-x--- 1 backup backup 1.1M Jul 8 12:35 .
|
276490 drwxr-x--- 1 backup backup 1.1M Jul 8 12:35 .
|
||||||
|
|
||||||
|
|
||||||
|
Once you have uploaded some backups or created namespaces, you may see the Backup
|
||||||
|
Type (`ct`, `vm`, `host`) and the start of the namespace hierarchy (`ns`).
|
||||||
|
|
||||||
|
.. _storage_namespaces:
|
||||||
|
|
||||||
|
Backup Namespaces
|
||||||
|
~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
A datastore can host many backups as long as the underlying storage is big
|
||||||
|
enough and provides the performance required for one's use case.
|
||||||
|
But, without any hierarchy or separation, it's easy to run into naming conflicts,
|
||||||
|
especially when using the same datastore for multiple Proxmox VE instances or
|
||||||
|
multiple users.
|
||||||
|
|
||||||
|
The backup namespace hierarchy allows you to clearly separate different users
|
||||||
|
or backup sources in general, avoiding naming conflicts and providing
|
||||||
|
a well-organized backup content view.
|
||||||
|
|
||||||
|
Each namespace level can host any backup type, CT, VM or Host but also other
|
||||||
|
namespaces, up to a depth of 8 levels, where the root namespace is the first
|
||||||
|
level.
|
||||||
|
|
||||||
|
|
||||||
|
Namespace Permissions
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
You can make the permission configuration of a datastore more fine-grained by
|
||||||
|
setting permissions only on a specific namespace.
|
||||||
|
|
||||||
|
To see a datastore, you need a permission with at least one of the `AUDIT`,
|
||||||
|
`MODIFY`, `READ` or `BACKUP` privileges on any namespace it contains.
|
||||||
|
|
||||||
|
To create or delete a namespace, you need the `MODIFY` privilege on the parent
|
||||||
|
namespace. So, to initially create namespaces you need to have a permission
|
||||||
|
with an access role that includes the `MODIFY` privilege on the datastore itself.
|
||||||
|
|
||||||
|
For backup groups, the existing privilege rules still apply: you either need a
|
||||||
|
powerful permission or be the owner of the backup group; nothing has changed here.
|
||||||
|
|
||||||
|
.. todo:: continue
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
~~~~~~~
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-datastore-options.png
|
||||||
|
:align: right
|
||||||
|
:alt: Datastore Options
|
||||||
|
|
||||||
|
There are a few per-datastore options:
|
||||||
|
|
||||||
|
* :ref:`Notifications <maintenance_notification>`
|
||||||
|
* :ref:`Maintenance Mode <maintenance_mode>`
|
||||||
|
* Verification of incoming backups
|
||||||
|
@ -4,8 +4,8 @@ Host System Administration
|
|||||||
==========================
|
==========================
|
||||||
|
|
||||||
`Proxmox Backup`_ is based on the famous Debian_ Linux
|
`Proxmox Backup`_ is based on the famous Debian_ Linux
|
||||||
distribution. That means that you have access to the whole world of
|
distribution. This means that you have access to the entire range of
|
||||||
Debian packages, and the base system is well documented. The `Debian
|
Debian packages, and that the base system is well documented. The `Debian
|
||||||
Administrator's Handbook`_ is available online, and provides a
|
Administrator's Handbook`_ is available online, and provides a
|
||||||
comprehensive introduction to the Debian operating system.
|
comprehensive introduction to the Debian operating system.
|
||||||
|
|
||||||
@ -15,17 +15,21 @@ through that channel. In addition, we provide our own package
|
|||||||
repository to roll out all Proxmox related packages. This includes
|
repository to roll out all Proxmox related packages. This includes
|
||||||
updates to some Debian packages when necessary.
|
updates to some Debian packages when necessary.
|
||||||
|
|
||||||
We also deliver a specially optimized Linux kernel, where we enable
|
We also deliver a specially optimized Linux kernel, based on the Ubuntu
|
||||||
all required virtualization and container features. That kernel
|
kernel. That kernel includes drivers for ZFS_.
|
||||||
includes drivers for ZFS_, and several hardware drivers. For example,
|
|
||||||
we ship Intel network card drivers to support their newest hardware.
|
|
||||||
|
|
||||||
The following sections will concentrate on backup related topics. They
|
The following sections will concentrate on backup related topics. They
|
||||||
either explain things which are different on `Proxmox Backup`_, or
|
will explain things which are different on `Proxmox Backup`_, or
|
||||||
tasks which are commonly used on `Proxmox Backup`_. For other topics,
|
tasks which are commonly used on `Proxmox Backup`_. For other topics,
|
||||||
please refer to the standard Debian documentation.
|
please refer to the standard Debian documentation.
|
||||||
|
|
||||||
|
|
||||||
.. include:: local-zfs.rst
|
.. include:: local-zfs.rst
|
||||||
|
|
||||||
|
.. include:: system-booting.rst
|
||||||
|
|
||||||
|
.. include:: certificate-management.rst
|
||||||
|
|
||||||
.. include:: services.rst
|
.. include:: services.rst
|
||||||
|
|
||||||
|
.. include:: command-line-tools.rst
|
||||||
|
379
docs/system-booting.rst
Normal file
@ -0,0 +1,379 @@
|
|||||||
|
|
||||||
|
.. _chapter-systembooting:
|
||||||
|
|
||||||
|
Host Bootloader
|
||||||
|
---------------
|
||||||
|
|
||||||
|
`Proxmox Backup`_ currently uses one of two bootloaders depending on the disk setup
|
||||||
|
selected in the installer.
|
||||||
|
|
||||||
|
For EFI systems installed with ZFS as the root filesystem, ``systemd-boot`` is
|
||||||
|
used. All other deployments use the standard ``grub`` bootloader (this usually
|
||||||
|
also applies to systems which are installed on top of Debian).
|
||||||
|
|
||||||
|
|
||||||
|
.. _systembooting-installer-part-scheme:
|
||||||
|
|
||||||
|
Partitioning Scheme Used by the Installer
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
The `Proxmox Backup`_ installer creates 3 partitions on all disks selected for
|
||||||
|
installation.
|
||||||
|
|
||||||
|
The created partitions are:
|
||||||
|
|
||||||
|
* a 1 MB BIOS Boot Partition (gdisk type EF02)
|
||||||
|
|
||||||
|
* a 512 MB EFI System Partition (ESP, gdisk type EF00)
|
||||||
|
|
||||||
|
* a third partition spanning the set ``hdsize`` parameter or the remaining space
|
||||||
|
used for the chosen storage type
|
||||||
|
|
||||||
|
Systems using ZFS as root filesystem are booted with a kernel and initrd image
|
||||||
|
stored on the 512 MB EFI System Partition. For legacy BIOS systems, ``grub`` is
|
||||||
|
used, for EFI systems ``systemd-boot`` is used. Both are installed and configured
|
||||||
|
to point to the ESPs.
|
||||||
|
|
||||||
|
``grub`` in BIOS mode (``--target i386-pc``) is installed onto the BIOS Boot
|
||||||
|
Partition of all selected disks on all systems booted with ``grub`` (These are
|
||||||
|
all installs with root on ``ext4`` or ``xfs`` and installs with root on ZFS on
|
||||||
|
non-EFI systems).
|
||||||
|
|
||||||
|
|
||||||
|
.. _systembooting-proxmox-boot-tool:
|
||||||
|
|
||||||
|
Synchronizing the content of the ESP with ``proxmox-boot-tool``
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
``proxmox-boot-tool`` is a utility used to keep the contents of the EFI System
|
||||||
|
Partitions properly configured and synchronized. It copies certain kernel
|
||||||
|
versions to all ESPs and configures the respective bootloader to boot from
|
||||||
|
the ``vfat`` formatted ESPs. In the context of ZFS as root filesystem this means
|
||||||
|
that you can use all optional features on your root pool instead of the subset
|
||||||
|
which is also present in the ZFS implementation in ``grub`` or having to create a
|
||||||
|
separate small boot-pool (see: `Booting ZFS on root with grub
|
||||||
|
<https://github.com/zfsonlinux/zfs/wiki/Debian-Stretch-Root-on-ZFS>`_).
|
||||||
|
|
||||||
|
In setups with redundancy, all disks are partitioned with an ESP by the
|
||||||
|
installer. This ensures the system boots even if the first boot device fails
|
||||||
|
or if the BIOS can only boot from a particular disk.
|
||||||
|
|
||||||
|
The ESPs are not kept mounted during regular operation. This helps to prevent
|
||||||
|
filesystem corruption to the ``vfat`` formatted ESPs in case of a system crash,
|
||||||
|
and removes the need to manually adapt ``/etc/fstab`` in case the primary boot
|
||||||
|
device fails.
|
||||||
|
|
||||||
|
``proxmox-boot-tool`` handles the following tasks:
|
||||||
|
|
||||||
|
* formatting and setting up a new partition
|
||||||
|
* copying and configuring new kernel images and initrd images to all listed ESPs
|
||||||
|
* synchronizing the configuration on kernel upgrades and other maintenance tasks
|
||||||
|
* managing the list of kernel versions which are synchronized
|
||||||
|
* configuring the boot-loader to boot a particular kernel version (pinning)
|
||||||
|
|
||||||
|
|
||||||
|
You can view the currently configured ESPs and their state by running:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-boot-tool status
|
||||||
|
|
||||||
|
.. _systembooting-proxmox-boot-setup:
|
||||||
|
|
||||||
|
Setting up a new partition for use as synced ESP
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
To format and initialize a partition as synced ESP, e.g., after replacing a
|
||||||
|
failed vdev in an rpool, ``proxmox-boot-tool`` from ``pve-kernel-helper`` can be used.
|
||||||
|
|
||||||
|
WARNING: the ``format`` command will format the ``<partition>``; make sure to pass
|
||||||
|
in the right device/partition!
|
||||||
|
|
||||||
|
For example, to format an empty partition ``/dev/sda2`` as ESP, run the following:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-boot-tool format /dev/sda2
|
||||||
|
|
||||||
|
To set up an existing, unmounted ESP located on ``/dev/sda2`` for inclusion in
|
||||||
|
`Proxmox Backup`_'s kernel update synchronization mechanism, use the following:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-boot-tool init /dev/sda2
|
||||||
|
|
||||||
|
Afterwards, ``/etc/kernel/proxmox-boot-uuids`` should contain a new line with the
|
||||||
|
UUID of the newly added partition. The ``init`` command will also automatically
|
||||||
|
trigger a refresh of all configured ESPs.
|
||||||
|
|
||||||
|
.. _systembooting-proxmox-boot-refresh:
|
||||||
|
|
||||||
|
Updating the configuration on all ESPs
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
To copy and configure all bootable kernels and keep all ESPs listed in
|
||||||
|
``/etc/kernel/proxmox-boot-uuids`` in sync you just need to run:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-boot-tool refresh
|
||||||
|
|
||||||
|
(The equivalent of running ``update-grub`` on systems with ``ext4`` or ``xfs`` on root).
|
||||||
|
|
||||||
|
This is necessary should you make changes to the kernel commandline, or want to
|
||||||
|
sync all kernels and initrds.
|
||||||
|
|
||||||
|
.. NOTE:: Both ``update-initramfs`` and ``apt`` (when necessary) will automatically
|
||||||
|
trigger a refresh.
|
||||||
|
|
||||||
|
Kernel Versions considered by ``proxmox-boot-tool``
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
The following kernel versions are configured by default:
|
||||||
|
|
||||||
|
* the currently running kernel
|
||||||
|
* the version being newly installed on package updates
|
||||||
|
* the two latest already installed kernels
|
||||||
|
* the latest version of the second-to-last kernel series (e.g. 5.0, 5.3), if applicable
|
||||||
|
* any manually selected kernels
|
||||||
|
|
||||||
|
Manually keeping a kernel bootable
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Should you wish to add a certain kernel and initrd image to the list of
|
||||||
|
bootable kernels use ``proxmox-boot-tool kernel add``.
|
||||||
|
|
||||||
|
For example run the following to add the kernel with ABI version ``5.0.15-1-pve``
|
||||||
|
to the list of kernels to keep installed and synced to all ESPs:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-boot-tool kernel add 5.0.15-1-pve
|
||||||
|
|
||||||
|
|
||||||
|
``proxmox-boot-tool kernel list`` will list all kernel versions currently selected
|
||||||
|
for booting:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-boot-tool kernel list
|
||||||
|
Manually selected kernels:
|
||||||
|
5.0.15-1-pve
|
||||||
|
|
||||||
|
Automatically selected kernels:
|
||||||
|
5.0.12-1-pve
|
||||||
|
4.15.18-18-pve
|
||||||
|
|
||||||
|
Run ``proxmox-boot-tool kernel remove`` to remove a kernel from the list of
|
||||||
|
manually selected kernels, for example:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-boot-tool kernel remove 5.0.15-1-pve
|
||||||
|
|
||||||
|
|
||||||
|
.. NOTE:: It's required to run ``proxmox-boot-tool refresh`` to update all EFI System
|
||||||
|
Partitions (ESPs) after a manual kernel addition or removal from above.
|
||||||
|
|
||||||
|
|
||||||
|
.. _systembooting-determine-bootloader:
|
||||||
|
|
||||||
|
Determine which Bootloader is Used
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
.. image:: images/screenshots/boot-grub.png
|
||||||
|
:target: _images/boot-grub.png
|
||||||
|
:align: left
|
||||||
|
:alt: Grub boot screen
|
||||||
|
|
||||||
|
The simplest and most reliable way to determine which bootloader is used is to
|
||||||
|
watch the boot process of the `Proxmox Backup`_ node.
|
||||||
|
|
||||||
|
|
||||||
|
You will either see the blue box of ``grub`` or the simple black on white
|
||||||
|
``systemd-boot``.
|
||||||
|
|
||||||
|
.. image:: images/screenshots/boot-systemdboot.png
|
||||||
|
:target: _images/boot-systemdboot.png
|
||||||
|
:align: right
|
||||||
|
:alt: systemd-boot screen
|
||||||
|
|
||||||
|
Determining the bootloader from a running system might not be 100% accurate. The
|
||||||
|
safest way is to run the following command:
|
||||||
|
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# efibootmgr -v
|
||||||
|
|
||||||
|
|
||||||
|
If it returns a message that EFI variables are not supported, ``grub`` is used in
|
||||||
|
BIOS/Legacy mode.
|
||||||
|
|
||||||
|
If the output contains a line that looks similar to the following, ``grub`` is
|
||||||
|
used in UEFI mode.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
Boot0005* proxmox [...] File(\EFI\proxmox\grubx64.efi)
|
||||||
|
|
||||||
|
|
||||||
|
If the output contains a line similar to the following, ``systemd-boot`` is used.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
Boot0006* Linux Boot Manager [...] File(\EFI\systemd\systemd-bootx64.efi)
|
||||||
|
|
||||||
|
|
||||||
|
By running:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-boot-tool status
|
||||||
|
|
||||||
|
|
||||||
|
you can find out if ``proxmox-boot-tool`` is configured, which is a good
|
||||||
|
indication of how the system is booted.
|
||||||
|
|
||||||
|
|
||||||
|
.. _systembooting-grub:
|
||||||
|
|
||||||
|
Grub
|
||||||
|
~~~~
|
||||||
|
|
||||||
|
``grub`` has been the de-facto standard for booting Linux systems for many years
|
||||||
|
and is quite well documented
|
||||||
|
(see the `Grub Manual
|
||||||
|
<https://www.gnu.org/software/grub/manual/grub/grub.html>`_).
|
||||||
|
|
||||||
|
Configuration
|
||||||
|
^^^^^^^^^^^^^
|
||||||
|
Changes to the ``grub`` configuration are done via the defaults file
|
||||||
|
``/etc/default/grub`` or config snippets in ``/etc/default/grub.d``. To regenerate
|
||||||
|
the configuration file after a change to the configuration run:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# update-grub
|
||||||
|
|
||||||
|
.. NOTE:: Systems using ``proxmox-boot-tool`` will call
|
||||||
|
``proxmox-boot-tool refresh`` upon ``update-grub``
|
||||||
|
|
||||||
|
.. _systembooting-systemdboot:
|
||||||
|
|
||||||
|
Systemd-boot
|
||||||
|
~~~~~~~~~~~~
|
||||||
|
|
||||||
|
``systemd-boot`` is a lightweight EFI bootloader. It reads the kernel and initrd
|
||||||
|
images directly from the EFI Service Partition (ESP) where it is installed.
|
||||||
|
The main advantage of directly loading the kernel from the ESP is that it does
|
||||||
|
not need to reimplement the drivers for accessing the storage. In `Proxmox
|
||||||
|
Backup`_ :ref:`proxmox-boot-tool <systembooting-proxmox-boot-tool>` is used to
|
||||||
|
keep the configuration on the ESPs synchronized.
|
||||||
|
|
||||||
|
.. _systembooting-systemd-boot-config:
|
||||||
|
|
||||||
|
Configuration
|
||||||
|
^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
``systemd-boot`` is configured via the file ``loader/loader.conf`` in the root
|
||||||
|
directory of an EFI System Partition (ESP). See the ``loader.conf(5)`` manpage
|
||||||
|
for details.
|
||||||
|
|
||||||
|
Each bootloader entry is placed in a file of its own in the directory
|
||||||
|
``loader/entries/``
|
||||||
|
|
||||||
|
An example entry.conf looks like this (``/`` refers to the root of the ESP):
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
title Proxmox
|
||||||
|
version 5.0.15-1-pve
|
||||||
|
options root=ZFS=rpool/ROOT/pve-1 boot=zfs
|
||||||
|
linux /EFI/proxmox/5.0.15-1-pve/vmlinuz-5.0.15-1-pve
|
||||||
|
initrd /EFI/proxmox/5.0.15-1-pve/initrd.img-5.0.15-1-pve
|
||||||
|
|
||||||
|
|
||||||
|
.. _systembooting-edit-kernel-cmdline:
|
||||||
|
|
||||||
|
Editing the Kernel Commandline
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
You can modify the kernel commandline in the following places, depending on the
|
||||||
|
bootloader used:
|
||||||
|
|
||||||
|
Grub
|
||||||
|
^^^^
|
||||||
|
|
||||||
|
The kernel commandline needs to be placed in the variable
|
||||||
|
``GRUB_CMDLINE_LINUX_DEFAULT`` in the file ``/etc/default/grub``. Running
|
||||||
|
``update-grub`` appends its content to all ``linux`` entries in
|
||||||
|
``/boot/grub/grub.cfg``.
|
||||||
|
|
||||||
|
Systemd-boot
|
||||||
|
^^^^^^^^^^^^
|
||||||
|
|
||||||
|
The kernel commandline needs to be placed as one line in ``/etc/kernel/cmdline``.
|
||||||
|
To apply your changes, run ``proxmox-boot-tool refresh``, which sets it as the
|
||||||
|
``option`` line for all config files in ``loader/entries/proxmox-*.conf``.
|
||||||
|
|
||||||
|
|
||||||
|
.. _systembooting-kernel-pin:
|
||||||
|
|
||||||
|
Override the Kernel-Version for next Boot
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
To select a kernel that is not currently the default kernel, you can either:
|
||||||
|
|
||||||
|
* use the boot loader menu that is displayed at the beginning of the boot
|
||||||
|
process
|
||||||
|
* use the ``proxmox-boot-tool`` to ``pin`` the system to a kernel version either
|
||||||
|
once or permanently (until pin is reset).
|
||||||
|
|
||||||
|
This should help you work around incompatibilities between a newer kernel
|
||||||
|
version and the hardware.
|
||||||
|
|
||||||
|
.. NOTE:: Such a pin should be removed as soon as possible so that all current
|
||||||
|
security patches of the latest kernel are also applied to the system.
|
||||||
|
|
||||||
|
For example: To permanently select the version ``5.15.30-1-pve`` for booting you
|
||||||
|
would run:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-boot-tool kernel pin 5.15.30-1-pve
|
||||||
|
|
||||||
|
|
||||||
|
.. TIP:: The pinning functionality works for all `Proxmox Backup`_ systems, not only those using
|
||||||
|
``proxmox-boot-tool`` to synchronize the contents of the ESPs. If your system
|
||||||
|
does not use ``proxmox-boot-tool`` for synchronizing you can also skip the
|
||||||
|
``proxmox-boot-tool refresh`` call at the end.
|
||||||
|
|
||||||
|
You can also set a kernel version to be booted on the next system boot only.
|
||||||
|
This is for example useful to test if an updated kernel has resolved an issue,
|
||||||
|
which caused you to ``pin`` a version in the first place:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-boot-tool kernel pin 5.15.30-1-pve --next-boot
|
||||||
|
|
||||||
|
|
||||||
|
To remove any pinned version configuration use the ``unpin`` subcommand:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-boot-tool kernel unpin
|
||||||
|
|
||||||
|
While ``unpin`` has a ``--next-boot`` option as well, it is used to clear a pinned
|
||||||
|
version set with ``--next-boot``. As that happens already automatically on boot,
|
||||||
|
invoking it manually is of little use.
|
||||||
|
|
||||||
|
After setting, or clearing pinned versions you also need to synchronize the
|
||||||
|
content and configuration on the ESPs by running the ``refresh`` subcommand.
|
||||||
|
|
||||||
|
.. TIP:: You will be prompted to automatically do so for ``proxmox-boot-tool`` managed
|
||||||
|
systems if you call the tool interactively.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-boot-tool refresh
|
@ -500,7 +500,7 @@ a single media pool, so a job only uses tapes from that pool.
|
|||||||
is less space efficient, because the media from the last set
|
is less space efficient, because the media from the last set
|
||||||
may not be fully written, leaving the remaining space unused.
|
may not be fully written, leaving the remaining space unused.
|
||||||
|
|
||||||
The advantage is that this procudes media sets of minimal
|
The advantage is that this produces media sets of minimal
|
||||||
size. Small sets are easier to handle, can be moved more conveniently
|
size. Small sets are easier to handle, can be moved more conveniently
|
||||||
to an off-site vault, and can be restored much faster.
|
to an off-site vault, and can be restored much faster.
|
||||||
|
|
||||||
@ -519,8 +519,9 @@ a single media pool, so a job only uses tapes from that pool.
|
|||||||
|
|
||||||
This balances between space efficiency and media count.
|
This balances between space efficiency and media count.
|
||||||
|
|
||||||
.. NOTE:: Retention period starts when the calendar event
|
.. NOTE:: Retention period starts on the creation time of the next
|
||||||
triggers.
|
media-set or, if that does not exist, when the calendar event
|
||||||
|
triggers the next time after the current media-set start time.
|
||||||
|
|
||||||
Additionally, the following events may allocate a new media set:
|
Additionally, the following events may allocate a new media set:
|
||||||
|
|
||||||
@ -564,13 +565,6 @@ a single media pool, so a job only uses tapes from that pool.
|
|||||||
the password. Please make sure to remember the password, in case
|
the password. Please make sure to remember the password, in case
|
||||||
you need to restore the key.
|
you need to restore the key.
|
||||||
|
|
||||||
|
|
||||||
.. NOTE:: We use global content namespace, meaning we do not store the
|
|
||||||
source datastore name. Because of this, it is impossible to distinguish
|
|
||||||
store1:/vm/100 from store2:/vm/100. Please use different media pools
|
|
||||||
if the sources are from different namespaces with conflicting names
|
|
||||||
(for example, if the sources are from different Proxmox VE clusters).
|
|
||||||
|
|
||||||
.. image:: images/screenshots/pbs-gui-tape-pools-add.png
|
.. image:: images/screenshots/pbs-gui-tape-pools-add.png
|
||||||
:align: right
|
:align: right
|
||||||
:alt: Tape Backup: Add a media pool
|
:alt: Tape Backup: Add a media pool
|
||||||
@ -687,6 +681,16 @@ To remove a job, please use:
|
|||||||
|
|
||||||
# proxmox-tape backup-job remove job2
|
# proxmox-tape backup-job remove job2
|
||||||
|
|
||||||
|
By default, all (recursive) namespaces of the datastore are included in a tape
|
||||||
|
backup. You can specify a single namespace with ``ns`` and a depth with
|
||||||
|
``max-depth``. For example:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-tape backup-job update job2 --ns mynamespace --max-depth 3
|
||||||
|
|
||||||
|
If no `max-depth` is given, it will include all recursive namespaces.
|
||||||
|
|
||||||
.. image:: images/screenshots/pbs-gui-tape-backup-jobs-add.png
|
.. image:: images/screenshots/pbs-gui-tape-backup-jobs-add.png
|
||||||
:align: right
|
:align: right
|
||||||
:alt: Tape Backup: Add a backup job
|
:alt: Tape Backup: Add a backup job
|
||||||
@ -803,6 +807,16 @@ The following options are available:
|
|||||||
media set into import-export slots. The operator can then pick up
|
media set into import-export slots. The operator can then pick up
|
||||||
those tapes and move them to a media vault.
|
those tapes and move them to a media vault.
|
||||||
|
|
||||||
|
--ns The namespace to backup.
|
||||||
|
|
||||||
|
Use this if you only want to back up a specific namespace. If omitted, the root
|
||||||
|
namespace is assumed.
|
||||||
|
|
||||||
|
--max-depth The depth to recurse namespaces.
|
||||||
|
|
||||||
|
``0`` means no recursion at all (only the given namespace). If omitted,
|
||||||
|
all namespaces are recursed (below the given one).
|
||||||
|
|
||||||
|
|
||||||
Restore from Tape
|
Restore from Tape
|
||||||
~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~
|
||||||
@ -837,6 +851,53 @@ data disk (datastore):
|
|||||||
|
|
||||||
# proxmox-tape restore 9da37a55-aac7-4deb-91c6-482b3b675f30 mystore
|
# proxmox-tape restore 9da37a55-aac7-4deb-91c6-482b3b675f30 mystore
|
||||||
|
|
||||||
|
Single Snapshot Restore
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Sometimes it is not necessary to restore a whole media-set, but only some
|
||||||
|
specific snapshots from the tape. This can be achieved with the ``snapshots``
|
||||||
|
parameter:
|
||||||
|
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
// proxmox-tape restore <media-set-uuid> <datastore> [<snapshot>]
|
||||||
|
|
||||||
|
# proxmox-tape restore 9da37a55-aac7-4deb-91c6-482b3b675f30 mystore sourcestore:host/hostname/2022-01-01T00:01:00Z
|
||||||
|
|
||||||
|
This first restores the snapshot to a temporary location, then restores the relevant
|
||||||
|
chunk archives, and finally restores the snapshot data to the target datastore.
|
||||||
|
|
||||||
|
The ``snapshot`` parameter can be given multiple times, so one can restore
|
||||||
|
multiple snapshots with one restore action.
|
||||||
|
|
||||||
|
.. NOTE:: When using the single snapshot restore, the tape must be traversed
|
||||||
|
more than once, which, if you restore many snapshots at once, can take longer
|
||||||
|
than restoring the whole datastore.
|
||||||
|
|
||||||
|
Namespaces
|
||||||
|
^^^^^^^^^^
|
||||||
|
|
||||||
|
It is also possible to select and map specific namespaces from a media-set
|
||||||
|
during a restore. This is possible with the ``namespaces`` parameter.
|
||||||
|
The format of the parameter is
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
store=<source-datastore>[,source=<source-ns>][,target=<target-ns>][,max-depth=<depth>]
|
||||||
|
|
||||||
|
If ``source`` or ``target`` is not given, the root namespace is assumed.
|
||||||
|
When no ``max-depth`` is given, the source namespace will be fully recursed.
|
||||||
|
|
||||||
|
An example restore command:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-tape restore 9da37a55-aac7-4deb-91c6-482b3b675f30 mystore --namespaces store=sourcedatastore,source=ns1,target=ns2,max-depth=2
|
||||||
|
|
||||||
|
The parameter can be given multiple times. It can also be combined with the
|
||||||
|
``snapshots`` parameter to only restore those snapshots and map them to different
|
||||||
|
namespaces.
|
||||||
|
|
||||||
Update Inventory
|
Update Inventory
|
||||||
~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~
|
||||||
@ -978,3 +1039,76 @@ This command does the following:
|
|||||||
- run drive cleaning operation
|
- run drive cleaning operation
|
||||||
|
|
||||||
- unload the cleaning tape (to slot 3)
|
- unload the cleaning tape (to slot 3)
|
||||||
|
|
||||||
|
Example Setups
|
||||||
|
--------------
|
||||||
|
|
||||||
|
Here are a few example setups for how to manage media pools and schedules.
|
||||||
|
This is not an exhaustive list, and there are many more possible combinations
|
||||||
|
of useful settings.
|
||||||
|
|
||||||
|
Single Continued Media Set
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
The most simple setup: always continue the media-set and never expire.
|
||||||
|
|
||||||
|
Allocation policy:
|
||||||
|
continue
|
||||||
|
|
||||||
|
Retention policy:
|
||||||
|
keep
|
||||||
|
|
||||||
|
This setup has the advantage of being easy to manage and is re-using the benefits
|
||||||
|
from deduplication as much as possible. But, it's also prone to a failure of
|
||||||
|
any single tape, which would render all backups referring to chunks from that
|
||||||
|
tape unusable.
|
||||||
|
|
||||||
|
If you want to start a new media-set manually, you can set the currently
|
||||||
|
writable media of the set either to 'full', or set the location to an
|
||||||
|
offsite vault.
|
||||||
|
|
||||||
|
Weekday Scheme
|
||||||
|
~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
A slightly more complex scheme, where the goal is to have an independent
|
||||||
|
tape or media set for each weekday, for example from Monday to Friday.
|
||||||
|
This can be solved by having a separate media pool for each day, so 'Monday',
|
||||||
|
'Tuesday', etc.
|
||||||
|
|
||||||
|
Allocation policy:
|
||||||
|
should be 'mon' for the 'Monday' pool, 'tue' for the Tuesday pool and so on.
|
||||||
|
|
||||||
|
Retention policy:
|
||||||
|
overwrite
|
||||||
|
|
||||||
|
There should be a (or more) tape-backup jobs for each pool on the corresponding
|
||||||
|
weekday. This scheme is still very manageable with one media set per weekday,
|
||||||
|
and could be easily moved off-site.
|
||||||
|
|
||||||
|
Multiple Pools with Different Policies
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Complex setups are also possible with multiple media pools configured with
|
||||||
|
different allocation and retention policies.
|
||||||
|
|
||||||
|
An example would be to have two media pools. The first configured with weekly
|
||||||
|
allocation and a few weeks of retention:
|
||||||
|
|
||||||
|
Allocation policy:
|
||||||
|
mon
|
||||||
|
|
||||||
|
Retention policy:
|
||||||
|
3 weeks
|
||||||
|
|
||||||
|
The second pool configured yearly allocation that does not expire:
|
||||||
|
|
||||||
|
Allocation policy:
|
||||||
|
yearly
|
||||||
|
|
||||||
|
Retention policy:
|
||||||
|
keep
|
||||||
|
|
||||||
|
In combination with suited prune settings and tape backup schedules, this
|
||||||
|
achieves long-term storage of some backups, while keeping the current
|
||||||
|
backups on smaller media sets that get expired every three plus the current
|
||||||
|
week (~ 4 weeks).
|
||||||
|
@ -8,7 +8,7 @@ Datastores
|
|||||||
|
|
||||||
A Datastore is the logical place where :ref:`Backup Snapshots
|
A Datastore is the logical place where :ref:`Backup Snapshots
|
||||||
<term_backup_snapshot>` and their chunks are stored. Snapshots consist of a
|
<term_backup_snapshot>` and their chunks are stored. Snapshots consist of a
|
||||||
manifest, blobs, dynamic- and fixed-indexes (see :ref:`terms`), and are
|
manifest, blobs, and dynamic- and fixed-indexes (see :ref:`terms`), and are
|
||||||
stored in the following directory structure:
|
stored in the following directory structure:
|
||||||
|
|
||||||
<datastore-root>/<type>/<id>/<time>/
|
<datastore-root>/<type>/<id>/<time>/
|
||||||
@ -32,8 +32,8 @@ The chunks of a datastore are found in
|
|||||||
|
|
||||||
<datastore-root>/.chunks/
|
<datastore-root>/.chunks/
|
||||||
|
|
||||||
This chunk directory is further subdivided by the first four byte of the chunks
|
This chunk directory is further subdivided by the first four bytes of the
|
||||||
checksum, so the chunk with the checksum
|
chunk's checksum, so a chunk with the checksum
|
||||||
|
|
||||||
a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b
|
a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b
|
||||||
|
|
||||||
@ -47,7 +47,7 @@ per directory can be bad for file system performance.
|
|||||||
These chunk directories ('0000'-'ffff') will be preallocated when a datastore
|
These chunk directories ('0000'-'ffff') will be preallocated when a datastore
|
||||||
is created.
|
is created.
|
||||||
|
|
||||||
Fixed-sized Chunks
|
Fixed-Sized Chunks
|
||||||
^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
For block based backups (like VMs), fixed-sized chunks are used. The content
|
For block based backups (like VMs), fixed-sized chunks are used. The content
|
||||||
@ -58,10 +58,10 @@ often tries to allocate files in contiguous pieces, so new files get new
|
|||||||
blocks, and changing existing files changes only their own blocks.
|
blocks, and changing existing files changes only their own blocks.
|
||||||
|
|
||||||
As an optimization, VMs in `Proxmox VE`_ can make use of 'dirty bitmaps', which
|
As an optimization, VMs in `Proxmox VE`_ can make use of 'dirty bitmaps', which
|
||||||
can track the changed blocks of an image. Since these bitmap are also a
|
can track the changed blocks of an image. Since these bitmaps are also a
|
||||||
representation of the image split into chunks, there is a direct relation
|
representation of the image split into chunks, there is a direct relation
|
||||||
between dirty blocks of the image and chunks which need to get uploaded, so
|
between the dirty blocks of the image and chunks which need to be uploaded.
|
||||||
only modified chunks of the disk have to be uploaded for a backup.
|
Thus, only modified chunks of the disk need to be uploaded to a backup.
|
||||||
|
|
||||||
Since the image is always split into chunks of the same size, unchanged blocks
|
Since the image is always split into chunks of the same size, unchanged blocks
|
||||||
will result in identical checksums for those chunks, so such chunks do not need
|
will result in identical checksums for those chunks, so such chunks do not need
|
||||||
@ -71,24 +71,24 @@ changed blocks.
|
|||||||
For consistency, `Proxmox VE`_ uses a QEMU internal snapshot mechanism, that
|
For consistency, `Proxmox VE`_ uses a QEMU internal snapshot mechanism, that
|
||||||
does not rely on storage snapshots either.
|
does not rely on storage snapshots either.
|
||||||
|
|
||||||
Dynamically sized Chunks
|
Dynamically Sized Chunks
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
If one does not want to backup block-based systems but rather file-based
|
When working with file-based systems rather than block-based systems,
|
||||||
systems, using fixed-sized chunks is not a good idea, since every time a file
|
using fixed-sized chunks is not a good idea, since every time a file
|
||||||
would change in size, the remaining data gets shifted around and this would
|
would change in size, the remaining data would be shifted around,
|
||||||
result in many chunks changing, reducing the amount of deduplication.
|
resulting in many chunks changing and the amount of deduplication being reduced.
|
||||||
|
|
||||||
To improve this, `Proxmox Backup`_ Server uses dynamically sized chunks
|
To improve this, `Proxmox Backup`_ Server uses dynamically sized chunks
|
||||||
instead. Instead of splitting an image into fixed sizes, it first generates a
|
instead. Instead of splitting an image into fixed sizes, it first generates a
|
||||||
consistent file archive (:ref:`pxar <pxar-format>`) and uses a rolling hash
|
consistent file archive (:ref:`pxar <pxar-format>`) and uses a rolling hash
|
||||||
over this on-the-fly generated archive to calculate chunk boundaries.
|
over this on-the-fly generated archive to calculate chunk boundaries.
|
||||||
|
|
||||||
We use a variant of Buzhash which is a cyclic polynomial algorithm. It works
|
We use a variant of Buzhash which is a cyclic polynomial algorithm. It works
|
||||||
by continuously calculating a checksum while iterating over the data, and on
|
by continuously calculating a checksum while iterating over the data, and on
|
||||||
certain conditions it triggers a hash boundary.
|
certain conditions, it triggers a hash boundary.
|
||||||
|
|
||||||
Assuming that most files of the system that is to be backed up have not
|
Assuming that most files on the system that is to be backed up have not
|
||||||
changed, eventually the algorithm triggers the boundary on the same data as a
|
changed, eventually the algorithm triggers the boundary on the same data as a
|
||||||
previous backup, resulting in chunks that can be reused.
|
previous backup, resulting in chunks that can be reused.
|
||||||
|
|
||||||
@ -100,8 +100,8 @@ can be encrypted, and they are handled in a slightly different manner than
|
|||||||
normal chunks.
|
normal chunks.
|
||||||
|
|
||||||
The hashes of encrypted chunks are calculated not with the actual (encrypted)
|
The hashes of encrypted chunks are calculated not with the actual (encrypted)
|
||||||
chunk content, but with the plain-text content concatenated with the encryption
|
chunk content, but with the plain-text content, concatenated with the encryption
|
||||||
key. This way, two chunks of the same data encrypted with different keys
|
key. This way, two chunks with the same data but encrypted with different keys
|
||||||
generate two different checksums and no collisions occur for multiple
|
generate two different checksums and no collisions occur for multiple
|
||||||
encryption keys.
|
encryption keys.
|
||||||
|
|
||||||
@ -112,14 +112,14 @@ the previous backup, do not need to be encrypted and uploaded.
|
|||||||
Caveats and Limitations
|
Caveats and Limitations
|
||||||
-----------------------
|
-----------------------
|
||||||
|
|
||||||
Notes on hash collisions
|
Notes on Hash Collisions
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
Every hashing algorithm has a chance to produce collisions, meaning two (or
|
Every hashing algorithm has a chance to produce collisions, meaning two (or
|
||||||
more) inputs generate the same checksum. For SHA-256, this chance is
|
more) inputs generate the same checksum. For SHA-256, this chance is
|
||||||
negligible. To calculate such a collision, one can use the ideas of the
|
negligible. To calculate the chances of such a collision, one can use the ideas
|
||||||
'birthday problem' from probability theory. For big numbers, this is actually
|
of the 'birthday problem' from probability theory. For big numbers, this is
|
||||||
infeasible to calculate with regular computers, but there is a good
|
actually infeasible to calculate with regular computers, but there is a good
|
||||||
approximation:
|
approximation:
|
||||||
|
|
||||||
.. math::
|
.. math::
|
||||||
@ -127,7 +127,7 @@ approximation:
|
|||||||
p(n, d) = 1 - e^{-n^2/(2d)}
|
p(n, d) = 1 - e^{-n^2/(2d)}
|
||||||
|
|
||||||
Where `n` is the number of tries, and `d` is the number of possibilities.
|
Where `n` is the number of tries, and `d` is the number of possibilities.
|
||||||
For a concrete example lets assume a large datastore of 1 PiB, and an average
|
For a concrete example, let's assume a large datastore of 1 PiB and an average
|
||||||
chunk size of 4 MiB. That means :math:`n = 268435456` tries, and :math:`d =
|
chunk size of 4 MiB. That means :math:`n = 268435456` tries, and :math:`d =
|
||||||
2^{256}` possibilities. Inserting those values in the formula from earlier you
|
2^{256}` possibilities. Inserting those values in the formula from earlier you
|
||||||
will see that the probability of a collision in that scenario is:
|
will see that the probability of a collision in that scenario is:
|
||||||
@ -136,94 +136,96 @@ will see that the probability of a collision in that scenario is:
|
|||||||
|
|
||||||
3.1115 * 10^{-61}
|
3.1115 * 10^{-61}
|
||||||
|
|
||||||
For context, in a lottery game of guessing 6 out of 45, the chance to correctly
|
For context, in a lottery game of guessing 6 numbers out of 45, the chance to
|
||||||
guess all 6 numbers is only :math:`1.2277 * 10^{-7}`, that means the chance of
|
correctly guess all 6 numbers is only :math:`1.2277 * 10^{-7}`. This means the
|
||||||
a collision is about the same as winning 13 such lotto games *in a row*.
|
chance of a collision is about the same as winning 13 such lottery games *in a
|
||||||
|
row*.
|
||||||
|
|
||||||
In conclusion, it is extremely unlikely that such a collision would occur by
|
In conclusion, it is extremely unlikely that such a collision would occur by
|
||||||
accident in a normal datastore.
|
accident in a normal datastore.
|
||||||
|
|
||||||
Additionally, SHA-256 is prone to length extension attacks, but since there is
|
Additionally, SHA-256 is prone to length extension attacks, but since there is
|
||||||
an upper limit for how big the chunk are, this is not a problem, since a
|
an upper limit for how big the chunks are, this is not a problem, because a
|
||||||
potential attacker cannot arbitrarily add content to the data beyond that
|
potential attacker cannot arbitrarily add content to the data beyond that
|
||||||
limit.
|
limit.
|
||||||
|
|
||||||
File-based Backup
|
File-Based Backup
|
||||||
^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
Since dynamically sized chunks (for file-based backups) are created on a custom
|
Since dynamically sized chunks (for file-based backups) are created on a custom
|
||||||
archive format (pxar) and not over the files directly, there is no relation
|
archive format (pxar) and not over the files directly, there is no relation
|
||||||
between files and the chunks. This means that the Proxmox Backup client has to
|
between the files and chunks. This means that the Proxmox Backup Client has to
|
||||||
read all files again for every backup, otherwise it would not be possible to
|
read all files again for every backup, otherwise it would not be possible to
|
||||||
generate a consistent independent pxar archive where the original chunks can be
|
generate a consistent, independent pxar archive where the original chunks can be
|
||||||
reused. Note that there will be still only new or change chunks be uploaded.
|
reused. Note that in spite of this, only new or changed chunks will be uploaded.
|
||||||
|
|
||||||
Verification of encrypted chunks
|
Verification of Encrypted Chunks
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
For encrypted chunks, only the checksum of the original (plaintext) data is
|
For encrypted chunks, only the checksum of the original (plaintext) data is
|
||||||
available, making it impossible for the server (without the encryption key), to
|
available, making it impossible for the server (without the encryption key) to
|
||||||
verify its content against it. Instead only the CRC-32 checksum gets checked.
|
verify its content against it. Instead, only the CRC-32 checksum gets checked.
|
||||||
|
|
||||||
Troubleshooting
|
Troubleshooting
|
||||||
---------------
|
---------------
|
||||||
|
|
||||||
Index files(.fidx, .didx) contain information about how to rebuild a file, more
|
Index files(*.fidx*, *.didx*) contain information about how to rebuild a file.
|
||||||
precisely, they contain an ordered list of references to the chunks the original
|
More precisely, they contain an ordered list of references to the chunks that
|
||||||
file was split up in. If there is something wrong with a snapshot it might be
|
the original file was split into. If there is something wrong with a snapshot,
|
||||||
useful to find out which chunks are referenced in this specific snapshot, and
|
it might be useful to find out which chunks are referenced in it, and check
|
||||||
check wheather all of them are present and intact. The command for getting the
|
whether they are present and intact. The ``proxmox-backup-debug`` command line
|
||||||
list of referenced chunks could look something like this:
|
tool can be used to inspect such files and recover their contents. For example,
|
||||||
|
to get a list of the referenced chunks of a *.fidx* index:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-debug inspect file drive-scsi0.img.fidx
|
# proxmox-backup-debug inspect file drive-scsi0.img.fidx
|
||||||
|
|
||||||
The same command can be used to look at .blob file, without ``--decode`` just
|
The same command can be used to inspect *.blob* files. Without the ``--decode``
|
||||||
the size and the encryption type, if any, is printed. If ``--decode`` is set the
|
parameter, just the size and the encryption type, if any, are printed. If
|
||||||
blob file is decoded into the specified file('-' will decode it directly into
|
``--decode`` is set, the blob file is decoded into the specified file ('-' will
|
||||||
stdout).
|
decode it directly to stdout).
|
||||||
|
|
||||||
|
The following example would print the decoded contents of
|
||||||
|
`qemu-server.conf.blob`. If the file you're trying to inspect is encrypted, a
|
||||||
|
path to the key file must be provided using ``--keyfile``.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-debug inspect file qemu-server.conf.blob --decode -
|
# proxmox-backup-debug inspect file qemu-server.conf.blob --decode -
|
||||||
|
|
||||||
would print the decoded contents of `qemu-server.conf.blob`. If the file you're
|
You can also check in which index files a specific chunk file is referenced
|
||||||
trying to inspect is encrypted, a path to the keyfile has to be provided using
|
|
||||||
``--keyfile``.
|
|
||||||
|
|
||||||
Checking in which index files a specific chunk file is referenced can be done
|
|
||||||
with:
|
with:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-debug inspect chunk b531d3ffc9bd7c65748a61198c060678326a431db7eded874c327b7986e595e0 --reference-filter /path/in/a/datastore/directory
|
# proxmox-backup-debug inspect chunk b531d3ffc9bd7c65748a61198c060678326a431db7eded874c327b7986e595e0 --reference-filter /path/in/a/datastore/directory
|
||||||
|
|
||||||
Here ``--reference-filter`` specifies where index files should be searched, this
|
Here ``--reference-filter`` specifies where index files should be searched. This
|
||||||
can be an arbitrary path. If, for some reason, the filename of the chunk was
|
can be an arbitrary path. If, for some reason, the filename of the chunk was
|
||||||
changed you can explicitly specify the digest using ``--digest``, by default the
|
changed, you can explicitly specify the digest using ``--digest``. By default, the
|
||||||
chunk filename is used as the digest to look for. Specifying no
|
chunk filename is used as the digest to look for. If no ``--reference-filter``
|
||||||
``--reference-filter`` will just print the CRC and encryption status of the
|
is specified, it will only print the CRC and encryption status of the chunk. You
|
||||||
chunk. You can also decode chunks, to do so ``--decode`` has to be set. If the
|
can also decode chunks, by setting the ``--decode`` flag. If the chunk is
|
||||||
chunk is encrypted a ``--keyfile`` has to be provided for decoding.
|
encrypted, a ``--keyfile`` must be provided, in order to decode it.
|
||||||
|
|
||||||
Restore without a running PBS
|
Restore without a Running Proxmox Backup Server
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
It is possible to restore spefiic files of snapshots without a running PBS using
|
It's possible to restore specific files from a snapshot, without a running
|
||||||
the `recover` sub-command, provided you have access to the intact index and
|
Proxmox Backup Server instance, using the ``recover`` subcommand, provided you
|
||||||
chunk files. Note that you also need the corresponding key file if the backup
|
have access to the intact index and chunk files. Note that you also need the
|
||||||
was encrypted.
|
corresponding key file if the backup was encrypted.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-debug recover index drive-scsi0.img.fidx /path/to/.chunks
|
# proxmox-backup-debug recover index drive-scsi0.img.fidx /path/to/.chunks
|
||||||
|
|
||||||
In above example the `/path/to/.chunks` argument is the path to the directory
|
In the above example, the `/path/to/.chunks` argument is the path to the
|
||||||
that contains contains the chunks, and `drive-scsi0.img.fidx` is the index-file
|
directory that contains the chunks, and `drive-scsi0.img.fidx` is the index file
|
||||||
of the file you'd lile to restore. Both paths can be absolute or relative. With
|
of the file you'd like to restore. Both paths can be absolute or relative. With
|
||||||
``--skip-crc`` it is possible to disable the crc checks of the chunks, this will
|
``--skip-crc``, it's possible to disable the CRC checks of the chunks. This
|
||||||
speed up the process slightly and allows for trying to restore (partially)
|
will speed up the process slightly and allow for trying to restore (partially)
|
||||||
corrupt chunks. It's recommended to always try without the skip-CRC option
|
corrupt chunks. It's recommended to always try without the skip-CRC option
|
||||||
first.
|
first.
|
||||||
|
|
||||||
|
@ -41,26 +41,35 @@ Binary Data (BLOBs)
|
|||||||
~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
This type is used to store smaller (< 16MB) binary data such as
|
This type is used to store smaller (< 16MB) binary data such as
|
||||||
configuration files. Larger files should be stored as image archive.
|
configuration files. Larger files should be stored as image archives.
|
||||||
|
|
||||||
.. caution:: Please do not store all files as BLOBs. Instead, use the
|
.. caution:: Please do not store all files as BLOBs. Instead, use the
|
||||||
file archive to store whole directory trees.
|
file archive to store entire directory trees.
|
||||||
|
|
||||||
|
|
||||||
Catalog File: ``catalog.pcat1``
|
Catalog File: ``catalog.pcat1``
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
The catalog file is an index for file archives. It contains
|
The catalog file is an index for file archives. It contains
|
||||||
the list of files and is used to speed up search operations.
|
the list of included files and is used to speed up search operations.
|
||||||
|
|
||||||
|
|
||||||
The Manifest: ``index.json``
|
The Manifest: ``index.json``
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
The manifest contains the list of all backup files, their
|
The manifest contains a list of all backed up files, and their
|
||||||
sizes and checksums. It is used to verify the consistency of a
|
sizes and checksums. It is used to verify the consistency of a
|
||||||
backup.
|
backup.
|
||||||
|
|
||||||
|
Backup Namespace
|
||||||
|
----------------
|
||||||
|
|
||||||
|
Namespaces allow for the reuse of a single chunk store deduplication domain for
|
||||||
|
multiple sources, while avoiding naming conflicts and getting more fine-grained
|
||||||
|
access control.
|
||||||
|
|
||||||
|
Essentially, they're implemented as a simple directory structure and need no
|
||||||
|
separate configuration.
|
||||||
|
|
||||||
Backup Type
|
Backup Type
|
||||||
-----------
|
-----------
|
||||||
@ -68,38 +77,40 @@ Backup Type
|
|||||||
The backup server groups backups by *type*, where *type* is one of:
|
The backup server groups backups by *type*, where *type* is one of:
|
||||||
|
|
||||||
``vm``
|
``vm``
|
||||||
This type is used for :term:`virtual machine`\ s. Typically
|
This type is used for :term:`virtual machine<Virtual machine>`\ s. It
|
||||||
consists of the virtual machine's configuration file and an image archive
|
typically consists of the virtual machine's configuration file and an image
|
||||||
for each disk.
|
archive for each disk.
|
||||||
|
|
||||||
``ct``
|
``ct``
|
||||||
This type is used for :term:`container`\ s. Consists of the container's
|
This type is used for :term:`container<Container>`\ s. It consists of the
|
||||||
configuration and a single file archive for the filesystem content.
|
container's configuration and a single file archive for the filesystem's
|
||||||
|
contents.
|
||||||
|
|
||||||
``host``
|
``host``
|
||||||
This type is used for backups created from within the backed up machine.
|
This type is used for file/directory backups created from within a machine.
|
||||||
Typically this would be a physical host but could also be a virtual machine
|
Typically this would be a physical host, but could also be a virtual machine
|
||||||
or container. Such backups may contain file and image archives, there are no restrictions in this regard.
|
or container. Such backups may contain file and image archives; there are no
|
||||||
|
restrictions in this regard.
|
||||||
|
|
||||||
Backup ID
|
Backup ID
|
||||||
---------
|
---------
|
||||||
|
|
||||||
A unique ID. Usually the virtual machine or container ID. ``host``
|
A unique ID for a specific Backup Type and Backup Namespace. Usually the
|
||||||
type backups normally use the hostname.
|
virtual machine or container ID. ``host`` type backups normally use the
|
||||||
|
hostname.
|
||||||
|
|
||||||
Backup Time
|
Backup Time
|
||||||
-----------
|
-----------
|
||||||
|
|
||||||
The time when the backup was made.
|
The time when the backup was made with second resolution.
|
||||||
|
|
||||||
|
|
||||||
Backup Group
|
Backup Group
|
||||||
------------
|
------------
|
||||||
|
|
||||||
The tuple ``<type>/<ID>`` is called a backup group. Such a group
|
The tuple ``<type>/<id>`` is called a backup group. Such a group may contain
|
||||||
may contain one or more backup snapshots.
|
one or more backup snapshots.
|
||||||
|
|
||||||
|
|
||||||
.. _term_backup_snapshot:
|
.. _term_backup_snapshot:
|
||||||
|
|
||||||
@ -115,7 +126,7 @@ uniquely identifies a specific backup within a datastore.
|
|||||||
vm/104/2019-10-09T08:01:06Z
|
vm/104/2019-10-09T08:01:06Z
|
||||||
host/elsa/2019-11-08T09:48:14Z
|
host/elsa/2019-11-08T09:48:14Z
|
||||||
|
|
||||||
As you can see, the time format is RFC3399_ with Coordinated
|
As you can see, the time format is RFC3339_ with Coordinated
|
||||||
Universal Time (UTC_, identified by the trailing *Z*).
|
Universal Time (UTC_, identified by the trailing *Z*).
|
||||||
|
|
||||||
|
|
||||||
|
101
docs/traffic-control.rst
Normal file
@ -0,0 +1,101 @@
|
|||||||
|
.. _sysadmin_traffic_control:
|
||||||
|
|
||||||
|
Traffic Control
|
||||||
|
---------------
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-traffic-control-add.png
|
||||||
|
:align: right
|
||||||
|
:alt: Add a traffic control limit
|
||||||
|
|
||||||
|
Creating and restoring backups can produce lots of traffic and impact other
|
||||||
|
users of the network or shared storages.
|
||||||
|
|
||||||
|
Proxmox Backup Server allows you to limit network traffic for clients within
|
||||||
|
specified networks using a token bucket filter (TBF).
|
||||||
|
|
||||||
|
This allows you to avoid network congestion or to prioritize traffic from
|
||||||
|
certain hosts.
|
||||||
|
|
||||||
|
You can manage the traffic controls either over the web-interface or using the
|
||||||
|
``traffic-control`` commands of the ``proxmox-backup-manager`` command-line
|
||||||
|
tool.
|
||||||
|
|
||||||
|
.. note:: Sync jobs on the server are not affected by its rate-in limits. If
|
||||||
|
you want to limit the incoming traffic that a pull-based sync job
|
||||||
|
generates, you need to set up a job-specific rate-in limit. See
|
||||||
|
:ref:`syncjobs`.
|
||||||
|
|
||||||
|
The following command adds a traffic control rule to limit all IPv4 clients
|
||||||
|
(network ``0.0.0.0/0``) to 100 MB/s:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager traffic-control create rule0 --network 0.0.0.0/0 \
|
||||||
|
--rate-in 100MB --rate-out 100MB \
|
||||||
|
--comment "Default rate limit (100MB/s) for all clients"
|
||||||
|
|
||||||
|
.. note:: To limit both IPv4 and IPv6 network spaces you need to pass two
|
||||||
|
network parameters ``::/0`` and ``0.0.0.0/0``.
|
||||||
|
|
||||||
|
It is possible to restrict rules to certain time frames, for example the
|
||||||
|
company office hours:
|
||||||
|
|
||||||
|
.. tip:: You can use SI (base 10: KB, MB, ...) or IEC (base 2: KiB, MiB, ...)
|
||||||
|
units.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager traffic-control update rule0 \
|
||||||
|
--timeframe "mon..fri 8-12" \
|
||||||
|
--timeframe "mon..fri 14:30-18"
|
||||||
|
|
||||||
|
If there are more rules, the server uses the rule with the smaller network. For
|
||||||
|
example, we can overwrite the setting for our private network (and the server
|
||||||
|
itself) with:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager traffic-control create rule1 \
|
||||||
|
--network 192.168.2.0/24 \
|
||||||
|
--network 127.0.0.0/8 \
|
||||||
|
--rate-in 20GB --rate-out 20GB \
|
||||||
|
--comment "Use 20GB/s for the local network"
|
||||||
|
|
||||||
|
.. note:: The behavior is undefined if there are several rules for the same network.
|
||||||
|
|
||||||
|
If there are multiple rules that match the same network all of them will be
|
||||||
|
applied, which means that the smallest one wins, as its bucket fills up the
|
||||||
|
fastest.
|
||||||
|
|
||||||
|
To list the current rules use:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager traffic-control list
|
||||||
|
┌───────┬─────────────┬─────────────┬─────────────────────────┬────────────...─┐
|
||||||
|
│ name │ rate-in │ rate-out │ network │ timeframe ... │
|
||||||
|
╞═══════╪═════════════╪═════════════╪═════════════════════════╪════════════...═╡
|
||||||
|
│ rule0 │ 100 MB │ 100 MB │ ["0.0.0.0/0"] │ ["mon..fri ... │
|
||||||
|
├───────┼─────────────┼─────────────┼─────────────────────────┼────────────...─┤
|
||||||
|
│ rule1 │ 20 GB │ 20 GB │ ["192.168.2.0/24", ...] │ ... │
|
||||||
|
└───────┴─────────────┴─────────────┴─────────────────────────┴────────────...─┘
|
||||||
|
|
||||||
|
Rules can also be removed:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager traffic-control remove rule1
|
||||||
|
|
||||||
|
|
||||||
|
To show the state (current data rate) of all configured rules use:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager traffic-control traffic
|
||||||
|
┌───────┬─────────────┬──────────────┐
|
||||||
|
│ name │ cur-rate-in │ cur-rate-out │
|
||||||
|
╞═══════╪═════════════╪══════════════╡
|
||||||
|
│ rule0 │ 0 B │ 0 B │
|
||||||
|
├───────┼─────────────┼──────────────┤
|
||||||
|
│ rule1 │ 1.161 GiB │ 19.146 KiB │
|
||||||
|
└───────┴─────────────┴──────────────┘
|
@ -15,17 +15,19 @@ Proxmox Backup Server supports several authentication realms, and you need to
|
|||||||
choose the realm when you add a new user. Possible realms are:
|
choose the realm when you add a new user. Possible realms are:
|
||||||
|
|
||||||
:pam: Linux PAM standard authentication. Use this if you want to
|
:pam: Linux PAM standard authentication. Use this if you want to
|
||||||
authenticate as Linux system user (Users need to exist on the
|
authenticate as a Linux system user (users need to exist on the
|
||||||
system).
|
system).
|
||||||
|
|
||||||
:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
|
:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
|
||||||
``/etc/proxmox-backup/shadow.json``.
|
``/etc/proxmox-backup/shadow.json``.
|
||||||
|
|
||||||
After installation, there is a single user ``root@pam``, which
|
:openid: OpenID Connect server. Users can authenticate against an external
|
||||||
corresponds to the Unix superuser. User configuration information is stored in the file
|
OpenID Connect server.
|
||||||
``/etc/proxmox-backup/user.cfg``. You can use the
|
|
||||||
``proxmox-backup-manager`` command line tool to list or manipulate
|
After installation, there is a single user, ``root@pam``, which corresponds to
|
||||||
users:
|
the Unix superuser. User configuration information is stored in the file
|
||||||
|
``/etc/proxmox-backup/user.cfg``. You can use the ``proxmox-backup-manager``
|
||||||
|
command line tool to list or manipulate users:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -40,13 +42,13 @@ users:
|
|||||||
:align: right
|
:align: right
|
||||||
:alt: Add a new user
|
:alt: Add a new user
|
||||||
|
|
||||||
The superuser has full administration rights on everything, so you
|
The superuser has full administration rights on everything, so it's recommended
|
||||||
normally want to add other users with less privileges. You can add a new
|
to add other users with less privileges. You can add a new
|
||||||
user with the ``user create`` subcommand or through the web
|
user with the ``user create`` subcommand or through the web
|
||||||
interface, under the **User Management** tab of **Configuration -> Access
|
interface, under the **User Management** tab of **Configuration -> Access
|
||||||
Control**. The ``create`` subcommand lets you specify many options like
|
Control**. The ``create`` subcommand lets you specify many options like
|
||||||
``--email`` or ``--password``. You can update or change any user properties
|
``--email`` or ``--password``. You can update or change any user properties
|
||||||
using the ``update`` subcommand later (**Edit** in the GUI):
|
using the ``user update`` subcommand later (**Edit** in the GUI):
|
||||||
|
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -71,16 +73,16 @@ The resulting user list looks like this:
|
|||||||
│ root@pam │ 1 │ │ │ │ │ Superuser │
|
│ root@pam │ 1 │ │ │ │ │ Superuser │
|
||||||
└──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘
|
└──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘
|
||||||
|
|
||||||
Newly created users do not have any permissions. Please read the Access Control
|
Newly created users do not have any permissions. Please read the :ref:`user_acl`
|
||||||
section to learn how to set access permissions.
|
section to learn how to set access permissions.
|
||||||
|
|
||||||
If you want to disable a user account, you can do that by setting ``--enable`` to ``0``
|
You can disable a user account by setting ``--enable`` to ``0``:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-manager user update john@pbs --enable 0
|
# proxmox-backup-manager user update john@pbs --enable 0
|
||||||
|
|
||||||
Or completely remove the user with:
|
Or completely remove a user with:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -95,7 +97,7 @@ API Tokens
|
|||||||
:align: right
|
:align: right
|
||||||
:alt: API Token Overview
|
:alt: API Token Overview
|
||||||
|
|
||||||
Any authenticated user can generate API tokens which can in turn be used to
|
Any authenticated user can generate API tokens, which can in turn be used to
|
||||||
configure various clients, instead of directly providing the username and
|
configure various clients, instead of directly providing the username and
|
||||||
password.
|
password.
|
||||||
|
|
||||||
@ -117,7 +119,7 @@ The API token is passed from the client to the server by setting the
|
|||||||
``Authorization`` HTTP header with method ``PBSAPIToken`` to the value
|
``Authorization`` HTTP header with method ``PBSAPIToken`` to the value
|
||||||
``TOKENID:TOKENSECRET``.
|
``TOKENID:TOKENSECRET``.
|
||||||
|
|
||||||
Generating new tokens can done using ``proxmox-backup-manager`` or the GUI:
|
You can generate tokens from the GUI or by using ``proxmox-backup-manager``:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -154,35 +156,134 @@ section to learn how to set access permissions.
|
|||||||
Access Control
|
Access Control
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
By default new users and API tokens do not have any permission. Instead you
|
By default, new users and API tokens do not have any permissions. Instead you
|
||||||
need to specify what is allowed and what is not. You can do this by assigning
|
need to specify what is allowed and what is not.
|
||||||
roles to users/tokens on specific objects like datastores or remotes. The
|
|
||||||
following roles exist:
|
Proxmox Backup Server uses a role and path based permission management system.
|
||||||
|
An entry in the permissions table allows a user, group or token to take on a
|
||||||
|
specific role when accessing an 'object' or 'path'. This means that such an
|
||||||
|
access rule can be represented as a triple of '(path, user, role)', '(path,
|
||||||
|
group, role)' or '(path, token, role)', with the role containing a set of
|
||||||
|
allowed actions, and the path representing the target of these actions.
|
||||||
|
|
||||||
|
Privileges
|
||||||
|
~~~~~~~~~~
|
||||||
|
|
||||||
|
Privileges are the atoms that access roles are made of. They are internally
|
||||||
|
used to enforce the actual permission checks in the API.
|
||||||
|
|
||||||
|
We currently support the following privileges:
|
||||||
|
|
||||||
|
**Sys.Audit**
|
||||||
|
Sys.Audit allows one to know about the system and its status.
|
||||||
|
|
||||||
|
**Sys.Modify**
|
||||||
|
Sys.Modify allows one to modify system-level configuration and apply updates.
|
||||||
|
|
||||||
|
**Sys.PowerManagement**
|
||||||
|
Sys.PowerManagement allows one to power off or reboot the system.
|
||||||
|
|
||||||
|
**Datastore.Audit**
|
||||||
|
Datastore.Audit allows one to know about a datastore, including reading the
|
||||||
|
configuration entry and listing its contents.
|
||||||
|
|
||||||
|
**Datastore.Allocate**
|
||||||
|
Datastore.Allocate allows one to create or delete datastores.
|
||||||
|
|
||||||
|
**Datastore.Modify**
|
||||||
|
Datastore.Modify allows one to modify a datastore and its contents, and to
|
||||||
|
create or delete namespaces inside a datastore.
|
||||||
|
|
||||||
|
**Datastore.Read**
|
||||||
|
Datastore.Read allows one to read arbitrary backup contents, independent of
|
||||||
|
the backup group owner.
|
||||||
|
|
||||||
|
**Datastore.Verify**
|
||||||
|
Allows verifying the backup snapshots in a datastore.
|
||||||
|
|
||||||
|
**Datastore.Backup**
|
||||||
|
Datastore.Backup allows one to create new backup snapshots and also grants the
|
||||||
|
privileges of Datastore.Read and Datastore.Verify, but only if the backup
|
||||||
|
group is owned by the user or one of its tokens.
|
||||||
|
|
||||||
|
**Datastore.Prune**
|
||||||
|
Datastore.Prune allows one to delete snapshots, but additionally requires
|
||||||
|
backup ownership.
|
||||||
|
|
||||||
|
**Permissions.Modify**
|
||||||
|
Permissions.Modify allows one to modify ACLs.
|
||||||
|
|
||||||
|
.. note:: One can always configure privileges for their own API tokens, as
|
||||||
|
they will be clamped by the user's privileges anyway.
|
||||||
|
|
||||||
|
**Remote.Audit**
|
||||||
|
Remote.Audit allows one to read the remote and the sync configuration entries
|
||||||
|
|
||||||
|
**Remote.Modify**
|
||||||
|
Remote.Modify allows one to modify the remote configuration
|
||||||
|
|
||||||
|
**Remote.Read**
|
||||||
|
Remote.Read allows one to read data from a configured `Remote`
|
||||||
|
|
||||||
|
**Sys.Console**
|
||||||
|
Sys.Console allows one to access the system's console. Note that for all
|
||||||
|
but `root@pam` a valid system login is still required.
|
||||||
|
|
||||||
|
**Tape.Audit**
|
||||||
|
Tape.Audit allows one to read the configuration and status of tape drives,
|
||||||
|
changers and backups
|
||||||
|
|
||||||
|
**Tape.Modify**
|
||||||
|
Tape.Modify allows one to modify the configuration of tape drives, changers
|
||||||
|
and backups
|
||||||
|
|
||||||
|
**Tape.Write**
|
||||||
|
Tape.Write allows one to write to a tape media
|
||||||
|
|
||||||
|
**Tape.Read**
|
||||||
|
Tape.Read allows one to read tape backup configuration and contents from a
|
||||||
|
tape media
|
||||||
|
|
||||||
|
**Realm.Allocate**
|
||||||
|
Realm.Allocate allows one to view, create, modify and delete authentication
|
||||||
|
realms for users
|
||||||
|
|
||||||
|
Access Roles
|
||||||
|
~~~~~~~~~~~~
|
||||||
|
|
||||||
|
An access role combines one or more privileges into something that can be
|
||||||
|
assigned to a user or API token on an object path.
|
||||||
|
|
||||||
|
Currently, there are only built-in roles, which means you cannot create your
|
||||||
|
own, custom role.
|
||||||
|
|
||||||
|
The following roles exist:
|
||||||
|
|
||||||
**NoAccess**
|
**NoAccess**
|
||||||
Disable Access - nothing is allowed.
|
Disable Access - nothing is allowed.
|
||||||
|
|
||||||
**Admin**
|
**Admin**
|
||||||
Can do anything.
|
Can do anything, on the object path assigned.
|
||||||
|
|
||||||
**Audit**
|
**Audit**
|
||||||
Can view things, but is not allowed to change settings.
|
Can view the status and configuration of things, but is not allowed to change
|
||||||
|
settings.
|
||||||
|
|
||||||
**DatastoreAdmin**
|
**DatastoreAdmin**
|
||||||
Can do anything on datastores.
|
Can do anything on *existing* datastores.
|
||||||
|
|
||||||
**DatastoreAudit**
|
**DatastoreAudit**
|
||||||
Can view datastore settings and list content. But
|
Can view datastore metrics, settings and list content. But is not allowed to
|
||||||
is not allowed to read the actual data.
|
read the actual data.
|
||||||
|
|
||||||
**DatastoreReader**
|
**DatastoreReader**
|
||||||
Can Inspect datastore content and can do restores.
|
Can inspect a datastore's or namespace's content and do restores.
|
||||||
|
|
||||||
**DatastoreBackup**
|
**DatastoreBackup**
|
||||||
Can backup and restore owned backups.
|
Can backup and restore owned backups.
|
||||||
|
|
||||||
**DatastorePowerUser**
|
**DatastorePowerUser**
|
||||||
Can backup, restore, and prune owned backups.
|
Can backup, restore, and prune *owned* backups.
|
||||||
|
|
||||||
**RemoteAdmin**
|
**RemoteAdmin**
|
||||||
Can do anything on remotes.
|
Can do anything on remotes.
|
||||||
@ -193,7 +294,62 @@ following roles exist:
|
|||||||
**RemoteSyncOperator**
|
**RemoteSyncOperator**
|
||||||
Is allowed to read data from a remote.
|
Is allowed to read data from a remote.
|
||||||
|
|
||||||
.. image:: images/screenshots/pbs-gui-user-management-add-user.png
|
**TapeAdmin**
|
||||||
|
Can do anything related to tape backup
|
||||||
|
|
||||||
|
**TapeAudit**
|
||||||
|
Can view tape related metrics, configuration and status
|
||||||
|
|
||||||
|
**TapeOperator**
|
||||||
|
Can do tape backup and restore, but cannot change any configuration
|
||||||
|
|
||||||
|
**TapeReader**
|
||||||
|
Can read and inspect tape configuration and media content
|
||||||
|
|
||||||
|
Objects and Paths
|
||||||
|
~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Access permissions are assigned to objects, such as a datastore, a namespace or
|
||||||
|
some system resources.
|
||||||
|
|
||||||
|
We use file system like paths to address these objects. These paths form a
|
||||||
|
natural tree, and permissions of higher levels (shorter paths) can optionally
|
||||||
|
be propagated down within this hierarchy.
|
||||||
|
|
||||||
|
Paths can be templated, which means they can refer to the actual id of a
|
||||||
|
configuration entry. When an API call requires permissions on a templated
|
||||||
|
path, the path may contain references to parameters of the API call. These
|
||||||
|
references are specified in curly braces.
|
||||||
|
|
||||||
|
Some examples are:
|
||||||
|
|
||||||
|
* `/datastore`: Access to *all* datastores on a Proxmox Backup server
|
||||||
|
* `/datastore/{store}`: Access to a specific datastore on a Proxmox Backup
|
||||||
|
server
|
||||||
|
* `/datastore/{store}/{ns}`: Access to a specific namespace on a specific
|
||||||
|
datastore
|
||||||
|
* `/remote`: Access to all remote entries
|
||||||
|
* `/system/network`: Access to configuring the host network
|
||||||
|
* `/tape/`: Access to tape devices, pools and jobs
|
||||||
|
* `/access/users`: User administration
|
||||||
|
* `/access/openid/{id}`: Administrative access to a specific OpenID Connect realm
|
||||||
|
|
||||||
|
Inheritance
|
||||||
|
^^^^^^^^^^^
|
||||||
|
|
||||||
|
As mentioned earlier, object paths form a file system like tree, and
|
||||||
|
permissions can be inherited by objects down that tree through the propagate
|
||||||
|
flag, which is set by default. We use the following inheritance rules:
|
||||||
|
|
||||||
|
* Permissions for API tokens are always clamped to those of the user.
|
||||||
|
* Permissions on deeper, more specific levels replace those inherited from an
|
||||||
|
upper level.
|
||||||
|
|
||||||
|
|
||||||
|
Configuration & Management
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-permissions-add.png
|
||||||
:align: right
|
:align: right
|
||||||
:alt: Add permissions for user
|
:alt: Add permissions for user
|
||||||
|
|
||||||
@ -236,7 +392,8 @@ You can list the ACLs of each user/token using the following command:
|
|||||||
│ john@pbs │ /datastore/store1 │ 1 │ DatastoreAdmin │
|
│ john@pbs │ /datastore/store1 │ 1 │ DatastoreAdmin │
|
||||||
└──────────┴───────────────────┴───────────┴────────────────┘
|
└──────────┴───────────────────┴───────────┴────────────────┘
|
||||||
|
|
||||||
A single user/token can be assigned multiple permission sets for different datastores.
|
A single user/token can be assigned multiple permission sets for different
|
||||||
|
datastores.
|
||||||
|
|
||||||
.. Note::
|
.. Note::
|
||||||
Naming convention is important here. For datastores on the host,
|
Naming convention is important here. For datastores on the host,
|
||||||
@ -247,11 +404,11 @@ A single user/token can be assigned multiple permission sets for different datas
|
|||||||
remote (see `Remote` below) and ``{storename}`` is the name of the datastore on
|
remote (see `Remote` below) and ``{storename}`` is the name of the datastore on
|
||||||
the remote.
|
the remote.
|
||||||
|
|
||||||
API Token permissions
|
API Token Permissions
|
||||||
~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
API token permissions are calculated based on ACLs containing their ID
|
API token permissions are calculated based on ACLs containing their ID,
|
||||||
independent of those of their corresponding user. The resulting permission set
|
independently of those of their corresponding user. The resulting permission set
|
||||||
on a given path is then intersected with that of the corresponding user.
|
on a given path is then intersected with that of the corresponding user.
|
||||||
|
|
||||||
In practice this means:
|
In practice this means:
|
||||||
@ -259,17 +416,17 @@ In practice this means:
|
|||||||
#. API tokens require their own ACL entries
|
#. API tokens require their own ACL entries
|
||||||
#. API tokens can never do more than their corresponding user
|
#. API tokens can never do more than their corresponding user
|
||||||
|
|
||||||
Effective permissions
|
Effective Permissions
|
||||||
~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
To calculate and display the effective permission set of a user or API token
|
To calculate and display the effective permission set of a user or API token,
|
||||||
you can use the ``proxmox-backup-manager user permission`` command:
|
you can use the ``proxmox-backup-manager user permission`` command:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-manager user permissions john@pbs --path /datastore/store1
|
# proxmox-backup-manager user permissions john@pbs --path /datastore/store1
|
||||||
Privileges with (*) have the propagate flag set
|
Privileges with (*) have the propagate flag set
|
||||||
|
|
||||||
Path: /datastore/store1
|
Path: /datastore/store1
|
||||||
- Datastore.Audit (*)
|
- Datastore.Audit (*)
|
||||||
- Datastore.Backup (*)
|
- Datastore.Backup (*)
|
||||||
@ -277,17 +434,17 @@ you can use the ``proxmox-backup-manager user permission`` command:
|
|||||||
- Datastore.Prune (*)
|
- Datastore.Prune (*)
|
||||||
- Datastore.Read (*)
|
- Datastore.Read (*)
|
||||||
- Datastore.Verify (*)
|
- Datastore.Verify (*)
|
||||||
|
|
||||||
# proxmox-backup-manager acl update /datastore/store1 DatastoreBackup --auth-id 'john@pbs!client1'
|
# proxmox-backup-manager acl update /datastore/store1 DatastoreBackup --auth-id 'john@pbs!client1'
|
||||||
# proxmox-backup-manager user permissions 'john@pbs!client1' --path /datastore/store1
|
# proxmox-backup-manager user permissions 'john@pbs!client1' --path /datastore/store1
|
||||||
Privileges with (*) have the propagate flag set
|
Privileges with (*) have the propagate flag set
|
||||||
|
|
||||||
Path: /datastore/store1
|
Path: /datastore/store1
|
||||||
- Datastore.Backup (*)
|
- Datastore.Backup (*)
|
||||||
|
|
||||||
.. _user_tfa:
|
.. _user_tfa:
|
||||||
|
|
||||||
Two-factor authentication
|
Two-Factor Authentication
|
||||||
-------------------------
|
-------------------------
|
||||||
|
|
||||||
Introduction
|
Introduction
|
||||||
@ -296,7 +453,7 @@ Introduction
|
|||||||
With simple authentication, only a password (single factor) is required to
|
With simple authentication, only a password (single factor) is required to
|
||||||
successfully claim an identity (authenticate), for example, to be able to log in
|
successfully claim an identity (authenticate), for example, to be able to log in
|
||||||
as `root@pam` on a specific instance of Proxmox Backup Server. In this case, if
|
as `root@pam` on a specific instance of Proxmox Backup Server. In this case, if
|
||||||
the password gets stolen or leaked, anybody can use it to log in - even if they
|
the password gets leaked or stolen, anybody can use it to log in - even if they
|
||||||
should not be allowed to do so.
|
should not be allowed to do so.
|
||||||
|
|
||||||
With two-factor authentication (TFA), a user is asked for an additional factor
|
With two-factor authentication (TFA), a user is asked for an additional factor
|
||||||
@ -359,16 +516,18 @@ WebAuthn
|
|||||||
|
|
||||||
For WebAuthn to work, you need to have two things:
|
For WebAuthn to work, you need to have two things:
|
||||||
|
|
||||||
* a trusted HTTPS certificate (for example, by using `Let's Encrypt
|
* A trusted HTTPS certificate (for example, by using `Let's Encrypt
|
||||||
<https://pbs.proxmox.com/wiki/index.php/HTTPS_Certificate_Configuration>`_).
|
<https://pbs.proxmox.com/wiki/index.php/HTTPS_Certificate_Configuration>`_).
|
||||||
While it probably works with an untrusted certificate, some browsers may warn
|
While it probably works with an untrusted certificate, some browsers may warn
|
||||||
or refuse WebAuthn operations if it is not trusted.
|
or refuse WebAuthn operations if it is not trusted.
|
||||||
|
|
||||||
* setup the WebAuthn configuration (see *Configuration -> Authentication* in the
|
* Setup the WebAuthn configuration (see **Configuration -> Authentication** in
|
||||||
Proxmox Backup Server web-interface). This can be auto-filled in most setups.
|
the Proxmox Backup Server web interface). This can be auto-filled in most
|
||||||
|
setups.
|
||||||
|
|
||||||
Once you have fulfilled both of these requirements, you can add a WebAuthn
|
Once you have fulfilled both of these requirements, you can add a WebAuthn
|
||||||
configuration in the *Access Control* panel.
|
configuration in the **Two Factor Authentication** tab of the **Access Control**
|
||||||
|
panel.
|
||||||
|
|
||||||
.. _user_tfa_setup_recovery_keys:
|
.. _user_tfa_setup_recovery_keys:
|
||||||
|
|
||||||
@ -380,7 +539,8 @@ Recovery Keys
|
|||||||
:alt: Add a new user
|
:alt: Add a new user
|
||||||
|
|
||||||
Recovery key codes do not need any preparation; you can simply create a set of
|
Recovery key codes do not need any preparation; you can simply create a set of
|
||||||
recovery keys in the *Access Control* panel.
|
recovery keys in the **Two Factor Authentication** tab of the **Access Control**
|
||||||
|
panel.
|
||||||
|
|
||||||
.. note:: There can only be one set of single-use recovery keys per user at any
|
.. note:: There can only be one set of single-use recovery keys per user at any
|
||||||
time.
|
time.
|
||||||
|
@ -1,9 +1,8 @@
|
|||||||
use anyhow::{Error};
|
use anyhow::Error;
|
||||||
|
|
||||||
// chacha20-poly1305
|
// chacha20-poly1305
|
||||||
|
|
||||||
fn rate_test(name: &str, bench: &dyn Fn() -> usize) {
|
fn rate_test(name: &str, bench: &dyn Fn() -> usize) {
|
||||||
|
|
||||||
print!("{:<20} ", name);
|
print!("{:<20} ", name);
|
||||||
|
|
||||||
let start = std::time::SystemTime::now();
|
let start = std::time::SystemTime::now();
|
||||||
@ -14,20 +13,19 @@ fn rate_test(name: &str, bench: &dyn Fn() -> usize) {
|
|||||||
loop {
|
loop {
|
||||||
bytes += bench();
|
bytes += bench();
|
||||||
let elapsed = start.elapsed().unwrap();
|
let elapsed = start.elapsed().unwrap();
|
||||||
if elapsed > duration { break; }
|
if elapsed > duration {
|
||||||
|
break;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let elapsed = start.elapsed().unwrap();
|
let elapsed = start.elapsed().unwrap();
|
||||||
let elapsed = (elapsed.as_secs() as f64) +
|
let elapsed = (elapsed.as_secs() as f64) + (elapsed.subsec_millis() as f64) / 1000.0;
|
||||||
(elapsed.subsec_millis() as f64)/1000.0;
|
|
||||||
|
|
||||||
println!("{:>8.1} MB/s", (bytes as f64)/(elapsed*1024.0*1024.0));
|
println!("{:>8.1} MB/s", (bytes as f64) / (elapsed * 1024.0 * 1024.0));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
|
let input = proxmox_sys::linux::random_data(1024 * 1024)?;
|
||||||
let input = proxmox::sys::linux::random_data(1024*1024)?;
|
|
||||||
|
|
||||||
rate_test("crc32", &|| {
|
rate_test("crc32", &|| {
|
||||||
let mut crchasher = crc32fast::Hasher::new();
|
let mut crchasher = crc32fast::Hasher::new();
|
||||||
@ -46,35 +44,23 @@ fn main() -> Result<(), Error> {
|
|||||||
input.len()
|
input.len()
|
||||||
});
|
});
|
||||||
|
|
||||||
let key = proxmox::sys::linux::random_data(32)?;
|
let key = proxmox_sys::linux::random_data(32)?;
|
||||||
|
|
||||||
let iv = proxmox::sys::linux::random_data(16)?;
|
let iv = proxmox_sys::linux::random_data(16)?;
|
||||||
|
|
||||||
let cipher = openssl::symm::Cipher::aes_256_gcm();
|
let cipher = openssl::symm::Cipher::aes_256_gcm();
|
||||||
|
|
||||||
rate_test("aes-256-gcm", &|| {
|
rate_test("aes-256-gcm", &|| {
|
||||||
let mut tag = [0u8;16];
|
let mut tag = [0u8; 16];
|
||||||
openssl::symm::encrypt_aead(
|
openssl::symm::encrypt_aead(cipher, &key, Some(&iv), b"", &input, &mut tag).unwrap();
|
||||||
cipher,
|
|
||||||
&key,
|
|
||||||
Some(&iv),
|
|
||||||
b"",
|
|
||||||
&input,
|
|
||||||
&mut tag).unwrap();
|
|
||||||
input.len()
|
input.len()
|
||||||
});
|
});
|
||||||
|
|
||||||
let cipher = openssl::symm::Cipher::chacha20_poly1305();
|
let cipher = openssl::symm::Cipher::chacha20_poly1305();
|
||||||
|
|
||||||
rate_test("chacha20-poly1305", &|| {
|
rate_test("chacha20-poly1305", &|| {
|
||||||
let mut tag = [0u8;16];
|
let mut tag = [0u8; 16];
|
||||||
openssl::symm::encrypt_aead(
|
openssl::symm::encrypt_aead(cipher, &key, Some(&iv[..12]), b"", &input, &mut tag).unwrap();
|
||||||
cipher,
|
|
||||||
&key,
|
|
||||||
Some(&iv[..12]),
|
|
||||||
b"",
|
|
||||||
&input,
|
|
||||||
&mut tag).unwrap();
|
|
||||||
input.len()
|
input.len()
|
||||||
});
|
});
|
||||||
|
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
use anyhow::{Error};
|
use anyhow::Error;
|
||||||
|
|
||||||
use proxmox::api::{*, cli::*};
|
use proxmox_router::cli::*;
|
||||||
|
use proxmox_schema::*;
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
input: {
|
input: {
|
||||||
@ -15,9 +16,7 @@ use proxmox::api::{*, cli::*};
|
|||||||
/// Echo command. Print the passed text.
|
/// Echo command. Print the passed text.
|
||||||
///
|
///
|
||||||
/// Returns: nothing
|
/// Returns: nothing
|
||||||
fn echo_command(
|
fn echo_command(text: String) -> Result<(), Error> {
|
||||||
text: String,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
println!("{}", text);
|
println!("{}", text);
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@ -36,9 +35,7 @@ fn echo_command(
|
|||||||
/// Hello command.
|
/// Hello command.
|
||||||
///
|
///
|
||||||
/// Returns: nothing
|
/// Returns: nothing
|
||||||
fn hello_command(
|
fn hello_command(verbose: Option<bool>) -> Result<(), Error> {
|
||||||
verbose: Option<bool>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
if verbose.unwrap_or(false) {
|
if verbose.unwrap_or(false) {
|
||||||
println!("Hello, how are you!");
|
println!("Hello, how are you!");
|
||||||
} else {
|
} else {
|
||||||
@ -53,7 +50,6 @@ fn hello_command(
|
|||||||
///
|
///
|
||||||
/// Returns: nothing
|
/// Returns: nothing
|
||||||
fn quit_command() -> Result<(), Error> {
|
fn quit_command() -> Result<(), Error> {
|
||||||
|
|
||||||
println!("Goodbye.");
|
println!("Goodbye.");
|
||||||
|
|
||||||
std::process::exit(0);
|
std::process::exit(0);
|
||||||
@ -63,8 +59,9 @@ fn cli_definition() -> CommandLineInterface {
|
|||||||
let cmd_def = CliCommandMap::new()
|
let cmd_def = CliCommandMap::new()
|
||||||
.insert("quit", CliCommand::new(&API_METHOD_QUIT_COMMAND))
|
.insert("quit", CliCommand::new(&API_METHOD_QUIT_COMMAND))
|
||||||
.insert("hello", CliCommand::new(&API_METHOD_HELLO_COMMAND))
|
.insert("hello", CliCommand::new(&API_METHOD_HELLO_COMMAND))
|
||||||
.insert("echo", CliCommand::new(&API_METHOD_ECHO_COMMAND)
|
.insert(
|
||||||
.arg_param(&["text"])
|
"echo",
|
||||||
|
CliCommand::new(&API_METHOD_ECHO_COMMAND).arg_param(&["text"]),
|
||||||
)
|
)
|
||||||
.insert_help();
|
.insert_help();
|
||||||
|
|
||||||
@ -72,7 +69,6 @@ fn cli_definition() -> CommandLineInterface {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
|
|
||||||
let helper = CliHelper::new(cli_definition());
|
let helper = CliHelper::new(cli_definition());
|
||||||
|
|
||||||
let mut rl = rustyline::Editor::<CliHelper>::new();
|
let mut rl = rustyline::Editor::<CliHelper>::new();
|
||||||
|
@ -1,16 +1,15 @@
|
|||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
|
|
||||||
use anyhow::{Error};
|
use anyhow::Error;
|
||||||
|
|
||||||
use pbs_api_types::Authid;
|
use pbs_api_types::{Authid, BackupNamespace, BackupType};
|
||||||
use pbs_client::{HttpClient, HttpClientOptions, BackupReader};
|
use pbs_client::{BackupReader, HttpClient, HttpClientOptions};
|
||||||
|
|
||||||
pub struct DummyWriter {
|
pub struct DummyWriter {
|
||||||
bytes: usize,
|
bytes: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Write for DummyWriter {
|
impl Write for DummyWriter {
|
||||||
|
|
||||||
fn write(&mut self, data: &[u8]) -> Result<usize, std::io::Error> {
|
fn write(&mut self, data: &[u8]) -> Result<usize, std::io::Error> {
|
||||||
self.bytes += data.len();
|
self.bytes += data.len();
|
||||||
Ok(data.len())
|
Ok(data.len())
|
||||||
@ -21,9 +20,7 @@ impl Write for DummyWriter {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
async fn run() -> Result<(), Error> {
|
async fn run() -> Result<(), Error> {
|
||||||
|
|
||||||
let host = "localhost";
|
let host = "localhost";
|
||||||
|
|
||||||
let auth_id = Authid::root_auth_id();
|
let auth_id = Authid::root_auth_id();
|
||||||
@ -34,10 +31,17 @@ async fn run() -> Result<(), Error> {
|
|||||||
|
|
||||||
let client = HttpClient::new(host, 8007, auth_id, options)?;
|
let client = HttpClient::new(host, 8007, auth_id, options)?;
|
||||||
|
|
||||||
let backup_time = proxmox::tools::time::parse_rfc3339("2019-06-28T10:49:48Z")?;
|
let backup_time = proxmox_time::parse_rfc3339("2019-06-28T10:49:48Z")?;
|
||||||
|
|
||||||
let client = BackupReader::start(client, None, "store2", "host", "elsa", backup_time, true)
|
let client = BackupReader::start(
|
||||||
.await?;
|
client,
|
||||||
|
None,
|
||||||
|
"store2",
|
||||||
|
&BackupNamespace::root(),
|
||||||
|
&(BackupType::Host, "elsa".to_string(), backup_time).into(),
|
||||||
|
true,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
let start = std::time::SystemTime::now();
|
let start = std::time::SystemTime::now();
|
||||||
|
|
||||||
@ -50,16 +54,19 @@ async fn run() -> Result<(), Error> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let elapsed = start.elapsed().unwrap();
|
let elapsed = start.elapsed().unwrap();
|
||||||
let elapsed = (elapsed.as_secs() as f64) +
|
let elapsed = (elapsed.as_secs() as f64) + (elapsed.subsec_millis() as f64) / 1000.0;
|
||||||
(elapsed.subsec_millis() as f64)/1000.0;
|
|
||||||
|
|
||||||
println!("Downloaded {} bytes, {} MB/s", bytes, (bytes as f64)/(elapsed*1024.0*1024.0));
|
println!(
|
||||||
|
"Downloaded {} bytes, {} MB/s",
|
||||||
|
bytes,
|
||||||
|
(bytes as f64) / (elapsed * 1024.0 * 1024.0)
|
||||||
|
);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
if let Err(err) = pbs_runtime::main(run()) {
|
if let Err(err) = proxmox_async::runtime::main(run()) {
|
||||||
eprintln!("ERROR: {}", err);
|
eprintln!("ERROR: {}", err);
|
||||||
}
|
}
|
||||||
println!("DONE");
|
println!("DONE");
|
||||||
|
@ -1,8 +1,8 @@
|
|||||||
use anyhow::{bail, Error};
|
|
||||||
|
|
||||||
use std::thread;
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
use std::thread;
|
||||||
|
|
||||||
|
use anyhow::{bail, Error};
|
||||||
|
|
||||||
// tar handle files that shrink during backup, by simply padding with zeros.
|
// tar handle files that shrink during backup, by simply padding with zeros.
|
||||||
//
|
//
|
||||||
@ -19,15 +19,15 @@ use std::io::Write;
|
|||||||
// Error: detected shrunk file "./dyntest1/testfile0.dat" (22020096 < 12679380992)
|
// Error: detected shrunk file "./dyntest1/testfile0.dat" (22020096 < 12679380992)
|
||||||
|
|
||||||
fn create_large_file(path: PathBuf) {
|
fn create_large_file(path: PathBuf) {
|
||||||
|
|
||||||
println!("TEST {:?}", path);
|
println!("TEST {:?}", path);
|
||||||
|
|
||||||
let mut file = std::fs::OpenOptions::new()
|
let mut file = std::fs::OpenOptions::new()
|
||||||
.write(true)
|
.write(true)
|
||||||
.create_new(true)
|
.create_new(true)
|
||||||
.open(&path).unwrap();
|
.open(&path)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
let buffer = vec![0u8; 64*1024];
|
let buffer = vec![0u8; 64 * 1024];
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
for _ in 0..64 {
|
for _ in 0..64 {
|
||||||
@ -40,7 +40,6 @@ fn create_large_file(path: PathBuf) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
|
|
||||||
let base = PathBuf::from("dyntest1");
|
let base = PathBuf::from("dyntest1");
|
||||||
let _ = std::fs::create_dir(&base);
|
let _ = std::fs::create_dir(&base);
|
||||||
|
|
||||||
|
@ -69,7 +69,7 @@ fn send_request(
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
pbs_runtime::main(run())
|
proxmox_async::runtime::main(run())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> Result<(), Error> {
|
async fn run() -> Result<(), Error> {
|
||||||
|
@ -69,7 +69,7 @@ fn send_request(
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
pbs_runtime::main(run())
|
proxmox_async::runtime::main(run())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> Result<(), Error> {
|
async fn run() -> Result<(), Error> {
|
||||||
|
@ -9,7 +9,7 @@ use tokio::net::{TcpListener, TcpStream};
|
|||||||
use pbs_buildcfg::configdir;
|
use pbs_buildcfg::configdir;
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
pbs_runtime::main(run())
|
proxmox_async::runtime::main(run())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> Result<(), Error> {
|
async fn run() -> Result<(), Error> {
|
||||||
|
@ -5,7 +5,7 @@ use hyper::{Body, Request, Response};
|
|||||||
use tokio::net::{TcpListener, TcpStream};
|
use tokio::net::{TcpListener, TcpStream};
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
pbs_runtime::main(run())
|
proxmox_async::runtime::main(run())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> Result<(), Error> {
|
async fn run() -> Result<(), Error> {
|
||||||
|
@ -2,7 +2,7 @@ extern crate proxmox_backup;
|
|||||||
|
|
||||||
// also see https://www.johndcook.com/blog/standard_deviation/
|
// also see https://www.johndcook.com/blog/standard_deviation/
|
||||||
|
|
||||||
use anyhow::{Error};
|
use anyhow::Error;
|
||||||
use std::io::{Read, Write};
|
use std::io::{Read, Write};
|
||||||
|
|
||||||
use pbs_datastore::Chunker;
|
use pbs_datastore::Chunker;
|
||||||
@ -21,7 +21,6 @@ struct ChunkWriter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl ChunkWriter {
|
impl ChunkWriter {
|
||||||
|
|
||||||
fn new(chunk_size: usize) -> Self {
|
fn new(chunk_size: usize) -> Self {
|
||||||
ChunkWriter {
|
ChunkWriter {
|
||||||
chunker: Chunker::new(chunk_size),
|
chunker: Chunker::new(chunk_size),
|
||||||
@ -37,7 +36,6 @@ impl ChunkWriter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn record_stat(&mut self, chunk_size: f64) {
|
fn record_stat(&mut self, chunk_size: f64) {
|
||||||
|
|
||||||
self.chunk_count += 1;
|
self.chunk_count += 1;
|
||||||
|
|
||||||
if self.chunk_count == 1 {
|
if self.chunk_count == 1 {
|
||||||
@ -45,28 +43,30 @@ impl ChunkWriter {
|
|||||||
self.m_new = chunk_size;
|
self.m_new = chunk_size;
|
||||||
self.s_old = 0.0;
|
self.s_old = 0.0;
|
||||||
} else {
|
} else {
|
||||||
self.m_new = self.m_old + (chunk_size - self.m_old)/(self.chunk_count as f64);
|
self.m_new = self.m_old + (chunk_size - self.m_old) / (self.chunk_count as f64);
|
||||||
self.s_new = self.s_old +
|
self.s_new = self.s_old + (chunk_size - self.m_old) * (chunk_size - self.m_new);
|
||||||
(chunk_size - self.m_old)*(chunk_size - self.m_new);
|
|
||||||
// set up for next iteration
|
// set up for next iteration
|
||||||
self.m_old = self.m_new;
|
self.m_old = self.m_new;
|
||||||
self.s_old = self.s_new;
|
self.s_old = self.s_new;
|
||||||
}
|
}
|
||||||
|
|
||||||
let variance = if self.chunk_count > 1 {
|
let variance = if self.chunk_count > 1 {
|
||||||
self.s_new/((self.chunk_count -1)as f64)
|
self.s_new / ((self.chunk_count - 1) as f64)
|
||||||
} else { 0.0 };
|
} else {
|
||||||
|
0.0
|
||||||
|
};
|
||||||
|
|
||||||
let std_deviation = variance.sqrt();
|
let std_deviation = variance.sqrt();
|
||||||
let deviation_per = (std_deviation*100.0)/self.m_new;
|
let deviation_per = (std_deviation * 100.0) / self.m_new;
|
||||||
println!("COUNT {:10} SIZE {:10} MEAN {:10} DEVIATION {:3}%", self.chunk_count, chunk_size, self.m_new as usize, deviation_per as usize);
|
println!(
|
||||||
|
"COUNT {:10} SIZE {:10} MEAN {:10} DEVIATION {:3}%",
|
||||||
|
self.chunk_count, chunk_size, self.m_new as usize, deviation_per as usize
|
||||||
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Write for ChunkWriter {
|
impl Write for ChunkWriter {
|
||||||
|
|
||||||
fn write(&mut self, data: &[u8]) -> std::result::Result<usize, std::io::Error> {
|
fn write(&mut self, data: &[u8]) -> std::result::Result<usize, std::io::Error> {
|
||||||
|
|
||||||
let chunker = &mut self.chunker;
|
let chunker = &mut self.chunker;
|
||||||
|
|
||||||
let pos = chunker.scan(data);
|
let pos = chunker.scan(data);
|
||||||
@ -80,7 +80,6 @@ impl Write for ChunkWriter {
|
|||||||
|
|
||||||
self.last_chunk = self.chunk_offset;
|
self.last_chunk = self.chunk_offset;
|
||||||
Ok(pos)
|
Ok(pos)
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
self.chunk_offset += data.len();
|
self.chunk_offset += data.len();
|
||||||
Ok(data.len())
|
Ok(data.len())
|
||||||
@ -93,23 +92,23 @@ impl Write for ChunkWriter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
|
|
||||||
let mut file = std::fs::File::open("/dev/urandom")?;
|
let mut file = std::fs::File::open("/dev/urandom")?;
|
||||||
|
|
||||||
let mut bytes = 0;
|
let mut bytes = 0;
|
||||||
|
|
||||||
let mut buffer = [0u8; 64*1024];
|
let mut buffer = [0u8; 64 * 1024];
|
||||||
|
|
||||||
let mut writer = ChunkWriter::new(4096*1024);
|
let mut writer = ChunkWriter::new(4096 * 1024);
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
|
|
||||||
file.read_exact(&mut buffer)?;
|
file.read_exact(&mut buffer)?;
|
||||||
bytes += buffer.len();
|
bytes += buffer.len();
|
||||||
|
|
||||||
writer.write_all(&buffer)?;
|
writer.write_all(&buffer)?;
|
||||||
|
|
||||||
if bytes > 1024*1024*1024 { break; }
|
if bytes > 1024 * 1024 * 1024 {
|
||||||
|
break;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -3,24 +3,23 @@ extern crate proxmox_backup;
|
|||||||
use pbs_datastore::Chunker;
|
use pbs_datastore::Chunker;
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
|
|
||||||
let mut buffer = Vec::new();
|
let mut buffer = Vec::new();
|
||||||
|
|
||||||
for i in 0..20*1024*1024 {
|
for i in 0..20 * 1024 * 1024 {
|
||||||
for j in 0..4 {
|
for j in 0..4 {
|
||||||
let byte = ((i >> (j<<3))&0xff) as u8;
|
let byte = ((i >> (j << 3)) & 0xff) as u8;
|
||||||
//println!("BYTE {}", byte);
|
//println!("BYTE {}", byte);
|
||||||
buffer.push(byte);
|
buffer.push(byte);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
let mut chunker = Chunker::new(64*1024);
|
let mut chunker = Chunker::new(64 * 1024);
|
||||||
|
|
||||||
let count = 5;
|
let count = 5;
|
||||||
|
|
||||||
let start = std::time::SystemTime::now();
|
let start = std::time::SystemTime::now();
|
||||||
|
|
||||||
let mut chunk_count = 0;
|
let mut chunk_count = 0;
|
||||||
|
|
||||||
for _i in 0..count {
|
for _i in 0..count {
|
||||||
let mut pos = 0;
|
let mut pos = 0;
|
||||||
let mut _last = 0;
|
let mut _last = 0;
|
||||||
@ -39,11 +38,14 @@ fn main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let elapsed = start.elapsed().unwrap();
|
let elapsed = start.elapsed().unwrap();
|
||||||
let elapsed = (elapsed.as_secs() as f64) +
|
let elapsed = (elapsed.as_secs() as f64) + (elapsed.subsec_millis() as f64) / 1000.0;
|
||||||
(elapsed.subsec_millis() as f64)/1000.0;
|
|
||||||
|
let mbytecount = ((count * buffer.len()) as f64) / (1024.0 * 1024.0);
|
||||||
let mbytecount = ((count*buffer.len()) as f64) / (1024.0*1024.0);
|
let avg_chunk_size = mbytecount / (chunk_count as f64);
|
||||||
let avg_chunk_size = mbytecount/(chunk_count as f64);
|
let mbytes_per_sec = mbytecount / elapsed;
|
||||||
let mbytes_per_sec = mbytecount/elapsed;
|
println!(
|
||||||
println!("SPEED = {} MB/s, avg chunk size = {} KB", mbytes_per_sec, avg_chunk_size*1024.0);
|
"SPEED = {} MB/s, avg chunk size = {} KB",
|
||||||
|
mbytes_per_sec,
|
||||||
|
avg_chunk_size * 1024.0
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
use anyhow::{Error};
|
use anyhow::Error;
|
||||||
use futures::*;
|
use futures::*;
|
||||||
|
|
||||||
extern crate proxmox_backup;
|
extern crate proxmox_backup;
|
||||||
@ -13,13 +13,12 @@ use pbs_client::ChunkStream;
|
|||||||
// Note: I can currently get about 830MB/s
|
// Note: I can currently get about 830MB/s
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
if let Err(err) = pbs_runtime::main(run()) {
|
if let Err(err) = proxmox_async::runtime::main(run()) {
|
||||||
panic!("ERROR: {}", err);
|
panic!("ERROR: {}", err);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> Result<(), Error> {
|
async fn run() -> Result<(), Error> {
|
||||||
|
|
||||||
let file = tokio::fs::File::open("random-test.dat").await?;
|
let file = tokio::fs::File::open("random-test.dat").await?;
|
||||||
|
|
||||||
let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
|
let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
|
||||||
@ -34,7 +33,7 @@ async fn run() -> Result<(), Error> {
|
|||||||
let mut repeat = 0;
|
let mut repeat = 0;
|
||||||
let mut stream_len = 0;
|
let mut stream_len = 0;
|
||||||
while let Some(chunk) = chunk_stream.try_next().await? {
|
while let Some(chunk) = chunk_stream.try_next().await? {
|
||||||
if chunk.len() > 16*1024*1024 {
|
if chunk.len() > 16 * 1024 * 1024 {
|
||||||
panic!("Chunk too large {}", chunk.len());
|
panic!("Chunk too large {}", chunk.len());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -44,10 +43,19 @@ async fn run() -> Result<(), Error> {
|
|||||||
println!("Got chunk {}", chunk.len());
|
println!("Got chunk {}", chunk.len());
|
||||||
}
|
}
|
||||||
|
|
||||||
let speed = ((stream_len*1_000_000)/(1024*1024))/(start_time.elapsed().as_micros() as usize);
|
let speed =
|
||||||
println!("Uploaded {} chunks in {} seconds ({} MB/s).", repeat, start_time.elapsed().as_secs(), speed);
|
((stream_len * 1_000_000) / (1024 * 1024)) / (start_time.elapsed().as_micros() as usize);
|
||||||
println!("Average chunk size was {} bytes.", stream_len/repeat);
|
println!(
|
||||||
println!("time per request: {} microseconds.", (start_time.elapsed().as_micros())/(repeat as u128));
|
"Uploaded {} chunks in {} seconds ({} MB/s).",
|
||||||
|
repeat,
|
||||||
|
start_time.elapsed().as_secs(),
|
||||||
|
speed
|
||||||
|
);
|
||||||
|
println!("Average chunk size was {} bytes.", stream_len / repeat);
|
||||||
|
println!(
|
||||||
|
"time per request: {} microseconds.",
|
||||||
|
(start_time.elapsed().as_micros()) / (repeat as u128)
|
||||||
|
);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -1,10 +1,9 @@
|
|||||||
use anyhow::{Error};
|
use anyhow::Error;
|
||||||
|
|
||||||
use pbs_client::{HttpClient, HttpClientOptions, BackupWriter};
|
use pbs_api_types::{Authid, BackupNamespace, BackupType};
|
||||||
use pbs_api_types::Authid;
|
use pbs_client::{BackupWriter, HttpClient, HttpClientOptions};
|
||||||
|
|
||||||
async fn upload_speed() -> Result<f64, Error> {
|
async fn upload_speed() -> Result<f64, Error> {
|
||||||
|
|
||||||
let host = "localhost";
|
let host = "localhost";
|
||||||
let datastore = "store2";
|
let datastore = "store2";
|
||||||
|
|
||||||
@ -16,9 +15,18 @@ async fn upload_speed() -> Result<f64, Error> {
|
|||||||
|
|
||||||
let client = HttpClient::new(host, 8007, auth_id, options)?;
|
let client = HttpClient::new(host, 8007, auth_id, options)?;
|
||||||
|
|
||||||
let backup_time = proxmox::tools::time::epoch_i64();
|
let backup_time = proxmox_time::epoch_i64();
|
||||||
|
|
||||||
let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false, true).await?;
|
let client = BackupWriter::start(
|
||||||
|
client,
|
||||||
|
None,
|
||||||
|
datastore,
|
||||||
|
&BackupNamespace::root(),
|
||||||
|
&(BackupType::Host, "speedtest".to_string(), backup_time).into(),
|
||||||
|
false,
|
||||||
|
true,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
println!("start upload speed test");
|
println!("start upload speed test");
|
||||||
let res = client.upload_speedtest(true).await?;
|
let res = client.upload_speedtest(true).await?;
|
||||||
@ -26,8 +34,8 @@ async fn upload_speed() -> Result<f64, Error> {
|
|||||||
Ok(res)
|
Ok(res)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
match pbs_runtime::main(upload_speed()) {
|
match proxmox_async::runtime::main(upload_speed()) {
|
||||||
Ok(mbs) => {
|
Ok(mbs) => {
|
||||||
println!("average upload speed: {} MB/s", mbs);
|
println!("average upload speed: {} MB/s", mbs);
|
||||||
}
|
}
|
||||||
|
@ -7,14 +7,15 @@ description = "general API type helpers for PBS"
|
|||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
anyhow = "1.0"
|
anyhow = "1.0"
|
||||||
|
hex = "0.4.3"
|
||||||
lazy_static = "1.4"
|
lazy_static = "1.4"
|
||||||
libc = "0.2"
|
percent-encoding = "2.1"
|
||||||
nix = "0.19.1"
|
regex = "1.5.5"
|
||||||
openssl = "0.10"
|
|
||||||
regex = "1.2"
|
|
||||||
serde = { version = "1.0", features = ["derive"] }
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
|
serde_plain = "1"
|
||||||
|
|
||||||
proxmox = { version = "0.13.3", default-features = false, features = [ "api-macro" ] }
|
proxmox-lang = "1.0.0"
|
||||||
|
proxmox-schema = { version = "1.2.1", features = [ "api-macro" ] }
|
||||||
proxmox-systemd = { path = "../proxmox-systemd" }
|
proxmox-serde = "0.1"
|
||||||
pbs-tools = { path = "../pbs-tools" }
|
proxmox-time = "1.1.1"
|
||||||
|
proxmox-uuid = { version = "1.0.0", features = [ "serde" ] }
|
||||||
|
@ -1,13 +1,12 @@
|
|||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use serde::de::{value, IntoDeserializer};
|
use serde::de::{value, IntoDeserializer};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::api;
|
use proxmox_lang::constnamedbitmap;
|
||||||
use proxmox::api::schema::{
|
use proxmox_schema::{
|
||||||
ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema,
|
api, const_regex, ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema,
|
||||||
};
|
};
|
||||||
use proxmox::{constnamedbitmap, const_regex};
|
|
||||||
|
|
||||||
const_regex! {
|
const_regex! {
|
||||||
pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
|
pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
|
||||||
@ -74,9 +73,20 @@ constnamedbitmap! {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn privs_to_priv_names(privs: u64) -> Vec<&'static str> {
|
||||||
|
PRIVILEGES
|
||||||
|
.iter()
|
||||||
|
.fold(Vec::new(), |mut priv_names, (name, value)| {
|
||||||
|
if value & privs != 0 {
|
||||||
|
priv_names.push(name);
|
||||||
|
}
|
||||||
|
priv_names
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
/// Admin always has all privileges. It can do everything except a few actions
|
/// Admin always has all privileges. It can do everything except a few actions
|
||||||
/// which are limited to the 'root@pam` superuser
|
/// which are limited to the 'root@pam` superuser
|
||||||
pub const ROLE_ADMIN: u64 = std::u64::MAX;
|
pub const ROLE_ADMIN: u64 = u64::MAX;
|
||||||
|
|
||||||
/// NoAccess can be used to remove privileges from specific (sub-)paths
|
/// NoAccess can be used to remove privileges from specific (sub-)paths
|
||||||
pub const ROLE_NO_ACCESS: u64 = 0;
|
pub const ROLE_NO_ACCESS: u64 = 0;
|
||||||
@ -222,7 +232,6 @@ pub enum Role {
|
|||||||
TapeReader = ROLE_TAPE_READER,
|
TapeReader = ROLE_TAPE_READER,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
impl FromStr for Role {
|
impl FromStr for Role {
|
||||||
type Err = value::Error;
|
type Err = value::Error;
|
||||||
|
|
||||||
@ -231,26 +240,24 @@ impl FromStr for Role {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub const ACL_PATH_FORMAT: ApiStringFormat =
|
pub const ACL_PATH_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&ACL_PATH_REGEX);
|
||||||
ApiStringFormat::Pattern(&ACL_PATH_REGEX);
|
|
||||||
|
|
||||||
pub const ACL_PATH_SCHEMA: Schema = StringSchema::new(
|
pub const ACL_PATH_SCHEMA: Schema = StringSchema::new("Access control path.")
|
||||||
"Access control path.")
|
|
||||||
.format(&ACL_PATH_FORMAT)
|
.format(&ACL_PATH_FORMAT)
|
||||||
.min_length(1)
|
.min_length(1)
|
||||||
.max_length(128)
|
.max_length(128)
|
||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
pub const ACL_PROPAGATE_SCHEMA: Schema = BooleanSchema::new(
|
pub const ACL_PROPAGATE_SCHEMA: Schema =
|
||||||
"Allow to propagate (inherit) permissions.")
|
BooleanSchema::new("Allow to propagate (inherit) permissions.")
|
||||||
.default(true)
|
.default(true)
|
||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new(
|
pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new("Type of 'ugid' property.")
|
||||||
"Type of 'ugid' property.")
|
|
||||||
.format(&ApiStringFormat::Enum(&[
|
.format(&ApiStringFormat::Enum(&[
|
||||||
EnumEntry::new("user", "User"),
|
EnumEntry::new("user", "User"),
|
||||||
EnumEntry::new("group", "Group")]))
|
EnumEntry::new("group", "Group"),
|
||||||
|
]))
|
||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
|
78
pbs-api-types/src/common_regex.rs
Normal file
@ -0,0 +1,78 @@
|
|||||||
|
//! Predefined Regular Expressions
|
||||||
|
//!
|
||||||
|
//! This is a collection of useful regular expressions
|
||||||
|
|
||||||
|
use lazy_static::lazy_static;
|
||||||
|
use regex::Regex;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[macro_export]
|
||||||
|
macro_rules! IPV4OCTET { () => (r"(?:25[0-5]|(?:2[0-4]|1[0-9]|[1-9])?[0-9])") }
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[macro_export]
|
||||||
|
macro_rules! IPV6H16 { () => (r"(?:[0-9a-fA-F]{1,4})") }
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[macro_export]
|
||||||
|
macro_rules! IPV6LS32 { () => (concat!(r"(?:(?:", IPV4RE!(), "|", IPV6H16!(), ":", IPV6H16!(), "))" )) }
|
||||||
|
|
||||||
|
/// Returns the regular expression string to match IPv4 addresses
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[macro_export]
|
||||||
|
macro_rules! IPV4RE { () => (concat!(r"(?:(?:", IPV4OCTET!(), r"\.){3}", IPV4OCTET!(), ")")) }
|
||||||
|
|
||||||
|
/// Returns the regular expression string to match IPv6 addresses
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[macro_export]
|
||||||
|
macro_rules! IPV6RE { () => (concat!(r"(?:",
|
||||||
|
r"(?:(?:", r"(?:", IPV6H16!(), r":){6})", IPV6LS32!(), r")|",
|
||||||
|
r"(?:(?:", r"::(?:", IPV6H16!(), r":){5})", IPV6LS32!(), r")|",
|
||||||
|
r"(?:(?:(?:", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){4})", IPV6LS32!(), r")|",
|
||||||
|
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,1}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){3})", IPV6LS32!(), r")|",
|
||||||
|
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,2}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){2})", IPV6LS32!(), r")|",
|
||||||
|
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,3}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){1})", IPV6LS32!(), r")|",
|
||||||
|
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,4}", IPV6H16!(), r")?::", ")", IPV6LS32!(), r")|",
|
||||||
|
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,5}", IPV6H16!(), r")?::", ")", IPV6H16!(), r")|",
|
||||||
|
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,6}", IPV6H16!(), r")?::", ")))"))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the regular expression string to match IP addresses (v4 or v6)
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[macro_export]
|
||||||
|
macro_rules! IPRE { () => (concat!(r"(?:", IPV4RE!(), "|", IPV6RE!(), ")")) }
|
||||||
|
|
||||||
|
/// Regular expression string to match IP addresses where IPv6 addresses require brackets around
|
||||||
|
/// them, while for IPv4 they are forbidden.
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[macro_export]
|
||||||
|
macro_rules! IPRE_BRACKET { () => (
|
||||||
|
concat!(r"(?:",
|
||||||
|
IPV4RE!(),
|
||||||
|
r"|\[(?:",
|
||||||
|
IPV6RE!(),
|
||||||
|
r")\]",
|
||||||
|
r")"))
|
||||||
|
}
|
||||||
|
|
||||||
|
lazy_static! {
|
||||||
|
pub static ref IP_REGEX: Regex = Regex::new(concat!(r"^", IPRE!(), r"$")).unwrap();
|
||||||
|
pub static ref IP_BRACKET_REGEX: Regex =
|
||||||
|
Regex::new(concat!(r"^", IPRE_BRACKET!(), r"$")).unwrap();
|
||||||
|
pub static ref SHA256_HEX_REGEX: Regex = Regex::new(r"^[a-f0-9]{64}$").unwrap();
|
||||||
|
pub static ref SYSTEMD_DATETIME_REGEX: Regex =
|
||||||
|
Regex::new(r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$").unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_regexes() {
|
||||||
|
assert!(IP_REGEX.is_match("127.0.0.1"));
|
||||||
|
assert!(IP_REGEX.is_match("::1"));
|
||||||
|
assert!(IP_REGEX.is_match("2014:b3a::27"));
|
||||||
|
assert!(IP_REGEX.is_match("2014:b3a::192.168.0.1"));
|
||||||
|
assert!(IP_REGEX.is_match("2014:b3a:0102:adf1:1234:4321:4afA:BCDF"));
|
||||||
|
|
||||||
|
assert!(IP_BRACKET_REGEX.is_match("127.0.0.1"));
|
||||||
|
assert!(IP_BRACKET_REGEX.is_match("[::1]"));
|
||||||
|
assert!(IP_BRACKET_REGEX.is_match("[2014:b3a::27]"));
|
||||||
|
assert!(IP_BRACKET_REGEX.is_match("[2014:b3a::192.168.0.1]"));
|
||||||
|
assert!(IP_BRACKET_REGEX.is_match("[2014:b3a:0102:adf1:1234:4321:4afA:BCDF]"));
|
||||||
|
}
|
@ -3,9 +3,7 @@ use std::fmt::{self, Display};
|
|||||||
use anyhow::Error;
|
use anyhow::Error;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::api;
|
use proxmox_schema::api;
|
||||||
|
|
||||||
use pbs_tools::format::{as_fingerprint, bytes_as_fingerprint};
|
|
||||||
|
|
||||||
#[api(default: "encrypt")]
|
#[api(default: "encrypt")]
|
||||||
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
|
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
|
||||||
@ -35,6 +33,9 @@ impl Fingerprint {
|
|||||||
pub fn bytes(&self) -> &[u8; 32] {
|
pub fn bytes(&self) -> &[u8; 32] {
|
||||||
&self.bytes
|
&self.bytes
|
||||||
}
|
}
|
||||||
|
pub fn signature(&self) -> String {
|
||||||
|
as_fingerprint(&self.bytes)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Display as short key ID
|
/// Display as short key ID
|
||||||
@ -50,8 +51,45 @@ impl std::str::FromStr for Fingerprint {
|
|||||||
fn from_str(s: &str) -> Result<Self, Error> {
|
fn from_str(s: &str) -> Result<Self, Error> {
|
||||||
let mut tmp = s.to_string();
|
let mut tmp = s.to_string();
|
||||||
tmp.retain(|c| c != ':');
|
tmp.retain(|c| c != ':');
|
||||||
let bytes = proxmox::tools::hex_to_digest(&tmp)?;
|
let mut bytes = [0u8; 32];
|
||||||
|
hex::decode_to_slice(&tmp, &mut bytes)?;
|
||||||
Ok(Fingerprint::new(bytes))
|
Ok(Fingerprint::new(bytes))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn as_fingerprint(bytes: &[u8]) -> String {
|
||||||
|
hex::encode(bytes)
|
||||||
|
.as_bytes()
|
||||||
|
.chunks(2)
|
||||||
|
.map(|v| unsafe { std::str::from_utf8_unchecked(v) }) // it's a hex string
|
||||||
|
.collect::<Vec<&str>>()
|
||||||
|
.join(":")
|
||||||
|
}
|
||||||
|
|
||||||
|
pub mod bytes_as_fingerprint {
|
||||||
|
use std::mem::MaybeUninit;
|
||||||
|
|
||||||
|
use serde::{Deserialize, Deserializer, Serializer};
|
||||||
|
|
||||||
|
pub fn serialize<S>(bytes: &[u8; 32], serializer: S) -> Result<S::Ok, S::Error>
|
||||||
|
where
|
||||||
|
S: Serializer,
|
||||||
|
{
|
||||||
|
let s = super::as_fingerprint(bytes);
|
||||||
|
serializer.serialize_str(&s)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; 32], D::Error>
|
||||||
|
where
|
||||||
|
D: Deserializer<'de>,
|
||||||
|
{
|
||||||
|
// TODO: more efficiently implement with a Visitor implementing visit_str using split() and
|
||||||
|
// hex::decode by-byte
|
||||||
|
let mut s = String::deserialize(deserializer)?;
|
||||||
|
s.retain(|c| c != ':');
|
||||||
|
let mut out = MaybeUninit::<[u8; 32]>::uninit();
|
||||||
|
hex::decode_to_slice(s.as_bytes(), unsafe { &mut (*out.as_mut_ptr())[..] })
|
||||||
|
.map_err(serde::de::Error::custom)?;
|
||||||
|
Ok(unsafe { out.assume_init() })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::api;
|
use proxmox_schema::api;
|
||||||
|
|
||||||
#[api]
|
#[api]
|
||||||
#[derive(Serialize, Deserialize)]
|
#[derive(Serialize, Deserialize)]
|
||||||
|
358
pbs-api-types/src/human_byte.rs
Normal file
@ -0,0 +1,358 @@
|
|||||||
|
use anyhow::{bail, Error};
|
||||||
|
|
||||||
|
use proxmox_schema::{ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType};
|
||||||
|
|
||||||
|
/// Size units for byte sizes
|
||||||
|
#[derive(Debug, Copy, Clone, PartialEq)]
|
||||||
|
pub enum SizeUnit {
|
||||||
|
Byte,
|
||||||
|
// SI (base 10)
|
||||||
|
KByte,
|
||||||
|
MByte,
|
||||||
|
GByte,
|
||||||
|
TByte,
|
||||||
|
PByte,
|
||||||
|
// IEC (base 2)
|
||||||
|
Kibi,
|
||||||
|
Mebi,
|
||||||
|
Gibi,
|
||||||
|
Tebi,
|
||||||
|
Pebi,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SizeUnit {
|
||||||
|
/// Returns the scaling factor
|
||||||
|
pub fn factor(&self) -> f64 {
|
||||||
|
match self {
|
||||||
|
SizeUnit::Byte => 1.0,
|
||||||
|
// SI (base 10)
|
||||||
|
SizeUnit::KByte => 1_000.0,
|
||||||
|
SizeUnit::MByte => 1_000_000.0,
|
||||||
|
SizeUnit::GByte => 1_000_000_000.0,
|
||||||
|
SizeUnit::TByte => 1_000_000_000_000.0,
|
||||||
|
SizeUnit::PByte => 1_000_000_000_000_000.0,
|
||||||
|
// IEC (base 2)
|
||||||
|
SizeUnit::Kibi => 1024.0,
|
||||||
|
SizeUnit::Mebi => 1024.0 * 1024.0,
|
||||||
|
SizeUnit::Gibi => 1024.0 * 1024.0 * 1024.0,
|
||||||
|
SizeUnit::Tebi => 1024.0 * 1024.0 * 1024.0 * 1024.0,
|
||||||
|
SizeUnit::Pebi => 1024.0 * 1024.0 * 1024.0 * 1024.0 * 1024.0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// gets the biggest possible unit still having a value greater zero before the decimal point
|
||||||
|
/// 'binary' specifies if IEC (base 2) units should be used or SI (base 10) ones
|
||||||
|
pub fn auto_scale(size: f64, binary: bool) -> SizeUnit {
|
||||||
|
if binary {
|
||||||
|
let bits = 64 - (size as u64).leading_zeros();
|
||||||
|
match bits {
|
||||||
|
51.. => SizeUnit::Pebi,
|
||||||
|
41..=50 => SizeUnit::Tebi,
|
||||||
|
31..=40 => SizeUnit::Gibi,
|
||||||
|
21..=30 => SizeUnit::Mebi,
|
||||||
|
11..=20 => SizeUnit::Kibi,
|
||||||
|
_ => SizeUnit::Byte,
|
||||||
|
}
|
||||||
|
} else if size >= 1_000_000_000_000_000.0 {
|
||||||
|
SizeUnit::PByte
|
||||||
|
} else if size >= 1_000_000_000_000.0 {
|
||||||
|
SizeUnit::TByte
|
||||||
|
} else if size >= 1_000_000_000.0 {
|
||||||
|
SizeUnit::GByte
|
||||||
|
} else if size >= 1_000_000.0 {
|
||||||
|
SizeUnit::MByte
|
||||||
|
} else if size >= 1_000.0 {
|
||||||
|
SizeUnit::KByte
|
||||||
|
} else {
|
||||||
|
SizeUnit::Byte
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the string repesentation
|
||||||
|
impl std::fmt::Display for SizeUnit {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
match self {
|
||||||
|
SizeUnit::Byte => write!(f, "B"),
|
||||||
|
// SI (base 10)
|
||||||
|
SizeUnit::KByte => write!(f, "KB"),
|
||||||
|
SizeUnit::MByte => write!(f, "MB"),
|
||||||
|
SizeUnit::GByte => write!(f, "GB"),
|
||||||
|
SizeUnit::TByte => write!(f, "TB"),
|
||||||
|
SizeUnit::PByte => write!(f, "PB"),
|
||||||
|
// IEC (base 2)
|
||||||
|
SizeUnit::Kibi => write!(f, "KiB"),
|
||||||
|
SizeUnit::Mebi => write!(f, "MiB"),
|
||||||
|
SizeUnit::Gibi => write!(f, "GiB"),
|
||||||
|
SizeUnit::Tebi => write!(f, "TiB"),
|
||||||
|
SizeUnit::Pebi => write!(f, "PiB"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Strips a trailing SizeUnit inclusive trailing whitespace
|
||||||
|
/// Supports both IEC and SI based scales, the B/b byte symbol is optional.
|
||||||
|
fn strip_unit(v: &str) -> (&str, SizeUnit) {
|
||||||
|
let v = v.strip_suffix(&['b', 'B'][..]).unwrap_or(v); // byte is implied anyway
|
||||||
|
|
||||||
|
let (v, binary) = match v.strip_suffix('i') {
|
||||||
|
Some(n) => (n, true),
|
||||||
|
None => (v, false),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut unit = SizeUnit::Byte;
|
||||||
|
#[rustfmt::skip]
|
||||||
|
let value = v.strip_suffix(|c: char| match c {
|
||||||
|
'k' | 'K' if !binary => { unit = SizeUnit::KByte; true }
|
||||||
|
'm' | 'M' if !binary => { unit = SizeUnit::MByte; true }
|
||||||
|
'g' | 'G' if !binary => { unit = SizeUnit::GByte; true }
|
||||||
|
't' | 'T' if !binary => { unit = SizeUnit::TByte; true }
|
||||||
|
'p' | 'P' if !binary => { unit = SizeUnit::PByte; true }
|
||||||
|
// binary (IEC recommended) variants
|
||||||
|
'k' | 'K' if binary => { unit = SizeUnit::Kibi; true }
|
||||||
|
'm' | 'M' if binary => { unit = SizeUnit::Mebi; true }
|
||||||
|
'g' | 'G' if binary => { unit = SizeUnit::Gibi; true }
|
||||||
|
't' | 'T' if binary => { unit = SizeUnit::Tebi; true }
|
||||||
|
'p' | 'P' if binary => { unit = SizeUnit::Pebi; true }
|
||||||
|
_ => false
|
||||||
|
}).unwrap_or(v).trim_end();
|
||||||
|
|
||||||
|
(value, unit)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Byte size which can be displayed in a human friendly way
|
||||||
|
#[derive(Debug, Copy, Clone, UpdaterType)]
|
||||||
|
pub struct HumanByte {
|
||||||
|
/// The siginficant value, it does not includes any factor of the `unit`
|
||||||
|
size: f64,
|
||||||
|
/// The scale/unit of the value
|
||||||
|
unit: SizeUnit,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn verify_human_byte(s: &str) -> Result<(), Error> {
|
||||||
|
match s.parse::<HumanByte>() {
|
||||||
|
Ok(_) => Ok(()),
|
||||||
|
Err(err) => bail!("byte-size parse error for '{}': {}", s, err),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl ApiType for HumanByte {
|
||||||
|
const API_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"Byte size with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).",
|
||||||
|
)
|
||||||
|
.format(&ApiStringFormat::VerifyFn(verify_human_byte))
|
||||||
|
.min_length(1)
|
||||||
|
.max_length(64)
|
||||||
|
.schema();
|
||||||
|
}
|
||||||
|
|
||||||
|
impl HumanByte {
|
||||||
|
/// Create instance with size and unit (size must be positive)
|
||||||
|
pub fn with_unit(size: f64, unit: SizeUnit) -> Result<Self, Error> {
|
||||||
|
if size < 0.0 {
|
||||||
|
bail!("byte size may not be negative");
|
||||||
|
}
|
||||||
|
Ok(HumanByte { size, unit })
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a new instance with optimal binary unit computed
|
||||||
|
pub fn new_binary(size: f64) -> Self {
|
||||||
|
let unit = SizeUnit::auto_scale(size, true);
|
||||||
|
HumanByte {
|
||||||
|
size: size / unit.factor(),
|
||||||
|
unit,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a new instance with optimal decimal unit computed
|
||||||
|
pub fn new_decimal(size: f64) -> Self {
|
||||||
|
let unit = SizeUnit::auto_scale(size, false);
|
||||||
|
HumanByte {
|
||||||
|
size: size / unit.factor(),
|
||||||
|
unit,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the size as u64 number of bytes
|
||||||
|
pub fn as_u64(&self) -> u64 {
|
||||||
|
self.as_f64() as u64
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the size as f64 number of bytes
|
||||||
|
pub fn as_f64(&self) -> f64 {
|
||||||
|
self.size * self.unit.factor()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a copy with optimal binary unit computed
|
||||||
|
pub fn auto_scale_binary(self) -> Self {
|
||||||
|
HumanByte::new_binary(self.as_f64())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a copy with optimal decimal unit computed
|
||||||
|
pub fn auto_scale_decimal(self) -> Self {
|
||||||
|
HumanByte::new_decimal(self.as_f64())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<u64> for HumanByte {
|
||||||
|
fn from(v: u64) -> Self {
|
||||||
|
HumanByte::new_binary(v as f64)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl From<usize> for HumanByte {
|
||||||
|
fn from(v: usize) -> Self {
|
||||||
|
HumanByte::new_binary(v as f64)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::fmt::Display for HumanByte {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
let precision = f.precision().unwrap_or(3) as f64;
|
||||||
|
let precision_factor = 1.0 * 10.0_f64.powf(precision);
|
||||||
|
// this could cause loss of information, rust has sadly no shortest-max-X flt2dec fmt yet
|
||||||
|
let size = ((self.size * precision_factor).round()) / precision_factor;
|
||||||
|
write!(f, "{} {}", size, self.unit)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::str::FromStr for HumanByte {
|
||||||
|
type Err = Error;
|
||||||
|
|
||||||
|
fn from_str(v: &str) -> Result<Self, Error> {
|
||||||
|
let (v, unit) = strip_unit(v);
|
||||||
|
HumanByte::with_unit(v.parse()?, unit)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
proxmox_serde::forward_deserialize_to_from_str!(HumanByte);
|
||||||
|
proxmox_serde::forward_serialize_to_display!(HumanByte);
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_human_byte_parser() -> Result<(), Error> {
|
||||||
|
assert!("-10".parse::<HumanByte>().is_err()); // negative size
|
||||||
|
|
||||||
|
fn do_test(v: &str, size: f64, unit: SizeUnit, as_str: &str) -> Result<(), Error> {
|
||||||
|
let h: HumanByte = v.parse()?;
|
||||||
|
|
||||||
|
if h.size != size {
|
||||||
|
bail!("got unexpected size for '{}' ({} != {})", v, h.size, size);
|
||||||
|
}
|
||||||
|
if h.unit != unit {
|
||||||
|
bail!(
|
||||||
|
"got unexpected unit for '{}' ({:?} != {:?})",
|
||||||
|
v,
|
||||||
|
h.unit,
|
||||||
|
unit
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let new = h.to_string();
|
||||||
|
if &new != as_str {
|
||||||
|
bail!("to_string failed for '{}' ({:?} != {:?})", v, new, as_str);
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
fn test(v: &str, size: f64, unit: SizeUnit, as_str: &str) -> bool {
|
||||||
|
match do_test(v, size, unit, as_str) {
|
||||||
|
Ok(_) => true,
|
||||||
|
Err(err) => {
|
||||||
|
eprintln!("{}", err); // makes debugging easier
|
||||||
|
false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
assert!(test("14", 14.0, SizeUnit::Byte, "14 B"));
|
||||||
|
assert!(test("14.4", 14.4, SizeUnit::Byte, "14.4 B"));
|
||||||
|
assert!(test("14.45", 14.45, SizeUnit::Byte, "14.45 B"));
|
||||||
|
assert!(test("14.456", 14.456, SizeUnit::Byte, "14.456 B"));
|
||||||
|
assert!(test("14.4567", 14.4567, SizeUnit::Byte, "14.457 B"));
|
||||||
|
|
||||||
|
let h: HumanByte = "1.2345678".parse()?;
|
||||||
|
assert_eq!(&format!("{:.0}", h), "1 B");
|
||||||
|
assert_eq!(&format!("{:.0}", h.as_f64()), "1"); // use as_f64 to get raw bytes without unit
|
||||||
|
assert_eq!(&format!("{:.1}", h), "1.2 B");
|
||||||
|
assert_eq!(&format!("{:.2}", h), "1.23 B");
|
||||||
|
assert_eq!(&format!("{:.3}", h), "1.235 B");
|
||||||
|
assert_eq!(&format!("{:.4}", h), "1.2346 B");
|
||||||
|
assert_eq!(&format!("{:.5}", h), "1.23457 B");
|
||||||
|
assert_eq!(&format!("{:.6}", h), "1.234568 B");
|
||||||
|
assert_eq!(&format!("{:.7}", h), "1.2345678 B");
|
||||||
|
assert_eq!(&format!("{:.8}", h), "1.2345678 B");
|
||||||
|
|
||||||
|
assert!(test(
|
||||||
|
"987654321",
|
||||||
|
987654321.0,
|
||||||
|
SizeUnit::Byte,
|
||||||
|
"987654321 B"
|
||||||
|
));
|
||||||
|
|
||||||
|
assert!(test("1300b", 1300.0, SizeUnit::Byte, "1300 B"));
|
||||||
|
assert!(test("1300B", 1300.0, SizeUnit::Byte, "1300 B"));
|
||||||
|
assert!(test("1300 B", 1300.0, SizeUnit::Byte, "1300 B"));
|
||||||
|
assert!(test("1300 b", 1300.0, SizeUnit::Byte, "1300 B"));
|
||||||
|
|
||||||
|
assert!(test("1.5KB", 1.5, SizeUnit::KByte, "1.5 KB"));
|
||||||
|
assert!(test("1.5kb", 1.5, SizeUnit::KByte, "1.5 KB"));
|
||||||
|
assert!(test("1.654321MB", 1.654_321, SizeUnit::MByte, "1.654 MB"));
|
||||||
|
|
||||||
|
assert!(test("2.0GB", 2.0, SizeUnit::GByte, "2 GB"));
|
||||||
|
|
||||||
|
assert!(test("1.4TB", 1.4, SizeUnit::TByte, "1.4 TB"));
|
||||||
|
assert!(test("1.4tb", 1.4, SizeUnit::TByte, "1.4 TB"));
|
||||||
|
|
||||||
|
assert!(test("2KiB", 2.0, SizeUnit::Kibi, "2 KiB"));
|
||||||
|
assert!(test("2Ki", 2.0, SizeUnit::Kibi, "2 KiB"));
|
||||||
|
assert!(test("2kib", 2.0, SizeUnit::Kibi, "2 KiB"));
|
||||||
|
|
||||||
|
assert!(test("2.3454MiB", 2.3454, SizeUnit::Mebi, "2.345 MiB"));
|
||||||
|
assert!(test("2.3456MiB", 2.3456, SizeUnit::Mebi, "2.346 MiB"));
|
||||||
|
|
||||||
|
assert!(test("4gib", 4.0, SizeUnit::Gibi, "4 GiB"));
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_human_byte_auto_unit_decimal() {
|
||||||
|
fn convert(b: u64) -> String {
|
||||||
|
HumanByte::new_decimal(b as f64).to_string()
|
||||||
|
}
|
||||||
|
assert_eq!(convert(987), "987 B");
|
||||||
|
assert_eq!(convert(1022), "1.022 KB");
|
||||||
|
assert_eq!(convert(9_000), "9 KB");
|
||||||
|
assert_eq!(convert(1_000), "1 KB");
|
||||||
|
assert_eq!(convert(1_000_000), "1 MB");
|
||||||
|
assert_eq!(convert(1_000_000_000), "1 GB");
|
||||||
|
assert_eq!(convert(1_000_000_000_000), "1 TB");
|
||||||
|
assert_eq!(convert(1_000_000_000_000_000), "1 PB");
|
||||||
|
|
||||||
|
assert_eq!(convert((1 << 30) + 103 * (1 << 20)), "1.182 GB");
|
||||||
|
assert_eq!(convert((1 << 30) + 128 * (1 << 20)), "1.208 GB");
|
||||||
|
assert_eq!(convert((2 << 50) + 500 * (1 << 40)), "2.802 PB");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_human_byte_auto_unit_binary() {
|
||||||
|
fn convert(b: u64) -> String {
|
||||||
|
HumanByte::from(b).to_string()
|
||||||
|
}
|
||||||
|
assert_eq!(convert(0), "0 B");
|
||||||
|
assert_eq!(convert(987), "987 B");
|
||||||
|
assert_eq!(convert(1022), "1022 B");
|
||||||
|
assert_eq!(convert(9_000), "8.789 KiB");
|
||||||
|
assert_eq!(convert(10_000_000), "9.537 MiB");
|
||||||
|
assert_eq!(convert(10_000_000_000), "9.313 GiB");
|
||||||
|
assert_eq!(convert(10_000_000_000_000), "9.095 TiB");
|
||||||
|
|
||||||
|
assert_eq!(convert(1 << 10), "1 KiB");
|
||||||
|
assert_eq!(convert((1 << 10) * 10), "10 KiB");
|
||||||
|
assert_eq!(convert(1 << 20), "1 MiB");
|
||||||
|
assert_eq!(convert(1 << 30), "1 GiB");
|
||||||
|
assert_eq!(convert(1 << 40), "1 TiB");
|
||||||
|
assert_eq!(convert(1 << 50), "1 PiB");
|
||||||
|
|
||||||
|
assert_eq!(convert((1 << 30) + 103 * (1 << 20)), "1.101 GiB");
|
||||||
|
assert_eq!(convert((1 << 30) + 128 * (1 << 20)), "1.125 GiB");
|
||||||
|
assert_eq!(convert((1 << 40) + 128 * (1 << 30)), "1.125 TiB");
|
||||||
|
assert_eq!(convert((2 << 50) + 512 * (1 << 40)), "2.5 PiB");
|
||||||
|
}
|
@ -1,20 +1,24 @@
|
|||||||
|
use anyhow::format_err;
|
||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use regex::Regex;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::const_regex;
|
use proxmox_schema::*;
|
||||||
|
|
||||||
use proxmox::api::{api, schema::*};
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
Userid, Authid, REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
|
Authid, BackupNamespace, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA,
|
||||||
SINGLE_LINE_COMMENT_SCHEMA, PROXMOX_SAFE_ID_FORMAT, DATASTORE_SCHEMA,
|
BACKUP_NAMESPACE_SCHEMA, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
|
||||||
|
NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA,
|
||||||
|
SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
};
|
};
|
||||||
|
|
||||||
const_regex!{
|
const_regex! {
|
||||||
|
|
||||||
/// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
|
/// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
|
||||||
pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
|
pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
|
||||||
/// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:ACTUAL_JOB_ID'
|
/// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:(?:LOCAL_NS_ANCHOR:)ACTUAL_JOB_ID'
|
||||||
pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
|
pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r")(?::(", BACKUP_NS_RE!(), r"))?:");
|
||||||
}
|
}
|
||||||
|
|
||||||
pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
|
pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
|
||||||
@ -23,34 +27,41 @@ pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
|
|||||||
.max_length(32)
|
.max_length(32)
|
||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run sync job at specified schedule.")
|
||||||
"Run sync job at specified schedule.")
|
.format(&ApiStringFormat::VerifyFn(
|
||||||
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
|
proxmox_time::verify_calendar_event,
|
||||||
|
))
|
||||||
.type_text("<calendar-event>")
|
.type_text("<calendar-event>")
|
||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
pub const GC_SCHEDULE_SCHEMA: Schema =
|
||||||
"Run garbage collection job at specified schedule.")
|
StringSchema::new("Run garbage collection job at specified schedule.")
|
||||||
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
|
.format(&ApiStringFormat::VerifyFn(
|
||||||
|
proxmox_time::verify_calendar_event,
|
||||||
|
))
|
||||||
|
.type_text("<calendar-event>")
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run prune job at specified schedule.")
|
||||||
|
.format(&ApiStringFormat::VerifyFn(
|
||||||
|
proxmox_time::verify_calendar_event,
|
||||||
|
))
|
||||||
.type_text("<calendar-event>")
|
.type_text("<calendar-event>")
|
||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
pub const VERIFICATION_SCHEDULE_SCHEMA: Schema =
|
||||||
"Run prune job at specified schedule.")
|
StringSchema::new("Run verify job at specified schedule.")
|
||||||
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
|
.format(&ApiStringFormat::VerifyFn(
|
||||||
.type_text("<calendar-event>")
|
proxmox_time::verify_calendar_event,
|
||||||
.schema();
|
))
|
||||||
|
.type_text("<calendar-event>")
|
||||||
pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
.schema();
|
||||||
"Run verify job at specified schedule.")
|
|
||||||
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
|
|
||||||
.type_text("<calendar-event>")
|
|
||||||
.schema();
|
|
||||||
|
|
||||||
pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
|
pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
|
||||||
"Delete vanished backups. This remove the local copy if the remote backup was deleted.")
|
"Delete vanished backups. This remove the local copy if the remote backup was deleted.",
|
||||||
.default(true)
|
)
|
||||||
.schema();
|
.default(false)
|
||||||
|
.schema();
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
properties: {
|
properties: {
|
||||||
@ -76,17 +87,17 @@ pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
)]
|
)]
|
||||||
#[derive(Serialize,Deserialize,Default)]
|
#[derive(Serialize, Deserialize, Default)]
|
||||||
#[serde(rename_all="kebab-case")]
|
#[serde(rename_all = "kebab-case")]
|
||||||
/// Job Scheduling Status
|
/// Job Scheduling Status
|
||||||
pub struct JobScheduleStatus {
|
pub struct JobScheduleStatus {
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub next_run: Option<i64>,
|
pub next_run: Option<i64>,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub last_run_state: Option<String>,
|
pub last_run_state: Option<String>,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub last_run_upid: Option<String>,
|
pub last_run_upid: Option<String>,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub last_run_endtime: Option<i64>,
|
pub last_run_endtime: Option<i64>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -130,20 +141,23 @@ pub struct DatastoreNotify {
|
|||||||
pub sync: Option<Notify>,
|
pub sync: Option<Notify>,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
|
pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema =
|
||||||
"Datastore notification setting")
|
StringSchema::new("Datastore notification setting")
|
||||||
.format(&ApiStringFormat::PropertyString(&DatastoreNotify::API_SCHEMA))
|
.format(&ApiStringFormat::PropertyString(
|
||||||
.schema();
|
&DatastoreNotify::API_SCHEMA,
|
||||||
|
))
|
||||||
|
.schema();
|
||||||
|
|
||||||
pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
|
pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
|
||||||
"Do not verify backups that are already verified if their verification is not outdated.")
|
"Do not verify backups that are already verified if their verification is not outdated.",
|
||||||
.default(true)
|
)
|
||||||
.schema();
|
.default(true)
|
||||||
|
.schema();
|
||||||
|
|
||||||
pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new(
|
pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema =
|
||||||
"Days after that a verification becomes outdated")
|
IntegerSchema::new("Days after that a verification becomes outdated. (0 is deprecated)'")
|
||||||
.minimum(1)
|
.minimum(0)
|
||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
properties: {
|
properties: {
|
||||||
@ -169,29 +183,53 @@ pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new(
|
|||||||
optional: true,
|
optional: true,
|
||||||
schema: VERIFICATION_SCHEDULE_SCHEMA,
|
schema: VERIFICATION_SCHEDULE_SCHEMA,
|
||||||
},
|
},
|
||||||
|
ns: {
|
||||||
|
optional: true,
|
||||||
|
schema: BACKUP_NAMESPACE_SCHEMA,
|
||||||
|
},
|
||||||
|
"max-depth": {
|
||||||
|
optional: true,
|
||||||
|
schema: crate::NS_MAX_DEPTH_SCHEMA,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
)]
|
)]
|
||||||
#[derive(Serialize,Deserialize,Updater)]
|
#[derive(Serialize, Deserialize, Updater)]
|
||||||
#[serde(rename_all="kebab-case")]
|
#[serde(rename_all = "kebab-case")]
|
||||||
/// Verification Job
|
/// Verification Job
|
||||||
pub struct VerificationJobConfig {
|
pub struct VerificationJobConfig {
|
||||||
/// unique ID to address this job
|
/// unique ID to address this job
|
||||||
#[updater(skip)]
|
#[updater(skip)]
|
||||||
pub id: String,
|
pub id: String,
|
||||||
/// the datastore ID this verificaiton job affects
|
/// the datastore ID this verification job affects
|
||||||
pub store: String,
|
pub store: String,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
/// if not set to false, check the age of the last snapshot verification to filter
|
/// if not set to false, check the age of the last snapshot verification to filter
|
||||||
/// out recent ones, depending on 'outdated_after' configuration.
|
/// out recent ones, depending on 'outdated_after' configuration.
|
||||||
pub ignore_verified: Option<bool>,
|
pub ignore_verified: Option<bool>,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
/// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false.
|
/// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false.
|
||||||
pub outdated_after: Option<i64>,
|
pub outdated_after: Option<i64>,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub comment: Option<String>,
|
pub comment: Option<String>,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
/// when to schedule this job in calendar event notation
|
/// when to schedule this job in calendar event notation
|
||||||
pub schedule: Option<String>,
|
pub schedule: Option<String>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||||
|
/// on which backup namespace to run the verification recursively
|
||||||
|
pub ns: Option<BackupNamespace>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||||
|
/// how deep the verify should go from the `ns` level downwards. Passing 0 verifies only the
|
||||||
|
/// snapshots on the same level as the passed `ns`, or the datastore root if none.
|
||||||
|
pub max_depth: Option<usize>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl VerificationJobConfig {
|
||||||
|
pub fn acl_path(&self) -> Vec<&str> {
|
||||||
|
match self.ns.as_ref() {
|
||||||
|
Some(ns) => ns.acl_path(&self.store),
|
||||||
|
None => vec!["datastore", &self.store],
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
@ -204,8 +242,8 @@ pub struct VerificationJobConfig {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
#[derive(Serialize,Deserialize)]
|
#[derive(Serialize, Deserialize)]
|
||||||
#[serde(rename_all="kebab-case")]
|
#[serde(rename_all = "kebab-case")]
|
||||||
/// Status of Verification Job
|
/// Status of Verification Job
|
||||||
pub struct VerificationJobStatus {
|
pub struct VerificationJobStatus {
|
||||||
#[serde(flatten)]
|
#[serde(flatten)]
|
||||||
@ -244,24 +282,42 @@ pub struct VerificationJobStatus {
|
|||||||
optional: true,
|
optional: true,
|
||||||
type: Userid,
|
type: Userid,
|
||||||
},
|
},
|
||||||
|
"group-filter": {
|
||||||
|
schema: GROUP_FILTER_LIST_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
ns: {
|
||||||
|
type: BackupNamespace,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"max-depth": {
|
||||||
|
schema: crate::NS_MAX_DEPTH_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
)]
|
)]
|
||||||
#[derive(Serialize,Deserialize,Clone,Updater)]
|
#[derive(Serialize, Deserialize, Clone, Updater)]
|
||||||
#[serde(rename_all="kebab-case")]
|
#[serde(rename_all = "kebab-case")]
|
||||||
/// Tape Backup Job Setup
|
/// Tape Backup Job Setup
|
||||||
pub struct TapeBackupJobSetup {
|
pub struct TapeBackupJobSetup {
|
||||||
pub store: String,
|
pub store: String,
|
||||||
pub pool: String,
|
pub pool: String,
|
||||||
pub drive: String,
|
pub drive: String,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub eject_media: Option<bool>,
|
pub eject_media: Option<bool>,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub export_media_set: Option<bool>,
|
pub export_media_set: Option<bool>,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub latest_only: Option<bool>,
|
pub latest_only: Option<bool>,
|
||||||
/// Send job email notification to this user
|
/// Send job email notification to this user
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub notify_user: Option<Userid>,
|
pub notify_user: Option<Userid>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub group_filter: Option<Vec<GroupFilter>>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||||
|
pub ns: Option<BackupNamespace>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||||
|
pub max_depth: Option<usize>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
@ -282,17 +338,17 @@ pub struct TapeBackupJobSetup {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
)]
|
)]
|
||||||
#[derive(Serialize,Deserialize,Clone,Updater)]
|
#[derive(Serialize, Deserialize, Clone, Updater)]
|
||||||
#[serde(rename_all="kebab-case")]
|
#[serde(rename_all = "kebab-case")]
|
||||||
/// Tape Backup Job
|
/// Tape Backup Job
|
||||||
pub struct TapeBackupJobConfig {
|
pub struct TapeBackupJobConfig {
|
||||||
#[updater(skip)]
|
#[updater(skip)]
|
||||||
pub id: String,
|
pub id: String,
|
||||||
#[serde(flatten)]
|
#[serde(flatten)]
|
||||||
pub setup: TapeBackupJobSetup,
|
pub setup: TapeBackupJobSetup,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub comment: Option<String>,
|
pub comment: Option<String>,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub schedule: Option<String>,
|
pub schedule: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -306,8 +362,8 @@ pub struct TapeBackupJobConfig {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
#[derive(Serialize,Deserialize)]
|
#[derive(Serialize, Deserialize)]
|
||||||
#[serde(rename_all="kebab-case")]
|
#[serde(rename_all = "kebab-case")]
|
||||||
/// Status of Tape Backup Job
|
/// Status of Tape Backup Job
|
||||||
pub struct TapeBackupJobStatus {
|
pub struct TapeBackupJobStatus {
|
||||||
#[serde(flatten)]
|
#[serde(flatten)]
|
||||||
@ -315,10 +371,62 @@ pub struct TapeBackupJobStatus {
|
|||||||
#[serde(flatten)]
|
#[serde(flatten)]
|
||||||
pub status: JobScheduleStatus,
|
pub status: JobScheduleStatus,
|
||||||
/// Next tape used (best guess)
|
/// Next tape used (best guess)
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub next_media_label: Option<String>,
|
pub next_media_label: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
/// Filter for matching `BackupGroup`s, for use with `BackupGroup::filter`.
|
||||||
|
pub enum GroupFilter {
|
||||||
|
/// BackupGroup type - either `vm`, `ct`, or `host`.
|
||||||
|
BackupType(BackupType),
|
||||||
|
/// Full identifier of BackupGroup, including type
|
||||||
|
Group(String),
|
||||||
|
/// A regular expression matched against the full identifier of the BackupGroup
|
||||||
|
Regex(Regex),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::str::FromStr for GroupFilter {
|
||||||
|
type Err = anyhow::Error;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
match s.split_once(':') {
|
||||||
|
Some(("group", value)) => BACKUP_GROUP_SCHEMA.parse_simple_value(value).map(|_| GroupFilter::Group(value.to_string())),
|
||||||
|
Some(("type", value)) => Ok(GroupFilter::BackupType(value.parse()?)),
|
||||||
|
Some(("regex", value)) => Ok(GroupFilter::Regex(Regex::new(value)?)),
|
||||||
|
Some((ty, _value)) => Err(format_err!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty)),
|
||||||
|
None => Err(format_err!("input doesn't match expected format '<group:GROUP||type:<vm|ct|host>|regex:REGEX>'")),
|
||||||
|
}.map_err(|err| format_err!("'{}' - {}", s, err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// used for serializing below, caution!
|
||||||
|
impl std::fmt::Display for GroupFilter {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
match self {
|
||||||
|
GroupFilter::BackupType(backup_type) => write!(f, "type:{}", backup_type),
|
||||||
|
GroupFilter::Group(backup_group) => write!(f, "group:{}", backup_group),
|
||||||
|
GroupFilter::Regex(regex) => write!(f, "regex:{}", regex.as_str()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
proxmox_serde::forward_deserialize_to_from_str!(GroupFilter);
|
||||||
|
proxmox_serde::forward_serialize_to_display!(GroupFilter);
|
||||||
|
|
||||||
|
fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> {
|
||||||
|
GroupFilter::from_str(input).map(|_| ())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"Group filter based on group identifier ('group:GROUP'), group type ('type:<vm|ct|host>'), or regex ('regex:RE').")
|
||||||
|
.format(&ApiStringFormat::VerifyFn(verify_group_filter))
|
||||||
|
.type_text("<type:<vm|ct|host>|group:GROUP|regex:RE>")
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const GROUP_FILTER_LIST_SCHEMA: Schema =
|
||||||
|
ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema();
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
properties: {
|
properties: {
|
||||||
id: {
|
id: {
|
||||||
@ -327,6 +435,10 @@ pub struct TapeBackupJobStatus {
|
|||||||
store: {
|
store: {
|
||||||
schema: DATASTORE_SCHEMA,
|
schema: DATASTORE_SCHEMA,
|
||||||
},
|
},
|
||||||
|
ns: {
|
||||||
|
type: BackupNamespace,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
"owner": {
|
"owner": {
|
||||||
type: Authid,
|
type: Authid,
|
||||||
optional: true,
|
optional: true,
|
||||||
@ -337,37 +449,71 @@ pub struct TapeBackupJobStatus {
|
|||||||
"remote-store": {
|
"remote-store": {
|
||||||
schema: DATASTORE_SCHEMA,
|
schema: DATASTORE_SCHEMA,
|
||||||
},
|
},
|
||||||
|
"remote-ns": {
|
||||||
|
type: BackupNamespace,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
"remove-vanished": {
|
"remove-vanished": {
|
||||||
schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
|
schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
|
||||||
optional: true,
|
optional: true,
|
||||||
},
|
},
|
||||||
|
"max-depth": {
|
||||||
|
schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
comment: {
|
comment: {
|
||||||
optional: true,
|
optional: true,
|
||||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
},
|
},
|
||||||
|
limit: {
|
||||||
|
type: RateLimitConfig,
|
||||||
|
},
|
||||||
schedule: {
|
schedule: {
|
||||||
optional: true,
|
optional: true,
|
||||||
schema: SYNC_SCHEDULE_SCHEMA,
|
schema: SYNC_SCHEDULE_SCHEMA,
|
||||||
},
|
},
|
||||||
|
"group-filter": {
|
||||||
|
schema: GROUP_FILTER_LIST_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
)]
|
)]
|
||||||
#[derive(Serialize,Deserialize,Clone,Updater)]
|
#[derive(Serialize, Deserialize, Clone, Updater)]
|
||||||
#[serde(rename_all="kebab-case")]
|
#[serde(rename_all = "kebab-case")]
|
||||||
/// Sync Job
|
/// Sync Job
|
||||||
pub struct SyncJobConfig {
|
pub struct SyncJobConfig {
|
||||||
#[updater(skip)]
|
#[updater(skip)]
|
||||||
pub id: String,
|
pub id: String,
|
||||||
pub store: String,
|
pub store: String,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub ns: Option<BackupNamespace>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub owner: Option<Authid>,
|
pub owner: Option<Authid>,
|
||||||
pub remote: String,
|
pub remote: String,
|
||||||
pub remote_store: String,
|
pub remote_store: String,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub remote_ns: Option<BackupNamespace>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub remove_vanished: Option<bool>,
|
pub remove_vanished: Option<bool>,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub max_depth: Option<usize>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub comment: Option<String>,
|
pub comment: Option<String>,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub schedule: Option<String>,
|
pub schedule: Option<String>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub group_filter: Option<Vec<GroupFilter>>,
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub limit: RateLimitConfig,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SyncJobConfig {
|
||||||
|
pub fn acl_path(&self) -> Vec<&str> {
|
||||||
|
match self.ns.as_ref() {
|
||||||
|
Some(ns) => ns.acl_path(&self.store),
|
||||||
|
None => vec!["datastore", &self.store],
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
@ -380,9 +526,8 @@ pub struct SyncJobConfig {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
#[derive(Serialize,Deserialize)]
|
#[serde(rename_all = "kebab-case")]
|
||||||
#[serde(rename_all="kebab-case")]
|
|
||||||
/// Status of Sync Job
|
/// Status of Sync Job
|
||||||
pub struct SyncJobStatus {
|
pub struct SyncJobStatus {
|
||||||
#[serde(flatten)]
|
#[serde(flatten)]
|
||||||
@ -390,3 +535,186 @@ pub struct SyncJobStatus {
|
|||||||
#[serde(flatten)]
|
#[serde(flatten)]
|
||||||
pub status: JobScheduleStatus,
|
pub status: JobScheduleStatus,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// These are used separately without `ns`/`max-depth` sometimes in the API, specifically in the API
|
||||||
|
/// call to prune a specific group, where `max-depth` makes no sense.
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
"keep-last": {
|
||||||
|
schema: crate::PRUNE_SCHEMA_KEEP_LAST,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"keep-hourly": {
|
||||||
|
schema: crate::PRUNE_SCHEMA_KEEP_HOURLY,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"keep-daily": {
|
||||||
|
schema: crate::PRUNE_SCHEMA_KEEP_DAILY,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"keep-weekly": {
|
||||||
|
schema: crate::PRUNE_SCHEMA_KEEP_WEEKLY,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"keep-monthly": {
|
||||||
|
schema: crate::PRUNE_SCHEMA_KEEP_MONTHLY,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"keep-yearly": {
|
||||||
|
schema: crate::PRUNE_SCHEMA_KEEP_YEARLY,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize, Deserialize, Default, Updater)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
/// Common pruning options
|
||||||
|
pub struct KeepOptions {
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub keep_last: Option<u64>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub keep_hourly: Option<u64>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub keep_daily: Option<u64>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub keep_weekly: Option<u64>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub keep_monthly: Option<u64>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub keep_yearly: Option<u64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl KeepOptions {
|
||||||
|
pub fn keeps_something(&self) -> bool {
|
||||||
|
self.keep_last.unwrap_or(0)
|
||||||
|
+ self.keep_hourly.unwrap_or(0)
|
||||||
|
+ self.keep_daily.unwrap_or(0)
|
||||||
|
+ self.keep_weekly.unwrap_or(0)
|
||||||
|
+ self.keep_monthly.unwrap_or(0)
|
||||||
|
+ self.keep_yearly.unwrap_or(0)
|
||||||
|
> 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
keep: {
|
||||||
|
type: KeepOptions,
|
||||||
|
},
|
||||||
|
ns: {
|
||||||
|
type: BackupNamespace,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"max-depth": {
|
||||||
|
schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize, Deserialize, Default, Updater)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
/// Common pruning options
|
||||||
|
pub struct PruneJobOptions {
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub keep: KeepOptions,
|
||||||
|
|
||||||
|
/// The (optional) recursion depth
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub max_depth: Option<usize>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub ns: Option<BackupNamespace>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PruneJobOptions {
|
||||||
|
pub fn keeps_something(&self) -> bool {
|
||||||
|
self.keep.keeps_something()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> {
|
||||||
|
match &self.ns {
|
||||||
|
Some(ns) => ns.acl_path(store),
|
||||||
|
None => vec!["datastore", store],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
disable: {
|
||||||
|
type: Boolean,
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
id: {
|
||||||
|
schema: JOB_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
store: {
|
||||||
|
schema: DATASTORE_SCHEMA,
|
||||||
|
},
|
||||||
|
schedule: {
|
||||||
|
schema: PRUNE_SCHEDULE_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
options: {
|
||||||
|
type: PruneJobOptions,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Deserialize, Serialize, Updater)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
/// Prune configuration.
|
||||||
|
pub struct PruneJobConfig {
|
||||||
|
/// unique ID to address this job
|
||||||
|
#[updater(skip)]
|
||||||
|
pub id: String,
|
||||||
|
|
||||||
|
pub store: String,
|
||||||
|
|
||||||
|
/// Disable this job.
|
||||||
|
#[serde(default, skip_serializing_if = "is_false")]
|
||||||
|
#[updater(serde(skip_serializing_if = "Option::is_none"))]
|
||||||
|
pub disable: bool,
|
||||||
|
|
||||||
|
pub schedule: String,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub comment: Option<String>,
|
||||||
|
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub options: PruneJobOptions,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PruneJobConfig {
|
||||||
|
pub fn acl_path(&self) -> Vec<&str> {
|
||||||
|
self.options.acl_path(&self.store)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn is_false(b: &bool) -> bool {
|
||||||
|
!b
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
config: {
|
||||||
|
type: PruneJobConfig,
|
||||||
|
},
|
||||||
|
status: {
|
||||||
|
type: JobScheduleStatus,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
/// Status of prune job
|
||||||
|
pub struct PruneJobStatus {
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub config: PruneJobConfig,
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub status: JobScheduleStatus,
|
||||||
|
}
|
||||||
|