Compare commits
591 Commits
*(Commit list omitted: the compare view captured only bare SHA-1 IDs, from 0d5ab04a90 down to 10426c1750; the author, date, and message columns were empty.)*
Cargo.toml (25 changed lines)

```diff
@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.1.3"
+version = "0.5.0"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
@@ -14,44 +14,53 @@ name = "proxmox_backup"
 path = "src/lib.rs"

 [dependencies]
-base64 = "0.10"
+base64 = "0.12"
+bitflags = "1.2.1"
 bytes = "0.5"
 chrono = "0.4" # Date and time library for Rust
 crc32fast = "1"
 endian_trait = { version = "0.6", features = ["arrays"] }
-failure = "0.1"
+anyhow = "1.0"
 futures = "0.3"
 h2 = { version = "0.2", features = ["stream"] }
+handlebars = "3.0"
 http = "0.2"
 hyper = "0.13"
 lazy_static = "1.4"
 libc = "0.2"
 log = "0.4"
 native-tls = "0.2"
 nix = "0.16"
 num-traits = "0.2"
 once_cell = "1.3.1"
 openssl = "0.10"
 pam = "0.7"
 pam-sys = "0.5"
 percent-encoding = "2.1"
-pin-utils = "0.1.0-alpha"
-proxmox = { version = "0.1.18", features = [ "sortable-macro", "api-macro" ] }
+pin-utils = "0.1.0"
+pathpatterns = "0.1.1"
+proxmox = { version = "0.1.41", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro" ] }
+proxmox-fuse = "0.1.0"
+pxar = { version = "0.2.0", features = [ "tokio-io", "futures-io" ] }
+#pxar = { path = "../pxar", features = [ "tokio-io", "futures-io" ] }
 regex = "1.2"
-rustyline = "5.0.5"
+rustyline = "6"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 siphasher = "0.3"
 syslog = "4.0"
 tokio = { version = "0.2.9", features = [ "blocking", "fs", "io-util", "macros", "rt-threaded", "signal", "stream", "tcp", "time", "uds" ] }
 tokio-openssl = "0.4.0"
-tokio-util = { version = "0.2.0", features = [ "codec" ] }
+tokio-util = { version = "0.3", features = [ "codec" ] }
 tower-service = "0.3.0"
+udev = "0.3"
 url = "2.1"
 #valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
 walkdir = "2"
 xdg = "2.2"
 zstd = { version = "0.4", features = [ "bindgen" ] }
+nom = "5.1"

 [features]
 default = []
```
Makefile (11 changed lines)

```diff
@@ -37,10 +37,12 @@ CARGO ?= cargo
 COMPILED_BINS := \
 	$(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN))

-DEBS= ${PACKAGE}-server_${DEB_VERSION}_${ARCH}.deb ${PACKAGE}-client_${DEB_VERSION}_${ARCH}.deb
+SERVER_DEB=${PACKAGE}-server_${DEB_VERSION}_${ARCH}.deb
+CLIENT_DEB=${PACKAGE}-client_${DEB_VERSION}_${ARCH}.deb
+DOC_DEB=${PACKAGE}-docs_${DEB_VERSION}_all.deb
+
+DEBS=${SERVER_DEB} ${CLIENT_DEB}

 DSC = rust-${PACKAGE}_${DEB_VERSION}.dsc

 DESTDIR=
@@ -135,7 +137,8 @@ install: $(COMPILED_BINS)
 	$(MAKE) -C docs install

 .PHONY: upload
-upload: ${DEBS}
+upload: ${SERVER_DEB} ${CLIENT_DEB} ${DOC_DEB}
 	# check if working directory is clean
 	git diff --exit-code --stat && git diff --exit-code --stat --staged
-	tar cf - ${DEBS} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
+	tar cf - ${SERVER_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
+	tar cf - ${CLIENT_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve" --dist buster
```
TODO.rst (21 changed lines)

```diff
@@ -1,22 +1,37 @@
 TODO list for Proxmox Backup
 ============================

-* user management api
-
-* disk management api
-
-* start writing server GUI
-
 * improve catalog shell commands

 * improve user documentation


+GUI
+===
+
+* fix network/dns GUI (network/dns api changed)
+
+* user/acl/permission management GUI
+
+* implement GUI to configure remotes
+
+* implement fancy DatastoreStatus.js dashboard
+
+* implement PVE GUI to add PBS storage (with convenient copy/paste
+  functionality, like we have for cluster join)
+
+
+Chores:
+=======
+
+* move tools/xattr.rs and tools/acl.rs to proxmox/sys/linux/
+
+* remove pbs-* systemd timers and services on package purge
+
+
 Suggestions
 ===========
```
debian/changelog (79 changed lines)

```diff
@@ -1,3 +1,82 @@
+rust-proxmox-backup (0.5.0-1) unstable; urgency=medium
+
+  * partially revert commit 1f82f9b7b5d231da22a541432d5617cb303c0000
+
+  * ui: allow to Forget (delete) backup snapshots
+
+  * pxar: deal with files changing size during archiving
+
+ -- Proxmox Support Team <support@proxmox.com>  Mon, 29 Jun 2020 13:00:54 +0200
+
+rust-proxmox-backup (0.4.0-1) unstable; urgency=medium
+
+  * change api for incremental backups mode
+
+  * zfs disk management gui
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 26 Jun 2020 10:43:27 +0200
+
+rust-proxmox-backup (0.3.0-1) unstable; urgency=medium
+
+  * support incremental backups mode
+
+  * new disk management
+
+  * single file restore for container backups
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 24 Jun 2020 10:12:57 +0200
+
+rust-proxmox-backup (0.2.3-1) unstable; urgency=medium
+
+  * tools/systemd/time: fix compute_next_event for weekdays
+
+  * improve display of 'next run' for sync jobs
+
+  * fix csum calculation for images which do not have a 'chunk_size' aligned
+    size
+
+  * add parser for zpool list output
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 04 Jun 2020 10:39:06 +0200
+
+rust-proxmox-backup (0.2.2-1) unstable; urgency=medium
+
+  * proxmox-backup-client.rs: implement quiet flag
+
+  * client restore: don't add server file ending if already specified
+
+  * src/client/pull.rs: also download client.log.blob
+
+  * src/client/pull.rs: more verbose logging
+
+  * gui improvements
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 03 Jun 2020 10:37:12 +0200
+
+rust-proxmox-backup (0.2.1-1) unstable; urgency=medium
+
+  * ui: move server RRD statistics to 'Server Status' panel
+
+  * ui/api: add more server statistics
+
+  * ui/api: add per-datastore usage and performance statistics over time
+
+  * ui: add initial remote config management panel
+
+  * remotes: save passwords as base64
+
+  * gather zpool io stats
+
+  * various fixes/improvements
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 28 May 2020 17:39:33 +0200
+
+rust-proxmox-backup (0.2.0-1) unstable; urgency=medium
+
+  * see git changelog (too many changes)
+
+ -- Proxmox Support Team <support@proxmox.com>  Mon, 25 May 2020 19:17:03 +0200
+
 rust-proxmox-backup (0.1.3-1) unstable; urgency=medium

   * use SectionConfig from proxmox 0.1.18-1
```
debian/control.in (3 changed lines)

```diff
@@ -3,8 +3,9 @@ Architecture: any
 Depends: fonts-font-awesome,
          libjs-extjs (>= 6.0.1),
          libzstd1 (>= 1.3.8),
+         proxmox-backup-docs,
          proxmox-mini-journalreader,
-         proxmox-widget-toolkit,
+         proxmox-widget-toolkit (>= 2.2-4),
          ${misc:Depends},
          ${shlibs:Depends},
 Description: Proxmox Backup Server daemon with tools and GUI
```
debian/debcargo.toml (29 changed lines)

```diff
@@ -11,8 +11,31 @@ vcs_git = ""
 vcs_browser = ""
 maintainer = "Proxmox Support Team <support@proxmox.com>"
 section = "admin"
-build_depends = [ "debhelper (>= 12~)", "bash-completion" ]
-build_depends_excludes = [ "debhelper (>=11)" ]
+build_depends = [
+    "debhelper (>= 12~)",
+    "bash-completion",
+    "python3-docutils",
+    "python3-pygments",
+    "rsync",
+    "fonts-dejavu-core <!nodoc>",
+    "fonts-lato <!nodoc>",
+    "fonts-open-sans <!nodoc>",
+    "graphviz <!nodoc>",
+    "latexmk <!nodoc>",
+    "python3-sphinx <!nodoc>",
+    "texlive-fonts-extra <!nodoc>",
+    "texlive-fonts-recommended <!nodoc>",
+    "texlive-xetex <!nodoc>",
+    "xindy <!nodoc>",
+]
+build_depends_excludes = [
+    "debhelper (>=11)",
+]

 [packages.lib]
-depends = [ "libacl1-dev", "libsystemd-dev", "libfuse3-dev", "uuid-dev" ]
+depends = [
+    "libacl1-dev",
+    "libfuse3-dev",
+    "libsystemd-dev",
+    "uuid-dev",
+]
```
debian/postinst (new file, 28 lines)

```diff
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+set -e
+
+#DEBHELPER#
+
+case "$1" in
+    configure)
+	# modeled after dh_systemd_start output
+	systemctl --system daemon-reload >/dev/null || true
+	if [ -n "$2" ]; then
+		_dh_action=try-reload-or-restart
+	else
+		_dh_action=start
+	fi
+	deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
+    ;;
+
+    abort-upgrade|abort-remove|abort-deconfigure)
+    ;;
+
+    *)
+	echo "postinst called with unknown argument \`$1'" >&2
+	exit 1
+    ;;
+esac
+
+exit 0
```
debian/prerm (new file, 10 lines)

```diff
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+set -e
+
+#DEBHELPER#
+
+# modeled after dh_systemd_start output
+if [ -d /run/systemd/system ] && [ "$1" = remove ]; then
+	deb-systemd-invoke stop 'proxmox-backup-banner.service' 'proxmox-backup-proxy.service' 'proxmox-backup.service' >/dev/null || true
+fi
```
debian/proxmox-backup-server.install (1 changed line)

```diff
@@ -5,6 +5,7 @@ usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-api
 usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
 usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
 usr/sbin/proxmox-backup-manager
+usr/share/javascript/proxmox-backup/index.hbs
 usr/share/javascript/proxmox-backup/css/ext6-pbs.css
 usr/share/javascript/proxmox-backup/images/logo-128.png
 usr/share/javascript/proxmox-backup/images/proxmox_logo.png
```
debian/rules (6 changed lines)

```diff
@@ -37,9 +37,9 @@ override_dh_auto_install:
 	    PROXY_USER=backup \
 	    LIBDIR=/usr/lib/$(DEB_HOST_MULTIARCH)

-override_dh_installinit:
-	dh_installinit
-	dh_installinit --name proxmox-backup-proxy
+override_dh_installsystemd:
+	# note: we start/try-reload-restart services manually in postinst
+	dh_installsystemd --no-start --no-restart-after-upgrade

 # workaround https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=933541
 # TODO: remove once available (Debian 11 ?)
```
docs/Makefile

```diff
@@ -73,10 +73,11 @@ html: ${GENERATED_SYNOPSIS}

 .PHONY: latexpdf
 latexpdf: ${GENERATED_SYNOPSIS}
+	@echo "Requires python3-sphinx, texlive-xetex, xindy and texlive-fonts-extra"
 	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo "Running LaTeX files through pdflatex..."
+	@echo "Running LaTeX files through xelatex..."
 	$(MAKE) -C $(BUILDDIR)/latex all-pdf
-	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+	@echo "xelatex finished; the PDF files are in $(BUILDDIR)/latex."

 .PHONY: epub3
 epub3: ${GENERATED_SYNOPSIS}
```
docs/administration-guide.rst

```diff
@@ -3,6 +3,7 @@ Administration Guide

 The administration guide.

+.. todo:: either add a bit more explanation or remove the previous sentence

 Terminology
 -----------
@@ -12,7 +13,7 @@ Backup Content

 When doing deduplication, there are different strategies to get
 optimal results in terms of performance and/or deduplication rates.
-Depending on the type of data, one can split data into fixed or variable
+Depending on the type of data, one can split data into *fixed* or *variable*
 sized chunks.

 Fixed sized chunking needs almost no CPU performance, and is used to
@@ -21,7 +22,7 @@ backup virtual machine images.
 Variable sized chunking needs more CPU power, but is essential to get
 good deduplication rates for file archives.

-Therefore, the backup server supports both strategies.
+The backup server supports both strategies.


 File Archives: ``<name>.pxar``
@@ -29,9 +30,9 @@ File Archives: ``<name>.pxar``

 .. see https://moinakg.wordpress.com/2013/06/22/high-performance-content-defined-chunking/

-A file archive stores a whole directory tree. Content is stored using
+A file archive stores a full directory tree. Content is stored using
 the :ref:`pxar-format`, split into variable sized chunks. The format
-is specially optimized to achieve good deduplication rates.
+is optimized to achieve good deduplication rates.


 Image Archives: ``<name>.img``
@@ -44,8 +45,8 @@ data. Content is split into fixed sized chunks.
 Binary Data (BLOBs)
 ^^^^^^^^^^^^^^^^^^^

-This type is used to store smaller (< 16MB) binaries like
-configuration data. Larger files should be stored as image archive.
+This type is used to store smaller (< 16MB) binary data such as
+configuration files. Larger files should be stored as image archive.

 .. caution:: Please do not store all files as BLOBs. Instead, use the
    file archive to store whole directory trees.
@@ -54,15 +55,15 @@ configuration data. Larger files should be stored as image archive.

 Catalog File: ``catalog.pcat1``
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-The catalog file is basically an index for file archive. It contains
-the list of files, and is used to speedup search operations.
+The catalog file is an index for file archives. It contains
+the list of files and is used to speed-up search operations.


 The Manifest: ``index.json``
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-The manifest contains the list of all backup files, including
-file sizes and checksums. It is used to verify the consistency of a
+The manifest contains the list of all backup files, their
+sizes and checksums. It is used to verify the consistency of a
 backup.
@@ -73,17 +74,17 @@ The backup server groups backups by *type*, where *type* is one of:

 ``vm``
     This type is used for :term:`virtual machine`\ s. Typically
-    contains the virtual machine configuration and an image archive
+    contains the virtual machine's configuration and an image archive
     for each disk.

 ``ct``
-    This type is used for :term:`container`\ s. Contains the container
+    This type is used for :term:`container`\ s. Contains the container's
     configuration and a single file archive for the container content.

 ``host``
-    This type is used for physical host, or if you want to run backups
-    manually from inside virtual machines or containers. Such backups
-    may contain file and image archives (no restrictions here).
+    This type is used for backups created from within the backed up machine.
+    Typically this would be a physical host, but it could also be a virtual
+    machine or container. Such backups may contain file and image archives;
+    there are no restrictions in this regard.
@@ -102,14 +103,14 @@ The time when the backup was made.

 Backup Group
 ~~~~~~~~~~~~

-We call the tuple ``<type>/<ID>`` a backup group. Such group
-may contains one or more backup snapshots.
+The tuple ``<type>/<ID>`` is called a backup group. Such a group
+may contain one or more backup snapshots.


 Backup Snapshot
 ~~~~~~~~~~~~~~~

-We call the triplet ``<type>/<ID>/<time>`` a backup snapshot. It
+The triplet ``<type>/<ID>/<time>`` is called a backup snapshot. It
 uniquely identifies a specific backup within a datastore.

 .. code-block:: console
@@ -118,25 +119,25 @@ uniquely identifies a specific backup within a datastore.

     vm/104/2019-10-09T08:01:06Z
     host/elsa/2019-11-08T09:48:14Z

-As you can see, the time is formatted as RFC3339_ using Coordinated
+As you can see, the time format is RFC3339_ with Coordinated
 Universal Time (UTC_, identified by the trailing *Z*).


 :term:`DataStore`
 ~~~~~~~~~~~~~~~~~

-A datastore is a place to store backups. The current implementation
+A datastore is a place where backups are stored. The current implementation
 uses a directory inside a standard unix file system (``ext4``, ``xfs``
-or ``zfs``) to store backup data.
+or ``zfs``) to store the backup data.

-Datastores are identified by a simple *ID*. You can configure that
+Datastores are identified by a simple *ID*. You can configure it
 when setting up the backup server.


 Backup Server Management
 ------------------------

-The command line tool to configure and manage the server is called
+The command line tool to configure and manage the backup server is called
 :command:`proxmox-backup-manager`.
@@ -144,7 +145,9 @@ Datastore Configuration
 ~~~~~~~~~~~~~~~~~~~~~~~

 A :term:`datastore` is a place to store backups. You can configure
-several datastores, but you need at least one of them. The datastore is identified by a simple `name` and point to a directory.
+multiple datastores. At least one datastore needs to be
+configured. The datastore is identified by a simple `name` and points
+to a directory.

 The following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
@@ -152,20 +155,24 @@ The following command creates a new datastore called ``store1`` on :file:`/backu

     # proxmox-backup-manager datastore create store1 /backup/disk1/store1

-To list existing datastores use:
+To list existing datastores run:

 .. code-block:: console

     # proxmox-backup-manager datastore list
-    store1 /backup/disk1/store1
+    ┌────────┬──────────────────────┬─────────────────────────────┐
+    │ name   │ path                 │ comment                     │
+    ╞════════╪══════════════════════╪═════════════════════════════╡
+    │ store1 │ /backup/disk1/store1 │ This is my default storage. │
+    └────────┴──────────────────────┴─────────────────────────────┘

-Finally, it is also possible to remove the datastore configuration:
+Finally, it is possible to remove the datastore configuration:

 .. code-block:: console

     # proxmox-backup-manager datastore remove store1

-.. note:: Above command removes the datastore configuration. It does
+.. note:: The above command removes only the datastore configuration. It does
    not delete any data from the underlying directory.
```
```diff
@@ -175,6 +182,126 @@ File Layout

 .. todo:: Add datastore file layout example


+User Management
+~~~~~~~~~~~~~~~
+
+Proxmox Backup supports several authentication realms, and you need to
+choose the realm when you add a new user. Possible realms are:
+
+:pam: Linux PAM standard authentication. Use this if you want to
+      authenticate as a Linux system user (users need to exist on the
+      system).
+
+:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
+      ``/etc/proxmox-backup/shadow.json``.
+
+After installation, there is a single user ``root@pam``, which
+corresponds to the Unix superuser. You can use the
+``proxmox-backup-manager`` command line tool to list or manipulate
+users:
+
+.. code-block:: console
+
+    # proxmox-backup-manager user list
+    ┌─────────────┬────────┬────────┬───────────┬──────────┬────────────────┬────────────────────┐
+    │ userid      │ enable │ expire │ firstname │ lastname │ email          │ comment            │
+    ╞═════════════╪════════╪════════╪═══════════╪══════════╪════════════════╪════════════════════╡
+    │ root@pam    │      1 │        │           │          │                │ Superuser          │
+    └─────────────┴────────┴────────┴───────────┴──────────┴────────────────┴────────────────────┘
+
+The superuser has full administration rights on everything, so you
+normally want to add other users with fewer privileges:
+
+.. code-block:: console
+
+    # proxmox-backup-manager user create john@pbs --email john@example.com
+
+The create command lets you specify many options like ``--email`` or
+``--password``, but you can update or change any of them using the
+update command later:
+
+.. code-block:: console
+
+    # proxmox-backup-manager user update john@pbs --firstname John --lastname Smith
+    # proxmox-backup-manager user update john@pbs --comment "An example user."
+
+.. todo:: Mention how to set password without passing plaintext password as cli argument.
+
+The resulting user list looks like this:
+
+.. code-block:: console
+
+    # proxmox-backup-manager user list
+    ┌──────────┬────────┬────────┬───────────┬──────────┬──────────────────┬──────────────────┐
+    │ userid   │ enable │ expire │ firstname │ lastname │ email            │ comment          │
+    ╞══════════╪════════╪════════╪═══════════╪══════════╪══════════════════╪══════════════════╡
+    │ john@pbs │      1 │        │ John      │ Smith    │ john@example.com │ An example user. │
+    ├──────────┼────────┼────────┼───────────┼──────────┼──────────────────┼──────────────────┤
+    │ root@pam │      1 │        │           │          │                  │ Superuser        │
+    └──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘
+
+Newly created users do not have any permissions. Please read the next
+section to learn how to set access permissions.
+
+If you want to disable a user account, you can do that by setting ``--enable`` to ``0``:
+
+.. code-block:: console
+
+    # proxmox-backup-manager user update john@pbs --enable 0
+
+Or completely remove the user with:
+
+.. code-block:: console
+
+    # proxmox-backup-manager user remove john@pbs
+
+
+Access Control
+~~~~~~~~~~~~~~
+
+Users do not have any permissions by default. Instead you need to
+specify what is allowed and what is not. You can do this by assigning
+roles to users on specific objects, like datastores or remotes. The
+following roles exist:
+
+**Admin**
+  The Administrator can do anything.
+
+**Audit**
+  An Auditor can view things, but is not allowed to change settings.
+
+**NoAccess**
+  Disable Access - nothing is allowed.
+
+**DatastoreAdmin**
+  Can do anything on datastores.
+
+**DatastoreAudit**
+  Can view datastore settings and list content. But
+  is not allowed to read the actual data.
+
+**DataStoreReader**
+  Can inspect datastore content and do restores.
+
+**DataStoreBackup**
+  Can backup and restore owned backups.
+
+**DatastorePowerUser**
+  Can backup, restore, and prune owned backups.
+
+**RemoteAdmin**
+  Can do anything on remotes.
+
+**RemoteAudit**
+  Can view remote settings.
+
+**RemoteSyncOperator**
+  Is allowed to read data from a remote.
+

 Backup Client usage
 -------------------
```
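Not part of the diff: the section stops short of showing how one of these roles is actually assigned. A plausible invocation, assuming the ``acl`` subcommand of ``proxmox-backup-manager`` (its exact syntax is an assumption here) and reusing ``john@pbs`` and ``store1`` from the examples above:

```console
# proxmox-backup-manager acl update /datastore/store1 DatastoreAdmin --userid john@pbs
```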
```diff
@@ -184,16 +311,16 @@

 Repository Locations
 ~~~~~~~~~~~~~~~~~~~~~

-The client uses a special repository notation to specify a datastore
+The client uses the following notation to specify a datastore repository
 on the backup server.

     [[username@]server:]datastore

-If you do not specify a ``username`` the default is ``root@pam``. The
-default for server is to use the local host (``localhost``).
+The default value for ``username`` is ``root``. If no server is specified, the
+default is the local host (``localhost``).

-You can pass the repository by setting the ``--repository`` command
-line options, or by setting the ``PBS_REPOSITORY`` environment
+You can pass the repository with the ``--repository`` command
+line option, or by setting the ``PBS_REPOSITORY`` environment
 variable.
```
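Not part of the diff: a few concrete repository strings matching the notation above (host names are illustrative; note that the username itself carries a realm, so a fully qualified form contains two ``@`` signs):

```console
store1                           # datastore "store1" on localhost
backup-server:store1             # datastore on a remote server, default user
john@pbs@backup-server:store1    # explicit user john@pbs on backup-server
```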
```diff
@@ -219,8 +346,8 @@ Environment Variables

 Output Format
 ~~~~~~~~~~~~~

-Most commands support the ``--output-format`` parameter, which can be
-set to the following values:
+Most commands support the ``--output-format`` parameter. It accepts
+the following values:

 :``text``: Text format (default). Structured data is rendered as a table.

@@ -240,9 +367,9 @@ Please use the following environment variables to modify output behavior:

 ``PROXMOX_OUTPUT_NO_HEADER``
    If set (to any value), do not render table headers.

-.. note:: The ``text`` format is designed to be human readable, but
+.. note:: The ``text`` format is designed to be human readable, and
    not meant to be parsed by automation tools. Please use the ``json``
-   format for such purpose because it is machine readable.
+   format if you need to process the output.


 .. _creating-backups:
```
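Not part of the diff: switching a listing to machine-readable output, using a command that appears later in this guide:

```console
# proxmox-backup-client snapshots --output-format json
```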
```diff
@@ -250,15 +377,15 @@

 Creating Backups
 ~~~~~~~~~~~~~~~~

-This section explains how to create backup on physical host, or from
-inside virtual machines or containers. Such backups may contain file
-and image archives (no restrictions here).
+This section explains how to create a backup from within the machine. This can
+be a physical host, a virtual machine, or a container. Such backups may contain file
+and image archives. There are no restrictions in this case.

-.. note:: If you want to backup virtual machines or containers see :ref:`pve-integration`.
+.. note:: If you want to backup virtual machines or containers on Proxmox VE, see :ref:`pve-integration`.

-The prerequisite is that you have already set up (or can access) a
-backup server. It is assumed that you know the repository name and
-credentials. In the following examples we simply use ``backup-server:store1``.
+For the following example you need to have a backup server set up, working
+credentials and need to know the repository name.
+In the following examples we use ``backup-server:store1``.

 .. code-block:: console
@@ -275,15 +402,15 @@

 This will prompt you for a password and then uploads a file archive named
 ``root.pxar`` containing all the files in the ``/`` directory.

-.. Caution:: Please note that proxmox-backup-client does not
+.. Caution:: Please note that the proxmox-backup-client does not
    automatically include mount points. Instead, you will see a short
-   ``skip mount point`` notice for each of them. The idea is that you
-   create a separate file archive for each mounted disk. You can also
+   ``skip mount point`` notice for each of them. The idea is to
+   create a separate file archive for each mounted disk. You can
    explicitly include them using the ``--include-dev`` option
-   (i.e. ``--include-dev /boot/efi``). You can use this option
-   multiple times, once for each mount point you want to include.
+   (i.e. ``--include-dev /boot/efi``). You can use this option
+   multiple times for each mount point that should be included.

-The ``--repository`` option is sometimes quite long and is used by all
+The ``--repository`` option can get quite long and is used by all
 commands. You can avoid having to enter this value by setting the
 environment variable ``PBS_REPOSITORY``.
@@ -291,26 +418,26 @@

    # export PBS_REPOSITORY=backup-server:store1

-You can then execute all commands without specifying the ``--repository``
+After this you can execute all commands without specifying the ``--repository``
 option.

-One single backup is allowed to contain more than one archive. For example, assume you want to backup two disks mounted at ``/mnt/disk1`` and ``/mnt/disk2``:
+One single backup is allowed to contain more than one archive. For example, if
+you want to backup two disks mounted at ``/mnt/disk1`` and ``/mnt/disk2``:

 .. code-block:: console

    # proxmox-backup-client backup disk1.pxar:/mnt/disk1 disk2.pxar:/mnt/disk2

-This create a backup of both disks.
+This creates a backup of both disks.

 The backup command takes a list of backup specifications, which
-include archive name on the server, the type of the archive, and the
-archive source at the client. The format is quite simple to understand:
+include the archive name on the server, the type of the archive, and the
+archive source at the client. The format is:

    <archive-name>.<type>:<source-path>

 Common types are ``.pxar`` for file archives, and ``.img`` for block
-device images. Thus it is quite easy to create a backup for a block
-device:
+device images. To create a backup of a block device run the following command:

 .. code-block:: console

```
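Not part of the diff (the example command itself is elided context); an invocation of the form the text describes, with an illustrative device path:

```console
# proxmox-backup-client backup mydata.img:/dev/mylvm/mydata
```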
```diff
@@ -320,42 +447,43 @@ Excluding files/folders from a backup
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

 Sometimes it is desired to exclude certain files or folders from a backup archive.
-Using the proxmox backup client this is possible via simple text based
-``.pxarexclude`` files placed in the filesystem hierarchy.
+To tell the Proxmox backup client when and how to ignore files and directories,
+place a text file called ``.pxarexclude`` in the filesystem hierarchy.
+Whenever the backup client encounters such a file in a directory, it interprets
+each line as glob match patterns for files and directories that are to be excluded
+from the backup.

-Whenever such a file is encountered in a directory, the backup client reads its
-content and lines are interpreted as glob match patterns for files/directories
-to exclude from the archive.
-The file must contain a single glob pattern on each line. Empty lines are ignored.
-The same is true for lines starting with ``#``, indicating a line containing comments.
-Lines starting with ``!`` correspond to glob match patterns for explicit inclusion
-of files previously excluded by a match. This allows for example to exclude
-all entries in a directory except for a few single files.
-Lines ending in ``/`` match directory entries only.
-The folder containing the ``.pxarexclude`` file is considered to be the root of
-the given patterns. It is only possible to match files in this or below this folder.
+The file must contain a single glob pattern per line. Empty lines are ignored.
+The same is true for lines starting with ``#``, which indicates a comment.
+A ``!`` at the beginning of a line reverses the glob match pattern from an exclusion
+to an explicit inclusion. This makes it possible to exclude all entries in a
+directory except for a few single files/subdirectories.
+Lines ending in ``/`` match only on directories.
+The directory containing the ``.pxarexclude`` file is considered to be the root of
+the given patterns. It is only possible to match files in this directory and its subdirectories.

-``\`` is used to escape glob characters. ``?`` matches any single character,
-``*`` matches any character including the empty string.
-``**`` is used to match also subdirectories and can be used to exclude for example
-all files ending in ``.tmp`` within the directory or a subdirectory by the
+``\`` is used to escape special glob characters.
+``?`` matches any single character.
+``*`` matches any character, including an empty string.
+``**`` is used to match subdirectories. It can be used to, for example, exclude
+all files ending in ``.tmp`` within the directory or subdirectories with the
 following pattern ``**/*.tmp``.
 ``[...]`` matches a single character from any of the provided characters within
 the brackets. ``[!...]`` does the complementary and matches any single character
-not contained within the brackets. It is also possible to specify ranges by two
-characters separated by ``-``. For example ``[a-z]`` matches any lowercase
-alphabetic character, ``[0-9]`` matches any one single digit.
+not contained within the brackets. It is also possible to specify ranges with two
+characters separated by ``-``. For example, ``[a-z]`` matches any lowercase
+alphabetic character and ``[0-9]`` matches any one single digit.

-The order of the glob match patterns defines if the file is finally included or
+The order of the glob match patterns defines if a file is included or
 excluded, later entries win over previous ones.
 This is also true for match patterns encountered deeper down the directory tree,
-which may then override a previous exclusion.
-Note however that folders marked for exclusion are not read by the client,
-so ``.pxarexclude`` files contained within have no effect.
-``.pxarexclude`` files are treated as regular files and are also included in the
+which can override a previous exclusion.
+Be aware that excluded directories will **not** be read by the backup client.
+A ``.pxarexclude`` file in a subdirectory will have no effect.
+``.pxarexclude`` files are treated as regular files and will be included in the
 backup archive.

-For example, consider the following folder structure:
+For example, consider the following directory structure:

 .. code-block:: console

```
```diff
@@ -369,7 +497,7 @@ For example, consider the following folder structure:

     folder/subfolder1:
     . .. file0 file1 file2 file3

-The ``.pxarexclude`` files containing the following:
+The different ``.pxarexclude`` files contain the following:

 .. code-block:: console

```
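Not part of the diff (the actual file contents are elided context); an illustrative ``.pxarexclude`` using the syntax described above:

```console
# exclude every .tmp file in this tree
**/*.tmp
# exclude log files in this directory only
*.log
# ...but explicitly keep this one
!important.log
# trailing slash: match directories only
cache/
```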
```diff
@@ -386,7 +514,7 @@ The ``.pxarexclude`` files containing the following:

 This would exclude ``file1`` and ``file3`` in ``subfolder0`` and all of
 ``subfolder1`` except ``file2``.

-Restoring this archive form backup results in:
+Restoring this backup will result in:

 .. code-block:: console

```
```diff
@@ -403,8 +531,8 @@ Restoring this archive form backup results in:

 Encryption
 ^^^^^^^^^^

-Proxmox backup support client side encryption using AES-256 in GCM_
-mode. You first need to create an encryption key in order to use that:
+Proxmox backup supports client side encryption with AES-256 in GCM_
+mode. First you need to create an encryption key:

 .. code-block:: console

@@ -416,7 +544,7 @@ extra protection, you can also create it without a password:

 .. code-block:: console

-   # proxmox-backup-client key create /path/to/my-backup.key --kdf none
+   # proxmox-backup-client key create /path/to/my-backup.key --kdf none

@@ -427,7 +555,7 @@

    ...

-You can avoid having to enter the passwords by setting the environment
+You can avoid entering the passwords by setting the environment
 variables ``PBS_PASSWORD`` and ``PBS_ENCRYPTION_PASSWORD``.

 .. todo:: Explain master-key
```
```diff
@@ -437,22 +565,26 @@ Restoring Data
 ~~~~~~~~~~~~~~

 The regular creation of backups is a necessary step to avoid data
-loss. More important, however, is the restoration. Be sure to perform
-periodic recovery tests to ensure that you can access your data in
+loss. More important, however, is the restoration. It is good practice to perform
+periodic recovery tests to ensure that you can access the data in
 case of problems.

-First, you need to find the snapshot you want to restore. The snapshot
-command gives you a list of all snapshots on the server:
+First, you need to find the snapshot which you want to restore. The snapshot
+command gives a list of all snapshots on the server:

 .. code-block:: console

    # proxmox-backup-client snapshots
-   ...
-   host/elsa/2019-12-03T09:30:15Z | 51788646825 | root.pxar catalog.pcat1 index.json
-   host/elsa/2019-12-03T09:35:01Z | 51790622048 | root.pxar catalog.pcat1 index.json
+   ┌────────────────────────────────┬─────────────┬────────────────────────────────────┐
+   │ snapshot                       │        size │ files                              │
+   ╞════════════════════════════════╪═════════════╪════════════════════════════════════╡
+   │ host/elsa/2019-12-03T09:30:15Z │ 51788646825 │ root.pxar catalog.pcat1 index.json │
+   ├────────────────────────────────┼─────────────┼────────────────────────────────────┤
+   │ host/elsa/2019-12-03T09:35:01Z │ 51790622048 │ root.pxar catalog.pcat1 index.json │
+   ├────────────────────────────────┼─────────────┼────────────────────────────────────┤
+   ...

-You can also inspect the catalog to find specific files.
+You can inspect the catalog to find specific files.

 .. code-block:: console

@@ -470,9 +602,8 @@ backup.

    # proxmox-backup-client restore host/elsa/2019-12-03T09:35:01Z root.pxar /target/path/

-You can instead simply download the contents of any archive using '-'
-instead of ``/target/path``. This dumps the content to standard
-output:
+To get the contents of any archive you can restore the ``index.json`` file in the
+repository to '-'. This will dump the content to the standard output.

 .. code-block:: console

```
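Not part of the diff (the example is elided context); an invocation matching the description, reusing the snapshot from above:

```console
# proxmox-backup-client restore host/elsa/2019-12-03T09:35:01Z index.json -
```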
```diff
@@ -494,20 +625,18 @@ to use the interactive recovery shell.

    ...

 The interactive recovery shell is a minimalistic command line interface that
-utilizes the metadata stored in the catalog for you to quickly list, navigate and
-search files contained within a file archive.
-You can select individual files as well as select files matched by a glob pattern
-for restore.
+utilizes the metadata stored in the catalog to quickly list, navigate and
+search files in a file archive.
+To restore files, you can select them individually or match them with a glob
+pattern.

-The use of the catalog for navigation reduces the overhead otherwise caused by
-network traffic and decryption, as instead of downloading and decrypting
-individual encrypted chunks from the chunk-store to access the metadata, we only
-need to download and decrypt the catalog.
+Using the catalog for navigation reduces the overhead considerably because only
+the catalog needs to be downloaded and, optionally, decrypted.
+The actual chunks are only accessed if the metadata in the catalog is not enough
+or for the actual restore.

 Similar to common UNIX shells ``cd`` and ``ls`` are the commands used to change
-working directory and list directory contents of the archive.
+working directory and list directory contents in the archive.
 ``pwd`` shows the full path of the current working directory with respect to the
 archive root.
```
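Not part of the diff: the shell described above is entered with the ``catalog shell`` subcommand; a plausible invocation, reusing the earlier snapshot and archive names:

```console
# proxmox-backup-client catalog shell host/elsa/2019-12-03T09:35:01Z root.pxar
```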
```diff
@@ -567,7 +696,7 @@ This allows you to access the full content of the archive in a seamless manner.

    load on your host, depending on the operations you perform on the mounted
    filesystem.

-To unmount the filesystem simply use the ``umount`` command on the mountpoint:
+To unmount the filesystem use the ``umount`` command on the mountpoint:

 .. code-block:: console

@@ -579,7 +708,7 @@ Login and Logout

 The client tool prompts you to enter the logon password as soon as you
 want to access the backup server. The server checks your credentials
 and responds with a ticket that is valid for two hours. The client
-tool automatically stores that ticket and use it for further requests
+tool automatically stores that ticket and uses it for further requests
 to this server.

 You can also manually trigger this login/logout using the login and
@@ -590,7 +719,7 @@ logout commands:

    # proxmox-backup-client login
    Password: **********

-To remove the ticket, simply issue a logout:
+To remove the ticket, issue a logout:

 .. code-block:: console

```
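Not part of the diff (elided context); the corresponding logout invocation named in the text:

```console
# proxmox-backup-client logout
```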
```diff
@@ -608,76 +737,78 @@ command:

    # proxmox-backup-client forget <snapshot>

-.. caution:: This command removes all the archives in this backup
-   snapshot so that they are inaccessible and unrecoverable.
+.. caution:: This command removes all archives in this backup
+   snapshot. They will be inaccessible and unrecoverable.

-Such manual removal is sometimes required, but normally the prune
+The manual removal is sometimes required, but normally the prune
 command is used to systematically delete older backups. Prune lets
-you specify which backup snapshots you want to keep. There are the
-following retention options:
+you specify which backup snapshots you want to keep. The
+following retention options are available:

 ``--keep-last <N>``
   Keep the last ``<N>`` backup snapshots.

 ``--keep-hourly <N>``
-  Keep backups for the last ``<N>`` different hours. If there is more than one
-  backup for a single hour, only the latest one is kept.
+  Keep backups for the last ``<N>`` hours. If there is more than one
+  backup for a single hour, only the latest is kept.

 ``--keep-daily <N>``
-  Keep backups for the last ``<N>`` different days. If there is more than one
-  backup for a single day, only the latest one is kept.
+  Keep backups for the last ``<N>`` days. If there is more than one
+  backup for a single day, only the latest is kept.

 ``--keep-weekly <N>``
-  Keep backups for the last ``<N>`` different weeks. If there is more than one
-  backup for a single week, only the latest one is kept.
+  Keep backups for the last ``<N>`` weeks. If there is more than one
+  backup for a single week, only the latest is kept.

-  .. note:: The weeks start on Monday and end on Sunday. The software
-     uses the `ISO week date`_ system and correctly handles weeks at
-     the end of the year.
+  .. note:: Weeks start on Monday and end on Sunday. The software
+     uses the `ISO week date`_ system and handles weeks at
+     the end of the year correctly.

 ``--keep-monthly <N>``
-  Keep backups for the last ``<N>`` different months. If there is more than one
-  backup for a single month, only the latest one is kept.
+  Keep backups for the last ``<N>`` months. If there is more than one
+  backup for a single month, only the latest is kept.

 ``--keep-yearly <N>``
-  Keep backups for the last ``<N>`` different years. If there is more than one
-  backup for a single year, only the latest one is kept.
+  Keep backups for the last ``<N>`` years. If there is more than one
+  backup for a single year, only the latest is kept.

-The retention options are processed in the order given above. Each option
-only covers backups within its time period. The next option does not take care
-of already covered backups. It will only consider older backups.
+Those retention options are processed in the order given above. Each
+option covers a specific period of time. We say that backups within
+this period are covered by this option. The next option does not take
+care of already covered backups and only considers older backups.

-The prune command also looks for unfinished and incomplete backups and
-removes them unless they are newer than the last successful backup. In
-this case, the last failed backup is retained.
+Unfinished and incomplete backups will be removed by the prune command unless
+they are newer than the last successful backup. In this case, the last failed
+backup is retained.

 .. code-block:: console

    # proxmox-backup-client prune <group> --keep-daily 7 --keep-weekly 4 --keep-monthly 3

-You can use the ``--dry-run`` option to test your settings. This just
-shows the list of existing snapshots and what action prune would take
-on that.
+You can use the ``--dry-run`` option to test your settings. This only
+shows the list of existing snapshots and which action prune would take.

 .. code-block:: console

    # proxmox-backup-client prune host/elsa --dry-run --keep-daily 1 --keep-weekly 3
    retention options: --keep-daily 1 --keep-weekly 3
    Testing prune on store "store2" group "host/elsa"
-   host/elsa/2019-12-04T13:20:37Z keep
-   host/elsa/2019-12-03T09:35:01Z remove
-   host/elsa/2019-11-22T11:54:47Z keep
-   host/elsa/2019-11-21T12:36:25Z remove
-   host/elsa/2019-11-10T10:42:20Z keep
+   ┌────────────────────────────────┬──────┐
+   │ snapshot                       │ keep │
+   ╞════════════════════════════════╪══════╡
+   │ host/elsa/2019-12-04T13:20:37Z │    1 │
+   ├────────────────────────────────┼──────┤
+   │ host/elsa/2019-12-03T09:35:01Z │    0 │
+   ├────────────────────────────────┼──────┤
+   │ host/elsa/2019-11-22T11:54:47Z │    1 │
+   ├────────────────────────────────┼──────┤
+   │ host/elsa/2019-11-21T12:36:25Z │    0 │
+   ├────────────────────────────────┼──────┤
+   │ host/elsa/2019-11-10T10:42:20Z │    1 │
+   └────────────────────────────────┴──────┘

 .. note:: Neither the ``prune`` command nor the ``forget`` command free space
-   in the chunk-store. The chunk-store still contains the data blocks
-   unless you are performing :ref:`garbage-collection`.
+   in the chunk-store. The chunk-store still contains the data blocks. To free
+   space you need to perform :ref:`garbage-collection`.


 .. _garbage-collection:
```
@ -687,8 +818,7 @@ Garbage Collection
|
||||
|
||||
The ``prune`` command removes only the backup index files, not the data
|
||||
from the data store. This task is left to the garbage collection
|
||||
command. It is therefore recommended to carry out garbage collection
|
||||
regularly.
|
||||
command. It is recommended to carry out garbage collection on a regular basis.
|
||||
|
||||
The garbage collection works in two phases. In the first phase, all
|
||||
data blocks that are still in use are marked. In the second phase,
|
||||
@ -727,6 +857,41 @@ unused data blocks are removed.
|
||||
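Garbage collection can also be triggered manually. As a minimal sketch (assuming the ``proxmox-backup-manager garbage-collection`` subcommands of this release), starting and polling a run on datastore ``store2`` would look like:

.. code-block:: console

    # proxmox-backup-manager garbage-collection start store2
    # proxmox-backup-manager garbage-collection status store2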
`Proxmox VE`_ integration
-------------------------

+You need to define a new storage with type 'pbs' on your `Proxmox VE`_
+node. The following example uses ``store2`` as storage name, and
+assumes the server address is ``localhost``, and you want to connect
+as ``user1@pbs``.
+
+.. code-block:: console
+
+    # pvesm add pbs store2 --server localhost --datastore store2
+    # pvesm set store2 --username user1@pbs --password <secret>
+
+If your backup server uses a self signed certificate, you need to add
+the certificate fingerprint to the configuration. You can get the
+fingerprint by running the following command on the backup server:
+
+.. code-block:: console
+
+    # proxmox-backup-manager cert info |grep Fingerprint
+    Fingerprint (sha256): 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
+
+Please add that fingerprint to your configuration to establish a trust
+relationship:
+
+.. code-block:: console
+
+    # pvesm set store2 --fingerprint 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
+
+After that you should be able to see storage status with:
+
+.. code-block:: console
+
+    # pvesm status --storage store2
+    Name      Type     Status          Total        Used      Available       %
+    store2    pbs      active     3905109820  1336687816     2568422004  34.23%

.. include:: command-line-tools.rst
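Once the storage is active, guests can be backed up to it with the standard `Proxmox VE`_ tooling; for example, for a guest with the hypothetical VMID ``100``:

.. code-block:: console

    # vzdump 100 --storage store2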
docs/conf.py (33 lines changed)

@@ -21,6 +21,21 @@
# import sys
# sys.path.insert(0, os.path.abspath('.'))

+# -- Implement custom formatter for code-blocks ---------------------------
+#
+# * use smaller font
+# * avoid space between lines to nicely format utf8 tables
+
+from sphinx.highlighting import PygmentsBridge
+from pygments.formatters.latex import LatexFormatter
+
+class CustomLatexFormatter(LatexFormatter):
+    def __init__(self, **options):
+        super(CustomLatexFormatter, self).__init__(**options)
+        self.verboptions = r"formatcom=\footnotesize\relax\let\strut\empty"
+
+PygmentsBridge.latex_formatter = CustomLatexFormatter

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.

@@ -53,7 +68,7 @@ rst_epilog = epilog_file.read()

# General information about the project.
project = 'Proxmox Backup'
-copyright = '2019, Proxmox Support Team'
+copyright = '2019-2020, Proxmox Support Team'
author = 'Proxmox Support Team'

# The version info for the project you're documenting, acts as replacement for

@@ -61,9 +76,9 @@ author = 'Proxmox Support Team'
# built documents.
#
# The short X.Y version.
-version = '1.0'
+version = '0.2'
# The full version, including alpha/beta/rc tags.
-release = '1.0-1'
+release = '0.2-1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.

@@ -251,14 +266,24 @@ htmlhelp_basename = 'ProxmoxBackupdoc'

# -- Options for LaTeX output ---------------------------------------------

+latex_engine = 'xelatex'
+
latex_elements = {
+    'fontenc': '\\usepackage{fontspec}',
+
    # The paper size ('letterpaper' or 'a4paper').
    #
    'papersize': 'a4paper',

    # The font size ('10pt', '11pt' or '12pt').
    #
-    'pointsize': '12pt',
+    'pointsize': '10pt',
+
+    'fontpkg': r'''
+    \setmainfont{Open Sans}
+    \setsansfont{Lato}
+    \setmonofont{DejaVu Sans Mono}
+    ''',

    # Additional stuff for the LaTeX preamble.
    #
@@ -5,24 +5,23 @@ Glossary

`Virtual machine`_

-    A Virtual machine is a program that can execute an entire
-    operatin system inside an emulated hardware environment.
+    A virtual machine is a program that can execute an entire
+    operating system inside an emulated hardware environment.

`Container`_

-    A Container is an isolated user space. Programs runs directly on
-    the hosts kernel, but with limited access to the host resources.
+    A container is an isolated user space. Programs run directly on
+    the host's kernel, but with limited access to the host resources.

Datastore

-    A place to store backups. The current implemenation is
-    file-system based, so this refers to a directory containing the
-    backup data.
+    A place to store backups. A directory which contains the backup data.
+    The current implementation is file-system based.

`Rust`_

    Rust is a new, fast and memory-efficient system programming
-    language, with no runtime or garbage collector. Rust’s rich type
+    language. It has no runtime or garbage collector. Rust’s rich type
    system and ownership model guarantee memory-safety and
    thread-safety. It can eliminate many classes of bugs
    at compile-time.

@@ -31,11 +30,9 @@ Glossary

    Is a tool that makes it easy to create intelligent and
    beautiful documentation. It was originally created for the
-    Python documentation, and it has excellent facilities for the
+    documentation of the Python programming language. It has excellent facilities for the
    documentation of software projects in a range of languages.

`reStructuredText`_

    Is an easy-to-read, what-you-see-is-what-you-get plaintext

@@ -44,8 +41,8 @@ Glossary

`FUSE`

    Filesystem in Userspace (`FUSE <https://en.wikipedia.org/wiki/Filesystem_in_Userspace>`_)
-    defines an interface which allows to implement a filesystem in
+    defines an interface which makes it possible to implement a filesystem in
    userspace as opposed to implementing it in the kernel. The fuse
-    kernel driver handles filesystem requests and sends them to an
-    userspace application for reply.
+    kernel driver handles filesystem requests and sends them to a
+    userspace application.
@@ -1,55 +1,50 @@
Installation
============

-`Proxmox Backup`_ is split into a server part and a client part. The
-server part comes with it's own graphical installer, but we also
-ship Debian_ package repositories, so you can easily install those
-packages on any Debian_ based system.
+`Proxmox Backup`_ is split into a server and client part. The server part
+can either be installed with a graphical installer or on top of
+Debian_ from the provided package repository.

.. include:: package-repositories.rst


Server installation
-------------------

-The backup server stores the actual backup data, but also provides a
-web based GUI for various management tasks, for example disk
-management.
+The backup server stores the actual backed up data and provides a web based GUI
+for various management tasks such as disk management.

.. note:: You always need a backup server. It is not possible to use
   `Proxmox Backup`_ without the server part.

-The server is based on Debian, therefore the disk image (ISO file) provided
-by us includes a complete Debian system ("buster" for version 1.x) as
-well as all necessary backup packages.
+The disk image (ISO file) provided by Proxmox includes a complete Debian system
+("buster" for version 1.x) as well as all necessary packages for the `Proxmox Backup`_ server.

-Using the installer will guide you through the setup, allowing
+The installer will guide you through the setup process and allows
you to partition the local disk(s), apply basic system configurations
-(e.g. timezone, language, network) and install all required packages.
-Using the provided ISO will get you started in just a few minutes,
-that's why we recommend this method for new and existing users.
+(e.g. timezone, language, network), and installs all required packages.
+The provided ISO will get you started in just a few minutes, and is the
+recommended method for new and existing users.

Alternatively, `Proxmox Backup`_ server can be installed on top of an
existing Debian system.

-Using the `Proxmox Backup`_ Installer
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Install `Proxmox Backup`_ with the Installer
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-You can download the ISO from |DOWNLOADS|.
+Download the ISO from |DOWNLOADS|.
It includes the following:

-* The `Proxmox Backup`_ server installer, which partitions the local
-  disk(s) with ext4, ext3, xfs or ZFS, and installs the operating
-  system.
-
* Complete operating system (Debian Linux, 64-bit)

+* The `Proxmox Backup`_ server installer, which partitions the local
+  disk(s) with ext4, ext3, xfs or ZFS and installs the operating
+  system.
+
* Our Linux kernel with ZFS support.

-* Complete toolset for administering backups and all necessary
-  resources
+* Complete tool-set to administer backups and all necessary resources

-* Web based management interface for using the toolset
+* Web based GUI management interface

.. note:: During the installation process, the complete server
   is used by default and all existing data is removed.

@@ -58,8 +53,8 @@ It includes the following:

Install `Proxmox Backup`_ server on Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-Proxmox ships as a set of Debian packages, so you can install it on
-top of a standard Debian installation. After configuring the
+Proxmox ships as a set of Debian packages which can be installed on top of a
+standard Debian installation. After configuring the
:ref:`sysadmin_package_repositories`, you need to run:

.. code-block:: console

@@ -67,7 +62,7 @@ top of a standard Debian installation. After configuring the

    # apt-get update
    # apt-get install proxmox-backup-server

-Above code keeps the current (Debian) kernel and installs a minimal
+The commands above keep the current (Debian) kernel and install a minimal
set of required packages.

If you want to install the same set of packages as the installer

@@ -78,16 +73,15 @@ does, please use the following:

    # apt-get update
    # apt-get install proxmox-backup

-This installs all required packages, the Proxmox kernel with ZFS_
-support, and a set of commonly useful packages.
+This will install all required packages, the Proxmox kernel with ZFS_
+support, and a set of common and useful packages.

-Installing on top of an existing Debian_ installation looks easy, but
-it presumes that you have correctly installed the base system, and you
-know how you want to configure and use the local storage. Network
-configuration is also completely up to you.
+Installing `Proxmox Backup`_ on top of an existing Debian_ installation looks easy, but
+it presumes that the base system and local storage has been set up correctly.

-In general, this is not trivial, especially when you use LVM_ or
-ZFS_.
+In general this is not trivial, especially when LVM_ or ZFS_ is used.
+
+The network configuration is completely up to you as well.

Install Proxmox Backup server on `Proxmox VE`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -101,9 +95,9 @@ After configuring the

    # apt-get install proxmox-backup-server

.. caution:: Installing the backup server directly on the hypervisor
-   is not recommended. It is more secure to use a separate physical
-   server to store backups. If the hypervisor server fails, you can
-   still access your backups.
+   is not recommended. It is safer to use a separate physical
+   server to store backups. Should the hypervisor server fail, you can
+   still access the backups.

Client installation
-------------------

@@ -111,8 +105,8 @@ Client installation

Install `Proxmox Backup`_ client on Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-Proxmox ships as a set of Debian packages, so you can install it on
-top of a standard Debian installation. After configuring the
+Proxmox ships as a set of Debian packages to be installed on
+top of a standard Debian installation. After configuring the
:ref:`sysadmin_package_repositories`, you need to run:

.. code-block:: console
@@ -1,14 +1,14 @@
Introduction
============

-This documentationm is written in :term:`reStructuredText` and formatted with :term:`Sphinx`.
+This documentation is written in :term:`reStructuredText` and formatted with :term:`Sphinx`.


What is Proxmox Backup
----------------------

Proxmox Backup is an enterprise class client-server backup software,
-specially optimized for `Proxmox Virtual Environment`_ to backup
+specially optimized for the `Proxmox Virtual Environment`_ to backup
:term:`virtual machine`\ s and :term:`container`\ s. It is also
possible to backup physical hosts.

@@ -24,23 +24,23 @@ Architecture
------------

Proxmox Backup uses a `Client-server model`_. The server is
-responsible to store the backup data, and provides an API to create
-backups and restore data. It is also possible to manage disks and
+responsible to store the backup data and provides an API to create
+backups and restore data. It is possible to manage disks and
other server side resources using this API.

A backup client uses this API to access the backed up data,
i.e. ``proxmox-backup-client`` is a command line tool to create
-backups and restore data. We also deliver an integrated client for
+backups and restore data. We deliver an integrated client for
QEMU_ with `Proxmox Virtual Environment`_.

A single backup is allowed to contain several archives. For example,
when you backup a :term:`virtual machine`, each disk is stored as a
separate archive inside that backup. The VM configuration also gets an
extra file. This way, it is easy to access and restore important parts
-of the backup, without having to scan the whole backup.
+of the backup without having to scan the whole backup.


-Main features
+Main Features
-------------

:Proxmox VE: The `Proxmox Virtual Environment`_ is fully

@@ -49,52 +49,52 @@ Main features

:GUI: We provide a graphical, web based user interface.

-:Deduplication: Incremental backup produces large amounts of duplicate
+:Deduplication: Incremental backups produce large amounts of duplicate
   data. The deduplication layer removes that redundancy and makes
-   inkremental backup small and space efficient.
+   incremental backups small and space efficient.

:Data Integrity: The built in `SHA-256`_ checksum algorithm assures the
   accuracy and consistency of your backups.

-:Remote Sync: It is possible to efficently synchronize data to remote
-   sites. Only deltas containing new data are transfered.
+:Remote Sync: It is possible to efficiently synchronize data to remote
+   sites. Only deltas containing new data are transferred.

:Performance: The whole software stack is written in :term:`Rust`,
-   which provides high speed and memory efficiency.
+   to provide high speed and memory efficiency.

:Compression: Ultra fast Zstandard_ compression is able to compress
   several gigabytes of data per second.

-:Encryption: Backups can be encrypted at client side using AES-256 in
+:Encryption: Backups can be encrypted client-side using AES-256 in
   GCM_ mode. This authenticated encryption mode (AE_) provides very
   high performance on modern hardware.

-:Open Source: No secrets. You have access to the whole source tree.
+:Open Source: No secrets. You have access to all the source code.

-:Support: Commercial support options available from `Proxmox`_.
+:Support: Commercial support options are available from `Proxmox`_.


Why Backup?
-----------

-The primary purpose of backup is to protect against data loss. Data
-loss can happen because of faulty hardware, but also by human errors.
+The primary purpose of a backup is to protect against data loss. Data
+loss can be caused by faulty hardware, but also by human error.

A common mistake is to delete a file or folder which is still
-required. Virtualization can amplify this problem, because it is now
-easy to delete a whole virtual machine by a single button press.
+required. Virtualization can amplify this problem. It is now
+easy to delete a whole virtual machine by pressing a single button.

-Backups can also serve as a toolkit for administrators to temporarily
+Backups can serve as a toolkit for administrators to temporarily
store data. For example, it is common practice to perform full backups
before installing major software updates. If something goes wrong, you
-can just restore the previous state.
+can restore the previous state.

Another reason for backups are legal requirements. Some data must be
-kept in a safe place for several years so that you can access it if
-required by law.
+kept in a safe place for several years by law, so that it can be accessed if
+required.

Data loss can be very costly as it can severely restrict your
-business. Therefore, make sure that you regularly perform a backup
+business. Therefore, make sure that you perform a backup regularly
and run restore tests.
@@ -5,12 +5,12 @@ Debian Package Repositories

All Debian based systems use APT_ as package
management tool. The list of repositories is defined in
-``/etc/apt/sources.list`` and ``.list`` files found inside
-``/etc/apt/sources.d/``. Updates can be installed directly using
+``/etc/apt/sources.list`` and ``.list`` files found in the
+``/etc/apt/sources.d/`` directory. Updates can be installed directly with
the ``apt`` command line tool, or via the GUI.

APT_ ``sources.list`` files list one package repository per line, with
-the most preferred source listed first. Empty lines are ignored, and a
+the most preferred source listed first. Empty lines are ignored and a
``#`` character anywhere on a line marks the remainder of that line as a
comment. The information available from the configured sources is
acquired by ``apt update``.

@@ -33,7 +33,7 @@ the backup server binaries.

`Proxmox Backup`_ Enterprise Repository
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-This is the default, stable and recommended repository, available for
+This is the default, stable, and recommended repository. It is available for
all `Proxmox Backup`_ subscription users. It contains the most stable packages,
and is suitable for production use. The ``pbs-enterprise`` repository is
enabled by default:

@@ -44,15 +44,13 @@ enabled by default:

    deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise

-As soon as updates are available, the superuser (``root@pam`` user) is
-notified via email about the available new packages. On the GUI, the
-change-log of each package can be viewed (if available), showing all
-details of the update. So you will never miss important security
-fixes.
+To never miss important security fixes, the superuser (``root@pam`` user) is
+notified via email about new packages as soon as they are available. The
+change-log and details of each package can be viewed in the GUI (if available).

Please note that you need a valid subscription key to access this
-repository. We offer different support levels, and you can find further
-details at https://www.proxmox.com/en/proxmox-backup/pricing.
+repository. More information regarding subscription levels and pricing can be
+found at https://www.proxmox.com/en/proxmox-backup/pricing.

.. note:: You can disable this repository by commenting out the above
   line using a `#` (at the start of the line). This prevents error

@@ -65,7 +63,7 @@ details at https://www.proxmox.com/en/proxmox-backup/pricing.

As the name suggests, you do not need a subscription key to access
this repository. It can be used for testing and non-production
-use. Its not recommended to run on production servers, as these
+use. It is not recommended to use it on production servers, because these
packages are not always heavily tested and validated.

We recommend to configure this repository in ``/etc/apt/sources.list``.
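For example, the repository line would look like this (assuming the standard Proxmox download host and the "buster" release used throughout this guide):

.. code-block:: sources.list

    deb http://download.proxmox.com/debian/pbs buster pbs-no-subscription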
@@ -92,9 +90,9 @@ latest packages and is heavily used by developers to test new
features.

.. warning:: the ``pbstest`` repository should (as the name implies)
-   only be used for testing new features or bug fixes.
+   only be used to test new features or bug fixes.

-As usual, you can configure this using ``/etc/apt/sources.list`` by
+You can configure this using ``/etc/apt/sources.list`` by
adding the following line:

.. code-block:: sources.list
@@ -1,15 +1,15 @@
Description
^^^^^^^^^^^

-``pxar`` is a command line utility used to create and manipulate archives in the
+``pxar`` is a command line utility to create and manipulate archives in the
:ref:`pxar-format`.
It is inspired by `casync file archive format
<http://0pointer.net/blog/casync-a-tool-for-distributing-file-system-images.html>`_,
-which has a similar use-case.
-The ``.pxar`` format is adapted to fulfill the specific needs of the proxmox
-backup server, for example efficient storage of hardlinks.
+which caters to a similar use-case.
+The ``.pxar`` format is adapted to fulfill the specific needs of the Proxmox
+Backup Server, for example, efficient storage of hardlinks.
The format is designed to reduce storage space needed on the server by achieving
-high de-duplication.
+a high level of de-duplication.

Creating an Archive
^^^^^^^^^^^^^^^^^^^

@@ -20,23 +20,23 @@ Run the following command to create an archive of a folder named ``source``:

    # pxar create archive.pxar source

-This will create a new archive called ``archive.pxar`` from the contents of the
+This will create a new archive called ``archive.pxar`` with the contents of the
``source`` folder.

.. NOTE:: ``pxar`` will not overwrite any existing archives. If an archive with
   the same name is already present in the target folder, the creation will
   fail.

-By default, ``pxar`` will skip certain mountpoints and not follow device
+By default, ``pxar`` will skip certain mountpoints and will not follow device
boundaries. This design decision is based on the primary use case of creating
-archives for backups, where it makes no sense to store the content of certain
+archives for backups. It is sensible to not back up the contents of certain
temporary or system specific files.
-In order to alter this behavior and follow device boundaries, use the
+To alter this behavior and follow device boundaries, use the
``--all-file-systems`` flag.

It is possible to exclude certain files and/or folders from the archive by
passing glob match patterns as additional parameters. Whenever a file is matched
-by one of the patterns, you will get a warning saying that this file is skipped
+by one of the patterns, you will get a warning stating that this file is skipped
and therefore not included in the archive.

For example, you can exclude all files ending in ``.txt`` from the archive
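A sketch of such an invocation, with the pattern passed as an additional (quoted) parameter as the text above describes:

.. code-block:: console

    # pxar create archive.pxar source '**/*.txt'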
@@ -50,7 +50,7 @@ Be aware that the shell itself will try to expand all of the glob patterns before
invoking ``pxar``.
In order to avoid this, all globs have to be quoted correctly.

-It is also possible to pass a list of match pattern to fulfill more complex
+It is possible to pass a list of match patterns to fulfill more complex
file exclusion/inclusion behavior, although it is recommended to use the
``.pxarexclude`` files instead for such cases.

@@ -67,7 +67,7 @@ All the glob pattern are relative to the ``source`` directory.

   previous ones. Permutations of the same patterns lead to different results.

``pxar`` will store the list of glob match patterns passed as parameters via the
-command line in a file called ``.pxarexclude-cli`` and store it at the root of
+command line in a file called ``.pxarexclude-cli`` and stores it at the root of
the archive.
If a file with this name is already present in the source folder during archive
creation, this file is not included in the archive and the file containing the

@@ -79,9 +79,9 @@ It is possible to create and place these files in any directory of the filesystem
tree.
These files must contain one pattern per line, again later patterns win over
previous ones.
-The patterns control file exclusion of files present within the given directory
+The patterns control file exclusions of files present within the given directory
or further below it in the tree.
-The behaviour is the same as described in :ref:`creating-backups`.
+The behavior is the same as described in :ref:`creating-backups`.

Extracting an Archive
^^^^^^^^^^^^^^^^^^^^^

@@ -96,7 +96,7 @@ with the following command:

If no target is provided, the content of the archive is extracted to the current
working directory.

-In order to restore only part of an archive or single files and/or folders,
+In order to restore only parts of an archive, single files and/or folders,
it is possible to pass the corresponding glob match patterns as additional
parameters or use the patterns stored in a file:

@@ -109,8 +109,8 @@ sub-folders in the archive ``etc.pxar`` to the target ``/restore/target/etc``.

A path to the file containing match patterns can be specified using the
``--files-from`` parameter.
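As an illustrative sketch (the ``**/*.conf`` pattern here is only an example), such a partial restore could look like:

.. code-block:: console

    # pxar extract etc.pxar /restore/target/etc '**/*.conf'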
-List the Content of an Archive
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+List the Contents of an Archive
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

To display the files and directories contained in an archive ``archive.pxar``,
run the following command:

@@ -126,7 +126,7 @@ Mounting an Archive
^^^^^^^^^^^^^^^^^^^

``pxar`` allows you to mount and inspect the contents of an archive via _`FUSE`.
-In order to mount an archive named ``archive.pxar`` to the mountpoint ``mnt``,
+In order to mount an archive named ``archive.pxar`` to the mountpoint ``/mnt``,
run the command:

.. code-block:: console

@@ -2,7 +2,7 @@
Description=Proxmox Backup API Proxy Server
Wants=network-online.target
After=network.target
-Requires=proxmox-backup.service
+Wants=proxmox-backup.service
+After=proxmox-backup.service

[Service]
@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};

// chacha20-poly1305

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};

use proxmox::api::{*, cli::*};

@@ -49,7 +49,7 @@ fn hello_command(
}

#[api(input: { properties: {} })]
-/// Quit command. Exit the programm.
+/// Quit command. Exit the program.
///
/// Returns: nothing
fn quit_command() -> Result<(), Error> {

@@ -83,7 +83,8 @@ fn main() -> Result<(), Error> {

        let args = shellword_split(&line)?;

-        let _ = handle_command(helper.cmd_def(), "", args, None);
+        let rpcenv = CliEnvironment::new();
+        let _ = handle_command(helper.cmd_def(), "", args, rpcenv, None);

        rl.add_history_entry(line);
    }

@@ -1,6 +1,6 @@
use std::io::Write;

-use failure::*;
+use anyhow::{Error};

use chrono::{DateTime, Utc};

@@ -44,8 +44,8 @@ async fn run() -> Result<(), Error> {

    let mut bytes = 0;
    for _ in 0..100 {
-        let writer = DummyWriter { bytes: 0 };
-        let writer = client.speedtest(writer).await?;
+        let mut writer = DummyWriter { bytes: 0 };
+        client.speedtest(&mut writer).await?;
        println!("Received {} bytes", writer.bytes);
        bytes += writer.bytes;
    }

@@ -59,8 +59,7 @@ async fn run() -> Result<(), Error> {
    Ok(())
}

-#[tokio::main]
-async fn main() {
+fn main() {
    if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
        eprintln!("ERROR: {}", err);
    }

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, Error};

use std::thread;
use std::path::PathBuf;

@@ -16,7 +16,7 @@ use std::io::Write;
// tar: dyntest1/testfile7.dat: File shrank by 2833252864 bytes; padding with zeros

// # pxar create test.pxar ./dyntest1/
-// Error: detected shrinked file "./dyntest1/testfile0.dat" (22020096 < 12679380992)
+// Error: detected shrunk file "./dyntest1/testfile0.dat" (22020096 < 12679380992)

fn create_large_file(path: PathBuf) {

@@ -2,7 +2,7 @@ use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

-use failure::*;
+use anyhow::{Error};
use futures::future::TryFutureExt;
use futures::stream::Stream;
use tokio::net::TcpStream;

@@ -2,7 +2,7 @@ use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

-use failure::*;
+use anyhow::{format_err, Error};
use futures::future::TryFutureExt;
use futures::stream::Stream;

@@ -1,6 +1,6 @@
use std::sync::Arc;

-use failure::*;
+use anyhow::{format_err, Error};
use futures::*;
use hyper::{Request, Response, Body};
use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};
use futures::*;

// Simple H2 server to test H2 speed with h2client.rs

@@ -2,7 +2,7 @@ extern crate proxmox_backup;

// also see https://www.johndcook.com/blog/standard_deviation/

-use failure::*;
+use anyhow::{Error};
use std::io::{Read, Write};

use proxmox_backup::backup::*;

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};
use futures::*;

extern crate proxmox_backup;

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};

use proxmox_backup::client::*;

@@ -17,7 +17,7 @@ async fn upload_speed() -> Result<usize, Error> {

    let backup_time = chrono::Utc::now();

-    let client = BackupWriter::start(client, datastore, "host", "speedtest", backup_time, false).await?;
+    let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false).await?;

    println!("start upload speed test");
    let res = client.upload_speedtest().await?;

@@ -1,13 +1,15 @@
-mod access;
+pub mod access;
pub mod admin;
pub mod backup;
pub mod config;
pub mod node;
pub mod reader;
mod subscription;
+pub mod status;
pub mod types;
pub mod version;
pub mod pull;
mod helpers;

use proxmox::api::router::SubdirMap;
use proxmox::api::Router;

@@ -23,6 +25,7 @@ pub const SUBDIRS: SubdirMap = &[
    ("nodes", &NODES_ROUTER),
    ("pull", &pull::ROUTER),
    ("reader", &reader::ROUTER),
+    ("status", &status::ROUTER),
    ("subscription", &subscription::ROUTER),
    ("version", &version::ROUTER),
];
@@ -1,18 +1,33 @@
-use failure::*;
+use anyhow::{bail, format_err, Error};

use serde_json::{json, Value};

-use proxmox::api::api;
+use proxmox::api::{api, RpcEnvironment, Permission, UserInformation};
use proxmox::api::router::{Router, SubdirMap};
-use proxmox::sortable;
+use proxmox::{sortable, identity};
use proxmox::{http_err, list_subdirs_api_method};

use crate::tools;
use crate::tools::ticket::*;
use crate::auth_helpers::*;
use crate::api2::types::*;

+use crate::config::cached_user_info::CachedUserInfo;
+use crate::config::acl::PRIV_PERMISSIONS_MODIFY;
+
+pub mod user;
+pub mod domain;
+pub mod acl;
+pub mod role;
+
fn authenticate_user(username: &str, password: &str) -> Result<(), Error> {

+    let user_info = CachedUserInfo::new()?;
+
+    if !user_info.is_active_user(&username) {
+        bail!("user account disabled or expired.");
+    }
+
    let ticket_lifetime = tools::ticket::TICKET_LIFETIME;

    if password.starts_with("PBS:") {

@@ -25,27 +40,17 @@ fn authenticate_user(username: &str, password: &str) -> Result<(), Error> {
        }
    }

-    if username == "root@pam" {
-        let mut auth = pam::Authenticator::with_password("proxmox-backup-auth").unwrap();
-        auth.get_handler().set_credentials("root", password);
-        auth.authenticate()?;
-        return Ok(());
-    }
-
-    bail!("inavlid credentials");
+    crate::auth::authenticate_user(username, password)
}

#[api(
    input: {
        properties: {
            username: {
-                type: String,
-                description: "User name.",
-                max_length: 64,
+                schema: PROXMOX_USER_ID_SCHEMA,
            },
            password: {
-                type: String,
-                description: "The secret password. This can also be a valid ticket.",
+                schema: PASSWORD_SCHEMA,
            },
        },
    },

@@ -66,6 +71,9 @@ fn authenticate_user(username: &str, password: &str) -> Result<(), Error> {
        },
    },
    protected: true,
+    access: {
+        permission: &Permission::World,
+    },
)]
/// Create or verify authentication ticket.
///

@@ -94,13 +102,72 @@ fn create_ticket(username: String, password: String) -> Result<Value, Error> {
        }
    }

+#[api(
+    input: {
+        properties: {
+            userid: {
+                schema: PROXMOX_USER_ID_SCHEMA,
+            },
+            password: {
+                schema: PASSWORD_SCHEMA,
+            },
+        },
+    },
+    access: {
+        description: "Anybody is allowed to change their own password. In addition, users with 'Permissions:Modify' privilege may change any password.",
+        permission: &Permission::Anybody,
+    },
+)]
+/// Change user password
+///
+/// Each user is allowed to change his own password. Superuser
+/// can change all passwords.
+fn change_password(
+    userid: String,
+    password: String,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+
+    let current_user = rpcenv.get_user()
+        .ok_or_else(|| format_err!("unknown user"))?;
+
+    let mut allowed = userid == current_user;
+
+    if userid == "root@pam" { allowed = true; }
+
+    if !allowed {
+        let user_info = CachedUserInfo::new()?;
+        let privs = user_info.lookup_privs(&current_user, &[]);
+        if (privs & PRIV_PERMISSIONS_MODIFY) != 0 { allowed = true; }
+    }
+
+    if !allowed {
+        bail!("you are not authorized to change the password.");
+    }
+
+    let (username, realm) = crate::auth::parse_userid(&userid)?;
+    let authenticator = crate::auth::lookup_authenticator(&realm)?;
+    authenticator.store_password(&username, &password)?;
+
+    Ok(Value::Null)
+}
+
#[sortable]
-const SUBDIRS: SubdirMap = &[
+const SUBDIRS: SubdirMap = &sorted!([
+    ("acl", &acl::ROUTER),
+    (
+        "password", &Router::new()
+            .put(&API_METHOD_CHANGE_PASSWORD)
+    ),
    (
        "ticket", &Router::new()
            .post(&API_METHOD_CREATE_TICKET)
-    )
-];
+    ),
+    ("domains", &domain::ROUTER),
+    ("roles", &role::ROUTER),
+    ("users", &user::ROUTER),
+]);

pub const ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SUBDIRS))
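For illustration, the new ``password`` endpoint can be exercised over the REST API roughly as follows (server, ticket, and CSRF token are placeholders, not literal values):

.. code-block:: console

    # curl -k -X PUT 'https://<server>:8007/api2/json/access/password' \
          -H 'Cookie: PBSAuthCookie=<ticket>' \
          -H 'CSRFPreventionToken: <token>' \
          -d 'userid=user1@pbs' -d 'password=<new password>'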
src/api2/access/acl.rs (new file, 228 lines)

@@ -0,0 +1,228 @@
use anyhow::{bail, Error};
use ::serde::{Deserialize, Serialize};

use proxmox::api::{api, Router, RpcEnvironment, Permission};

use crate::api2::types::*;
use crate::config::acl;
use crate::config::acl::{Role, PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};

#[api(
    properties: {
        propagate: {
            schema: ACL_PROPAGATE_SCHEMA,
        },
        path: {
            schema: ACL_PATH_SCHEMA,
        },
        ugid_type: {
            schema: ACL_UGID_TYPE_SCHEMA,
        },
        ugid: {
            type: String,
            description: "User or Group ID.",
        },
        roleid: {
            type: Role,
        }
    }
)]
#[derive(Serialize, Deserialize)]
/// ACL list entry.
pub struct AclListItem {
    path: String,
    ugid: String,
    ugid_type: String,
    propagate: bool,
    roleid: String,
}

fn extract_acl_node_data(
    node: &acl::AclTreeNode,
    path: &str,
    list: &mut Vec<AclListItem>,
    exact: bool,
) {
    for (user, roles) in &node.users {
        for (role, propagate) in roles {
            list.push(AclListItem {
                path: if path.is_empty() { String::from("/") } else { path.to_string() },
                propagate: *propagate,
                ugid_type: String::from("user"),
                ugid: user.to_string(),
                roleid: role.to_string(),
            });
        }
    }
    for (group, roles) in &node.groups {
        for (role, propagate) in roles {
            list.push(AclListItem {
                path: if path.is_empty() { String::from("/") } else { path.to_string() },
                propagate: *propagate,
                ugid_type: String::from("group"),
                ugid: group.to_string(),
                roleid: role.to_string(),
            });
        }
    }
    if exact {
        return;
    }
    for (comp, child) in &node.children {
        let new_path = format!("{}/{}", path, comp);
        extract_acl_node_data(child, &new_path, list, exact);
    }
}

#[api(
    input: {
        properties: {
            path: {
                schema: ACL_PATH_SCHEMA,
                optional: true,
            },
            exact: {
                description: "If set, returns only ACL for the exact path.",
                type: bool,
                optional: true,
                default: false,
            },
        },
    },
    returns: {
        description: "ACL entry list.",
        type: Array,
        items: {
            type: AclListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(&["access", "acl"], PRIV_SYS_AUDIT, false),
    },
)]
/// Read Access Control List (ACLs).
pub fn read_acl(
    path: Option<String>,
    exact: bool,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<AclListItem>, Error> {

    //let auth_user = rpcenv.get_user().unwrap();

    let (mut tree, digest) = acl::config()?;

    let mut list: Vec<AclListItem> = Vec::new();
    if let Some(path) = &path {
        if let Some(node) = &tree.find_node(path) {
            extract_acl_node_data(&node, path, &mut list, exact);
        }
    } else {
        extract_acl_node_data(&tree.root, "", &mut list, exact);
    }

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
}

#[api(
    protected: true,
    input: {
        properties: {
            path: {
                schema: ACL_PATH_SCHEMA,
            },
            role: {
                type: Role,
            },
            propagate: {
                optional: true,
                schema: ACL_PROPAGATE_SCHEMA,
            },
            userid: {
                optional: true,
                schema: PROXMOX_USER_ID_SCHEMA,
            },
            group: {
                optional: true,
                schema: PROXMOX_GROUP_ID_SCHEMA,
            },
            delete: {
                optional: true,
                description: "Remove permissions (instead of adding it).",
                type: bool,
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["access", "acl"], PRIV_PERMISSIONS_MODIFY, false),
    },
)]
/// Update Access Control List (ACLs).
pub fn update_acl(
    path: String,
    role: String,
    propagate: Option<bool>,
    userid: Option<String>,
    group: Option<String>,
    delete: Option<bool>,
    digest: Option<String>,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

    let (mut tree, expected_digest) = acl::config()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    let propagate = propagate.unwrap_or(true);

    let delete = delete.unwrap_or(false);

    if let Some(ref _group) = group {
        bail!("parameter 'group' - groups are currently not supported.");
    } else if let Some(ref userid) = userid {
        if !delete { // Note: we allow to delete non-existent users
            let user_cfg = crate::config::user::cached_config()?;
            if user_cfg.sections.get(userid).is_none() {
                bail!("no such user.");
            }
        }
    } else {
        bail!("missing 'userid' or 'group' parameter.");
    }

    if !delete { // Note: we allow to delete entries with invalid path
        acl::check_acl_path(&path)?;
    }

    if let Some(userid) = userid {
        if delete {
            tree.delete_user_role(&path, &userid, &role);
        } else {
            tree.insert_user_role(&path, &userid, &role, propagate);
        }
    } else if let Some(group) = group {
        if delete {
            tree.delete_group_role(&path, &group, &role);
        } else {
            tree.insert_group_role(&path, &group, &role, propagate);
        }
    }

    acl::save_config(&tree)?;

    Ok(())
}

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_READ_ACL)
    .put(&API_METHOD_UPDATE_ACL);
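A usage sketch for this endpoint (placeholders as above; the role name is illustrative): granting a role on a datastore path via ``update_acl``:

.. code-block:: console

    # curl -k -X PUT 'https://<server>:8007/api2/json/access/acl' \
          -H 'Cookie: PBSAuthCookie=<ticket>' \
          -H 'CSRFPreventionToken: <token>' \
          -d 'path=/datastore/store2' -d 'role=<role>' -d 'userid=user1@pbs'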
src/api2/access/domain.rs (new file, 47 lines)

@@ -0,0 +1,47 @@
use anyhow::{Error};

use serde_json::{json, Value};

use proxmox::api::{api, Permission};
use proxmox::api::router::Router;

use crate::api2::types::*;

#[api(
    returns: {
        description: "List of realms.",
        type: Array,
        items: {
            type: Object,
            description: "User configuration (without password).",
            properties: {
                realm: {
                    description: "Realm ID.",
                    type: String,
                },
                comment: {
                    schema: SINGLE_LINE_COMMENT_SCHEMA,
                    optional: true,
                },
                default: {
                    description: "Default realm.",
                    type: bool,
                }
            },
        }
    },
    access: {
        description: "Anyone can access this, because we need that list for the login box (before the user is authenticated).",
        permission: &Permission::World,
    }
)]
/// Authentication domain/realm index.
fn list_domains() -> Result<Value, Error> {
    let mut list = Vec::new();
    list.push(json!({ "realm": "pam", "comment": "Linux PAM standard authentication", "default": true }));
    list.push(json!({ "realm": "pbs", "comment": "Proxmox Backup authentication server" }));
    Ok(list.into())
}

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_DOMAINS);
src/api2/access/role.rs (new file, 58 lines)

@@ -0,0 +1,58 @@
use anyhow::Error;

use serde_json::{json, Value};

use proxmox::api::{api, Permission};
use proxmox::api::router::Router;

use crate::api2::types::*;
use crate::config::acl::{Role, ROLE_NAMES, PRIVILEGES};

#[api(
    returns: {
        description: "List of roles.",
        type: Array,
        items: {
            type: Object,
            description: "User name with description.",
            properties: {
                roleid: {
                    type: Role,
                },
                privs: {
                    type: Array,
                    description: "List of Privileges",
                    items: {
                        type: String,
                        description: "A Privilege",
                    },
                },
                comment: {
                    schema: SINGLE_LINE_COMMENT_SCHEMA,
                    optional: true,
                },
            },
        }
    },
    access: {
        permission: &Permission::Anybody,
    }
)]
/// Role list
fn list_roles() -> Result<Value, Error> {
    let mut list = Vec::new();

    for (role, (privs, comment)) in ROLE_NAMES.iter() {
        let mut priv_list = Vec::new();
        for (name, privilege) in PRIVILEGES.iter() {
            if privs & privilege > 0 {
                priv_list.push(name.clone());
            }
        }
        list.push(json!({ "roleid": role, "privs": priv_list, "comment": comment }));
    }
    Ok(list.into())
}

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_ROLES);
src/api2/access/user.rs (new file, 295 lines)

@@ -0,0 +1,295 @@
use anyhow::{bail, Error};
use serde_json::Value;

use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::schema::{Schema, StringSchema};

use crate::api2::types::*;
use crate::config::user;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};

pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
    .format(&PASSWORD_FORMAT)
    .min_length(5)
    .max_length(64)
    .schema();

#[api(
    input: {
        properties: {},
    },
    returns: {
        description: "List users (with config digest).",
        type: Array,
        items: { type: user::User },
    },
    access: {
        permission: &Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
    },
)]
/// List all users
pub fn list_users(
    _param: Value,
    _info: &ApiMethod,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<user::User>, Error> {

    let (config, digest) = user::config()?;

    let list = config.convert_to_typed_array("user")?;

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
}

#[api(
    protected: true,
    input: {
        properties: {
            userid: {
                schema: PROXMOX_USER_ID_SCHEMA,
            },
            comment: {
                schema: SINGLE_LINE_COMMENT_SCHEMA,
                optional: true,
            },
            password: {
                schema: PBS_PASSWORD_SCHEMA,
                optional: true,
            },
            enable: {
                schema: user::ENABLE_USER_SCHEMA,
                optional: true,
            },
            expire: {
                schema: user::EXPIRE_USER_SCHEMA,
                optional: true,
            },
            firstname: {
                schema: user::FIRST_NAME_SCHEMA,
                optional: true,
            },
            lastname: {
                schema: user::LAST_NAME_SCHEMA,
                optional: true,
            },
            email: {
                schema: user::EMAIL_SCHEMA,
                optional: true,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
    },
)]
/// Create new user.
pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

    let user: user::User = serde_json::from_value(param)?;

    let (mut config, _digest) = user::config()?;

    if let Some(_) = config.sections.get(&user.userid) {
        bail!("user '{}' already exists.", user.userid);
    }

    let (username, realm) = crate::auth::parse_userid(&user.userid)?;
    let authenticator = crate::auth::lookup_authenticator(&realm)?;

    config.set_data(&user.userid, "user", &user)?;

    user::save_config(&config)?;

    if let Some(password) = password {
        authenticator.store_password(&username, &password)?;
    }

    Ok(())
}

#[api(
    input: {
        properties: {
            userid: {
                schema: PROXMOX_USER_ID_SCHEMA,
            },
        },
    },
    returns: {
        description: "The user configuration (with config digest).",
        type: user::User,
    },
    access: {
        permission: &Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
    },
)]
/// Read user configuration data.
pub fn read_user(userid: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<user::User, Error> {
    let (config, digest) = user::config()?;
    let user = config.lookup("user", &userid)?;
    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
    Ok(user)
}

#[api(
    protected: true,
    input: {
        properties: {
            userid: {
                schema: PROXMOX_USER_ID_SCHEMA,
            },
            comment: {
                optional: true,
                schema: SINGLE_LINE_COMMENT_SCHEMA,
            },
            password: {
                schema: PBS_PASSWORD_SCHEMA,
                optional: true,
            },
            enable: {
                schema: user::ENABLE_USER_SCHEMA,
                optional: true,
            },
            expire: {
                schema: user::EXPIRE_USER_SCHEMA,
                optional: true,
            },
            firstname: {
                schema: user::FIRST_NAME_SCHEMA,
                optional: true,
            },
            lastname: {
                schema: user::LAST_NAME_SCHEMA,
                optional: true,
            },
            email: {
                schema: user::EMAIL_SCHEMA,
                optional: true,
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
    },
)]
/// Update user configuration.
pub fn update_user(
    userid: String,
    comment: Option<String>,
    enable: Option<bool>,
    expire: Option<i64>,
    password: Option<String>,
    firstname: Option<String>,
    lastname: Option<String>,
    email: Option<String>,
    digest: Option<String>,
) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

    let (mut config, expected_digest) = user::config()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    let mut data: user::User = config.lookup("user", &userid)?;

    if let Some(comment) = comment {
        let comment = comment.trim().to_string();
        if comment.is_empty() {
            data.comment = None;
        } else {
            data.comment = Some(comment);
        }
    }

    if let Some(enable) = enable {
        data.enable = if enable { None } else { Some(false) };
    }

    if let Some(expire) = expire {
        data.expire = if expire > 0 { Some(expire) } else { None };
    }

    if let Some(password) = password {
        let (username, realm) = crate::auth::parse_userid(&userid)?;
        let authenticator = crate::auth::lookup_authenticator(&realm)?;
        authenticator.store_password(&username, &password)?;
    }

    if let Some(firstname) = firstname {
        data.firstname = if firstname.is_empty() { None } else { Some(firstname) };
    }

    if let Some(lastname) = lastname {
        data.lastname = if lastname.is_empty() { None } else { Some(lastname) };
    }
    if let Some(email) = email {
        data.email = if email.is_empty() { None } else { Some(email) };
    }

    config.set_data(&userid, "user", &data)?;

    user::save_config(&config)?;

    Ok(())
}

#[api(
    protected: true,
    input: {
        properties: {
            userid: {
                schema: PROXMOX_USER_ID_SCHEMA,
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
    },
)]
/// Remove a user from the configuration file.
pub fn delete_user(userid: String, digest: Option<String>) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

    let (mut config, expected_digest) = user::config()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    match config.sections.get(&userid) {
        Some(_) => { config.sections.remove(&userid); },
        None => bail!("user '{}' does not exist.", userid),
    }

    user::save_config(&config)?;

    Ok(())
}

const ITEM_ROUTER: Router = Router::new()
    .get(&API_METHOD_READ_USER)
    .put(&API_METHOD_UPDATE_USER)
    .delete(&API_METHOD_DELETE_USER);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_USERS)
    .post(&API_METHOD_CREATE_USER)
    .match_all("userid", &ITEM_ROUTER);
@@ -2,9 +2,11 @@ use proxmox::api::router::{Router, SubdirMap};
use proxmox::list_subdirs_api_method;

pub mod datastore;
+pub mod sync;

const SUBDIRS: SubdirMap = &[
-    ("datastore", &datastore::ROUTER)
+    ("datastore", &datastore::ROUTER),
+    ("sync", &sync::ROUTER)
];

pub const ROUTER: Router = Router::new()

(File diff suppressed because it is too large.)

src/api2/admin/sync.rs (new file, 130 lines)
@@ -0,0 +1,130 @@
use anyhow::{Error};
use serde_json::Value;
use std::collections::HashMap;

use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
use proxmox::api::router::SubdirMap;
use proxmox::{list_subdirs_api_method, sortable};

use crate::api2::types::*;
use crate::api2::pull::{get_pull_parameters};
use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
use crate::server::{self, TaskListInfo, WorkerTask};
use crate::tools::systemd::time::{
    parse_calendar_event, compute_next_event};

#[api(
    input: {
        properties: {},
    },
    returns: {
        description: "List configured jobs and their status.",
        type: Array,
        items: { type: sync::SyncJobStatus },
    },
)]
/// List all sync jobs
pub fn list_sync_jobs(
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SyncJobStatus>, Error> {

    let (config, digest) = sync::config()?;

    let mut list: Vec<SyncJobStatus> = config.convert_to_typed_array("sync")?;

    let mut last_tasks: HashMap<String, &TaskListInfo> = HashMap::new();
    let tasks = server::read_task_list()?;

    for info in tasks.iter() {
        let worker_id = match &info.upid.worker_id {
            Some(id) => id,
            _ => { continue; },
        };
        if let Some(last) = last_tasks.get(worker_id) {
            if last.upid.starttime < info.upid.starttime {
                last_tasks.insert(worker_id.to_string(), &info);
            }
        } else {
            last_tasks.insert(worker_id.to_string(), &info);
        }
    }

    for job in &mut list {
        let mut last = 0;
        if let Some(task) = last_tasks.get(&job.id) {
            job.last_run_upid = Some(task.upid_str.clone());
            if let Some((endtime, status)) = &task.state {
                job.last_run_state = Some(String::from(status));
                job.last_run_endtime = Some(*endtime);
                last = *endtime;
            }
        }

        job.next_run = (|| -> Option<i64> {
            let schedule = job.schedule.as_ref()?;
            let event = parse_calendar_event(&schedule).ok()?;
            compute_next_event(&event, last, false).ok()
        })();
    }

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
}
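
The loop above reduces the task list to the most recent entry per worker id before attaching status to each job (the immediately invoked closure afterwards is just a way to use `?` on `Option` values inline). The same reduction in isolation, assuming a simplified `TaskInfo`; the real `TaskListInfo`/UPID types carry more fields:

```rust
use std::collections::HashMap;

struct TaskInfo { worker_id: Option<String>, starttime: i64 }

// Keep only the most recent task per worker id, skipping tasks without one.
fn latest_tasks(tasks: &[TaskInfo]) -> HashMap<&str, &TaskInfo> {
    let mut last: HashMap<&str, &TaskInfo> = HashMap::new();
    for info in tasks {
        let id = match &info.worker_id { Some(id) => id.as_str(), None => continue };
        match last.get(id) {
            Some(prev) if prev.starttime >= info.starttime => {} // keep the newer entry
            _ => { last.insert(id, info); }
        }
    }
    last
}

fn main() {
    let tasks = vec![
        TaskInfo { worker_id: Some("job1".into()), starttime: 10 },
        TaskInfo { worker_id: Some("job1".into()), starttime: 20 },
        TaskInfo { worker_id: None, starttime: 30 },
    ];
    let last = latest_tasks(&tasks);
    assert_eq!(last["job1"].starttime, 20);
}
```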

#[api(
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            }
        }
    }
)]
/// Runs the sync jobs manually.
async fn run_sync_job(
    id: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let (config, _digest) = sync::config()?;
    let sync_job: SyncJobConfig = config.lookup("sync", &id)?;

    let username = rpcenv.get_user().unwrap();

    let delete = sync_job.remove_vanished.unwrap_or(true);
    let (client, src_repo, tgt_store) = get_pull_parameters(&sync_job.store, &sync_job.remote, &sync_job.remote_store).await?;

    let upid_str = WorkerTask::spawn("syncjob", Some(id.clone()), &username.clone(), false, move |worker| async move {

        worker.log(format!("sync job '{}' start", &id));

        crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, String::from("backup@pam")).await?;

        worker.log(format!("sync job '{}' end", &id));

        Ok(())
    })?;

    Ok(upid_str)
}
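
`WorkerTask::spawn` returns the UPID string immediately while the pull runs in the background. A rough standalone sketch of that fire-and-return shape using a plain thread; the `Upid`/`run_job` names are made up for illustration, and the real worker framework additionally tracks state and logs:

```rust
use std::thread;

#[derive(Debug)]
struct Upid(String); // hypothetical task identifier the client can poll

fn run_job(id: &str) -> Upid {
    let job_id = id.to_string();
    // the long-running work happens in the background
    thread::spawn(move || {
        println!("sync job '{}' start", job_id);
        // ... the actual pull would run here ...
        println!("sync job '{}' end", job_id);
    });
    // the caller gets an identifier right away
    Upid(format!("UPID:{}", id))
}

fn main() {
    let upid = run_job("job1");
    println!("started: {:?}", upid);
    thread::sleep(std::time::Duration::from_millis(50));
}
```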

#[sortable]
const SYNC_INFO_SUBDIRS: SubdirMap = &[
    (
        "run",
        &Router::new()
            .post(&API_METHOD_RUN_SYNC_JOB)
    ),
];

const SYNC_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SYNC_INFO_SUBDIRS))
    .subdirs(SYNC_INFO_SUBDIRS);


pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_SYNC_JOBS)
    .match_all("id", &SYNC_INFO_ROUTER);
@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::header::{HeaderValue, UPGRADE};
use hyper::http::request::Parts;
@@ -6,14 +6,16 @@ use hyper::{Body, Response, StatusCode};
use serde_json::{json, Value};

use proxmox::{sortable, identity, list_subdirs_api_method};
-use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment};
+use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;

-use crate::tools::{self, WrappedReaderStream};
+use crate::tools;
use crate::server::{WorkerTask, H2Service};
use crate::backup::*;
use crate::api2::types::*;
+use crate::config::acl::PRIV_DATASTORE_BACKUP;
+use crate::config::cached_user_info::CachedUserInfo;

mod environment;
use environment::*;
@@ -37,6 +39,10 @@ pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
        ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
    ]),
)
+).access(
+    // Note: parameter 'store' is no uri parameter, so we need to test inside function body
+    Some("The user needs Datastore.Backup privilege on /datastore/{store} and needs to own the backup group."),
+    &Permission::Anybody
);

fn upgrade_to_backup_protocol(
@@ -47,10 +53,16 @@ fn upgrade_to_backup_protocol(
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let debug = param["debug"].as_bool().unwrap_or(false);

+        let username = rpcenv.get_user().unwrap();
+
        let store = tools::required_string_param(&param, "store")?.to_owned();
+
+        let user_info = CachedUserInfo::new()?;
+        user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
+
        let datastore = DataStore::lookup_datastore(&store)?;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
@@ -73,10 +85,15 @@ fn upgrade_to_backup_protocol(

        let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);

-        let username = rpcenv.get_user().unwrap();
        let env_type = rpcenv.env_type();

        let backup_group = BackupGroup::new(backup_type, backup_id);
+        let owner = datastore.create_backup_group(&backup_group, &username)?;
+        // permission check
+        if owner != username { // only the owner is allowed to create additional snapshots
+            bail!("backup owner check failed ({} != {})", username, owner);
+        }
+
        let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group).unwrap_or(None);
        let backup_dir = BackupDir::new_with_group(backup_group, backup_time);

@@ -90,7 +107,7 @@ fn upgrade_to_backup_protocol(
        }

        let (path, is_new) = datastore.create_backup_dir(&backup_dir)?;
-        if !is_new { bail!("backup directorty already exists."); }
+        if !is_new { bail!("backup directory already exists."); }

        WorkerTask::spawn("backup", Some(worker_id), &username.clone(), true, move |worker| {
            let mut env = BackupEnvironment::new(
@@ -106,13 +123,12 @@ fn upgrade_to_backup_protocol(
            let abort_future = worker.abort_future();

            let env2 = env.clone();
-            let env3 = env.clone();

-            let req_fut = req_body
+            let mut req_fut = req_body
                .on_upgrade()
                .map_err(Error::from)
                .and_then(move |conn| {
-                    env3.debug("protocol upgrade done");
+                    env2.debug("protocol upgrade done");

                    let mut http = hyper::server::conn::Http::new();
                    http.http2_only(true);
@@ -124,36 +140,39 @@ fn upgrade_to_backup_protocol(
                    http.serve_connection(conn, service)
                        .map_err(Error::from)
                });
-            let abort_future = abort_future
+            let mut abort_future = abort_future
                .map(|_| Err(format_err!("task aborted")));

-            use futures::future::Either;
-            future::select(req_fut, abort_future)
-                .map(|res| match res {
-                    Either::Left((Ok(res), _)) => Ok(res),
-                    Either::Left((Err(err), _)) => Err(err),
-                    Either::Right((Ok(res), _)) => Ok(res),
-                    Either::Right((Err(err), _)) => Err(err),
-                })
-                .and_then(move |_result| async move {
-                    env.ensure_finished()?;
-                    env.log("backup finished sucessfully");
-                    Ok(())
-                })
-                .then(move |result| async move {
-                    if let Err(err) = result {
-                        match env2.ensure_finished() {
-                            Ok(()) => {}, // ignore error after finish
-                            _ => {
-                                env2.log(format!("backup failed: {}", err));
-                                env2.log("removing failed backup");
-                                env2.remove_backup()?;
-                                return Err(err);
-                            }
-                        }
-                    }
-                    Ok(())
-                })
+            async move {
+                let res = select!{
+                    req = req_fut => req,
+                    abrt = abort_future => abrt,
+                };
+
+                match (res, env.ensure_finished()) {
+                    (Ok(_), Ok(())) => {
+                        env.log("backup finished successfully");
+                        Ok(())
+                    },
+                    (Err(err), Ok(())) => {
+                        // ignore errors after finish
+                        env.log(format!("backup had errors but finished: {}", err));
+                        Ok(())
+                    },
+                    (Ok(_), Err(err)) => {
+                        env.log(format!("backup ended and finish failed: {}", err));
+                        env.log("removing unfinished backup");
+                        env.remove_backup()?;
+                        Err(err)
+                    },
+                    (Err(err), Err(_)) => {
+                        env.log(format!("backup failed: {}", err));
+                        env.log("removing failed backup");
+                        env.remove_backup()?;
+                        Err(err)
+                    },
+                }
+            }
        })?;
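
The `select!` plus the match on `(res, env.ensure_finished())` forms a small 2x2 decision table: the backup is kept only when the finish step succeeded, and a protocol error is otherwise either ignored or propagated. The same table as a standalone function, simplified to `String` errors:

```rust
// (request result, finish result) -> keep or remove the backup
fn resolve(req: Result<(), String>, finished: Result<(), String>) -> Result<&'static str, String> {
    match (req, finished) {
        (Ok(()), Ok(())) => Ok("finished successfully"),
        (Err(_err), Ok(())) => Ok("had errors but finished"), // errors after finish are ignored
        (Ok(()), Err(err)) => Err(format!("ended but finish failed: {}", err)), // remove unfinished backup
        (Err(err), Err(_)) => Err(format!("failed: {}", err)), // remove failed backup
    }
}

fn main() {
    assert!(resolve(Ok(()), Ok(())).is_ok());
    assert!(resolve(Err("connection reset".into()), Ok(())).is_ok());
    assert!(resolve(Ok(()), Err("open writer".into())).is_err());
}
```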

        let response = Response::builder()
@@ -180,7 +199,6 @@ pub const BACKUP_API_SUBDIRS: SubdirMap = &[
    ),
    (
        "dynamic_index", &Router::new()
-            .download(&API_METHOD_DYNAMIC_CHUNK_INDEX)
            .post(&API_METHOD_CREATE_DYNAMIC_INDEX)
            .put(&API_METHOD_DYNAMIC_APPEND)
    ),
@@ -203,10 +221,13 @@ pub const BACKUP_API_SUBDIRS: SubdirMap = &[
    ),
    (
        "fixed_index", &Router::new()
-            .download(&API_METHOD_FIXED_CHUNK_INDEX)
            .post(&API_METHOD_CREATE_FIXED_INDEX)
            .put(&API_METHOD_FIXED_APPEND)
    ),
+    (
+        "previous", &Router::new()
+            .download(&API_METHOD_DOWNLOAD_PREVIOUS)
+    ),
    (
        "speedtest", &Router::new()
            .upload(&API_METHOD_UPLOAD_SPEEDTEST)
@@ -265,6 +286,8 @@ pub const API_METHOD_CREATE_FIXED_INDEX: ApiMethod = ApiMethod::new(
            .minimum(1)
            .schema()
        ),
+        ("reuse-csum", true, &StringSchema::new("If set, compare last backup's \
+            csum and reuse index for incremental backup if it matches.").schema()),
    ]),
)
);
@@ -277,10 +300,9 @@ fn create_fixed_index(

    let env: &BackupEnvironment = rpcenv.as_ref();

-    println!("PARAM: {:?}", param);
-
    let name = tools::required_string_param(&param, "archive-name")?.to_owned();
    let size = tools::required_integer_param(&param, "size")? as usize;
+    let reuse_csum = param["reuse-csum"].as_str();

    let archive_name = name.clone();
    if !archive_name.ends_with(".fidx") {
@@ -288,12 +310,49 @@ fn create_fixed_index(
    }

    let mut path = env.backup_dir.relative_path();
-    path.push(archive_name);
+    path.push(&archive_name);

    let chunk_size = 4096*1024; // todo: ??

-    let index = env.datastore.create_fixed_writer(&path, size, chunk_size)?;
-    let wid = env.register_fixed_writer(index, name, size, chunk_size as u32)?;
+    // do incremental backup if csum is set
+    let mut reader = None;
+    let mut incremental = false;
+    if let Some(csum) = reuse_csum {
+        incremental = true;
+        let last_backup = match &env.last_backup {
+            Some(info) => info,
+            None => {
+                bail!("cannot reuse index - no previous backup exists");
+            }
+        };
+
+        let mut last_path = last_backup.backup_dir.relative_path();
+        last_path.push(&archive_name);
+
+        let index = match env.datastore.open_fixed_reader(last_path) {
+            Ok(index) => index,
+            Err(_) => {
+                bail!("cannot reuse index - no previous backup exists for archive");
+            }
+        };
+
+        let (old_csum, _) = index.compute_csum();
+        let old_csum = proxmox::tools::digest_to_hex(&old_csum);
+        if old_csum != csum {
+            bail!("expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
+                csum, old_csum);
+        }
+
+        reader = Some(index);
+    }
+
+    let mut writer = env.datastore.create_fixed_writer(&path, size, chunk_size)?;
+
+    if let Some(reader) = reader {
+        writer.clone_data_from(&reader)?;
+    }
+
+    let wid = env.register_fixed_writer(writer, name, size, chunk_size as u32, incremental)?;

    env.log(format!("created new fixed index {} ({:?})", wid, path));

@@ -359,7 +418,7 @@ fn dynamic_append (

        env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;

-        env.debug(format!("sucessfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
+        env.debug(format!("successfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
    }

    Ok(Value::Null)
@@ -424,7 +483,7 @@ fn fixed_append (

        env.fixed_writer_append_chunk(wid, offset, size, &digest)?;

-        env.debug(format!("sucessfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
+        env.debug(format!("successfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
    }

    Ok(Value::Null)
@@ -479,7 +538,7 @@ fn close_dynamic_index (

    env.dynamic_writer_close(wid, chunk_count, size, csum)?;

-    env.log(format!("sucessfully closed dynamic index {}", wid));
+    env.log(format!("successfully closed dynamic index {}", wid));

    Ok(Value::Null)
}
@@ -501,15 +560,15 @@ pub const API_METHOD_CLOSE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    (
        "chunk-count",
        false,
-        &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
-            .minimum(1)
+        &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks. Ignored for incremental backups.")
+            .minimum(0)
            .schema()
    ),
    (
        "size",
        false,
-        &IntegerSchema::new("File size. This is used to verify that the server got all data.")
-            .minimum(1)
+        &IntegerSchema::new("File size. This is used to verify that the server got all data. Ignored for incremental backups.")
+            .minimum(0)
            .schema()
    ),
    ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
@@ -533,7 +592,7 @@ fn close_fixed_index (

    env.fixed_writer_close(wid, chunk_count, size, csum)?;

-    env.log(format!("sucessfully closed fixed index {}", wid));
+    env.log(format!("successfully closed fixed index {}", wid));

    Ok(Value::Null)
}
@@ -547,26 +606,23 @@ fn finish_backup (
    let env: &BackupEnvironment = rpcenv.as_ref();

    env.finish_backup()?;
-    env.log("sucessfully finished backup");
+    env.log("successfully finished backup");

    Ok(Value::Null)
}

#[sortable]
-pub const API_METHOD_DYNAMIC_CHUNK_INDEX: ApiMethod = ApiMethod::new(
-    &ApiHandler::AsyncHttp(&dynamic_chunk_index),
+pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
+    &ApiHandler::AsyncHttp(&download_previous),
    &ObjectSchema::new(
-        r###"
-Download the dynamic chunk index from the previous backup.
-Simply returns an empty list if this is the first backup.
-"### ,
+        "Download archive from previous backup.",
        &sorted!([
            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA)
        ]),
    )
);

-fn dynamic_chunk_index(
+fn download_previous(
    _parts: Parts,
    _req_body: Body,
    param: Value,
@@ -579,130 +635,38 @@ fn dynamic_chunk_index(

    let archive_name = tools::required_string_param(&param, "archive-name")?.to_owned();

-    if !archive_name.ends_with(".didx") {
-        bail!("wrong archive extension: '{}'", archive_name);
-    }
-
-    let empty_response = {
-        Response::builder()
-            .status(StatusCode::OK)
-            .body(Body::empty())?
-    };
-
    let last_backup = match &env.last_backup {
        Some(info) => info,
-        None => return Ok(empty_response),
+        None => bail!("no previous backup"),
    };

-    let mut path = last_backup.backup_dir.relative_path();
+    let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
    path.push(&archive_name);

-    let index = match env.datastore.open_dynamic_reader(path) {
-        Ok(index) => index,
-        Err(_) => {
-            env.log(format!("there is no last backup for archive '{}'", archive_name));
-            return Ok(empty_response);
-        }
-    };
+    {
+        let index: Option<Box<dyn IndexFile>> = match archive_type(&archive_name)? {
+            ArchiveType::FixedIndex => {
+                let index = env.datastore.open_fixed_reader(&path)?;
+                Some(Box::new(index))
+            }
+            ArchiveType::DynamicIndex => {
+                let index = env.datastore.open_dynamic_reader(&path)?;
+                Some(Box::new(index))
+            }
+            _ => { None }
+        };
+        if let Some(index) = index {
+            env.log(format!("register chunks in '{}' from previous backup.", archive_name));
+
+            for pos in 0..index.index_count() {
+                let info = index.chunk_info(pos).unwrap();
+                let size = info.range.end - info.range.start;
+                env.register_chunk(info.digest, size as u32)?;
+            }
+        }
+    };

-    env.log(format!("download last backup index for archive '{}'", archive_name));
-
-    let count = index.index_count();
-    for pos in 0..count {
-        let (start, end, digest) = index.chunk_info(pos)?;
-        let size = (end - start) as u32;
-        env.register_chunk(digest, size)?;
-    }
-
-    let reader = DigestListEncoder::new(Box::new(index));
-
-    let stream = WrappedReaderStream::new(reader);
-
-    // fixme: set size, content type?
-    let response = http::Response::builder()
-        .status(200)
-        .body(Body::wrap_stream(stream))?;
-
-    Ok(response)
-    }.boxed()
-}
-
-#[sortable]
-pub const API_METHOD_FIXED_CHUNK_INDEX: ApiMethod = ApiMethod::new(
-    &ApiHandler::AsyncHttp(&fixed_chunk_index),
-    &ObjectSchema::new(
-        r###"
-Download the fixed chunk index from the previous backup.
-Simply returns an empty list if this is the first backup.
-"### ,
-        &sorted!([
-            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA)
-        ]),
-    )
-);
-
-fn fixed_chunk_index(
-    _parts: Parts,
-    _req_body: Body,
-    param: Value,
-    _info: &ApiMethod,
-    rpcenv: Box<dyn RpcEnvironment>,
-) -> ApiResponseFuture {
-
-    async move {
-        let env: &BackupEnvironment = rpcenv.as_ref();
-
-        let archive_name = tools::required_string_param(&param, "archive-name")?.to_owned();
-
-        if !archive_name.ends_with(".fidx") {
-            bail!("wrong archive extension: '{}'", archive_name);
-        }
-
-        let empty_response = {
-            Response::builder()
-                .status(StatusCode::OK)
-                .body(Body::empty())?
-        };
-
-        let last_backup = match &env.last_backup {
-            Some(info) => info,
-            None => return Ok(empty_response),
-        };
-
-        let mut path = last_backup.backup_dir.relative_path();
-        path.push(&archive_name);
-
-        let index = match env.datastore.open_fixed_reader(path) {
-            Ok(index) => index,
-            Err(_) => {
-                env.log(format!("there is no last backup for archive '{}'", archive_name));
-                return Ok(empty_response);
-            }
-        };
-
-        env.log(format!("download last backup index for archive '{}'", archive_name));
-
-        let count = index.index_count();
-        let image_size = index.index_bytes();
-        for pos in 0..count {
-            let digest = index.index_digest(pos).unwrap();
-            // Note: last chunk can be smaller
-            let start = (pos*index.chunk_size) as u64;
-            let mut end = start + index.chunk_size as u64;
-            if end > image_size { end = image_size; }
-            let size = (end - start) as u32;
-            env.register_chunk(*digest, size)?;
-        }
-
-        let reader = DigestListEncoder::new(Box::new(index));
-
-        let stream = WrappedReaderStream::new(reader);
-
-        // fixme: set size, content type?
-        let response = http::Response::builder()
-            .status(200)
-            .body(Body::wrap_stream(stream))?;
-
-        Ok(response)
+    env.log(format!("download '{}' from previous backup.", archive_name));
+    crate::api2::helpers::create_download_response(path).await
    }.boxed()
}

@@ -1,8 +1,8 @@
-use failure::*;
+use anyhow::{bail, Error};
use std::sync::{Arc, Mutex};
use std::collections::HashMap;

-use serde_json::Value;
+use serde_json::{json, Value};

use proxmox::tools::digest_to_hex;
use proxmox::tools::fs::{replace_file, CreateOptions};
@@ -47,12 +47,13 @@ struct FixedWriterState {
    chunk_count: u64,
    small_chunk_count: usize, // allow 0..1 small chunks (last chunk may be smaller)
    upload_stat: UploadStatistic,
+    incremental: bool,
}

struct SharedBackupState {
    finished: bool,
    uid_counter: usize,
-    file_counter: usize, // sucessfully uploaded files
+    file_counter: usize, // successfully uploaded files
    dynamic_writers: HashMap<usize, DynamicWriterState>,
    fixed_writers: HashMap<usize, FixedWriterState>,
    known_chunks: HashMap<[u8;32], u32>,
@@ -80,7 +81,7 @@ impl SharedBackupState {
#[derive(Clone)]
pub struct BackupEnvironment {
    env_type: RpcEnvironmentType,
-    result_attributes: HashMap<String, Value>,
+    result_attributes: Value,
    user: String,
    pub debug: bool,
    pub formatter: &'static OutputFormatter,
@@ -110,7 +111,7 @@ impl BackupEnvironment {
    };

    Self {
-        result_attributes: HashMap::new(),
+        result_attributes: json!({}),
        env_type,
        user,
        worker,
@@ -237,7 +238,7 @@ impl BackupEnvironment {
    }

    /// Store the writer with an unique ID
-    pub fn register_fixed_writer(&self, index: FixedIndexWriter, name: String, size: usize, chunk_size: u32) -> Result<usize, Error> {
+    pub fn register_fixed_writer(&self, index: FixedIndexWriter, name: String, size: usize, chunk_size: u32, incremental: bool) -> Result<usize, Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;
@@ -245,7 +246,7 @@ impl BackupEnvironment {
        let uid = state.next_uid();

        state.fixed_writers.insert(uid, FixedWriterState {
-            index, name, chunk_count: 0, size, chunk_size, small_chunk_count: 0, upload_stat: UploadStatistic::new(),
+            index, name, chunk_count: 0, size, chunk_size, small_chunk_count: 0, upload_stat: UploadStatistic::new(), incremental,
        });

        Ok(uid)
@@ -310,7 +311,13 @@ impl BackupEnvironment {

        self.log(format!("Upload size: {} ({}%)", upload_stat.size, (upload_stat.size*100)/size));

-        let client_side_duplicates = chunk_count - upload_stat.count;
+        // account for zero chunk, which might be uploaded but never used
+        let client_side_duplicates = if chunk_count < upload_stat.count {
+            0
+        } else {
+            chunk_count - upload_stat.count
+        };

        let server_side_duplicates = upload_stat.duplicates;

        if (client_side_duplicates + server_side_duplicates) > 0 {
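
The guarded subtraction added here avoids an unsigned underflow when the zero chunk was uploaded but never referenced. In current Rust the same intent can be written with `saturating_sub`; this tiny sketch shows the equivalent behavior, not the code the patch itself uses:

```rust
fn main() {
    let chunk_count: u64 = 5;
    let uploaded: u64 = 7; // the zero chunk may be uploaded but never referenced
    // equivalent to the if/else guard above: clamps at zero instead of panicking
    let client_side_duplicates = chunk_count.saturating_sub(uploaded);
    assert_eq!(client_side_duplicates, 0);
}
```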

@@ -373,21 +380,22 @@ impl BackupEnvironment {
            bail!("fixed writer '{}' close failed - received wrong number of chunk ({} != {})", data.name, data.chunk_count, chunk_count);
        }

-        let expected_count = data.index.index_length();
+        if !data.incremental {
+            let expected_count = data.index.index_length();

-        if chunk_count != (expected_count as u64) {
-            bail!("fixed writer '{}' close failed - unexpected chunk count ({} != {})", data.name, expected_count, chunk_count);
-        }
+            if chunk_count != (expected_count as u64) {
+                bail!("fixed writer '{}' close failed - unexpected chunk count ({} != {})", data.name, expected_count, chunk_count);
+            }

-        if size != (data.size as u64) {
-            bail!("fixed writer '{}' close failed - unexpected file size ({} != {})", data.name, data.size, size);
+            if size != (data.size as u64) {
+                bail!("fixed writer '{}' close failed - unexpected file size ({} != {})", data.name, data.size, size);
+            }
        }

        let uuid = data.index.uuid;

        let expected_csum = data.index.close()?;

-        println!("server checksum {:?} client: {:?}", expected_csum, csum);
+        println!("server checksum: {:?} client: {:?} (incremental: {})", expected_csum, csum, data.incremental);
        if csum != expected_csum {
            bail!("fixed writer '{}' close failed - got unexpected checksum", data.name);
        }
@@ -430,8 +438,6 @@ impl BackupEnvironment {

        state.ensure_unfinished()?;

-        state.finished = true;
-
        if state.dynamic_writers.len() != 0 {
            bail!("found open index writer - unable to finish backup");
        }
@@ -440,6 +446,8 @@ impl BackupEnvironment {
            bail!("backup does not contain valid files (file count == 0)");
        }

+        state.finished = true;
+
        Ok(())
    }

@@ -480,12 +488,12 @@ impl BackupEnvironment {

impl RpcEnvironment for BackupEnvironment {

-    fn set_result_attrib(&mut self, name: &str, value: Value) {
-        self.result_attributes.insert(name.into(), value);
+    fn result_attrib_mut(&mut self) -> &mut Value {
+        &mut self.result_attributes
    }

-    fn get_result_attrib(&self, name: &str) -> Option<&Value> {
-        self.result_attributes.get(name)
+    fn result_attrib(&self) -> &Value {
+        &self.result_attributes
    }

    fn env_type(&self) -> RpcEnvironmentType {
@@ -2,7 +2,7 @@ use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};

-use failure::*;
+use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::Body;
use hyper::http::request::Parts;
@@ -3,10 +3,12 @@ use proxmox::list_subdirs_api_method;

pub mod datastore;
pub mod remote;
+pub mod sync;

const SUBDIRS: SubdirMap = &[
    ("datastore", &datastore::ROUTER),
    ("remote", &remote::ROUTER),
+    ("sync", &sync::ROUTER),
];

pub const ROUTER: Router = Router::new()
@@ -1,13 +1,15 @@
use std::path::PathBuf;

-use failure::*;
+use anyhow::{bail, Error};
use serde_json::Value;
+use ::serde::{Deserialize, Serialize};

-use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
+use proxmox::api::{api, Router, RpcEnvironment, Permission};

use crate::api2::types::*;
use crate::backup::*;
-use crate::config::datastore;
+use crate::config::datastore::{self, DataStoreConfig, DIR_NAME_SCHEMA};
+use crate::config::acl::{PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY};

#[api(
    input: {
@@ -16,23 +18,32 @@ use crate::config::datastore;
    returns: {
        description: "List the configured datastores (with config digest).",
        type: Array,
-        items: {
-            type: datastore::DataStoreConfig,
-        },
+        items: { type: datastore::DataStoreConfig },
    },
+    access: {
+        permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_AUDIT, false),
+    },
)]
/// List all datastores
pub fn list_datastores(
    _param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
+    mut rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<DataStoreConfig>, Error> {

    let (config, digest) = datastore::config()?;

-    Ok(config.convert_to_array("name", Some(&digest), &[]))
+    let list = config.convert_to_typed_array("datastore")?;
+
+    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+
+    Ok(list)
}


// fixme: impl. const fn get_object_schema(datastore::DataStoreConfig::API_SCHEMA),
// but this need support for match inside const fn
// see: https://github.com/rust-lang/rust/issues/49146

#[api(
    protected: true,
    input: {
@@ -40,18 +51,53 @@ pub fn list_datastores(
            name: {
                schema: DATASTORE_SCHEMA,
            },
+            path: {
+                schema: DIR_NAME_SCHEMA,
+            },
            comment: {
                optional: true,
                schema: SINGLE_LINE_COMMENT_SCHEMA,
            },
-            path: {
-                schema: datastore::DIR_NAME_SCHEMA,
-            },
+            "gc-schedule": {
+                optional: true,
+                schema: GC_SCHEDULE_SCHEMA,
+            },
+            "prune-schedule": {
+                optional: true,
+                schema: PRUNE_SCHEDULE_SCHEMA,
+            },
+            "keep-last": {
+                optional: true,
+                schema: PRUNE_SCHEMA_KEEP_LAST,
+            },
+            "keep-hourly": {
+                optional: true,
+                schema: PRUNE_SCHEMA_KEEP_HOURLY,
+            },
+            "keep-daily": {
+                optional: true,
+                schema: PRUNE_SCHEMA_KEEP_DAILY,
+            },
+            "keep-weekly": {
+                optional: true,
+                schema: PRUNE_SCHEMA_KEEP_WEEKLY,
+            },
+            "keep-monthly": {
+                optional: true,
+                schema: PRUNE_SCHEMA_KEEP_MONTHLY,
+            },
+            "keep-yearly": {
+                optional: true,
+                schema: PRUNE_SCHEMA_KEEP_YEARLY,
+            },
        },
    },
+    access: {
+        permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_MODIFY, false),
+    },
)]
/// Create new datastore config.
-pub fn create_datastore(name: String, param: Value) -> Result<(), Error> {
+pub fn create_datastore(param: Value) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

@@ -59,16 +105,16 @@ pub fn create_datastore(name: String, param: Value) -> Result<(), Error> {

    let (mut config, _digest) = datastore::config()?;

-    if let Some(_) = config.sections.get(&name) {
-        bail!("datastore '{}' already exists.", name);
+    if let Some(_) = config.sections.get(&datastore.name) {
+        bail!("datastore '{}' already exists.", datastore.name);
    }

    let path: PathBuf = datastore.path.clone().into();

    let backup_user = crate::backup::backup_user()?;
-    let _store = ChunkStore::create(&name, path, backup_user.uid, backup_user.gid)?;
+    let _store = ChunkStore::create(&datastore.name, path, backup_user.uid, backup_user.gid)?;

-    config.set_data(&name, "datastore", &datastore)?;
+    config.set_data(&datastore.name, "datastore", &datastore)?;

    datastore::save_config(&config)?;

@@ -87,14 +133,47 @@ pub fn create_datastore(name: String, param: Value) -> Result<(), Error> {
        description: "The datastore configuration (with config digest).",
        type: datastore::DataStoreConfig,
    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_AUDIT, false),
+    },
)]
/// Read a datastore configuration.
-pub fn read_datastore(name: String) -> Result<Value, Error> {
+pub fn read_datastore(
+    name: String,
+    mut rpcenv: &mut dyn RpcEnvironment,
+) -> Result<DataStoreConfig, Error> {
    let (config, digest) = datastore::config()?;
-    let mut data = config.lookup_json("datastore", &name)?;
-    data.as_object_mut().unwrap()
-        .insert("digest".into(), proxmox::tools::digest_to_hex(&digest).into());
-    Ok(data)
+
+    let store_config = config.lookup("datastore", &name)?;
+    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+
+    Ok(store_config)
}

+#[api()]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all="kebab-case")]
+#[allow(non_camel_case_types)]
+/// Deletable property name
+pub enum DeletableProperty {
+    /// Delete the comment property.
+    comment,
+    /// Delete the garbage collection schedule.
+    gc_schedule,
+    /// Delete the prune job schedule.
+    prune_schedule,
+    /// Delete the keep-last property
+    keep_last,
+    /// Delete the keep-hourly property
+    keep_hourly,
+    /// Delete the keep-daily property
+    keep_daily,
+    /// Delete the keep-weekly property
+    keep_weekly,
+    /// Delete the keep-monthly property
+    keep_monthly,
+    /// Delete the keep-yearly property
+    keep_yearly,
+}

#[api(
@@ -108,17 +187,69 @@ pub fn read_datastore(name: String) -> Result<Value, Error> {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
+        "gc-schedule": {
+            optional: true,
+            schema: GC_SCHEDULE_SCHEMA,
+        },
+        "prune-schedule": {
+            optional: true,
+            schema: PRUNE_SCHEDULE_SCHEMA,
+        },
+        "keep-last": {
+            optional: true,
+            schema: PRUNE_SCHEMA_KEEP_LAST,
+        },
+        "keep-hourly": {
+            optional: true,
+            schema: PRUNE_SCHEMA_KEEP_HOURLY,
+        },
+        "keep-daily": {
+            optional: true,
+            schema: PRUNE_SCHEMA_KEEP_DAILY,
+        },
+        "keep-weekly": {
+            optional: true,
+            schema: PRUNE_SCHEMA_KEEP_WEEKLY,
+        },
+        "keep-monthly": {
+            optional: true,
+            schema: PRUNE_SCHEMA_KEEP_MONTHLY,
+        },
+        "keep-yearly": {
+            optional: true,
+            schema: PRUNE_SCHEMA_KEEP_YEARLY,
+        },
+        delete: {
+            description: "List of properties to delete.",
+            type: Array,
+            optional: true,
+            items: {
+                type: DeletableProperty,
+            }
+        },
        digest: {
            optional: true,
            schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
        },
    },
},
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_MODIFY, false),
+    },
)]
-/// Create new datastore config.
+/// Update datastore config.
pub fn update_datastore(
    name: String,
    comment: Option<String>,
+    gc_schedule: Option<String>,
+    prune_schedule: Option<String>,
+    keep_last: Option<u64>,
+    keep_hourly: Option<u64>,
+    keep_daily: Option<u64>,
+    keep_weekly: Option<u64>,
+    keep_monthly: Option<u64>,
+    keep_yearly: Option<u64>,
+    delete: Option<Vec<DeletableProperty>>,
    digest: Option<String>,
) -> Result<(), Error> {

@@ -134,6 +265,22 @@ pub fn update_datastore(

    let mut data: datastore::DataStoreConfig = config.lookup("datastore", &name)?;

+    if let Some(delete) = delete {
+        for delete_prop in delete {
+            match delete_prop {
+                DeletableProperty::comment => { data.comment = None; },
+                DeletableProperty::gc_schedule => { data.gc_schedule = None; },
+                DeletableProperty::prune_schedule => { data.prune_schedule = None; },
+                DeletableProperty::keep_last => { data.keep_last = None; },
+                DeletableProperty::keep_hourly => { data.keep_hourly = None; },
+                DeletableProperty::keep_daily => { data.keep_daily = None; },
+                DeletableProperty::keep_weekly => { data.keep_weekly = None; },
+                DeletableProperty::keep_monthly => { data.keep_monthly = None; },
+                DeletableProperty::keep_yearly => { data.keep_yearly = None; },
+            }
+        }
+    }
+
    if let Some(comment) = comment {
        let comment = comment.trim().to_string();
        if comment.is_empty() {
@@ -143,6 +290,16 @@ pub fn update_datastore(
        }
    }

+    if gc_schedule.is_some() { data.gc_schedule = gc_schedule; }
+    if prune_schedule.is_some() { data.prune_schedule = prune_schedule; }
+
+    if keep_last.is_some() { data.keep_last = keep_last; }
+    if keep_hourly.is_some() { data.keep_hourly = keep_hourly; }
+    if keep_daily.is_some() { data.keep_daily = keep_daily; }
+    if keep_weekly.is_some() { data.keep_weekly = keep_weekly; }
+    if keep_monthly.is_some() { data.keep_monthly = keep_monthly; }
+    if keep_yearly.is_some() { data.keep_yearly = keep_yearly; }
+
    config.set_data(&name, "datastore", &data)?;

    datastore::save_config(&config)?;
@@ -157,16 +314,27 @@ pub fn update_datastore(
            name: {
                schema: DATASTORE_SCHEMA,
            },
+            digest: {
+                optional: true,
+                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
+            },
        },
    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_MODIFY, false),
+    },
)]
/// Remove a datastore configuration.
-pub fn delete_datastore(name: String) -> Result<(), Error> {
+pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> {

-    // fixme: locking ?
-    // fixme: check digest ?
+    let _lock = crate::tools::open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

-    let (mut config, _digest) = datastore::config()?;
+    let (mut config, expected_digest) = datastore::config()?;

+    if let Some(ref digest) = digest {
+        let digest = proxmox::tools::hex_to_digest(digest)?;
+        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
+    }
+
    match config.sections.get(&name) {
        Some(_) => { config.sections.remove(&name); },
@@ -1,10 +1,13 @@
-use failure::*;
+use anyhow::{bail, Error};
use serde_json::Value;
+use ::serde::{Deserialize, Serialize};
use base64;

-use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
+use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};

use crate::api2::types::*;
use crate::config::remote;
+use crate::config::acl::{PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY};

#[api(
    input: {
@@ -14,42 +17,32 @@ use crate::config::remote;
        description: "The list of configured remotes (with config digest).",
        type: Array,
        items: {
-            type: Object,
+            type: remote::Remote,
            description: "Remote configuration (without password).",
-            properties: {
-                name: {
-                    schema: REMOTE_ID_SCHEMA,
-                },
-                comment: {
-                    optional: true,
-                    schema: SINGLE_LINE_COMMENT_SCHEMA,
-                },
-                host: {
-                    schema: DNS_NAME_OR_IP_SCHEMA,
-                },
-                userid: {
-                    schema: PROXMOX_USER_ID_SCHEMA,
-                },
-                fingerprint: {
-                    optional: true,
-                    schema: CERT_FINGERPRINT_SHA256_SCHEMA,
-                },
-            },
        },
    },
+    access: {
+        permission: &Permission::Privilege(&["remote"], PRIV_REMOTE_AUDIT, false),
+    },
)]
/// List all remotes
pub fn list_remotes(
    _param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
+    mut rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<remote::Remote>, Error> {

    let (config, digest) = remote::config()?;

-    let value = config.convert_to_array("name", Some(&digest), &["password"]);
+    let mut list: Vec<remote::Remote> = config.convert_to_typed_array("remote")?;

-    Ok(value.into())
+    // don't return password in api
+    for remote in &mut list {
+        remote.password = "".to_string();
+    }
+
+    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+    Ok(list)
}

#[api(
@@ -78,21 +71,26 @@ pub fn list_remotes(
            },
        },
    },
+    access: {
+        permission: &Permission::Privilege(&["remote"], PRIV_REMOTE_MODIFY, false),
+    },
)]
/// Create new remote.
-pub fn create_remote(name: String, param: Value) -> Result<(), Error> {
+pub fn create_remote(password: String, param: Value) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

-    let remote: remote::Remote = serde_json::from_value(param.clone())?;
+    let mut data = param.clone();
+    data["password"] = Value::from(base64::encode(password.as_bytes()));
+    let remote: remote::Remote = serde_json::from_value(data)?;

    let (mut config, _digest) = remote::config()?;

-    if let Some(_) = config.sections.get(&name) {
-        bail!("remote '{}' already exists.", name);
+    if let Some(_) = config.sections.get(&remote.name) {
+        bail!("remote '{}' already exists.", remote.name);
    }

-    config.set_data(&name, "remote", &remote)?;
+    config.set_data(&remote.name, "remote", &remote)?;

    remote::save_config(&config)?;

@@ -111,16 +109,34 @@ pub fn create_remote(name: String, param: Value) -> Result<(), Error> {
        description: "The remote configuration (with config digest).",
        type: remote::Remote,
    },
+    access: {
+        permission: &Permission::Privilege(&["remote", "{name}"], PRIV_REMOTE_AUDIT, false),
+    }
)]
/// Read remote configuration data.
-pub fn read_remote(name: String) -> Result<Value, Error> {
+pub fn read_remote(
+    name: String,
+    _info: &ApiMethod,
+    mut rpcenv: &mut dyn RpcEnvironment,
+) -> Result<remote::Remote, Error> {
    let (config, digest) = remote::config()?;
-    let mut data = config.lookup_json("remote", &name)?;
-    data.as_object_mut().unwrap()
-        .insert("digest".into(), proxmox::tools::digest_to_hex(&digest).into());
+    let mut data: remote::Remote = config.lookup("remote", &name)?;
+    data.password = "".to_string(); // do not return password in api
+    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
    Ok(data)
}

+#[api()]
+#[derive(Serialize, Deserialize)]
+#[allow(non_camel_case_types)]
+/// Deletable property name
+pub enum DeletableProperty {
+    /// Delete the comment property.
+    comment,
+    /// Delete the fingerprint property.
+    fingerprint,
+}

#[api(
    protected: true,
    input: {
@@ -148,12 +164,23 @@ pub fn read_remote(name: String) -> Result<Value, Error> {
            optional: true,
            schema: CERT_FINGERPRINT_SHA256_SCHEMA,
        },
+        delete: {
+            description: "List of properties to delete.",
+            type: Array,
+            optional: true,
+            items: {
+                type: DeletableProperty,
+            }
+        },
        digest: {
            optional: true,
            schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
        },
    },
},
+    access: {
+        permission: &Permission::Privilege(&["remote", "{name}"], PRIV_REMOTE_MODIFY, false),
+    },
)]
/// Update remote configuration.
pub fn update_remote(
@@ -163,6 +190,7 @@ pub fn update_remote(
    userid: Option<String>,
    password: Option<String>,
    fingerprint: Option<String>,
+    delete: Option<Vec<DeletableProperty>>,
    digest: Option<String>,
) -> Result<(), Error> {

@@ -177,6 +205,15 @@ pub fn update_remote(

    let mut data: remote::Remote = config.lookup("remote", &name)?;

+    if let Some(delete) = delete {
+        for delete_prop in delete {
+            match delete_prop {
+                DeletableProperty::comment => { data.comment = None; },
+                DeletableProperty::fingerprint => { data.fingerprint = None; },
+            }
+        }
+    }
+
    if let Some(comment) = comment {
        let comment = comment.trim().to_string();
        if comment.is_empty() {
@@ -189,7 +226,6 @@ pub fn update_remote(
    if let Some(userid) = userid { data.userid = userid; }
    if let Some(password) = password { data.password = password; }

-    // fixme: howto delete a fingeprint?
    if let Some(fingerprint) = fingerprint { data.fingerprint = Some(fingerprint); }

    config.set_data(&name, "remote", &data)?;
@@ -206,22 +242,35 @@ pub fn update_remote(
            name: {
                schema: REMOTE_ID_SCHEMA,
            },
+            digest: {
+                optional: true,
+                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
+            },
        },
    },
+    access: {
+        permission: &Permission::Privilege(&["remote", "{name}"], PRIV_REMOTE_MODIFY, false),
+    },
)]
/// Remove a remote from the configuration file.
-pub fn delete_remote(name: String) -> Result<(), Error> {
+pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error> {

-    // fixme: locking ?
-    // fixme: check digest ?
+    let _lock = crate::tools::open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

-    let (mut config, _digest) = remote::config()?;
+    let (mut config, expected_digest) = remote::config()?;

+    if let Some(ref digest) = digest {
+        let digest = proxmox::tools::hex_to_digest(digest)?;
+        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
+    }
+
    match config.sections.get(&name) {
        Some(_) => { config.sections.remove(&name); },
        None => bail!("remote '{}' does not exist.", name),
    }

    remote::save_config(&config)?;

    Ok(())
}
src/api2/config/sync.rs (new file, 277 lines)
@@ -0,0 +1,277 @@
use anyhow::{bail, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};

use proxmox::api::{api, Router, RpcEnvironment};

use crate::api2::types::*;
use crate::config::sync::{self, SyncJobConfig};

// fixme: add access permissions

#[api(
    input: {
        properties: {},
    },
    returns: {
        description: "List configured jobs.",
        type: Array,
        items: { type: sync::SyncJobConfig },
    },
)]
/// List all sync jobs
pub fn list_sync_jobs(
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SyncJobConfig>, Error> {

    let (config, digest) = sync::config()?;

    let list = config.convert_to_typed_array("sync")?;

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
}

#[api(
    protected: true,
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
            remote: {
                schema: REMOTE_ID_SCHEMA,
            },
            "remote-store": {
                schema: DATASTORE_SCHEMA,
            },
            "remove-vanished": {
                schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
                optional: true,
            },
            comment: {
                optional: true,
                schema: SINGLE_LINE_COMMENT_SCHEMA,
            },
            schedule: {
                optional: true,
                schema: SYNC_SCHEDULE_SCHEMA,
            },
        },
    },
)]
/// Create a new sync job.
pub fn create_sync_job(param: Value) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

    let sync_job: sync::SyncJobConfig = serde_json::from_value(param.clone())?;

    let (mut config, _digest) = sync::config()?;

    if let Some(_) = config.sections.get(&sync_job.id) {
        bail!("job '{}' already exists.", sync_job.id);
    }

    config.set_data(&sync_job.id, "sync", &sync_job)?;

    sync::save_config(&config)?;

    Ok(())
}

#[api(
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
        },
    },
    returns: {
        description: "The sync job configuration.",
        type: sync::SyncJobConfig,
    },
)]
/// Read a sync job configuration.
pub fn read_sync_job(
    id: String,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<SyncJobConfig, Error> {
    let (config, digest) = sync::config()?;

    let sync_job = config.lookup("sync", &id)?;
    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(sync_job)
}

#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
    /// Delete the comment property.
    comment,
    /// Delete the job schedule.
    schedule,
    /// Delete the remove-vanished flag.
    remove_vanished,
}

#[api(
    protected: true,
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
            store: {
                schema: DATASTORE_SCHEMA,
                optional: true,
            },
            remote: {
                schema: REMOTE_ID_SCHEMA,
                optional: true,
            },
            "remote-store": {
                schema: DATASTORE_SCHEMA,
                optional: true,
            },
            "remove-vanished": {
                schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
                optional: true,
            },
            comment: {
                optional: true,
                schema: SINGLE_LINE_COMMENT_SCHEMA,
            },
            schedule: {
                optional: true,
                schema: SYNC_SCHEDULE_SCHEMA,
            },
            delete: {
                description: "List of properties to delete.",
                type: Array,
                optional: true,
                items: {
                    type: DeletableProperty,
                }
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
)]
/// Update sync job config.
pub fn update_sync_job(
    id: String,
    store: Option<String>,
    remote: Option<String>,
    remote_store: Option<String>,
    remove_vanished: Option<bool>,
    comment: Option<String>,
    schedule: Option<String>,
    delete: Option<Vec<DeletableProperty>>,
    digest: Option<String>,
) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

    // pass/compare digest
    let (mut config, expected_digest) = sync::config()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    let mut data: sync::SyncJobConfig = config.lookup("sync", &id)?;

    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::comment => { data.comment = None; },
                DeletableProperty::schedule => { data.schedule = None; },
                DeletableProperty::remove_vanished => { data.remove_vanished = None; },
            }
        }
    }

    if let Some(comment) = comment {
        let comment = comment.trim().to_string();
        if comment.is_empty() {
            data.comment = None;
        } else {
            data.comment = Some(comment);
        }
    }

    if let Some(store) = store { data.store = store; }
    if let Some(remote) = remote { data.remote = remote; }
    if let Some(remote_store) = remote_store { data.remote_store = remote_store; }

    if schedule.is_some() { data.schedule = schedule; }
    if remove_vanished.is_some() { data.remove_vanished = remove_vanished; }

    config.set_data(&id, "sync", &data)?;

    sync::save_config(&config)?;

    Ok(())
}
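
This `delete` handling repeats a pattern used across these config APIs: first reset every property named in the delete list to `None`, then apply the remaining updates. Reduced to a self-contained example with made-up `Config`/`Deletable` types:

```rust
#[derive(Default)]
struct Config { comment: Option<String>, schedule: Option<String> }

enum Deletable { Comment, Schedule }

// Clear the named optional properties before applying other updates.
fn apply_deletes(data: &mut Config, delete: Option<Vec<Deletable>>) {
    if let Some(delete) = delete {
        for prop in delete {
            match prop {
                Deletable::Comment => data.comment = None,
                Deletable::Schedule => data.schedule = None,
            }
        }
    }
}

fn main() {
    let mut cfg = Config { comment: Some("old".into()), schedule: Some("daily".into()) };
    apply_deletes(&mut cfg, Some(vec![Deletable::Comment]));
    assert!(cfg.comment.is_none());
    assert!(cfg.schedule.is_some());
}
```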

#[api(
    protected: true,
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
)]
/// Remove a sync job configuration
pub fn delete_sync_job(id: String, digest: Option<String>) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

    let (mut config, expected_digest) = sync::config()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    match config.sections.get(&id) {
        Some(_) => { config.sections.remove(&id); },
        None => bail!("job '{}' does not exist.", id),
    }

    sync::save_config(&config)?;

    Ok(())
}

const ITEM_ROUTER: Router = Router::new()
    .get(&API_METHOD_READ_SYNC_JOB)
    .put(&API_METHOD_UPDATE_SYNC_JOB)
    .delete(&API_METHOD_DELETE_SYNC_JOB);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_SYNC_JOBS)
    .post(&API_METHOD_CREATE_SYNC_JOB)
    .match_all("id", &ITEM_ROUTER);
src/api2/helpers.rs (new file, 23 lines)
@@ -0,0 +1,23 @@
use std::path::PathBuf;
use anyhow::Error;
use futures::*;
use hyper::{Body, Response, StatusCode, header};
use proxmox::http_err;

pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, Error> {
    let file = tokio::fs::File::open(path.clone())
        .map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path.clone(), err)))
        .await?;

    let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));

    let body = Body::wrap_stream(payload);

    // fixme: set other headers ?
    Ok(Response::builder()
        .status(StatusCode::OK)
        .header(header::CONTENT_TYPE, "application/octet-stream")
        .body(body)
        .unwrap())
}
|
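A minimal sketch of how a handler might call this new helper; the function name and the file path are illustrative assumptions, not part of the changeset:

    // Hypothetical caller (not in the diff): stream a file back to the client.
    async fn download_example() -> Result<hyper::Response<hyper::Body>, anyhow::Error> {
        // Illustrative path; any readable file works here.
        let path = std::path::PathBuf::from("/var/log/proxmox-backup/example.log");
        crate::api2::helpers::create_download_response(path).await
    }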
@@ -3,17 +3,21 @@ use proxmox::list_subdirs_api_method;

pub mod tasks;
mod time;
-mod network;
+pub mod network;
pub mod dns;
mod syslog;
mod journal;
mod services;
mod status;
+pub(crate) mod rrd;
+pub mod disks;

pub const SUBDIRS: SubdirMap = &[
+    ("disks", &disks::ROUTER),
    ("dns", &dns::ROUTER),
    ("journal", &journal::ROUTER),
    ("network", &network::ROUTER),
+    ("rrd", &rrd::ROUTER),
    ("services", &services::ROUTER),
    ("status", &status::ROUTER),
    ("syslog", &syslog::ROUTER),
src/api2/node/disks.rs (new file, 188 lines)
@@ -0,0 +1,188 @@
use anyhow::{bail, Error};
use serde_json::{json, Value};

use proxmox::api::{api, Permission, RpcEnvironment, RpcEnvironmentType};
use proxmox::api::router::{Router, SubdirMap};
use proxmox::{sortable, identity};
use proxmox::{list_subdirs_api_method};

use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::tools::disks::{
    DiskUsageInfo, DiskUsageType, DiskManage, SmartData,
    get_disks, get_smart_data, get_disk_usage_info, inititialize_gpt_disk,
};
use crate::server::WorkerTask;

use crate::api2::types::{UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};

pub mod directory;
pub mod zfs;

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            skipsmart: {
                description: "Skip smart checks.",
                type: bool,
                optional: true,
                default: false,
            },
            "usage-type": {
                type: DiskUsageType,
                optional: true,
            },
        },
    },
    returns: {
        description: "Local disk list.",
        type: Array,
        items: {
            type: DiskUsageInfo,
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
    },
)]
/// List local disks
pub fn list_disks(
    skipsmart: bool,
    usage_type: Option<DiskUsageType>,
) -> Result<Vec<DiskUsageInfo>, Error> {

    let mut list = Vec::new();

    for (_, info) in get_disks(None, skipsmart)? {
        if let Some(ref usage_type) = usage_type {
            if info.used == *usage_type {
                list.push(info);
            }
        } else {
            list.push(info);
        }
    }

    Ok(list)
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            disk: {
                schema: BLOCKDEVICE_NAME_SCHEMA,
            },
            healthonly: {
                description: "If true returns only the health status.",
                type: bool,
                optional: true,
            },
        },
    },
    returns: {
        type: SmartData,
    },
    access: {
        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
    },
)]
/// Get SMART attributes and health of a disk.
pub fn smart_status(
    disk: String,
    healthonly: Option<bool>,
) -> Result<SmartData, Error> {

    let healthonly = healthonly.unwrap_or(false);

    let manager = DiskManage::new();
    let disk = manager.disk_by_name(&disk)?;
    get_smart_data(&disk, healthonly)
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            disk: {
                schema: BLOCKDEVICE_NAME_SCHEMA,
            },
            uuid: {
                description: "UUID for the GPT table.",
                type: String,
                optional: true,
                max_length: 36,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
    },
)]
/// Initialize empty Disk with GPT
pub fn initialize_disk(
    disk: String,
    uuid: Option<String>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

    let username = rpcenv.get_user().unwrap();

    let info = get_disk_usage_info(&disk, true)?;

    if info.used != DiskUsageType::Unused {
        bail!("disk '{}' is already in use.", disk);
    }

    let upid_str = WorkerTask::new_thread(
        "diskinit", Some(disk.clone()), &username.clone(), to_stdout, move |worker|
        {
            worker.log(format!("initialize disk {}", disk));

            let disk_manager = DiskManage::new();
            let disk_info = disk_manager.disk_by_name(&disk)?;

            inititialize_gpt_disk(&disk_info, uuid.as_deref())?;

            Ok(())
        })?;

    Ok(json!(upid_str))
}

#[sortable]
const SUBDIRS: SubdirMap = &sorted!([
//    ("lvm", &lvm::ROUTER),
    ("directory", &directory::ROUTER),
    ("zfs", &zfs::ROUTER),
    (
        "initgpt", &Router::new()
            .post(&API_METHOD_INITIALIZE_DISK)
    ),
    (
        "list", &Router::new()
            .get(&API_METHOD_LIST_DISKS)
    ),
    (
        "smart", &Router::new()
            .get(&API_METHOD_SMART_STATUS)
    ),
]);

pub const ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SUBDIRS))
    .subdirs(SUBDIRS);
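For illustration (not part of the changeset), the list handler can be exercised directly; the wrapper function is hypothetical, while `list_disks` and `DiskUsageType` come from the file above:

    // Hypothetical direct call: list only unused disks, skipping SMART checks —
    // the same filter the `usage_type` parameter implements.
    fn example_unused_disks() -> Result<(), anyhow::Error> {
        let unused = list_disks(true, Some(DiskUsageType::Unused))?;
        assert!(unused.iter().all(|info| info.used == DiskUsageType::Unused));
        Ok(())
    }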
src/api2/node/disks/directory.rs (new file, 221 lines)
@@ -0,0 +1,221 @@
use anyhow::{bail, Error};
use serde_json::json;
use ::serde::{Deserialize, Serialize};

use proxmox::api::{api, Permission, RpcEnvironment, RpcEnvironmentType};
use proxmox::api::section_config::SectionConfigData;
use proxmox::api::router::Router;

use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::tools::disks::{
    DiskManage, FileSystemType, DiskUsageType,
    create_file_system, create_single_linux_partition, get_fs_uuid, get_disk_usage_info,
};
use crate::tools::systemd::{self, types::*};

use crate::server::WorkerTask;

use crate::api2::types::*;

#[api(
    properties: {
        "filesystem": {
            type: FileSystemType,
            optional: true,
        },
    },
)]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
/// Datastore mount info.
pub struct DatastoreMountInfo {
    /// The path of the mount unit.
    pub unitfile: String,
    /// The mount path.
    pub path: String,
    /// The mounted device.
    pub device: String,
    /// File system type
    pub filesystem: Option<String>,
    /// Mount options
    pub options: Option<String>,
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
        }
    },
    returns: {
        description: "List of systemd datastore mount units.",
        type: Array,
        items: {
            type: DatastoreMountInfo,
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
    },
)]
/// List systemd datastore mount units.
pub fn list_datastore_mounts() -> Result<Vec<DatastoreMountInfo>, Error> {

    lazy_static::lazy_static! {
        static ref MOUNT_NAME_REGEX: regex::Regex = regex::Regex::new(r"^mnt-datastore-(.+)\.mount$").unwrap();
    }

    let mut list = Vec::new();

    let basedir = "/etc/systemd/system";
    for item in crate::tools::fs::scan_subdir(libc::AT_FDCWD, basedir, &MOUNT_NAME_REGEX)? {
        let item = item?;
        let name = item.file_name().to_string_lossy().to_string();

        let unitfile = format!("{}/{}", basedir, name);
        let config = systemd::config::parse_systemd_mount(&unitfile)?;
        let data: SystemdMountSection = config.lookup("Mount", "Mount")?;

        list.push(DatastoreMountInfo {
            unitfile,
            device: data.What,
            path: data.Where,
            filesystem: data.Type,
            options: data.Options,
        });
    }

    Ok(list)
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            name: {
                schema: DATASTORE_SCHEMA,
            },
            disk: {
                schema: BLOCKDEVICE_NAME_SCHEMA,
            },
            "add-datastore": {
                description: "Configure a datastore using the directory.",
                type: bool,
                optional: true,
            },
            filesystem: {
                type: FileSystemType,
                optional: true,
            },
        }
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
    },
)]
/// Create a Filesystem on an unused disk. Will be mounted under '/mnt/datastore/<name>'.
pub fn create_datastore_disk(
    name: String,
    disk: String,
    add_datastore: Option<bool>,
    filesystem: Option<FileSystemType>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

    let username = rpcenv.get_user().unwrap();

    let info = get_disk_usage_info(&disk, true)?;

    if info.used != DiskUsageType::Unused {
        bail!("disk '{}' is already in use.", disk);
    }

    let upid_str = WorkerTask::new_thread(
        "dircreate", Some(name.clone()), &username.clone(), to_stdout, move |worker|
        {
            worker.log(format!("create datastore '{}' on disk {}", name, disk));

            let add_datastore = add_datastore.unwrap_or(false);
            let filesystem = filesystem.unwrap_or(FileSystemType::Ext4);

            let manager = DiskManage::new();

            let disk = manager.clone().disk_by_name(&disk)?;

            let partition = create_single_linux_partition(&disk)?;
            create_file_system(&partition, filesystem)?;

            let uuid = get_fs_uuid(&partition)?;
            let uuid_path = format!("/dev/disk/by-uuid/{}", uuid);

            let (mount_unit_name, mount_point) = create_datastore_mount_unit(&name, filesystem, &uuid_path)?;

            systemd::reload_daemon()?;
            systemd::enable_unit(&mount_unit_name)?;
            systemd::start_unit(&mount_unit_name)?;

            if add_datastore {
                crate::api2::config::datastore::create_datastore(json!({ "name": name, "path": mount_point }))?
            }

            Ok(())
        })?;

    Ok(upid_str)
}

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_DATASTORE_MOUNTS)
    .post(&API_METHOD_CREATE_DATASTORE_DISK);


fn create_datastore_mount_unit(
    datastore_name: &str,
    fs_type: FileSystemType,
    what: &str,
) -> Result<(String, String), Error> {

    let mount_point = format!("/mnt/datastore/{}", datastore_name);
    let mut mount_unit_name = systemd::escape_unit(&mount_point, true);
    mount_unit_name.push_str(".mount");

    let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);

    let unit = SystemdUnitSection {
        Description: format!("Mount datastore '{}' under '{}'", datastore_name, mount_point),
        ..Default::default()
    };

    let install = SystemdInstallSection {
        WantedBy: Some(vec!["multi-user.target".to_string()]),
        ..Default::default()
    };

    let mount = SystemdMountSection {
        What: what.to_string(),
        Where: mount_point.clone(),
        Type: Some(fs_type.to_string()),
        Options: Some(String::from("defaults")),
        ..Default::default()
    };

    let mut config = SectionConfigData::new();
    config.set_data("Unit", "Unit", unit)?;
    config.set_data("Install", "Install", install)?;
    config.set_data("Mount", "Mount", mount)?;

    systemd::config::save_systemd_mount(&mount_unit_path, &config)?;

    Ok((mount_unit_name, mount_point))
}
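For orientation, here is an illustrative sketch of the unit file this function writes for a hypothetical datastore "store1" (the UUID is made up; the field values map one-to-one onto the Unit/Install/Mount sections built above, and the resulting unit name is exactly what the MOUNT_NAME_REGEX in list_datastore_mounts matches):

    # /etc/systemd/system/mnt-datastore-store1.mount (illustrative)
    [Unit]
    Description=Mount datastore 'store1' under '/mnt/datastore/store1'

    [Install]
    WantedBy=multi-user.target

    [Mount]
    What=/dev/disk/by-uuid/0000-EXAMPLE
    Where=/mnt/datastore/store1
    Type=ext4
    Options=defaults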
src/api2/node/disks/zfs.rs (new file, 380 lines)
@@ -0,0 +1,380 @@
use anyhow::{bail, Error};
use serde_json::{json, Value};
use ::serde::{Deserialize, Serialize};

use proxmox::api::{
    api, Permission, RpcEnvironment, RpcEnvironmentType,
    schema::{
        Schema,
        StringSchema,
        ArraySchema,
        IntegerSchema,
        ApiStringFormat,
        parse_property_string,
    },
};
use proxmox::api::router::Router;

use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::tools::disks::{
    zpool_list, zpool_status, parse_zpool_status_config_tree, vdev_list_to_tree,
    DiskUsageType,
};

use crate::server::WorkerTask;

use crate::api2::types::*;

pub const DISK_ARRAY_SCHEMA: Schema = ArraySchema::new(
    "Disk name list.", &BLOCKDEVICE_NAME_SCHEMA)
    .schema();

pub const DISK_LIST_SCHEMA: Schema = StringSchema::new(
    "A list of disk names, comma separated.")
    .format(&ApiStringFormat::PropertyString(&DISK_ARRAY_SCHEMA))
    .schema();

pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new(
    "Pool sector size exponent.")
    .minimum(9)
    .maximum(16)
    .default(12)
    .schema();


#[api(
    default: "On",
)]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The ZFS compression algorithm to use.
pub enum ZfsCompressionType {
    /// Gnu Zip
    Gzip,
    /// LZ4
    Lz4,
    /// LZJB
    Lzjb,
    /// ZLE
    Zle,
    /// Enable compression using the default algorithm.
    On,
    /// Disable compression.
    Off,
}

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The ZFS RAID level to use.
pub enum ZfsRaidLevel {
    /// Single Disk
    Single,
    /// Mirror
    Mirror,
    /// Raid10
    Raid10,
    /// RaidZ
    RaidZ,
    /// RaidZ2
    RaidZ2,
    /// RaidZ3
    RaidZ3,
}


#[api()]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
/// zpool list item
pub struct ZpoolListItem {
    /// zpool name
    pub name: String,
    /// Health
    pub health: String,
    /// Total size
    pub size: u64,
    /// Used size
    pub alloc: u64,
    /// Free space
    pub free: u64,
    /// ZFS fragmentation level
    pub frag: u64,
    /// ZFS deduplication ratio
    pub dedup: f64,
}


#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
        },
    },
    returns: {
        description: "List of zpools.",
        type: Array,
        items: {
            type: ZpoolListItem,
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
    },
)]
/// List zfs pools.
pub fn list_zpools() -> Result<Vec<ZpoolListItem>, Error> {

    let data = zpool_list(None, false)?;

    let mut list = Vec::new();

    for item in data {
        if let Some(usage) = item.usage {
            list.push(ZpoolListItem {
                name: item.name,
                health: item.health,
                size: usage.size,
                alloc: usage.alloc,
                free: usage.free,
                frag: usage.frag,
                dedup: usage.dedup,
            });
        }
    }

    Ok(list)
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            name: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        description: "zpool vdev tree with status",
        properties: {

        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
    },
)]
/// Get zpool status details.
pub fn zpool_details(
    name: String,
) -> Result<Value, Error> {

    let key_value_list = zpool_status(&name)?;

    let config = match key_value_list.iter().find(|(k, _)| k == "config") {
        Some((_, v)) => v,
        None => bail!("got zpool status without config key"),
    };

    let vdev_list = parse_zpool_status_config_tree(config)?;
    let mut tree = vdev_list_to_tree(&vdev_list)?;

    for (k, v) in key_value_list {
        if k != "config" {
            tree[k] = v.into();
        }
    }

    tree["name"] = tree.as_object_mut().unwrap()
        .remove("pool")
        .unwrap_or_else(|| name.into());

    Ok(tree)
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            name: {
                schema: DATASTORE_SCHEMA,
            },
            devices: {
                schema: DISK_LIST_SCHEMA,
            },
            raidlevel: {
                type: ZfsRaidLevel,
            },
            ashift: {
                schema: ZFS_ASHIFT_SCHEMA,
                optional: true,
            },
            compression: {
                type: ZfsCompressionType,
                optional: true,
            },
            "add-datastore": {
                description: "Configure a datastore using the zpool.",
                type: bool,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
    },
)]
/// Create a new ZFS pool.
pub fn create_zpool(
    name: String,
    devices: String,
    raidlevel: ZfsRaidLevel,
    compression: Option<String>,
    ashift: Option<usize>,
    add_datastore: Option<bool>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

    let username = rpcenv.get_user().unwrap();

    let add_datastore = add_datastore.unwrap_or(false);

    let ashift = ashift.unwrap_or(12);

    let devices_text = devices.clone();
    let devices = parse_property_string(&devices, &DISK_ARRAY_SCHEMA)?;
    let devices: Vec<String> = devices.as_array().unwrap().iter()
        .map(|v| v.as_str().unwrap().to_string()).collect();

    let disk_map = crate::tools::disks::get_disks(None, true)?;
    for disk in devices.iter() {
        match disk_map.get(disk) {
            Some(info) => {
                if info.used != DiskUsageType::Unused {
                    bail!("disk '{}' is already in use.", disk);
                }
            }
            None => {
                bail!("no such disk '{}'", disk);
            }
        }
    }

    let min_disks = match raidlevel {
        ZfsRaidLevel::Single => 1,
        ZfsRaidLevel::Mirror => 2,
        ZfsRaidLevel::Raid10 => 4,
        ZfsRaidLevel::RaidZ => 3,
        ZfsRaidLevel::RaidZ2 => 4,
        ZfsRaidLevel::RaidZ3 => 5,
    };

    // Sanity checks
    if raidlevel == ZfsRaidLevel::Raid10 && devices.len() % 2 != 0 {
        bail!("Raid10 needs an even number of disks.");
    }

    if raidlevel == ZfsRaidLevel::Single && devices.len() > 1 {
        bail!("Please give only one disk for single disk mode.");
    }

    if devices.len() < min_disks {
        bail!("{:?} needs at least {} disks.", raidlevel, min_disks);
    }

    // check if the default path does exist already and bail if it does
    // otherwise we get an error on mounting
    let mut default_path = std::path::PathBuf::from("/");
    default_path.push(&name);

    match std::fs::metadata(&default_path) {
        Err(_) => {}, // path does not exist
        Ok(_) => {
            bail!("path {:?} already exists", default_path);
        }
    }

    let upid_str = WorkerTask::new_thread(
        "zfscreate", Some(name.clone()), &username.clone(), to_stdout, move |worker|
        {
            worker.log(format!("create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text));

            let mut command = std::process::Command::new("zpool");
            command.args(&["create", "-o", &format!("ashift={}", ashift), &name]);

            match raidlevel {
                ZfsRaidLevel::Single => {
                    command.arg(&devices[0]);
                }
                ZfsRaidLevel::Mirror => {
                    command.arg("mirror");
                    command.args(devices);
                }
                ZfsRaidLevel::Raid10 => {
                    devices.chunks(2).for_each(|pair| {
                        command.arg("mirror");
                        command.args(pair);
                    });
                }
                ZfsRaidLevel::RaidZ => {
                    command.arg("raidz");
                    command.args(devices);
                }
                ZfsRaidLevel::RaidZ2 => {
                    command.arg("raidz2");
                    command.args(devices);
                }
                ZfsRaidLevel::RaidZ3 => {
                    command.arg("raidz3");
                    command.args(devices);
                }
            }

            worker.log(format!("# {:?}", command));

            let output = crate::tools::run_command(command, None)?;
            worker.log(output);

            if let Some(compression) = compression {
                let mut command = std::process::Command::new("zfs");
                command.args(&["set", &format!("compression={}", compression), &name]);
                worker.log(format!("# {:?}", command));
                let output = crate::tools::run_command(command, None)?;
                worker.log(output);
            }

            if add_datastore {
                let mount_point = format!("/{}", name);
                crate::api2::config::datastore::create_datastore(json!({ "name": name, "path": mount_point }))?
            }

            Ok(())
        })?;

    Ok(upid_str)
}

pub const POOL_ROUTER: Router = Router::new()
    .get(&API_METHOD_ZPOOL_DETAILS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_ZPOOLS)
    .post(&API_METHOD_CREATE_ZPOOL)
    .match_all("name", &POOL_ROUTER);
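To make the Raid10 vdev grouping concrete, here is a sketch with made-up disk and pool names (not part of the changeset) that replays the `chunks(2)` logic from create_zpool: four disks become two mirror vdevs.

    // Illustrative only: replays the Raid10 argument construction above.
    fn example_raid10_args() {
        let devices = ["sda", "sdb", "sdc", "sdd"];
        let mut args: Vec<String> = vec!["create".into(), "-o".into(), "ashift=12".into(), "tank".into()];
        for pair in devices.chunks(2) {
            args.push("mirror".into());
            args.extend(pair.iter().map(|d| d.to_string()));
        }
        // Spells out: zpool create -o ashift=12 tank mirror sda sdb mirror sdc sdd
        assert_eq!(args.join(" "), "create -o ashift=12 tank mirror sda sdb mirror sdc sdd");
    }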
@@ -1,21 +1,34 @@
use std::sync::{Arc, Mutex};

-use failure::*;
+use anyhow::{Error};
use lazy_static::lazy_static;
use openssl::sha;
use regex::Regex;
use serde_json::{json, Value};
+use ::serde::{Deserialize, Serialize};

-use proxmox::{sortable, identity};
-use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment};
-use proxmox::api::schema::*;
+use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
use proxmox::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};

use crate::api2::types::*;
+use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};

static RESOLV_CONF_FN: &str = "/etc/resolv.conf";

+#[api()]
+#[derive(Serialize, Deserialize)]
+#[allow(non_camel_case_types)]
+/// Deletable property name
+pub enum DeletableProperty {
+    /// Delete first nameserver entry
+    dns1,
+    /// Delete second nameserver entry
+    dns2,
+    /// Delete third nameserver entry
+    dns3,
+}

pub fn read_etc_resolv_conf() -> Result<Value, Error> {

    let mut result = json!({});
@@ -34,6 +47,8 @@ pub fn read_etc_resolv_conf() -> Result<Value, Error> {
        concat!(r"^\s*nameserver\s+(", IPRE!(), r")\s*")).unwrap();
    }

+    let mut options = String::new();
+
    for line in data.lines() {

        if let Some(caps) = DOMAIN_REGEX.captures(&line) {
@@ -44,16 +59,69 @@ pub fn read_etc_resolv_conf() -> Result<Value, Error> {
            let nameserver = &caps[1];
            let id = format!("dns{}", nscount);
            result[id] = Value::from(nameserver);
+        } else {
+            if !options.is_empty() { options.push('\n'); }
+            options.push_str(line);
        }
    }

+    if !options.is_empty() {
+        result["options"] = options.into();
+    }
+
    Ok(result)
}

-fn update_dns(
-    param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
+#[api(
+    protected: true,
+    input: {
+        description: "Update DNS settings.",
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            search: {
+                schema: SEARCH_DOMAIN_SCHEMA,
+                optional: true,
+            },
+            dns1: {
+                optional: true,
+                schema: FIRST_DNS_SERVER_SCHEMA,
+            },
+            dns2: {
+                optional: true,
+                schema: SECOND_DNS_SERVER_SCHEMA,
+            },
+            dns3: {
+                optional: true,
+                schema: THIRD_DNS_SERVER_SCHEMA,
+            },
+            delete: {
+                description: "List of properties to delete.",
+                type: Array,
+                optional: true,
+                items: {
+                    type: DeletableProperty,
+                }
+            },
+            digest: {
+                optional: true,
+                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "network", "dns"], PRIV_SYS_MODIFY, false),
+    }
+)]
+/// Update DNS settings
+pub fn update_dns(
+    search: Option<String>,
+    dns1: Option<String>,
+    dns2: Option<String>,
+    dns3: Option<String>,
+    delete: Option<Vec<DeletableProperty>>,
+    digest: Option<String>,
) -> Result<Value, Error> {

    lazy_static! {
@@ -62,33 +130,41 @@ fn update_dns(

    let _guard = MUTEX.lock();

-    let search = crate::tools::required_string_param(&param, "search")?;
+    let mut config = read_etc_resolv_conf()?;
+    let old_digest = config["digest"].as_str().unwrap();

-    let raw = file_get_contents(RESOLV_CONF_FN)?;
-    let old_digest = proxmox::tools::digest_to_hex(&sha::sha256(&raw));

-    if let Some(digest) = param["digest"].as_str() {
-        crate::tools::assert_if_modified(&old_digest, &digest)?;
+    if let Some(digest) = digest {
+        crate::tools::assert_if_modified(old_digest, &digest)?;
    }

-    let old_data = String::from_utf8(raw)?;

-    let mut data = format!("search {}\n", search);

-    for opt in &["dns1", "dns2", "dns3"] {
-        if let Some(server) = param[opt].as_str() {
-            data.push_str(&format!("nameserver {}\n", server));
+    if let Some(delete) = delete {
+        for delete_prop in delete {
+            let config = config.as_object_mut().unwrap();
+            match delete_prop {
+                DeletableProperty::dns1 => { config.remove("dns1"); },
+                DeletableProperty::dns2 => { config.remove("dns2"); },
+                DeletableProperty::dns3 => { config.remove("dns3"); },
+            }
        }
    }

-    // append other data
-    lazy_static! {
-        static ref SKIP_REGEX: Regex = Regex::new(r"^(search|domain|nameserver)\s+").unwrap();
+    if let Some(search) = search { config["search"] = search.into(); }
+    if let Some(dns1) = dns1 { config["dns1"] = dns1.into(); }
+    if let Some(dns2) = dns2 { config["dns2"] = dns2.into(); }
+    if let Some(dns3) = dns3 { config["dns3"] = dns3.into(); }

+    let mut data = String::new();

+    if let Some(search) = config["search"].as_str() {
+        data.push_str(&format!("search {}\n", search));
    }
-    for line in old_data.lines() {
-        if SKIP_REGEX.is_match(line) { continue; }
-        data.push_str(line);
-        data.push('\n');
+    for opt in &["dns1", "dns2", "dns3"] {
+        if let Some(server) = config[opt].as_str() {
+            data.push_str(&format!("nameserver {}\n", server));
+        }
    }
+    if let Some(options) = config["options"].as_str() {
+        data.push_str(options);
+    }

    replace_file(RESOLV_CONF_FN, data.as_bytes(), CreateOptions::new())?;
@@ -96,7 +172,45 @@ fn update_dns(
    Ok(Value::Null)
}

-fn get_dns(
+#[api(
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+        },
+    },
+    returns: {
+        description: "Returns DNS server IPs and search domain.",
+        type: Object,
+        properties: {
+            digest: {
+                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
+            },
+            search: {
+                optional: true,
+                schema: SEARCH_DOMAIN_SCHEMA,
+            },
+            dns1: {
+                optional: true,
+                schema: FIRST_DNS_SERVER_SCHEMA,
+            },
+            dns2: {
+                optional: true,
+                schema: SECOND_DNS_SERVER_SCHEMA,
+            },
+            dns3: {
+                optional: true,
+                schema: THIRD_DNS_SERVER_SCHEMA,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "network", "dns"], PRIV_SYS_AUDIT, false),
+    }
+)]
+/// Read DNS settings.
+pub fn get_dns(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
@@ -105,41 +219,6 @@ fn get_dns(
    read_etc_resolv_conf()
}

-#[sortable]
pub const ROUTER: Router = Router::new()
-    .get(
-        &ApiMethod::new(
-            &ApiHandler::Sync(&get_dns),
-            &ObjectSchema::new(
-                "Read DNS settings.",
-                &sorted!([ ("node", false, &NODE_SCHEMA) ]),
-            )
-        ).returns(
-            &ObjectSchema::new(
-                "Returns DNS server IPs and search domain.",
-                &sorted!([
-                    ("digest", false, &PROXMOX_CONFIG_DIGEST_SCHEMA),
-                    ("search", true, &SEARCH_DOMAIN_SCHEMA),
-                    ("dns1", true, &FIRST_DNS_SERVER_SCHEMA),
-                    ("dns2", true, &SECOND_DNS_SERVER_SCHEMA),
-                    ("dns3", true, &THIRD_DNS_SERVER_SCHEMA),
-                ]),
-            ).schema()
-        )
-    )
-    .put(
-        &ApiMethod::new(
-            &ApiHandler::Sync(&update_dns),
-            &ObjectSchema::new(
-                "Returns DNS server IPs and search domain.",
-                &sorted!([
-                    ("node", false, &NODE_SCHEMA),
-                    ("search", false, &SEARCH_DOMAIN_SCHEMA),
-                    ("dns1", true, &FIRST_DNS_SERVER_SCHEMA),
-                    ("dns2", true, &SECOND_DNS_SERVER_SCHEMA),
-                    ("dns3", true, &THIRD_DNS_SERVER_SCHEMA),
-                    ("digest", true, &PROXMOX_CONFIG_DIGEST_SCHEMA),
-                ]),
-            )
-        ).protected(true)
-    );
+    .get(&API_METHOD_GET_DNS)
+    .put(&API_METHOD_UPDATE_DNS);
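An illustrative sketch (not from the diff) of the file the rewritten update_dns emits, assuming a search domain of example.com, two nameservers, and one preserved option line — the `search` line comes first, then the `dnsX` entries in order, then any non-matching lines collected into `options`:

    search example.com
    nameserver 192.0.2.1
    nameserver 192.0.2.2
    options edns0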
@@ -1,12 +1,13 @@
use std::process::{Command, Stdio};

-use failure::*;
+use anyhow::{Error};
use serde_json::{json, Value};
use std::io::{BufRead,BufReader};

-use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
+use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};

use crate::api2::types::*;
+use crate::config::acl::PRIV_SYS_AUDIT;

#[api(
    protected: true,
@@ -53,6 +54,9 @@ use crate::api2::types::*;
            description: "Line text.",
        },
    },
+    access: {
+        permission: &Permission::Privilege(&["system", "log"], PRIV_SYS_AUDIT, false),
+    },
)]
/// Read syslog entries.
fn get_journal(
@@ -90,7 +94,7 @@ fn get_journal(

    let mut lines: Vec<String> = vec![];

-    let mut child = Command::new("/usr/bin/mini-journalreader")
+    let mut child = Command::new("mini-journalreader")
        .args(&args)
        .stdout(Stdio::piped())
        .spawn()?;
@@ -1,28 +1,671 @@
-use failure::*;
-use serde_json::{json, Value};
+use anyhow::{Error, bail};
+use serde_json::{Value, to_value};
+use ::serde::{Deserialize, Serialize};

-use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment};
-use proxmox::api::schema::ObjectSchema;
+use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
+use proxmox::api::schema::parse_property_string;

+use crate::config::network::{self, NetworkConfig};
+use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::api2::types::*;
+use crate::server::{WorkerTask};

-fn get_network_config(
-    _param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
-    Ok(json!({}))
+fn split_interface_list(list: &str) -> Result<Vec<String>, Error> {
+    let value = parse_property_string(&list, &NETWORK_INTERFACE_ARRAY_SCHEMA)?;
+    Ok(value.as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_string()).collect())
}

-pub const ROUTER: Router = Router::new()
-    .get(
-        &ApiMethod::new(
-            &ApiHandler::Sync(&get_network_config),
-            &ObjectSchema::new(
-                "Read network configuration.",
-                &[ ("node", false, &NODE_SCHEMA) ],
-            )
-        )
-    );
+fn check_duplicate_gateway_v4(config: &NetworkConfig, iface: &str) -> Result<(), Error> {
+
+    let current_gateway_v4 = config.interfaces.iter()
+        .find(|(_, interface)| interface.gateway.is_some())
+        .map(|(name, _)| name.to_string());
+
+    if let Some(current_gateway_v4) = current_gateway_v4 {
+        if current_gateway_v4 != iface {
+            bail!("Default IPv4 gateway already exists on interface '{}'", current_gateway_v4);
+        }
+    }
+    Ok(())
+}
+
+fn check_duplicate_gateway_v6(config: &NetworkConfig, iface: &str) -> Result<(), Error> {
+
+    let current_gateway_v6 = config.interfaces.iter()
+        .find(|(_, interface)| interface.gateway6.is_some())
+        .map(|(name, _)| name.to_string());
+
+    if let Some(current_gateway_v6) = current_gateway_v6 {
+        if current_gateway_v6 != iface {
+            bail!("Default IPv6 gateway already exists on interface '{}'", current_gateway_v6);
+        }
+    }
+    Ok(())
+}

#[api(
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
        },
    },
    returns: {
        description: "List network devices (with config digest).",
        type: Array,
        items: {
            type: Interface,
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "network", "interfaces"], PRIV_SYS_AUDIT, false),
    },
)]
/// List network devices.
pub fn list_network_devices(
    _param: Value,
    _info: &ApiMethod,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let (config, digest) = network::config()?;
    let digest = proxmox::tools::digest_to_hex(&digest);

    let mut list = Vec::new();

    for (iface, interface) in config.interfaces.iter() {
        if iface == "lo" { continue; } // do not list lo
        let mut item: Value = to_value(interface)?;
        item["digest"] = digest.clone().into();
        item["iface"] = iface.to_string().into();
        list.push(item);
    }

    let diff = network::changes()?;
    if !diff.is_empty() {
        rpcenv["changes"] = diff.into();
    }

    Ok(list.into())
}

#[api(
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            iface: {
                schema: NETWORK_INTERFACE_NAME_SCHEMA,
            },
        },
    },
    returns: {
        description: "The network interface configuration (with config digest).",
        type: Interface,
    },
    access: {
        permission: &Permission::Privilege(&["system", "network", "interfaces", "{name}"], PRIV_SYS_AUDIT, false),
    },
)]
/// Read a network interface configuration.
pub fn read_interface(iface: String) -> Result<Value, Error> {

    let (config, digest) = network::config()?;

    let interface = config.lookup(&iface)?;

    let mut data: Value = to_value(interface)?;
    data["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(data)
}


#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            iface: {
                schema: NETWORK_INTERFACE_NAME_SCHEMA,
            },
            "type": {
                description: "Interface type.",
                type: NetworkInterfaceType,
                optional: true,
            },
            autostart: {
                description: "Autostart interface.",
                type: bool,
                optional: true,
            },
            method: {
                type: NetworkConfigMethod,
                optional: true,
            },
            method6: {
                type: NetworkConfigMethod,
                optional: true,
            },
            comments: {
                description: "Comments (inet, may span multiple lines)",
                type: String,
                optional: true,
            },
            comments6: {
                description: "Comments (inet6, may span multiple lines)",
                type: String,
                optional: true,
            },
            cidr: {
                schema: CIDR_V4_SCHEMA,
                optional: true,
            },
            cidr6: {
                schema: CIDR_V6_SCHEMA,
                optional: true,
            },
            gateway: {
                schema: IP_V4_SCHEMA,
                optional: true,
            },
            gateway6: {
                schema: IP_V6_SCHEMA,
                optional: true,
            },
            mtu: {
                description: "Maximum Transmission Unit.",
                optional: true,
                minimum: 46,
                maximum: 65535,
                default: 1500,
            },
            bridge_ports: {
                schema: NETWORK_INTERFACE_LIST_SCHEMA,
                optional: true,
            },
            bridge_vlan_aware: {
                description: "Enable bridge vlan support.",
                type: bool,
                optional: true,
            },
            bond_mode: {
                type: LinuxBondMode,
                optional: true,
            },
            slaves: {
                schema: NETWORK_INTERFACE_LIST_SCHEMA,
                optional: true,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "network", "interfaces", "{iface}"], PRIV_SYS_MODIFY, false),
    },
)]
/// Create network interface configuration.
pub fn create_interface(
    iface: String,
    autostart: Option<bool>,
    method: Option<NetworkConfigMethod>,
    method6: Option<NetworkConfigMethod>,
    comments: Option<String>,
    comments6: Option<String>,
    cidr: Option<String>,
    gateway: Option<String>,
    cidr6: Option<String>,
    gateway6: Option<String>,
    mtu: Option<u64>,
    bridge_ports: Option<String>,
    bridge_vlan_aware: Option<bool>,
    bond_mode: Option<LinuxBondMode>,
    slaves: Option<String>,
    param: Value,
) -> Result<(), Error> {

    let interface_type = crate::tools::required_string_param(&param, "type")?;
    let interface_type: NetworkInterfaceType = serde_json::from_value(interface_type.into())?;

    let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;

    let (mut config, _digest) = network::config()?;

    if config.interfaces.contains_key(&iface) {
        bail!("interface '{}' already exists", iface);
    }

    let mut interface = Interface::new(iface.clone());
    interface.interface_type = interface_type;

    if let Some(autostart) = autostart { interface.autostart = autostart; }
    if method.is_some() { interface.method = method; }
    if method6.is_some() { interface.method6 = method6; }
    if mtu.is_some() { interface.mtu = mtu; }
    if comments.is_some() { interface.comments = comments; }
    if comments6.is_some() { interface.comments6 = comments6; }

    if let Some(cidr) = cidr {
        let (_, _, is_v6) = network::parse_cidr(&cidr)?;
        if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); }
        interface.cidr = Some(cidr);
    }

    if let Some(cidr6) = cidr6 {
        let (_, _, is_v6) = network::parse_cidr(&cidr6)?;
        if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); }
        interface.cidr6 = Some(cidr6);
    }

    if let Some(gateway) = gateway {
        let is_v6 = gateway.contains(':');
        if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); }
        check_duplicate_gateway_v4(&config, &iface)?;
        interface.gateway = Some(gateway);
    }

    if let Some(gateway6) = gateway6 {
        let is_v6 = gateway6.contains(':');
        if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); }
        check_duplicate_gateway_v6(&config, &iface)?;
        interface.gateway6 = Some(gateway6);
    }

    match interface_type {
        NetworkInterfaceType::Bridge => {
            if let Some(ports) = bridge_ports {
                let ports = split_interface_list(&ports)?;
                interface.set_bridge_ports(ports)?;
            }
            if bridge_vlan_aware.is_some() { interface.bridge_vlan_aware = bridge_vlan_aware; }
        }
        NetworkInterfaceType::Bond => {
            if bond_mode.is_some() { interface.bond_mode = bond_mode; }
            if let Some(slaves) = slaves {
                let slaves = split_interface_list(&slaves)?;
                interface.set_bond_slaves(slaves)?;
            }
        }
        _ => bail!("creating network interface type '{:?}' is not supported", interface_type),
    }

    if interface.cidr.is_some() || interface.gateway.is_some() {
        interface.method = Some(NetworkConfigMethod::Static);
    } else if interface.method.is_none() {
        interface.method = Some(NetworkConfigMethod::Manual);
    }

    if interface.cidr6.is_some() || interface.gateway6.is_some() {
        interface.method6 = Some(NetworkConfigMethod::Static);
    } else if interface.method6.is_none() {
        interface.method6 = Some(NetworkConfigMethod::Manual);
    }

    config.interfaces.insert(iface, interface);

    network::save_config(&config)?;

    Ok(())
}

#[api()]
#[derive(Serialize, Deserialize)]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
    /// Delete the IPv4 address property.
    cidr,
    /// Delete the IPv6 address property.
    cidr6,
    /// Delete the IPv4 gateway property.
    gateway,
    /// Delete the IPv6 gateway property.
    gateway6,
    /// Delete the whole IPv4 configuration entry.
    method,
    /// Delete the whole IPv6 configuration entry.
    method6,
    /// Delete IPv4 comments
    comments,
    /// Delete IPv6 comments
    comments6,
    /// Delete mtu.
    mtu,
    /// Delete autostart flag
    autostart,
    /// Delete bridge ports (set to 'none')
    bridge_ports,
    /// Delete bridge-vlan-aware flag
    bridge_vlan_aware,
    /// Delete bond-slaves (set to 'none')
    slaves,
}


#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            iface: {
                schema: NETWORK_INTERFACE_NAME_SCHEMA,
            },
            "type": {
                description: "Interface type. If specified, need to match the current type.",
                type: NetworkInterfaceType,
                optional: true,
            },
            autostart: {
                description: "Autostart interface.",
                type: bool,
                optional: true,
            },
            method: {
                type: NetworkConfigMethod,
                optional: true,
            },
            method6: {
                type: NetworkConfigMethod,
                optional: true,
            },
            comments: {
                description: "Comments (inet, may span multiple lines)",
                type: String,
                optional: true,
            },
            comments6: {
                description: "Comments (inet6, may span multiple lines)",
                type: String,
                optional: true,
            },
            cidr: {
                schema: CIDR_V4_SCHEMA,
                optional: true,
            },
            cidr6: {
                schema: CIDR_V6_SCHEMA,
                optional: true,
            },
            gateway: {
                schema: IP_V4_SCHEMA,
                optional: true,
            },
            gateway6: {
                schema: IP_V6_SCHEMA,
                optional: true,
            },
            mtu: {
                description: "Maximum Transmission Unit.",
                optional: true,
                minimum: 46,
                maximum: 65535,
                default: 1500,
            },
            bridge_ports: {
                schema: NETWORK_INTERFACE_LIST_SCHEMA,
                optional: true,
            },
            bridge_vlan_aware: {
                description: "Enable bridge vlan support.",
                type: bool,
                optional: true,
            },
            bond_mode: {
                type: LinuxBondMode,
                optional: true,
            },
            slaves: {
                schema: NETWORK_INTERFACE_LIST_SCHEMA,
                optional: true,
            },
            delete: {
                description: "List of properties to delete.",
                type: Array,
                optional: true,
                items: {
                    type: DeletableProperty,
                }
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "network", "interfaces", "{iface}"], PRIV_SYS_MODIFY, false),
    },
)]
/// Update network interface config.
pub fn update_interface(
    iface: String,
    autostart: Option<bool>,
    method: Option<NetworkConfigMethod>,
    method6: Option<NetworkConfigMethod>,
    comments: Option<String>,
    comments6: Option<String>,
    cidr: Option<String>,
    gateway: Option<String>,
    cidr6: Option<String>,
    gateway6: Option<String>,
    mtu: Option<u64>,
    bridge_ports: Option<String>,
    bridge_vlan_aware: Option<bool>,
    bond_mode: Option<LinuxBondMode>,
    slaves: Option<String>,
    delete: Option<Vec<DeletableProperty>>,
    digest: Option<String>,
    param: Value,
) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;

    let (mut config, expected_digest) = network::config()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    if gateway.is_some() { check_duplicate_gateway_v4(&config, &iface)?; }
    if gateway6.is_some() { check_duplicate_gateway_v6(&config, &iface)?; }

    let interface = config.lookup_mut(&iface)?;

    if let Some(interface_type) = param.get("type") {
        let interface_type: NetworkInterfaceType = serde_json::from_value(interface_type.clone())?;
        if interface_type != interface.interface_type {
            bail!("got unexpected interface type ({:?} != {:?})", interface_type, interface.interface_type);
        }
    }

    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::cidr => { interface.cidr = None; },
                DeletableProperty::cidr6 => { interface.cidr6 = None; },
                DeletableProperty::gateway => { interface.gateway = None; },
                DeletableProperty::gateway6 => { interface.gateway6 = None; },
                DeletableProperty::method => { interface.method = None; },
                DeletableProperty::method6 => { interface.method6 = None; },
                DeletableProperty::comments => { interface.comments = None; },
                DeletableProperty::comments6 => { interface.comments6 = None; },
                DeletableProperty::mtu => { interface.mtu = None; },
                DeletableProperty::autostart => { interface.autostart = false; },
                DeletableProperty::bridge_ports => { interface.set_bridge_ports(Vec::new())?; }
                DeletableProperty::bridge_vlan_aware => { interface.bridge_vlan_aware = None; }
                DeletableProperty::slaves => { interface.set_bond_slaves(Vec::new())?; }
            }
        }
    }

    if let Some(autostart) = autostart { interface.autostart = autostart; }
    if method.is_some() { interface.method = method; }
    if method6.is_some() { interface.method6 = method6; }
    if mtu.is_some() { interface.mtu = mtu; }
    if let Some(ports) = bridge_ports {
        let ports = split_interface_list(&ports)?;
        interface.set_bridge_ports(ports)?;
    }
    if bridge_vlan_aware.is_some() { interface.bridge_vlan_aware = bridge_vlan_aware; }
    if let Some(slaves) = slaves {
        let slaves = split_interface_list(&slaves)?;
        interface.set_bond_slaves(slaves)?;
    }
    if bond_mode.is_some() { interface.bond_mode = bond_mode; }

    if let Some(cidr) = cidr {
        let (_, _, is_v6) = network::parse_cidr(&cidr)?;
        if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); }
        interface.cidr = Some(cidr);
    }

    if let Some(cidr6) = cidr6 {
        let (_, _, is_v6) = network::parse_cidr(&cidr6)?;
        if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); }
        interface.cidr6 = Some(cidr6);
    }

    if let Some(gateway) = gateway {
        let is_v6 = gateway.contains(':');
        if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); }
        interface.gateway = Some(gateway);
    }

    if let Some(gateway6) = gateway6 {
        let is_v6 = gateway6.contains(':');
        if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); }
        interface.gateway6 = Some(gateway6);
    }

    if comments.is_some() { interface.comments = comments; }
    if comments6.is_some() { interface.comments6 = comments6; }

    if interface.cidr.is_some() || interface.gateway.is_some() {
        interface.method = Some(NetworkConfigMethod::Static);
    } else {
        interface.method = Some(NetworkConfigMethod::Manual);
    }

    if interface.cidr6.is_some() || interface.gateway6.is_some() {
        interface.method6 = Some(NetworkConfigMethod::Static);
    } else {
        interface.method6 = Some(NetworkConfigMethod::Manual);
    }

    network::save_config(&config)?;

    Ok(())
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            iface: {
                schema: NETWORK_INTERFACE_NAME_SCHEMA,
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "network", "interfaces", "{iface}"], PRIV_SYS_MODIFY, false),
    },
)]
/// Remove network interface configuration.
pub fn delete_interface(iface: String, digest: Option<String>) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;

    let (mut config, expected_digest) = network::config()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    let _interface = config.lookup(&iface)?; // check if interface exists

    config.interfaces.remove(&iface);

    network::save_config(&config)?;

    Ok(())
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "network", "interfaces"], PRIV_SYS_MODIFY, false),
    },
)]
/// Reload network configuration (requires ifupdown2).
pub async fn reload_network_config(
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    network::assert_ifupdown2_installed()?;

    let username = rpcenv.get_user().unwrap();

    let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), &username.clone(), true, |_worker| async {

        let _ = std::fs::rename(network::NETWORK_INTERFACES_NEW_FILENAME, network::NETWORK_INTERFACES_FILENAME);

        network::network_reload()?;
        Ok(())
    })?;

    Ok(upid_str)
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "network", "interfaces"], PRIV_SYS_MODIFY, false),
    },
)]
/// Revert network configuration (rm /etc/network/interfaces.new).
pub fn revert_network_config() -> Result<(), Error> {

    let _ = std::fs::remove_file(network::NETWORK_INTERFACES_NEW_FILENAME);

    Ok(())
}

const ITEM_ROUTER: Router = Router::new()
    .get(&API_METHOD_READ_INTERFACE)
    .put(&API_METHOD_UPDATE_INTERFACE)
    .delete(&API_METHOD_DELETE_INTERFACE);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_NETWORK_DEVICES)
    .put(&API_METHOD_RELOAD_NETWORK_CONFIG)
    .post(&API_METHOD_CREATE_INTERFACE)
    .delete(&API_METHOD_REVERT_NETWORK_CONFIG)
    .match_all("iface", &ITEM_ROUTER);
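A small sketch of split_interface_list (not part of the changeset); the comma separator is an assumption here, by analogy with the comma-separated property string that DISK_LIST_SCHEMA documents earlier in this changeset:

    // Hypothetical input/output sketch for the helper above.
    fn example_split() -> Result<(), anyhow::Error> {
        let ports = split_interface_list("ens18,ens19")?;
        assert_eq!(ports, vec!["ens18".to_string(), "ens19".to_string()]);
        Ok(())
    }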
src/api2/node/rrd.rs (new file, 87 lines)
@@ -0,0 +1,87 @@
use anyhow::Error;
use serde_json::{Value, json};

use proxmox::api::{api, Router};

use crate::api2::types::*;
use crate::tools::epoch_now_f64;
use crate::rrd::{extract_cached_data, RRD_DATA_ENTRIES};

pub fn create_value_from_rrd(
    basedir: &str,
    list: &[&str],
    timeframe: RRDTimeFrameResolution,
    cf: RRDMode,
) -> Result<Value, Error> {

    let mut result = Vec::new();
    let now = epoch_now_f64()?;

    for name in list {
        let (start, reso, list) = match extract_cached_data(basedir, name, now, timeframe, cf) {
            Some(result) => result,
            None => continue,
        };

        let mut t = start;
        for index in 0..RRD_DATA_ENTRIES {
            if result.len() <= index {
                if let Some(value) = list[index] {
                    result.push(json!({ "time": t, *name: value }));
                } else {
                    result.push(json!({ "time": t }));
                }
            } else {
                if let Some(value) = list[index] {
                    result[index][name] = value.into();
                }
            }
            t += reso;
        }
    }

    Ok(result.into())
}

#[api(
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrameResolution,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
)]
/// Read node stats
fn get_node_stats(
    timeframe: RRDTimeFrameResolution,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {

    create_value_from_rrd(
        "host",
        &[
            "cpu", "iowait",
            "memtotal", "memused",
            "swaptotal", "swapused",
            "netin", "netout",
            "loadavg",
            "total", "used",
            "read_ios", "read_bytes",
            "write_ios", "write_bytes",
            "io_ticks",
        ],
        timeframe,
        cf,
    )
}

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_NODE_STATS);
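A sketch of the JSON shape create_value_from_rrd produces (illustrative timestamps and values): each requested series contributes one key to the per-timestamp objects, and timestamps advance by the cached resolution.

    // Hypothetical result of create_value_from_rrd("host", &["cpu", "iowait"], ...):
    // [
    //   { "time": 1588000000, "cpu": 0.04, "iowait": 0.01 },
    //   { "time": 1588000060, "cpu": 0.07, "iowait": 0.00 },
    //   ...
    // ]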
@@ -1,15 +1,15 @@
 use std::process::{Command, Stdio};

-use failure::*;
+use anyhow::{bail, Error};
 use serde_json::{json, Value};

 use proxmox::{sortable, identity, list_subdirs_api_method};
-use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment};
+use proxmox::api::{api, Router, Permission};
 use proxmox::api::router::SubdirMap;
 use proxmox::api::schema::*;

 use crate::api2::types::*;
 use crate::tools;
+use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};

 static SERVICE_NAME_LIST: [&str; 7] = [
     "proxmox-backup",
@@ -38,7 +38,7 @@ fn get_full_service_state(service: &str) -> Result<Value, Error> {

     let real_service_name = real_service_name(service);

-    let mut child = Command::new("/bin/systemctl")
+    let mut child = Command::new("systemctl")
         .args(&["show", real_service_name])
         .stdout(Stdio::piped())
         .spawn()?;
@@ -91,11 +91,45 @@ fn json_service_state(service: &str, status: Value) -> Value {
     Value::Null
 }


+#[api(
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+        },
+    },
+    returns: {
+        description: "Returns a list of systemd services.",
+        type: Array,
+        items: {
+            description: "Service details.",
+            properties: {
+                service: {
+                    schema: SERVICE_ID_SCHEMA,
+                },
+                name: {
+                    type: String,
+                    description: "systemd service name.",
+                },
+                desc: {
+                    type: String,
+                    description: "systemd service description.",
+                },
+                state: {
+                    type: String,
+                    description: "systemd service 'SubState'.",
+                },
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "services"], PRIV_SYS_AUDIT, false),
+    },
+)]
+/// Service list.
 fn list_services(
     _param: Value,
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

     let mut list = vec![];
@@ -115,21 +149,36 @@ fn list_services(
     Ok(Value::from(list))
 }

+#[api(
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            service: {
+                schema: SERVICE_ID_SCHEMA,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "services", "{service}"], PRIV_SYS_AUDIT, false),
+    },
+)]
+/// Read service properties.
 fn get_service_state(
-    param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
+    service: String,
+    _param: Value,
 ) -> Result<Value, Error> {

-    let service = tools::required_string_param(&param, "service")?;
+    let service = service.as_str();

     if !SERVICE_NAME_LIST.contains(&service) {
         bail!("unknown service name '{}'", service);
     }

-    let status = get_full_service_state(service)?;
+    let status = get_full_service_state(&service)?;

-    Ok(json_service_state(service, status))
+    Ok(json_service_state(&service, status))
 }

 fn run_service_command(service: &str, cmd: &str) -> Result<Value, Error> {
@@ -147,7 +196,7 @@ fn run_service_command(service: &str, cmd: &str) -> Result<Value, Error> {

     let real_service_name = real_service_name(service);

-    let status = Command::new("/bin/systemctl")
+    let status = Command::new("systemctl")
         .args(&[cmd, real_service_name])
         .status()?;

@@ -158,61 +207,117 @@ fn run_service_command(service: &str, cmd: &str) -> Result<Value, Error> {
     Ok(Value::Null)
 }

+#[api(
+    protected: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            service: {
+                schema: SERVICE_ID_SCHEMA,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "services", "{service}"], PRIV_SYS_MODIFY, false),
+    },
+)]
+/// Start service.
 fn start_service(
-    param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
+    service: String,
+    _param: Value,
 ) -> Result<Value, Error> {

-    let service = tools::required_string_param(&param, "service")?;
-
     log::info!("starting service {}", service);

-    run_service_command(service, "start")
+    run_service_command(&service, "start")
 }

+#[api(
+    protected: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            service: {
+                schema: SERVICE_ID_SCHEMA,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "services", "{service}"], PRIV_SYS_MODIFY, false),
+    },
+)]
+/// Stop service.
 fn stop_service(
-    param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
+    service: String,
+    _param: Value,
+) -> Result<Value, Error> {

-    let service = tools::required_string_param(&param, "service")?;
+    log::info!("stopping service {}", service);

-    log::info!("stoping service {}", service);
-
-    run_service_command(service, "stop")
+    run_service_command(&service, "stop")
 }

+#[api(
+    protected: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            service: {
+                schema: SERVICE_ID_SCHEMA,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "services", "{service}"], PRIV_SYS_MODIFY, false),
+    },
+)]
+/// Restart service.
 fn restart_service(
-    param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
+    service: String,
+    _param: Value,
 ) -> Result<Value, Error> {

-    let service = tools::required_string_param(&param, "service")?;
-
     log::info!("re-starting service {}", service);

-    if service == "proxmox-backup-proxy" {
+    if &service == "proxmox-backup-proxy" {
         // special case, avoid aborting running tasks
-        run_service_command(service, "reload")
+        run_service_command(&service, "reload")
     } else {
-        run_service_command(service, "restart")
+        run_service_command(&service, "restart")
     }
 }

+#[api(
+    protected: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            service: {
+                schema: SERVICE_ID_SCHEMA,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "services", "{service}"], PRIV_SYS_MODIFY, false),
+    },
+)]
+/// Reload service.
 fn reload_service(
-    param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
+    service: String,
+    _param: Value,
 ) -> Result<Value, Error> {

-    let service = tools::required_string_param(&param, "service")?;
-
     log::info!("reloading service {}", service);

-    run_service_command(service, "reload")
+    run_service_command(&service, "reload")
 }


@@ -221,111 +326,33 @@ const SERVICE_ID_SCHEMA: Schema = StringSchema::new("Service ID.")
     .schema();

 #[sortable]
-const SERVICE_SUBDIRS: SubdirMap = &[
+const SERVICE_SUBDIRS: SubdirMap = &sorted!([
     (
         "reload", &Router::new()
-            .post(
-                &ApiMethod::new(
-                    &ApiHandler::Sync(&reload_service),
-                    &ObjectSchema::new(
-                        "Reload service.",
-                        &sorted!([
-                            ("node", false, &NODE_SCHEMA),
-                            ("service", false, &SERVICE_ID_SCHEMA),
-                        ]),
-                    )
-                ).protected(true)
-            )
+            .post(&API_METHOD_RELOAD_SERVICE)
     ),
     (
         "restart", &Router::new()
-            .post(
-                &ApiMethod::new(
-                    &ApiHandler::Sync(&restart_service),
-                    &ObjectSchema::new(
-                        "Restart service.",
-                        &sorted!([
-                            ("node", false, &NODE_SCHEMA),
-                            ("service", false, &SERVICE_ID_SCHEMA),
-                        ]),
-                    )
-                ).protected(true)
-            )
+            .post(&API_METHOD_RESTART_SERVICE)
     ),
     (
         "start", &Router::new()
-            .post(
-                &ApiMethod::new(
-                    &ApiHandler::Sync(&start_service),
-                    &ObjectSchema::new(
-                        "Start service.",
-                        &sorted!([
-                            ("node", false, &NODE_SCHEMA),
-                            ("service", false, &SERVICE_ID_SCHEMA),
-                        ]),
-                    )
-                ).protected(true)
-            )
+            .post(&API_METHOD_START_SERVICE)
     ),
     (
         "state", &Router::new()
-            .get(
-                &ApiMethod::new(
-                    &ApiHandler::Sync(&get_service_state),
-                    &ObjectSchema::new(
-                        "Read service properties.",
-                        &sorted!([
-                            ("node", false, &NODE_SCHEMA),
-                            ("service", false, &SERVICE_ID_SCHEMA),
-                        ]),
-                    )
-                )
-            )
+            .get(&API_METHOD_GET_SERVICE_STATE)
     ),
     (
         "stop", &Router::new()
-            .post(
-                &ApiMethod::new(
-                    &ApiHandler::Sync(&stop_service),
-                    &ObjectSchema::new(
-                        "Stop service.",
-                        &sorted!([
-                            ("node", false, &NODE_SCHEMA),
-                            ("service", false, &SERVICE_ID_SCHEMA),
-                        ]),
-                    )
-                ).protected(true)
-            )
+            .post(&API_METHOD_STOP_SERVICE)
     ),
-];
+]);

 const SERVICE_ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(SERVICE_SUBDIRS))
     .subdirs(SERVICE_SUBDIRS);

-#[sortable]
 pub const ROUTER: Router = Router::new()
-    .get(
-        &ApiMethod::new(
-            &ApiHandler::Sync(&list_services),
-            &ObjectSchema::new(
-                "Service list.",
-                &sorted!([ ("node", false, &NODE_SCHEMA) ]),
-            )
-        ).returns(
-            &ArraySchema::new(
-                "Returns a list of systemd services.",
-                &ObjectSchema::new(
-                    "Service details.",
-                    &sorted!([
-                        ("service", false, &SERVICE_ID_SCHEMA),
-                        ("name", false, &StringSchema::new("systemd service name.").schema()),
-                        ("desc", false, &StringSchema::new("systemd service description.").schema()),
-                        ("state", false, &StringSchema::new("systemd service 'SubState'.").schema()),
-                    ]),
-                ).schema()
-            ).schema()
-        )
-    )
+    .get(&API_METHOD_LIST_SERVICES)
     .match_all("service", &SERVICE_ROUTER);
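The services.rs changes are one mechanical conversion applied five times: the hand-assembled ApiMethod/ObjectSchema registrations disappear, and each router entry references the API_METHOD_* constant that the #[api] macro derives from the annotated handler. A condensed sketch of the before/after pattern, using a hypothetical handler name:

// Before: schema, handler, and protected flag wired up by hand.
//
//     .post(
//         &ApiMethod::new(
//             &ApiHandler::Sync(&my_handler),
//             &ObjectSchema::new("Do something.", &sorted!([("node", false, &NODE_SCHEMA)])),
//         ).protected(true)
//     )
//
// After: the macro generates API_METHOD_MY_HANDLER (schema, access spec,
// protected flag) from the attribute arguments on the function itself.
//
//     #[api(
//         protected: true,
//         input: { properties: { node: { schema: NODE_SCHEMA } } },
//     )]
//     /// Do something.
//     fn my_handler(_param: Value) -> Result<Value, Error> { Ok(Value::Null) }
//
//     .post(&API_METHOD_MY_HANDLER)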
@@ -1,12 +1,15 @@
-use failure::*;
+use std::process::Command;
+use std::path::Path;
+
+use anyhow::{Error, format_err, bail};
 use serde_json::{json, Value};

 use proxmox::sys::linux::procfs;

-use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, SubdirMap};
-use proxmox::list_subdirs_api_method;
+use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};

 use crate::api2::types::*;
+use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT};

 #[api(
     input: {
@@ -44,7 +47,10 @@ use crate::api2::types::*;
                 optional: true,
             },
         }
-    }
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "status"], PRIV_SYS_AUDIT, false),
+    },
 )]
 /// Read node memory, CPU and (root) disk usage
 fn get_usage(
@@ -55,6 +61,7 @@ fn get_usage(

     let meminfo: procfs::ProcFsMemInfo = procfs::read_meminfo()?;
     let kstat: procfs::ProcFsStat = procfs::read_proc_stat()?;
+    let disk_usage = crate::tools::disks::disk_usage(Path::new("/"))?;

     Ok(json!({
         "memory": {
@@ -63,15 +70,57 @@ fn get_usage(
             "free": meminfo.memfree,
         },
         "cpu": kstat.cpu,
+        "root": {
+            "total": disk_usage.total,
+            "used": disk_usage.used,
+            "free": disk_usage.avail,
+        }
     }))
 }

-pub const USAGE_ROUTER: Router = Router::new()
-    .get(&API_METHOD_GET_USAGE);
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            command: {
+                type: NodePowerCommand,
+            },
+        }
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "status"], PRIV_SYS_POWER_MANAGEMENT, false),
+    },
+)]
+/// Reboot or shutdown the node.
+fn reboot_or_shutdown(command: NodePowerCommand) -> Result<(), Error> {
+
+    let systemctl_command = match command {
+        NodePowerCommand::Reboot => "reboot",
+        NodePowerCommand::Shutdown => "poweroff",
+    };
+
+    let output = Command::new("systemctl")
+        .arg(systemctl_command)
+        .output()
+        .map_err(|err| format_err!("failed to execute systemctl - {}", err))?;
+
+    if !output.status.success() {
+        match output.status.code() {
+            Some(code) => {
+                let msg = String::from_utf8(output.stderr)
+                    .map(|m| if m.is_empty() { String::from("no error message") } else { m })
+                    .unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"));
+                bail!("systemctl failed with status code: {} - {}", code, msg);
+            }
+            None => bail!("systemctl terminated by signal"),
+        }
+    }
+    Ok(())
+}

-pub const SUBDIRS: SubdirMap = &[
-    ("usage", &USAGE_ROUTER),
-];
 pub const ROUTER: Router = Router::new()
-    .get(&list_subdirs_api_method!(SUBDIRS))
-    .subdirs(SUBDIRS);
+    .get(&API_METHOD_GET_USAGE)
+    .post(&API_METHOD_REBOOT_OR_SHUTDOWN);
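The error path of reboot_or_shutdown is a reusable pattern: run the command, capture its output, and distinguish a non-zero exit code from death by signal. A self-contained sketch of the same pattern outside the API context (an assumed helper, not part of the diff):

use std::process::Command;

// Run `program arg` and turn any failure into a readable error string.
fn run_checked(program: &str, arg: &str) -> Result<(), String> {
    let output = Command::new(program)
        .arg(arg)
        .output()
        .map_err(|err| format!("failed to execute {} - {}", program, err))?;

    if !output.status.success() {
        let msg = String::from_utf8(output.stderr)
            .map(|m| if m.is_empty() { String::from("no error message") } else { m })
            .unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"));
        // code() is None when the child was terminated by a signal
        return Err(match output.status.code() {
            Some(code) => format!("{} failed with status code: {} - {}", program, code, msg),
            None => format!("{} terminated by signal", program),
        });
    }
    Ok(())
}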
@@ -1,11 +1,12 @@
 use std::process::{Command, Stdio};

-use failure::*;
+use anyhow::{Error};
 use serde_json::{json, Value};

-use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
+use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};

 use crate::api2::types::*;
+use crate::config::acl::PRIV_SYS_AUDIT;

 fn dump_journal(
     start: Option<u64>,
@@ -26,7 +27,7 @@ fn dump_journal(
     let start = start.unwrap_or(0);
     let mut count: u64 = 0;

-    let mut child = Command::new("/bin/journalctl")
+    let mut child = Command::new("journalctl")
         .args(&args)
         .stdout(Stdio::piped())
         .spawn()?;
@@ -122,12 +123,15 @@ fn dump_journal(
             }
         },
     },
+    access: {
+        permission: &Permission::Privilege(&["system", "log"], PRIV_SYS_AUDIT, false),
+    },
 )]
 /// Read syslog entries.
 fn get_syslog(
     param: Value,
     _info: &ApiMethod,
-    rpcenv: &mut dyn RpcEnvironment,
+    mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

     let (count, lines) = dump_journal(
@@ -137,7 +141,7 @@ fn get_syslog(
         param["until"].as_str(),
         param["service"].as_str())?;

-    rpcenv.set_result_attrib("total", Value::from(count));
+    rpcenv["total"] = Value::from(count);

     Ok(json!(lines))
 }
@@ -1,26 +1,96 @@
 use std::fs::File;
 use std::io::{BufRead, BufReader};

-use failure::*;
+use anyhow::{Error};
 use serde_json::{json, Value};

-use proxmox::api::{api, ApiHandler, ApiMethod, Router, RpcEnvironment};
+use proxmox::api::{api, Router, RpcEnvironment, Permission, UserInformation};
 use proxmox::api::router::SubdirMap;
 use proxmox::api::schema::*;
 use proxmox::{identity, list_subdirs_api_method, sortable};

 use crate::tools;
 use crate::api2::types::*;
 use crate::server::{self, UPID};
+use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
+use crate::config::cached_user_info::CachedUserInfo;

-fn get_task_status(
+#[api(
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            upid: {
+                schema: UPID_SCHEMA,
+            },
+        },
+    },
+    returns: {
+        description: "Task status information.",
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            upid: {
+                schema: UPID_SCHEMA,
+            },
+            pid: {
+                type: i64,
+                description: "The Unix PID.",
+            },
+            pstart: {
+                type: u64,
+                description: "The Unix process start time from `/proc/pid/stat`",
+            },
+            starttime: {
+                type: i64,
+                description: "The task start time (Epoch)",
+            },
+            "type": {
+                type: String,
+                description: "Worker type (arbitrary ASCII string)",
+            },
+            id: {
+                type: String,
+                optional: true,
+                description: "Worker ID (arbitrary ASCII string)",
+            },
+            user: {
+                type: String,
+                description: "The user who started the task.",
+            },
+            status: {
+                type: String,
+                description: "'running' or 'stopped'",
+            },
+            exitstatus: {
+                type: String,
+                optional: true,
+                description: "'OK', 'Error: <msg>', or 'unknown'.",
+            },
+        },
+    },
+    access: {
+        description: "Users can access their own tasks, or need Sys.Audit on /system/tasks.",
+        permission: &Permission::Anybody,
+    },
+)]
+/// Get task status.
+async fn get_task_status(
     param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
+    rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

     let upid = extract_upid(&param)?;

+    let username = rpcenv.get_user().unwrap();
+
+    if username != upid.username {
+        let user_info = CachedUserInfo::new()?;
+        user_info.check_privs(&username, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
+    }
+
     let mut result = json!({
         "upid": param["upid"],
         "node": upid.node,
@@ -32,7 +102,7 @@ fn get_task_status(
         "user": upid.username,
     });

-    if crate::server::worker_is_active(&upid) {
+    if crate::server::worker_is_active(&upid).await? {
         result["status"] = Value::from("running");
     } else {
         let exitstatus = crate::server::upid_read_status(&upid).unwrap_or(String::from("unknown"));
@@ -50,14 +120,54 @@ fn extract_upid(param: &Value) -> Result<UPID, Error> {
     upid_str.parse::<UPID>()
 }

-fn read_task_log(
+#[api(
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            upid: {
+                schema: UPID_SCHEMA,
+            },
+            "test-status": {
+                type: bool,
+                optional: true,
+                description: "Test task status, and set result attribute \"active\" accordingly.",
+            },
+            start: {
+                type: u64,
+                optional: true,
+                description: "Start at this line.",
+                default: 0,
+            },
+            limit: {
+                type: u64,
+                optional: true,
+                description: "Only list this amount of lines.",
+                default: 50,
+            },
+        },
+    },
+    access: {
+        description: "Users can access their own tasks, or need Sys.Audit on /system/tasks.",
+        permission: &Permission::Anybody,
+    },
+)]
+/// Read task log.
+async fn read_task_log(
     param: Value,
     _info: &ApiMethod,
-    rpcenv: &mut dyn RpcEnvironment,
+    mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

     let upid = extract_upid(&param)?;

+    let username = rpcenv.get_user().unwrap();
+
+    if username != upid.username {
+        let user_info = CachedUserInfo::new()?;
+        user_info.check_privs(&username, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
+    }
+
     let test_status = param["test-status"].as_bool().unwrap_or(false);

     let start = param["start"].as_u64().unwrap_or(0);
@@ -89,28 +199,50 @@ fn read_task_log(
         }
     }

-    rpcenv.set_result_attrib("total", Value::from(count));
+    rpcenv["total"] = Value::from(count);

     if test_status {
-        let active = crate::server::worker_is_active(&upid);
-        rpcenv.set_result_attrib("active", Value::from(active));
+        let active = crate::server::worker_is_active(&upid).await?;
+        rpcenv["active"] = Value::from(active);
     }

     Ok(json!(lines))
 }

+#[api(
+    protected: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            upid: {
+                schema: UPID_SCHEMA,
+            },
+        },
+    },
+    access: {
+        description: "Users can stop their own tasks, or need Sys.Modify on /system/tasks.",
+        permission: &Permission::Anybody,
+    },
+)]
+/// Try to stop a task.
 fn stop_task(
     param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
+    rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

     let upid = extract_upid(&param)?;

-    if crate::server::worker_is_active(&upid) {
-        server::abort_worker_async(upid);
+    let username = rpcenv.get_user().unwrap();
+
+    if username != upid.username {
+        let user_info = CachedUserInfo::new()?;
+        user_info.check_privs(&username, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
     }

+    server::abort_worker_async(upid);
+
     Ok(Value::Null)
 }

@@ -140,11 +272,13 @@ fn stop_task(
             type: bool,
             description: "Only list running tasks.",
             optional: true,
+            default: false,
         },
         errors: {
             type: bool,
             description: "Only list erroneous tasks.",
             optional: true,
+            default: false,
         },
         userfilter: {
             optional: true,
@@ -158,18 +292,26 @@ fn stop_task(
         type: Array,
         items: { type: TaskListItem },
     },
+    access: {
+        description: "Users can only see their own tasks, unless they have Sys.Audit on /system/tasks.",
+        permission: &Permission::Anybody,
+    },
 )]
 /// List tasks.
 pub fn list_tasks(
+    start: u64,
+    limit: u64,
+    errors: bool,
+    running: bool,
     param: Value,
-    _info: &ApiMethod,
-    rpcenv: &mut dyn RpcEnvironment,
+    mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<TaskListItem>, Error> {

-    let start = param["start"].as_u64().unwrap_or(0);
-    let limit = param["limit"].as_u64().unwrap_or(50);
-    let errors = param["errors"].as_bool().unwrap_or(false);
-    let running = param["running"].as_bool().unwrap_or(false);
     let username = rpcenv.get_user().unwrap();
+    let user_info = CachedUserInfo::new()?;
+    let user_privs = user_info.lookup_privs(&username, &["system", "tasks"]);
+
+    let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;

     let store = param["store"].as_str();

@@ -181,19 +323,9 @@ pub fn list_tasks(

     let mut count = 0;

-    for info in list.iter() {
-        let mut entry = TaskListItem {
-            upid: info.upid_str.clone(),
-            node: "localhost".to_string(),
-            pid: info.upid.pid as i64,
-            pstart: info.upid.pstart,
-            starttime: info.upid.starttime,
-            worker_type: info.upid.worker_type.clone(),
-            worker_id: info.upid.worker_id.clone(),
-            user: info.upid.username.clone(),
-            endtime: None,
-            status: None,
-        };
+    for info in list {
+        if !list_all && info.upid.username != username { continue; }


         if let Some(username) = userfilter {
             if !info.upid.username.contains(username) { continue; }
@@ -223,9 +355,6 @@ pub fn list_tasks(
         if errors && state.1 == "OK" {
             continue;
         }
-
-        entry.endtime = Some(state.0);
-        entry.status = Some(state.1.clone());
     }

     if (count as u64) < start {
@@ -235,82 +364,31 @@ pub fn list_tasks(
         count += 1;
     }

-        if (result.len() as u64) < limit { result.push(entry); };
+        if (result.len() as u64) < limit { result.push(info.into()); };
     }

-    rpcenv.set_result_attrib("total", Value::from(count));
+    rpcenv["total"] = Value::from(count);

     Ok(result)
 }

 #[sortable]
-const UPID_API_SUBDIRS: SubdirMap = &[
+const UPID_API_SUBDIRS: SubdirMap = &sorted!([
     (
         "log", &Router::new()
-            .get(
-                &ApiMethod::new(
-                    &ApiHandler::Sync(&read_task_log),
-                    &ObjectSchema::new(
-                        "Read task log.",
-                        &sorted!([
-                            ("node", false, &NODE_SCHEMA),
-                            ( "test-status",
-                              true,
-                              &BooleanSchema::new(
-                                  "Test task status, and set result attribute \"active\" accordingly."
-                              ).schema()
-                            ),
-                            ("upid", false, &UPID_SCHEMA),
-                            ("start", true, &IntegerSchema::new("Start at this line.")
-                             .minimum(0)
-                             .default(0)
-                             .schema()
-                            ),
-                            ("limit", true, &IntegerSchema::new("Only list this amount of lines.")
-                             .minimum(0)
-                             .default(50)
-                             .schema()
-                            ),
-                        ]),
-                    )
-                )
-            )
+            .get(&API_METHOD_READ_TASK_LOG)
     ),
     (
         "status", &Router::new()
-            .get(
-                &ApiMethod::new(
-                    &ApiHandler::Sync(&get_task_status),
-                    &ObjectSchema::new(
-                        "Get task status.",
-                        &sorted!([
-                            ("node", false, &NODE_SCHEMA),
-                            ("upid", false, &UPID_SCHEMA),
-                        ]),
-                    )
-                )
-            )
+            .get(&API_METHOD_GET_TASK_STATUS)
     )
-];
+]);

-#[sortable]
 pub const UPID_API_ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(UPID_API_SUBDIRS))
-    .delete(
-        &ApiMethod::new(
-            &ApiHandler::Sync(&stop_task),
-            &ObjectSchema::new(
-                "Try to stop a task.",
-                &sorted!([
-                    ("node", false, &NODE_SCHEMA),
-                    ("upid", false, &UPID_SCHEMA),
-                ]),
-            )
-        ).protected(true)
-    )
+    .delete(&API_METHOD_STOP_TASK)
     .subdirs(&UPID_API_SUBDIRS);

-#[sortable]
 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_TASKS)
     .match_all("upid", &UPID_API_ROUTER);
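All three task handlers repeat the same access rule: the owner of a task may always act on it, anyone else needs the listed privilege on /system/tasks. A sketch of that check factored into a helper (a hypothetical refactoring, with names taken from the diff):

// `required_priv` is PRIV_SYS_AUDIT for reads and PRIV_SYS_MODIFY for stop.
fn check_task_access(
    rpcenv: &mut dyn RpcEnvironment,
    upid: &UPID,
    required_priv: u64,
) -> Result<(), Error> {
    let username = rpcenv.get_user().unwrap();
    if username != upid.username {
        // only consult the ACL cache when the caller is not the task owner
        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&username, &["system", "tasks"], required_priv, false)?;
    }
    Ok(())
}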
@@ -1,14 +1,11 @@
-use std::mem::{self, MaybeUninit};
-
 use chrono::prelude::*;
-use failure::*;
+use anyhow::{bail, format_err, Error};
 use serde_json::{json, Value};

-use proxmox::{sortable, identity};
-use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment};
-use proxmox::api::schema::*;
+use proxmox::api::{api, Router, Permission};
 use proxmox::tools::fs::{file_read_firstline, replace_file, CreateOptions};

+use crate::config::acl::PRIV_SYS_MODIFY;
 use crate::api2::types::*;

 fn read_etc_localtime() -> Result<String, Error> {
@@ -18,34 +15,48 @@ fn read_etc_localtime() -> Result<String, Error> {
     }

     // otherwise guess from the /etc/localtime symlink
-    let mut buf = MaybeUninit::<[u8; 64]>::uninit();
-    let len = unsafe {
-        libc::readlink(
-            "/etc/localtime".as_ptr() as *const _,
-            buf.as_mut_ptr() as *mut _,
-            mem::size_of_val(&buf),
-        )
-    };
-    if len <= 0 {
-        bail!("failed to guess timezone");
-    }
-    let len = len as usize;
-    let buf = unsafe {
-        (*buf.as_mut_ptr())[len] = 0;
-        buf.assume_init()
-    };
-    let link = std::str::from_utf8(&buf[..len])?;
+    let link = std::fs::read_link("/etc/localtime")
+        .map_err(|err| format_err!("failed to guess timezone - {}", err))?;
+
+    let link = link.to_string_lossy();
     match link.rfind("/zoneinfo/") {
         Some(pos) => Ok(link[(pos + 10)..].to_string()),
         None => Ok(link.to_string()),
     }
 }

-fn get_time(
-    _param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
+#[api(
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+        },
+    },
+    returns: {
+        description: "Returns server time and timezone.",
+        properties: {
+            timezone: {
+                schema: TIME_ZONE_SCHEMA,
+            },
+            time: {
+                type: i64,
+                description: "Seconds since 1970-01-01 00:00:00 UTC.",
+                minimum: 1_297_163_644,
+            },
+            localtime: {
+                type: i64,
+                description: "Seconds since 1970-01-01 00:00:00 UTC. (local time)",
+                minimum: 1_297_163_644,
+            },
+        }
+    },
+    access: {
+        permission: &Permission::Anybody,
+    },
+)]
+/// Read server time and time zone settings.
+fn get_time(_param: Value) -> Result<Value, Error> {
     let datetime = Local::now();
     let offset = datetime.offset();
     let time = datetime.timestamp();
@@ -58,13 +69,28 @@ fn get_time(
     }))
 }

+#[api(
+    protected: true,
+    reload_timezone: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            timezone: {
+                schema: TIME_ZONE_SCHEMA,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "time"], PRIV_SYS_MODIFY, false),
+    },
+)]
+/// Set time zone
 fn set_timezone(
-    param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
+    timezone: String,
+    _param: Value,
 ) -> Result<Value, Error> {
-    let timezone = crate::tools::required_string_param(&param, "timezone")?;

     let path = std::path::PathBuf::from(format!("/usr/share/zoneinfo/{}", timezone));

     if !path.exists() {
@@ -81,45 +107,6 @@ fn set_timezone(
     Ok(Value::Null)
 }

-#[sortable]
 pub const ROUTER: Router = Router::new()
-    .get(
-        &ApiMethod::new(
-            &ApiHandler::Sync(&get_time),
-            &ObjectSchema::new(
-                "Read server time and time zone settings.",
-                &sorted!([ ("node", false, &NODE_SCHEMA) ]),
-            )
-        ).returns(
-            &ObjectSchema::new(
-                "Returns server time and timezone.",
-                &sorted!([
-                    ("timezone", false, &StringSchema::new("Time zone").schema()),
-                    ("time", false, &IntegerSchema::new("Seconds since 1970-01-01 00:00:00 UTC.")
-                     .minimum(1_297_163_644)
-                     .schema()
-                    ),
-                    ("localtime", false, &IntegerSchema::new("Seconds since 1970-01-01 00:00:00 UTC. (local time)")
-                     .minimum(1_297_163_644)
-                     .schema()
-                    ),
-                ]),
-            ).schema()
-        )
-    )
-    .put(
-        &ApiMethod::new(
-            &ApiHandler::Sync(&set_timezone),
-            &ObjectSchema::new(
-                "Set time zone.",
-                &sorted!([
-                    ("node", false, &NODE_SCHEMA),
-                    ("timezone", false, &StringSchema::new(
-                        "Time zone. The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names.")
-                     .schema()
-                    ),
-                ]),
-            )
-        ).protected(true).reload_timezone(true)
-    );
+    .get(&API_METHOD_GET_TIME)
+    .put(&API_METHOD_SET_TIMEZONE);
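The rewritten read_etc_localtime derives the zone name from the symlink target by cutting everything up to and including the "/zoneinfo/" component; pos + 10 skips exactly those ten characters. A worked example:

// "/usr/share/zoneinfo/Europe/Vienna": rfind("/zoneinfo/") == Some(10),
// and 10 + 10 == 20, so the slice starts at "Europe/Vienna".
let link = "/usr/share/zoneinfo/Europe/Vienna";
let tz = match link.rfind("/zoneinfo/") {
    Some(pos) => &link[(pos + 10)..],
    None => link,
};
assert_eq!(tz, "Europe/Vienna");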
src/api2/pull.rs (417 lines changed)
@@ -1,370 +1,65 @@
 //! Sync datastore from remote server
-use std::sync::{Arc};
-
-use failure::*;
-use serde_json::json;
-use std::convert::TryFrom;
+use std::sync::Arc;
-use std::collections::HashMap;
-use std::io::{Seek, SeekFrom};
-use chrono::{Utc, TimeZone};
+use anyhow::{format_err, Error};

 use proxmox::api::api;
-use proxmox::api::{ApiMethod, Router, RpcEnvironment};
+use proxmox::api::{ApiMethod, Router, RpcEnvironment, Permission};

 use crate::server::{WorkerTask};
-use crate::backup::*;
-use crate::client::*;
-use crate::config::remote;
+use crate::backup::DataStore;
+use crate::client::{HttpClient, HttpClientOptions, BackupRepository, pull::pull_store};
 use crate::api2::types::*;

 // fixme: implement filters
 // fixme: delete vanished groups
 // Todo: correctly lock backup groups

-async fn pull_index_chunks<I: IndexFile>(
-    _worker: &WorkerTask,
-    chunk_reader: &mut RemoteChunkReader,
-    target: Arc<DataStore>,
-    index: I,
-) -> Result<(), Error> {
+use crate::config::{
+    remote,
+    acl::{PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ},
+    cached_user_info::CachedUserInfo,
+};

-    for pos in 0..index.index_count() {
-        let digest = index.index_digest(pos).unwrap();
-        let chunk_exists = target.cond_touch_chunk(digest, false)?;
-        if chunk_exists {
-            //worker.log(format!("chunk {} exists {}", pos, proxmox::tools::digest_to_hex(digest)));
-            continue;
-        }
-        //worker.log(format!("sync {} chunk {}", pos, proxmox::tools::digest_to_hex(digest)));
-        let chunk = chunk_reader.read_raw_chunk(&digest)?;
-
-        target.insert_chunk(&chunk, &digest)?;
-    }
-
-    Ok(())
-}
-
-async fn download_manifest(
-    reader: &BackupReader,
-    filename: &std::path::Path,
-) -> Result<std::fs::File, Error> {
-
-    let tmp_manifest_file = std::fs::OpenOptions::new()
-        .write(true)
-        .create(true)
-        .read(true)
-        .open(&filename)?;
-
-    let mut tmp_manifest_file = reader.download(MANIFEST_BLOB_NAME, tmp_manifest_file).await?;
-
-    tmp_manifest_file.seek(SeekFrom::Start(0))?;
-
-    Ok(tmp_manifest_file)
-}
-
-async fn pull_single_archive(
-    worker: &WorkerTask,
-    reader: &BackupReader,
-    chunk_reader: &mut RemoteChunkReader,
-    tgt_store: Arc<DataStore>,
-    snapshot: &BackupDir,
-    archive_name: &str,
-) -> Result<(), Error> {
-
-    let mut path = tgt_store.base_path();
-    path.push(snapshot.relative_path());
-    path.push(archive_name);
-
-    let mut tmp_path = path.clone();
-    tmp_path.set_extension("tmp");
-
-    worker.log(format!("sync archive {}", archive_name));
-    let tmpfile = std::fs::OpenOptions::new()
-        .write(true)
-        .create(true)
-        .read(true)
-        .open(&tmp_path)?;
-
-    let tmpfile = reader.download(archive_name, tmpfile).await?;
-
-    match archive_type(archive_name)? {
-        ArchiveType::DynamicIndex => {
-            let index = DynamicIndexReader::new(tmpfile)
-                .map_err(|err| format_err!("unable to read dynamic index {:?} - {}", tmp_path, err))?;
-
-            pull_index_chunks(worker, chunk_reader, tgt_store.clone(), index).await?;
-        }
-        ArchiveType::FixedIndex => {
-            let index = FixedIndexReader::new(tmpfile)
-                .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", tmp_path, err))?;
-
-            pull_index_chunks(worker, chunk_reader, tgt_store.clone(), index).await?;
-        }
-        ArchiveType::Blob => { /* nothing to do */ }
-    }
-    if let Err(err) = std::fs::rename(&tmp_path, &path) {
-        bail!("Atomic rename file {:?} failed - {}", path, err);
-    }
-    Ok(())
-}
-
-async fn pull_snapshot(
-    worker: &WorkerTask,
-    reader: Arc<BackupReader>,
-    tgt_store: Arc<DataStore>,
-    snapshot: &BackupDir,
-) -> Result<(), Error> {
-
-    let mut manifest_name = tgt_store.base_path();
-    manifest_name.push(snapshot.relative_path());
-    manifest_name.push(MANIFEST_BLOB_NAME);
-
-    let mut tmp_manifest_name = manifest_name.clone();
-    tmp_manifest_name.set_extension("tmp");
-
-    let mut tmp_manifest_file = download_manifest(&reader, &tmp_manifest_name).await?;
-    let tmp_manifest_blob = DataBlob::load(&mut tmp_manifest_file)?;
-    tmp_manifest_blob.verify_crc()?;
-
-    if manifest_name.exists() {
-        let manifest_blob = proxmox::try_block!({
-            let mut manifest_file = std::fs::File::open(&manifest_name)
-                .map_err(|err| format_err!("unable to open local manifest {:?} - {}", manifest_name, err))?;
-
-            let manifest_blob = DataBlob::load(&mut manifest_file)?;
-            manifest_blob.verify_crc()?;
-            Ok(manifest_blob)
-        }).map_err(|err: Error| {
-            format_err!("unable to read local manifest {:?} - {}", manifest_name, err)
-        })?;
-
-        if manifest_blob.raw_data() == tmp_manifest_blob.raw_data() {
-            return Ok(()); // nothing changed
-        }
-    }
-
-    let manifest = BackupManifest::try_from(tmp_manifest_blob)?;
-
-    let mut chunk_reader = RemoteChunkReader::new(reader.clone(), None, HashMap::new());
-
-    for item in manifest.files() {
-        let mut path = tgt_store.base_path();
-        path.push(snapshot.relative_path());
-        path.push(&item.filename);
-
-        if path.exists() {
-            match archive_type(&item.filename)? {
-                ArchiveType::DynamicIndex => {
-                    let index = DynamicIndexReader::open(&path)?;
-                    let (csum, size) = index.compute_csum();
-                    match manifest.verify_file(&item.filename, &csum, size) {
-                        Ok(_) => continue,
-                        Err(err) => {
-                            worker.log(format!("detected changed file {:?} - {}", path, err));
-                        }
-                    }
-                }
-                ArchiveType::FixedIndex => {
-                    let index = FixedIndexReader::open(&path)?;
-                    let (csum, size) = index.compute_csum();
-                    match manifest.verify_file(&item.filename, &csum, size) {
-                        Ok(_) => continue,
-                        Err(err) => {
-                            worker.log(format!("detected changed file {:?} - {}", path, err));
-                        }
-                    }
-                }
-                ArchiveType::Blob => {
-                    let mut tmpfile = std::fs::File::open(&path)?;
-                    let (csum, size) = compute_file_csum(&mut tmpfile)?;
-                    match manifest.verify_file(&item.filename, &csum, size) {
-                        Ok(_) => continue,
-                        Err(err) => {
-                            worker.log(format!("detected changed file {:?} - {}", path, err));
-                        }
-                    }
-                }
-            }
-        }
-
-        pull_single_archive(
-            worker,
-            &reader,
-            &mut chunk_reader,
-            tgt_store.clone(),
-            snapshot,
-            &item.filename,
-        ).await?;
-    }
-
-    if let Err(err) = std::fs::rename(&tmp_manifest_name, &manifest_name) {
-        bail!("Atomic rename file {:?} failed - {}", manifest_name, err);
-    }
-
-    // cleanup - remove stale files
-    tgt_store.cleanup_backup_dir(snapshot, &manifest)?;
-
-    Ok(())
-}
-
-pub async fn pull_snapshot_from(
-    worker: &WorkerTask,
-    reader: Arc<BackupReader>,
-    tgt_store: Arc<DataStore>,
-    snapshot: &BackupDir,
-) -> Result<(), Error> {
-
-    let (_path, is_new) = tgt_store.create_backup_dir(&snapshot)?;
-
-    if is_new {
-        worker.log(format!("sync snapshot {:?}", snapshot.relative_path()));
-
-        if let Err(err) = pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await {
-            if let Err(cleanup_err) = tgt_store.remove_backup_dir(&snapshot) {
-                worker.log(format!("cleanup error - {}", cleanup_err));
-            }
-            return Err(err);
-        }
-    } else {
-        worker.log(format!("re-sync snapshot {:?}", snapshot.relative_path()));
-        pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await?
-    }
-
-    Ok(())
-}
-
-pub async fn pull_group(
-    worker: &WorkerTask,
-    client: &HttpClient,
-    src_repo: &BackupRepository,
-    tgt_store: Arc<DataStore>,
-    group: &BackupGroup,
+pub fn check_pull_privs(
+    username: &str,
+    store: &str,
+    remote: &str,
+    remote_store: &str,
     delete: bool,
 ) -> Result<(), Error> {

-    let path = format!("api2/json/admin/datastore/{}/snapshots", src_repo.store());
+    let user_info = CachedUserInfo::new()?;

-    let args = json!({
-        "backup-type": group.backup_type(),
-        "backup-id": group.backup_id(),
-    });
-
-    let mut result = client.get(&path, Some(args)).await?;
-    let mut list: Vec<SnapshotListItem> = serde_json::from_value(result["data"].take())?;
-
-    list.sort_unstable_by(|a, b| a.backup_time.cmp(&b.backup_time));
-
-    let auth_info = client.login().await?;
-    let fingerprint = client.fingerprint();
-
-    let last_sync = tgt_store.last_successful_backup(group)?;
-
-    let mut remote_snapshots = std::collections::HashSet::new();
-
-    for item in list {
-        let backup_time = Utc.timestamp(item.backup_time, 0);
-        remote_snapshots.insert(backup_time);
-
-        if let Some(last_sync_time) = last_sync {
-            if last_sync_time > backup_time { continue; }
-        }
-
-        let options = HttpClientOptions::new()
-            .password(Some(auth_info.ticket.clone()))
-            .fingerprint(fingerprint.clone());
-
-        let new_client = HttpClient::new(src_repo.host(), src_repo.user(), options)?;
-
-        let reader = BackupReader::start(
-            new_client,
-            None,
-            src_repo.store(),
-            &item.backup_type,
-            &item.backup_id,
-            backup_time,
-            true,
-        ).await?;
-
-        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
-
-        pull_snapshot_from(worker, reader, tgt_store.clone(), &snapshot).await?;
-    }
+    user_info.check_privs(username, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
+    user_info.check_privs(username, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;

     if delete {
-        let local_list = group.list_backups(&tgt_store.base_path())?;
-        for info in local_list {
-            let backup_time = info.backup_dir.backup_time();
-            if remote_snapshots.contains(&backup_time) { continue; }
-            worker.log(format!("delete vanished snapshot {:?}", info.backup_dir.relative_path()));
-            tgt_store.remove_backup_dir(&info.backup_dir)?;
-        }
+        user_info.check_privs(username, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
     }

     Ok(())
 }

-pub async fn pull_store(
-    worker: &WorkerTask,
-    client: &HttpClient,
-    src_repo: &BackupRepository,
-    tgt_store: Arc<DataStore>,
-    delete: bool,
-) -> Result<(), Error> {
+pub async fn get_pull_parameters(
+    store: &str,
+    remote: &str,
+    remote_store: &str,
+) -> Result<(HttpClient, BackupRepository, Arc<DataStore>), Error> {

-    let path = format!("api2/json/admin/datastore/{}/groups", src_repo.store());
+    let tgt_store = DataStore::lookup_datastore(store)?;

-    let mut result = client.get(&path, None).await?;
+    let (remote_config, _digest) = remote::config()?;
+    let remote: remote::Remote = remote_config.lookup("remote", remote)?;

-    let mut list: Vec<GroupListItem> = serde_json::from_value(result["data"].take())?;
+    let options = HttpClientOptions::new()
+        .password(Some(remote.password.clone()))
+        .fingerprint(remote.fingerprint.clone());

-    list.sort_unstable_by(|a, b| {
-        let type_order = a.backup_type.cmp(&b.backup_type);
-        if type_order == std::cmp::Ordering::Equal {
-            a.backup_id.cmp(&b.backup_id)
-        } else {
-            type_order
-        }
-    });
+    let client = HttpClient::new(&remote.host, &remote.userid, options)?;
+    let _auth_info = client.login() // make sure we can auth
+        .await
+        .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;

-    let mut errors = false;
+    let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), remote_store.to_string());

-    let mut new_groups = std::collections::HashSet::new();
-
-    for item in list {
-        let group = BackupGroup::new(&item.backup_type, &item.backup_id);
-        if let Err(err) = pull_group(worker, client, src_repo, tgt_store.clone(), &group, delete).await {
-            worker.log(format!("sync group {}/{} failed - {}", item.backup_type, item.backup_id, err));
-            errors = true;
-            // do not stop here, instead continue
-        }
-        new_groups.insert(group);
-    }
-
-    if delete {
-        let result: Result<(), Error> = proxmox::try_block!({
-            let local_groups = BackupGroup::list_groups(&tgt_store.base_path())?;
-            for local_group in local_groups {
-                if new_groups.contains(&local_group) { continue; }
-                worker.log(format!("delete vanished group '{}/{}'", local_group.backup_type(), local_group.backup_id()));
-                if let Err(err) = tgt_store.remove_backup_group(&local_group) {
-                    worker.log(err.to_string());
-                    errors = true;
-                }
-            }
-            Ok(())
-        });
-        if let Err(err) = result {
-            worker.log(format!("error during cleanup: {}", err));
-            errors = true;
-        };
-    }
-
-    if errors {
-        bail!("sync failed with some errors.");
-    }
-
-    Ok(())
+    Ok((client, src_repo, tgt_store))
 }

 #[api(
@@ -379,54 +74,44 @@ pub async fn pull_store(
         "remote-store": {
             schema: DATASTORE_SCHEMA,
         },
-        delete: {
-            description: "Delete vanished backups. This remove the local copy if the remote backup was deleted.",
-            type: Boolean,
+        "remove-vanished": {
+            schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
             optional: true,
-            default: true,
         },
     },
 },
+access: {
+    // Note: used parameters are no uri parameters, so we need to test inside function body
+    description: r###"The user needs Datastore.Backup privilege on '/datastore/{store}',
+and needs to own the backup group. Remote.Read is required on '/remote/{remote}/{remote-store}'.
+The delete flag additionally requires the Datastore.Prune privilege on '/datastore/{store}'.
+"###,
+    permission: &Permission::Anybody,
+},
 )]
 /// Sync store from other repository
 async fn pull (
     store: String,
     remote: String,
     remote_store: String,
-    delete: Option<bool>,
+    remove_vanished: Option<bool>,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {

     let username = rpcenv.get_user().unwrap();
+    let delete = remove_vanished.unwrap_or(true);

-    let delete = delete.unwrap_or(true);
+    check_pull_privs(&username, &store, &remote, &remote_store, delete)?;

-    let tgt_store = DataStore::lookup_datastore(&store)?;
-
-    let (remote_config, _digest) = remote::config()?;
-    let remote: remote::Remote = remote_config.lookup("remote", &remote)?;
-
-    let options = HttpClientOptions::new()
-        .password(Some(remote.password.clone()))
-        .fingerprint(remote.fingerprint.clone());
-
-    let client = HttpClient::new(&remote.host, &remote.userid, options)?;
-    let _auth_info = client.login() // make sure we can auth
-        .await
-        .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;
-
-    let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), remote_store);
+    let (client, src_repo, tgt_store) = get_pull_parameters(&store, &remote, &remote_store).await?;

     // fixme: set to_stdout to false?
     let upid_str = WorkerTask::spawn("sync", Some(store.clone()), &username.clone(), true, move |worker| async move {

         worker.log(format!("sync datastore '{}' start", store));

+        // explicit create shared lock to prevent GC on newly created chunks
+        let _shared_store_lock = tgt_store.try_shared_chunk_store_lock()?;
+
-        pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete).await?;
+        pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, username).await?;

         worker.log(format!("sync datastore '{}' end", store));

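After this refactor the pull endpoint is only glue: explicit privilege checks, parameter resolution, then the pull_store implementation that now lives in crate::client::pull. A condensed sketch of the call sequence (function names from the diff; the store and remote names are invented):

// let delete = remove_vanished.unwrap_or(true);
// check_pull_privs(&username, "store2", "remote1", "store1", delete)?;
// let (client, src_repo, tgt_store) =
//     get_pull_parameters("store2", "remote1", "store1").await?;
// pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, username).await?;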
@@ -1,5 +1,5 @@
 //use chrono::{Local, TimeZone};
-use failure::*;
+use anyhow::{bail, format_err, Error};
 use futures::*;
 use hyper::header::{self, HeaderValue, UPGRADE};
 use hyper::http::request::Parts;
@@ -7,7 +7,7 @@ use hyper::{Body, Response, StatusCode};
 use serde_json::Value;

 use proxmox::{sortable, identity};
-use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment};
+use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, Permission};
 use proxmox::api::schema::*;
 use proxmox::http_err;

@@ -15,6 +15,9 @@ use crate::api2::types::*;
 use crate::backup::*;
 use crate::server::{WorkerTask, H2Service};
 use crate::tools;
+use crate::config::acl::PRIV_DATASTORE_READ;
+use crate::config::cached_user_info::CachedUserInfo;
+use crate::api2::helpers;

 mod environment;
 use environment::*;
@@ -29,18 +32,16 @@ pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
         concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!(), "')."),
         &sorted!([
             ("store", false, &DATASTORE_SCHEMA),
-            ("backup-type", false, &StringSchema::new("Backup type.")
-             .format(&ApiStringFormat::Enum(&["vm", "ct", "host"]))
-             .schema()
-            ),
-            ("backup-id", false, &StringSchema::new("Backup ID.").schema()),
-            ("backup-time", false, &IntegerSchema::new("Backup time (Unix epoch.)")
-             .minimum(1_547_797_308)
-             .schema()
-            ),
+            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
+            ("backup-id", false, &BACKUP_ID_SCHEMA),
+            ("backup-time", false, &BACKUP_TIME_SCHEMA),
             ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
         ]),
     )
-);
+).access(
+    // Note: parameter 'store' is no uri parameter, so we need to test inside function body
+    Some("The user needs Datastore.Read privilege on /datastore/{store}."),
+    &Permission::Anybody
+);

 fn upgrade_to_backup_reader_protocol(
@@ -54,7 +55,12 @@ fn upgrade_to_backup_reader_protocol(
     async move {
         let debug = param["debug"].as_bool().unwrap_or(false);

+        let username = rpcenv.get_user().unwrap();
         let store = tools::required_string_param(&param, "store")?.to_owned();
+
+        let user_info = CachedUserInfo::new()?;
+        user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_READ, false)?;
+
         let datastore = DataStore::lookup_datastore(&store)?;

         let backup_type = tools::required_string_param(&param, "backup-type")?;
@@ -75,7 +81,6 @@ fn upgrade_to_backup_reader_protocol(
             bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
         }

-        let username = rpcenv.get_user().unwrap();
         let env_type = rpcenv.env_type();

         let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
@@ -127,7 +132,7 @@ fn upgrade_to_backup_reader_protocol(
                 Either::Right((Ok(res), _)) => Ok(res),
                 Either::Right((Err(err), _)) => Err(err),
             })
-            .map_ok(move |_| env.log("reader finished sucessfully"))
+            .map_ok(move |_| env.log("reader finished successfully"))
         })?;

         let response = Response::builder()
@@ -183,26 +188,9 @@ fn download_file(
         path.push(env.backup_dir.relative_path());
         path.push(&file_name);

-        let path2 = path.clone();
-        let path3 = path.clone();
+        env.log(format!("download {:?}", path.clone()));

-        let file = tokio::fs::File::open(path)
-            .map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path2, err)))
-            .await?;
-
-        env.log(format!("download {:?}", path3));
-
-        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
-
-        let body = Body::wrap_stream(payload);
-
-        // fixme: set other headers ?
-        Ok(Response::builder()
-            .status(StatusCode::OK)
-            .header(header::CONTENT_TYPE, "application/octet-stream")
-            .body(body)
-            .unwrap())
+        helpers::create_download_response(path).await
     }.boxed()
 }

|
@ -1,8 +1,7 @@
|
||||
//use failure::*;
|
||||
//use anyhow::{bail, format_err, Error};
|
||||
use std::sync::Arc;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use serde_json::Value;
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
|
||||
|
||||
@ -16,7 +15,7 @@ use crate::server::formatter::*;
|
||||
#[derive(Clone)]
|
||||
pub struct ReaderEnvironment {
|
||||
env_type: RpcEnvironmentType,
|
||||
result_attributes: HashMap<String, Value>,
|
||||
result_attributes: Value,
|
||||
user: String,
|
||||
pub debug: bool,
|
||||
pub formatter: &'static OutputFormatter,
|
||||
@ -37,7 +36,7 @@ impl ReaderEnvironment {
|
||||
|
||||
|
||||
Self {
|
||||
result_attributes: HashMap::new(),
|
||||
result_attributes: json!({}),
|
||||
env_type,
|
||||
user,
|
||||
worker,
|
||||
@ -61,12 +60,12 @@ impl ReaderEnvironment {
|
||||
|
||||
impl RpcEnvironment for ReaderEnvironment {
|
||||
|
||||
fn set_result_attrib(&mut self, name: &str, value: Value) {
|
||||
self.result_attributes.insert(name.into(), value);
|
||||
fn result_attrib_mut(&mut self) -> &mut Value {
|
||||
&mut self.result_attributes
|
||||
}
|
||||
|
||||
fn get_result_attrib(&self, name: &str) -> Option<&Value> {
|
||||
self.result_attributes.get(name)
|
||||
fn result_attrib(&self) -> &Value {
|
||||
&self.result_attributes
|
||||
}
|
||||
|
||||
fn env_type(&self) -> RpcEnvironmentType {
|
||||
|
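The ReaderEnvironment change mirrors an RpcEnvironment trait update: instead of the name-based set_result_attrib/get_result_attrib pair, implementors expose the whole attribute object, and call sites use serde_json's index sugar. That is why the handlers above can write rpcenv["total"] = ...; a rough desugaring (assuming the Index impls for dyn RpcEnvironment are provided by the proxmox crate):

// rpcenv["total"] = Value::from(count);
// behaves like:
// rpcenv.result_attrib_mut()["total"] = Value::from(count);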
src/api2/status.rs (new file, 226 lines)
@@ -0,0 +1,226 @@
use proxmox::list_subdirs_api_method;

use anyhow::{Error};
use serde_json::{json, Value};

use proxmox::api::{
    api,
    ApiMethod,
    Permission,
    Router,
    RpcEnvironment,
    SubdirMap,
    UserInformation,
};

use crate::api2::types::{
    DATASTORE_SCHEMA,
    RRDMode,
    RRDTimeFrameResolution,
    TaskListItem
};

use crate::server;
use crate::backup::{DataStore};
use crate::config::datastore;
use crate::tools::epoch_now_f64;
use crate::tools::statistics::{linear_regression};
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::acl::{
    PRIV_SYS_AUDIT,
    PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_BACKUP,
};

#[api(
    returns: {
        description: "Lists the Status of the Datastores.",
        type: Array,
        items: {
            description: "Status of a Datastore",
            type: Object,
            properties: {
                store: {
                    schema: DATASTORE_SCHEMA,
                },
                total: {
                    type: Integer,
                    description: "The Size of the underlying storage in bytes",
                },
                used: {
                    type: Integer,
                    description: "The used bytes of the underlying storage",
                },
                avail: {
                    type: Integer,
                    description: "The available bytes of the underlying storage",
                },
                history: {
                    type: Array,
                    description: "A list of usages of the past (last Month).",
                    items: {
                        type: Number,
                        description: "The usage of a time in the past. Either null or between 0.0 and 1.0.",
                    }
                },
                "estimated-full-date": {
                    type: Integer,
                    optional: true,
                    description: "Estimation of the UNIX epoch when the storage will be full.\
                        This is calculated via a simple Linear Regression (Least Squares)\
                        of RRD data of the last Month. Missing if there are not enough data points yet.\
                        If the estimate lies in the past, the usage is decreasing.",
                },
            },
        },
    },
)]
/// List Datastore usages and estimates
fn datastore_status(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let (config, _digest) = datastore::config()?;

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, _)) in &config.sections {
        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
        if !allowed {
            continue;
        }

        let datastore = DataStore::lookup_datastore(&store)?;
        let status = crate::tools::disks::disk_usage(&datastore.base_path())?;

        let mut entry = json!({
            "store": store,
            "total": status.total,
            "used": status.used,
            "avail": status.avail,
        });

        let rrd_dir = format!("datastore/{}", store);
        let now = epoch_now_f64()?;
        let rrd_resolution = RRDTimeFrameResolution::Month;
        let rrd_mode = RRDMode::Average;

        let total_res = crate::rrd::extract_cached_data(
            &rrd_dir,
            "total",
            now,
            rrd_resolution,
            rrd_mode,
        );

        let used_res = crate::rrd::extract_cached_data(
            &rrd_dir,
            "used",
            now,
            rrd_resolution,
            rrd_mode,
        );

        match (total_res, used_res) {
            (Some((start, reso, total_list)), Some((_, _, used_list))) => {
                let mut usage_list: Vec<f64> = Vec::new();
                let mut time_list: Vec<u64> = Vec::new();
                let mut history = Vec::new();

                for (idx, used) in used_list.iter().enumerate() {
                    let total = if idx < total_list.len() {
                        total_list[idx]
                    } else {
                        None
                    };

                    match (total, used) {
                        (Some(total), Some(used)) if total != 0.0 => {
                            time_list.push(start + (idx as u64)*reso);
                            let usage = used/total;
                            usage_list.push(usage);
                            history.push(json!(usage));
                        },
                        _ => {
                            history.push(json!(null))
                        }
                    }
                }

                entry["history"] = history.into();

                // we skip the calculation for datastores with not enough data
                if usage_list.len() >= 7 {
                    if let Some((a, b)) = linear_regression(&time_list, &usage_list) {
                        if b != 0.0 {
                            let estimate = (1.0 - a) / b;
                            entry["estimated-full-date"] = Value::from(estimate.floor() as u64);
                        }
                    }
                }
            },
            _ => {},
        }

        list.push(entry);
    }

    Ok(list.into())
}

#[api(
    input: {
        properties: {
            since: {
                type: u64,
                description: "Only list tasks since this UNIX epoch.",
                optional: true,
            },
        },
    },
    returns: {
        description: "A list of tasks.",
        type: Array,
        items: { type: TaskListItem },
    },
    access: {
        description: "Users can only see their own tasks, unless they have Sys.Audit on /system/tasks.",
        permission: &Permission::Anybody,
    },
)]
/// List tasks.
pub fn list_tasks(
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&username, &["system", "tasks"]);

    let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;

    // TODO: replace with call that gets all tasks since the 'since' epoch
    let list: Vec<TaskListItem> = server::read_task_list()?
        .into_iter()
        .map(TaskListItem::from)
        .filter(|entry| list_all || entry.user == username)
        .collect();

    Ok(list.into())
}

const SUBDIRS: SubdirMap = &[
    ("datastore-usage", &Router::new().get(&API_METHOD_DATASTORE_STATUS)),
    ("tasks", &Router::new().get(&API_METHOD_LIST_TASKS)),
];

pub const ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SUBDIRS))
    .subdirs(SUBDIRS);
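The "estimated-full-date" value falls out of the fitted line: with usage modelled as a + b*t, the storage is full when the line reaches 1.0, i.e. at t = (1.0 - a) / b, which is exactly the expression in datastore_status() above. Below is a minimal sketch of such a least-squares fit; the actual linear_regression implementation lives in crate::tools::statistics and is not part of this diff, so the (intercept, slope) return convention here is an assumption inferred from how the result is used.

// Minimal least-squares sketch, assuming the same (intercept, slope) return
// convention as crate::tools::statistics::linear_regression (not shown here).
fn linear_regression_sketch(x: &[u64], y: &[f64]) -> Option<(f64, f64)> {
    let n = x.len();
    if n < 2 || n != y.len() {
        return None;
    }
    let n_f = n as f64;
    let mean_x = x.iter().map(|&v| v as f64).sum::<f64>() / n_f;
    let mean_y = y.iter().sum::<f64>() / n_f;
    let mut num = 0.0; // covariance accumulator
    let mut den = 0.0; // variance accumulator
    for i in 0..n {
        let dx = x[i] as f64 - mean_x;
        num += dx * (y[i] - mean_y);
        den += dx * dx;
    }
    if den == 0.0 {
        return None; // all samples at the same time stamp, slope undefined
    }
    let b = num / den;           // slope: usage change per second
    let a = mean_y - b * mean_x; // intercept
    Some((a, b))
}
// Solving a + b*t = 1.0 for t gives the epoch where usage reaches 100%,
// matching the `(1.0 - a) / b` estimate in datastore_status().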
@@ -1,16 +1,39 @@
-use failure::*;
+use anyhow::{Error};
 use serde_json::{json, Value};

-use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment};
-use proxmox::api::schema::ObjectSchema;
+use proxmox::api::{api, Router, Permission};

 use crate::tools;
+use crate::config::acl::PRIV_SYS_AUDIT;

-fn get_subscription(
-    _param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
+#[api(
+    returns: {
+        description: "Subscription status.",
+        properties: {
+            status: {
+                type: String,
+                description: "'NotFound', 'active' or 'inactive'."
+            },
+            message: {
+                type: String,
+                description: "Human readable problem description.",
+            },
+            serverid: {
+                type: String,
+                description: "The unique server ID.",
+            },
+            url: {
+                type: String,
+                description: "URL to Web Shop.",
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&[], PRIV_SYS_AUDIT, false),
+    },
+)]
+/// Read subscription info.
+fn get_subscription(_param: Value) -> Result<Value, Error> {

     let url = "https://www.proxmox.com/en/proxmox-backup-server/pricing";
     Ok(json!({
@@ -22,9 +45,4 @@ fn get_subscription(
 }

 pub const ROUTER: Router = Router::new()
-    .get(
-        &ApiMethod::new(
-            &ApiHandler::Sync(&get_subscription),
-            &ObjectSchema::new("Read subscription info.", &[])
-        )
-    );
+    .get(&API_METHOD_GET_SUBSCRIPTION);
@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail};
 use ::serde::{Deserialize, Serialize};

 use proxmox::api::{api, schema::*};
@@ -25,11 +25,24 @@ macro_rules! DNS_NAME { () => (concat!(r"(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL
 // slash is not allowed because it is used as pve API delimiter
 // also see "man useradd"
 macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }
+macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }
+
+macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) }

 #[macro_export]
 macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => (r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)") }

+macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) }
+macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) }
+
 const_regex!{
-    pub IP_FORMAT_REGEX = IPRE!();
+    pub IP_V4_REGEX = concat!(r"^", IPV4RE!(), r"$");
+    pub IP_V6_REGEX = concat!(r"^", IPV6RE!(), r"$");
+    pub IP_REGEX = concat!(r"^", IPRE!(), r"$");
+    pub CIDR_V4_REGEX = concat!(r"^", CIDR_V4_REGEX_STR!(), r"$");
+    pub CIDR_V6_REGEX = concat!(r"^", CIDR_V6_REGEX_STR!(), r"$");
+    pub CIDR_REGEX = concat!(r"^(?:", CIDR_V4_REGEX_STR!(), "|", CIDR_V6_REGEX_STR!(), r")$");
+
     pub SHA256_HEX_REGEX = r"^[a-f0-9]{64}$"; // fixme: define in common_regex ?
     pub SYSTEMD_DATETIME_REGEX = r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$"; // fixme: define in common_regex ?

@@ -52,16 +65,30 @@ const_regex!{

     pub DNS_NAME_OR_IP_REGEX = concat!(r"^", DNS_NAME!(), "|", IPRE!(), r"$");

-    pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!(), r"$");
+    pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_ID_REGEX_STR!(), r"$");
+
+    pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE!() ,"):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");
+
+    pub PROXMOX_GROUP_ID_REGEX = concat!(r"^", GROUP_NAME_REGEX_STR!(), r"$");

     pub CERT_FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";

     pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
+
+    pub BLOCKDEVICE_NAME_REGEX = r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$";
 }

 pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX);

+pub const IP_V4_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&IP_V4_REGEX);
+
+pub const IP_V6_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&IP_V6_REGEX);
+
 pub const IP_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&IP_FORMAT_REGEX);
+    ApiStringFormat::Pattern(&IP_REGEX);

 pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
@@ -87,10 +114,41 @@ pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat =
 pub const PROXMOX_USER_ID_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&PROXMOX_USER_ID_REGEX);

+pub const PROXMOX_GROUP_ID_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&PROXMOX_GROUP_ID_REGEX);
+
 pub const PASSWORD_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&PASSWORD_REGEX);

 pub const ACL_PATH_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&ACL_PATH_REGEX);

+pub const NETWORK_INTERFACE_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
+
+pub const CIDR_V4_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&CIDR_V4_REGEX);
+
+pub const CIDR_V6_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&CIDR_V6_REGEX);
+
+pub const CIDR_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&CIDR_REGEX);
+
+pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);
+
 pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.")
     .format(&PASSWORD_FORMAT)
     .min_length(1)
     .max_length(1024)
     .schema();

+pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
+    .format(&PASSWORD_FORMAT)
+    .min_length(5)
+    .max_length(64)
+    .schema();
+
 pub const CERT_FINGERPRINT_SHA256_SCHEMA: Schema = StringSchema::new(
     "X509 certificate fingerprint (sha256)."
@@ -142,6 +200,68 @@ pub const THIRD_DNS_SERVER_SCHEMA: Schema =
     .format(&IP_FORMAT)
     .schema();

+pub const IP_V4_SCHEMA: Schema =
+    StringSchema::new("IPv4 address.")
+    .format(&IP_V4_FORMAT)
+    .max_length(15)
+    .schema();
+
+pub const IP_V6_SCHEMA: Schema =
+    StringSchema::new("IPv6 address.")
+    .format(&IP_V6_FORMAT)
+    .max_length(39)
+    .schema();
+
+pub const IP_SCHEMA: Schema =
+    StringSchema::new("IP (IPv4 or IPv6) address.")
+    .format(&IP_FORMAT)
+    .max_length(39)
+    .schema();
+
+pub const CIDR_V4_SCHEMA: Schema =
+    StringSchema::new("IPv4 address with netmask (CIDR notation).")
+    .format(&CIDR_V4_FORMAT)
+    .max_length(18)
+    .schema();
+
+pub const CIDR_V6_SCHEMA: Schema =
+    StringSchema::new("IPv6 address with netmask (CIDR notation).")
+    .format(&CIDR_V6_FORMAT)
+    .max_length(43)
+    .schema();
+
+pub const CIDR_SCHEMA: Schema =
+    StringSchema::new("IP address (IPv4 or IPv6) with netmask (CIDR notation).")
+    .format(&CIDR_FORMAT)
+    .max_length(43)
+    .schema();
+
+pub const TIME_ZONE_SCHEMA: Schema = StringSchema::new(
+    "Time zone. The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names.")
+    .format(&SINGLE_LINE_COMMENT_FORMAT)
+    .min_length(2)
+    .max_length(64)
+    .schema();
+
+pub const ACL_PATH_SCHEMA: Schema = StringSchema::new(
+    "Access control path.")
+    .format(&ACL_PATH_FORMAT)
+    .min_length(1)
+    .max_length(128)
+    .schema();
+
+pub const ACL_PROPAGATE_SCHEMA: Schema = BooleanSchema::new(
+    "Allow to propagate (inherit) permissions.")
+    .default(true)
+    .schema();
+
+pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new(
+    "Type of 'ugid' property.")
+    .format(&ApiStringFormat::Enum(&[
+        EnumEntry::new("user", "User"),
+        EnumEntry::new("group", "Group")]))
+    .schema();
+
 pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema =
     StringSchema::new("Backup archive name.")
     .format(&PROXMOX_SAFE_ID_FORMAT)
@@ -149,7 +269,10 @@ pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema =

 pub const BACKUP_TYPE_SCHEMA: Schema =
     StringSchema::new("Backup type.")
-    .format(&ApiStringFormat::Enum(&["vm", "ct", "host"]))
+    .format(&ApiStringFormat::Enum(&[
+        EnumEntry::new("vm", "Virtual Machine Backup"),
+        EnumEntry::new("ct", "Container Backup"),
+        EnumEntry::new("host", "Host Backup")]))
     .schema();

 pub const BACKUP_ID_SCHEMA: Schema =
@@ -172,12 +295,38 @@ pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
     .max_length(32)
     .schema();

+pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
+    "Run sync job at specified schedule.")
+    .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
+    .schema();
+
+pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
+    "Run garbage collection job at specified schedule.")
+    .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
+    .schema();
+
+pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
+    "Run prune job at specified schedule.")
+    .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
+    .schema();
+
+pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
+    .format(&PROXMOX_SAFE_ID_FORMAT)
+    .min_length(3)
+    .max_length(32)
+    .schema();
+
+pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
+    .format(&PROXMOX_SAFE_ID_FORMAT)
+    .min_length(3)
+    .max_length(32)
+    .schema();
+
+pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
+    "Delete vanished backups. This removes the local copy if the remote backup was deleted.")
+    .default(true)
+    .schema();
+
 pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (single line).")
     .format(&SINGLE_LINE_COMMENT_FORMAT)
     .schema();
@@ -202,6 +351,17 @@ pub const PROXMOX_USER_ID_SCHEMA: Schema = StringSchema::new("User ID")
     .max_length(64)
     .schema();

+pub const PROXMOX_GROUP_ID_SCHEMA: Schema = StringSchema::new("Group ID")
+    .format(&PROXMOX_GROUP_ID_FORMAT)
+    .min_length(3)
+    .max_length(64)
+    .schema();
+
+pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name (/sys/block/<name>).")
+    .format(&BLOCKDEVICE_NAME_FORMAT)
+    .min_length(3)
+    .max_length(64)
+    .schema();
+
 // Complex type definitions
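Each of these schemas couples a description with a regex- or callback-based format, so API input validation stays declarative. A hedged sketch of how such a schema rejects bad input, mirroring the regression tests near the end of this file (parse_simple_value is the proxmox schema helper those tests call; returning the parsed serde_json::Value is assumed from that usage):

// Sketch only: validate strings against one of the schemas defined above.
use proxmox::api::schema::parse_simple_value;

fn schema_demo() -> Result<(), anyhow::Error> {
    // "vm" is one of the EnumEntry values of BACKUP_TYPE_SCHEMA ...
    let v = parse_simple_value("vm", &BACKUP_TYPE_SCHEMA)?;
    assert_eq!(v.as_str(), Some("vm"));
    // ... while an unknown backup type fails to parse.
    assert!(parse_simple_value("lxc", &BACKUP_TYPE_SCHEMA).is_err());
    Ok(())
}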
@@ -237,6 +397,9 @@ pub struct GroupListItem {
     pub backup_count: u64,
     /// List of contained archive files.
     pub files: Vec<String>,
+    /// The owner of group
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub owner: Option<String>,
 }

 #[api(
@@ -265,12 +428,69 @@ pub struct SnapshotListItem {
     pub backup_id: String,
     pub backup_time: i64,
     /// List of contained archive files.
-    pub files: Vec<String>,
+    pub files: Vec<BackupContent>,
+    /// Overall snapshot size (sum of all archive sizes).
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub size: Option<u64>,
+    /// The owner of the snapshots group
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub owner: Option<String>,
 }

+#[api(
+    properties: {
+        "backup-type": {
+            schema: BACKUP_TYPE_SCHEMA,
+        },
+        "backup-id": {
+            schema: BACKUP_ID_SCHEMA,
+        },
+        "backup-time": {
+            schema: BACKUP_TIME_SCHEMA,
+        },
+    },
+)]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all="kebab-case")]
+/// Prune result.
+pub struct PruneListItem {
+    pub backup_type: String, // enum
+    pub backup_id: String,
+    pub backup_time: i64,
+    /// Keep snapshot
+    pub keep: bool,
+}
+
+pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new(
+    "Number of daily backups to keep.")
+    .minimum(1)
+    .schema();
+
+pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema = IntegerSchema::new(
+    "Number of hourly backups to keep.")
+    .minimum(1)
+    .schema();
+
+pub const PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new(
+    "Number of backups to keep.")
+    .minimum(1)
+    .schema();
+
+pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema = IntegerSchema::new(
+    "Number of monthly backups to keep.")
+    .minimum(1)
+    .schema();
+
+pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema = IntegerSchema::new(
+    "Number of weekly backups to keep.")
+    .minimum(1)
+    .schema();
+
+pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new(
+    "Number of yearly backups to keep.")
+    .minimum(1)
+    .schema();
+
+#[api(
+    properties: {
+        "filename": {
@@ -283,6 +503,9 @@ pub struct SnapshotListItem {
 /// Basic information about archive files inside a backup snapshot.
 pub struct BackupContent {
     pub filename: String,
+    /// Info if file is encrypted (or empty if we do not have that info)
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub encrypted: Option<bool>,
     /// Archive size (from backup manifest).
     #[serde(skip_serializing_if="Option::is_none")]
     pub size: Option<u64>,
@@ -313,6 +536,10 @@ pub struct GarbageCollectionStatus {
     pub removed_bytes: u64,
     /// Number of removed chunks.
     pub removed_chunks: usize,
+    /// Sum of pending bytes (pending removal - kept for safety).
+    pub pending_bytes: u64,
+    /// Number of pending chunks (pending removal - kept for safety).
+    pub pending_chunks: usize,
 }

 impl Default for GarbageCollectionStatus {
@@ -325,6 +552,8 @@ impl Default for GarbageCollectionStatus {
             disk_chunks: 0,
             removed_bytes: 0,
             removed_chunks: 0,
+            pending_bytes: 0,
+            pending_chunks: 0,
         }
     }
 }
@@ -373,10 +602,245 @@ pub struct TaskListItem {
     pub status: Option<String>,
 }

+impl From<crate::server::TaskListInfo> for TaskListItem {
+    fn from(info: crate::server::TaskListInfo) -> Self {
+        let (endtime, status) = info
+            .state
+            .map_or_else(|| (None, None), |(a,b)| (Some(a), Some(b)));
+
+        TaskListItem {
+            upid: info.upid_str,
+            node: "localhost".to_string(),
+            pid: info.upid.pid as i64,
+            pstart: info.upid.pstart,
+            starttime: info.upid.starttime,
+            worker_type: info.upid.worker_type,
+            worker_id: info.upid.worker_id,
+            user: info.upid.username,
+            endtime,
+            status,
+        }
+    }
+}
+
+#[api()]
+#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "lowercase")]
+/// Node Power command type.
+pub enum NodePowerCommand {
+    /// Restart the server
+    Reboot,
+    /// Shutdown the server
+    Shutdown,
+}
+
+#[api()]
+#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "lowercase")]
+/// Interface configuration method
+pub enum NetworkConfigMethod {
+    /// Configuration is done manually using other tools
+    Manual,
+    /// Define interfaces with statically allocated addresses.
+    Static,
+    /// Obtain an address via DHCP
+    DHCP,
+    /// Define the loopback interface.
+    Loopback,
+}
+
+#[api()]
+#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+#[allow(non_camel_case_types)]
+#[repr(u8)]
+/// Linux Bond Mode
+pub enum LinuxBondMode {
+    /// Round-robin policy
+    balance_rr = 0,
+    /// Active-backup policy
+    active_backup = 1,
+    /// XOR policy
+    balance_xor = 2,
+    /// Broadcast policy
+    broadcast = 3,
+    /// IEEE 802.3ad Dynamic link aggregation
+    //#[serde(rename = "802.3ad")]
+    ieee802_3ad = 4,
+    /// Adaptive transmit load balancing
+    balance_tlb = 5,
+    /// Adaptive load balancing
+    balance_alb = 6,
+}
+
+#[api()]
+#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "lowercase")]
+/// Network interface type
+pub enum NetworkInterfaceType {
+    /// Loopback
+    Loopback,
+    /// Physical Ethernet device
+    Eth,
+    /// Linux Bridge
+    Bridge,
+    /// Linux Bond
+    Bond,
+    /// Linux VLAN (eth.10)
+    Vlan,
+    /// Interface Alias (eth:1)
+    Alias,
+    /// Unknown interface type
+    Unknown,
+}
+
+pub const NETWORK_INTERFACE_NAME_SCHEMA: Schema = StringSchema::new("Network interface name.")
+    .format(&NETWORK_INTERFACE_FORMAT)
+    .min_length(1)
+    .max_length(libc::IFNAMSIZ-1)
+    .schema();
+
+pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema = ArraySchema::new(
+    "Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA)
+    .schema();
+
+pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema = StringSchema::new(
+    "A list of network devices, comma separated.")
+    .format(&ApiStringFormat::PropertyString(&NETWORK_INTERFACE_ARRAY_SCHEMA))
+    .schema();
+
+#[api(
+    properties: {
+        name: {
+            schema: NETWORK_INTERFACE_NAME_SCHEMA,
+        },
+        "type": {
+            type: NetworkInterfaceType,
+        },
+        method: {
+            type: NetworkConfigMethod,
+            optional: true,
+        },
+        method6: {
+            type: NetworkConfigMethod,
+            optional: true,
+        },
+        cidr: {
+            schema: CIDR_V4_SCHEMA,
+            optional: true,
+        },
+        cidr6: {
+            schema: CIDR_V6_SCHEMA,
+            optional: true,
+        },
+        gateway: {
+            schema: IP_V4_SCHEMA,
+            optional: true,
+        },
+        gateway6: {
+            schema: IP_V6_SCHEMA,
+            optional: true,
+        },
+        options: {
+            description: "Option list (inet)",
+            type: Array,
+            items: {
+                description: "Optional attribute line.",
+                type: String,
+            },
+        },
+        options6: {
+            description: "Option list (inet6)",
+            type: Array,
+            items: {
+                description: "Optional attribute line.",
+                type: String,
+            },
+        },
+        comments: {
+            description: "Comments (inet, may span multiple lines)",
+            type: String,
+            optional: true,
+        },
+        comments6: {
+            description: "Comments (inet6, may span multiple lines)",
+            type: String,
+            optional: true,
+        },
+        bridge_ports: {
+            schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
+            optional: true,
+        },
+        slaves: {
+            schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
+            optional: true,
+        },
+        bond_mode: {
+            type: LinuxBondMode,
+            optional: true,
+        }
+    }
+)]
+#[derive(Debug, Serialize, Deserialize)]
+/// Network Interface configuration
+pub struct Interface {
+    /// Autostart interface
+    #[serde(rename = "autostart")]
+    pub autostart: bool,
+    /// Interface is active (UP)
+    pub active: bool,
+    /// Interface name
+    pub name: String,
+    /// Interface type
+    #[serde(rename = "type")]
+    pub interface_type: NetworkInterfaceType,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub method: Option<NetworkConfigMethod>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub method6: Option<NetworkConfigMethod>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    /// IPv4 address with netmask
+    pub cidr: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    /// IPv4 gateway
+    pub gateway: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    /// IPv6 address with netmask
+    pub cidr6: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    /// IPv6 gateway
+    pub gateway6: Option<String>,
+
+    #[serde(skip_serializing_if="Vec::is_empty")]
+    pub options: Vec<String>,
+    #[serde(skip_serializing_if="Vec::is_empty")]
+    pub options6: Vec<String>,
+
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub comments: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub comments6: Option<String>,
+
+    #[serde(skip_serializing_if="Option::is_none")]
+    /// Maximum Transmission Unit
+    pub mtu: Option<u64>,
+
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub bridge_ports: Option<Vec<String>>,
+    /// Enable bridge vlan support.
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub bridge_vlan_aware: Option<bool>,
+
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub slaves: Option<Vec<String>>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub bond_mode: Option<LinuxBondMode>,
+}
+
 // Regression tests

 #[test]
-fn test_cert_fingerprint_schema() -> Result<(), Error> {
+fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {

     let schema = CERT_FINGERPRINT_SHA256_SCHEMA;

@@ -391,7 +855,7 @@ fn test_cert_fingerprint_schema() -> Result<(), Error> {

     for fingerprint in invalid_fingerprints.iter() {
         if let Ok(_) = parse_simple_value(fingerprint, &schema) {
-            bail!("test fingerprint '{}' failed - got Ok() while expection an error.", fingerprint);
+            bail!("test fingerprint '{}' failed - got Ok() while exception an error.", fingerprint);
         }
     }

@@ -417,7 +881,7 @@ fn test_cert_fingerprint_schema() -> Result<(), Error> {
 }

 #[test]
-fn test_proxmox_user_id_schema() -> Result<(), Error> {
+fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {

     let schema = PROXMOX_USER_ID_SCHEMA;

@@ -435,7 +899,7 @@ fn test_proxmox_user_id_schema() -> Result<(), Error> {

     for name in invalid_user_ids.iter() {
         if let Ok(_) = parse_simple_value(name, &schema) {
-            bail!("test userid '{}' failed - got Ok() while expection an error.", name);
+            bail!("test userid '{}' failed - got Ok() while exception an error.", name);
         }
     }

@@ -462,3 +926,31 @@ fn test_proxmox_user_id_schema() -> Result<(), Error> {

     Ok(())
 }
+
+#[api()]
+#[derive(Copy, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "UPPERCASE")]
+pub enum RRDMode {
+    /// Maximum
+    Max,
+    /// Average
+    Average,
+}
+
+#[api()]
+#[repr(u64)]
+#[derive(Copy, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "lowercase")]
+pub enum RRDTimeFrameResolution {
+    /// 1 min => last 70 minutes
+    Hour = 60,
+    /// 30 min => last 35 hours
+    Day = 60*30,
+    /// 3 hours => about 8 days
+    Week = 60*180,
+    /// 12 hours => last 35 days
+    Month = 60*720,
+    /// 1 week => last 490 days
+    Year = 60*10080,
+}
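Each RRDTimeFrameResolution value is the sampling interval in seconds, and the covered window is that interval times the number of stored entries. The entry count of 70 below is an assumption inferred from the doc comments (60 s x 70 = 70 minutes, 12 h x 70 = 35 days, 1 week x 70 = 490 days); the actual constant lives in the RRD module, which is not part of this diff.

// Assumption: 70 data points per time frame, inferred from the doc comments.
const RRD_DATA_ENTRIES: u64 = 70;

fn covered_seconds(reso: RRDTimeFrameResolution) -> u64 {
    (reso as u64) * RRD_DATA_ENTRIES
}
// covered_seconds(RRDTimeFrameResolution::Month) == 720*60*70
// == 3_024_000 s, i.e. 35 days, matching the "last 35 days" comment.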
@@ -1,7 +1,7 @@
-use failure::*;
+use anyhow::{Error};
 use serde_json::{json, Value};

-use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment};
+use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment, Permission};
 use proxmox::api::schema::ObjectSchema;

 pub const PROXMOX_PKG_VERSION: &str =
@@ -31,6 +31,6 @@ pub const ROUTER: Router = Router::new()
         &ApiMethod::new(
             &ApiHandler::Sync(&get_version),
             &ObjectSchema::new("Proxmox Backup Server API version.", &[])
-        )
+        ).access(None, &Permission::Anybody)
     );
148  src/auth.rs  Normal file
@@ -0,0 +1,148 @@
//! Proxmox Backup Server Authentication
//!
//! This library contains helpers to authenticate users.

use std::process::{Command, Stdio};
use std::io::Write;
use std::ffi::{CString, CStr};

use base64;
use anyhow::{bail, format_err, Error};
use serde_json::json;

pub trait ProxmoxAuthenticator {
    fn authenticate_user(&self, username: &str, password: &str) -> Result<(), Error>;
    fn store_password(&self, username: &str, password: &str) -> Result<(), Error>;
}

pub struct PAM();

impl ProxmoxAuthenticator for PAM {

    fn authenticate_user(&self, username: &str, password: &str) -> Result<(), Error> {
        let mut auth = pam::Authenticator::with_password("proxmox-backup-auth").unwrap();
        auth.get_handler().set_credentials(username, password);
        auth.authenticate()?;
        return Ok(());
    }

    fn store_password(&self, username: &str, password: &str) -> Result<(), Error> {
        let mut child = Command::new("passwd")
            .arg(username)
            .stdin(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()
            .or_else(|err| Err(format_err!("unable to set password for '{}' - execute passwd failed: {}", username, err)))?;

        // Note: passwd reads password twice from stdin (for verify)
        writeln!(child.stdin.as_mut().unwrap(), "{}\n{}", password, password)?;

        let output = child.wait_with_output()
            .or_else(|err| Err(format_err!("unable to set password for '{}' - wait failed: {}", username, err)))?;

        if !output.status.success() {
            bail!("unable to set password for '{}' - {}", username, String::from_utf8_lossy(&output.stderr));
        }

        Ok(())
    }
}

pub struct PBS();

pub fn crypt(password: &[u8], salt: &str) -> Result<String, Error> {

    #[link(name="crypt")]
    extern "C" {
        #[link_name = "crypt"]
        fn __crypt(key: *const libc::c_char, salt: *const libc::c_char) -> *mut libc::c_char;
    }

    let salt = CString::new(salt)?;
    let password = CString::new(password)?;

    let res = unsafe {
        CStr::from_ptr(
            __crypt(
                password.as_c_str().as_ptr(),
                salt.as_c_str().as_ptr()
            )
        )
    };
    Ok(String::from(res.to_str()?))
}

pub fn encrypt_pw(password: &str) -> Result<String, Error> {

    let salt = proxmox::sys::linux::random_data(8)?;
    let salt = format!("$5${}$", base64::encode_config(&salt, base64::CRYPT));

    crypt(password.as_bytes(), &salt)
}

pub fn verify_crypt_pw(password: &str, enc_password: &str) -> Result<(), Error> {
    let verify = crypt(password.as_bytes(), enc_password)?;
    if &verify != enc_password {
        bail!("invalid credentials");
    }
    Ok(())
}

const SHADOW_CONFIG_FILENAME: &str = "/etc/proxmox-backup/shadow.json";

impl ProxmoxAuthenticator for PBS {

    fn authenticate_user(&self, username: &str, password: &str) -> Result<(), Error> {
        let data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
        match data[username].as_str() {
            None => bail!("no password set"),
            Some(enc_password) => verify_crypt_pw(password, enc_password)?,
        }
        Ok(())
    }

    fn store_password(&self, username: &str, password: &str) -> Result<(), Error> {
        let enc_password = encrypt_pw(password)?;
        let mut data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
        data[username] = enc_password.into();

        let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
        let options = proxmox::tools::fs::CreateOptions::new()
            .perm(mode)
            .owner(nix::unistd::ROOT)
            .group(nix::unistd::Gid::from_raw(0));

        let data = serde_json::to_vec_pretty(&data)?;
        proxmox::tools::fs::replace_file(SHADOW_CONFIG_FILENAME, &data, options)?;

        Ok(())
    }
}

pub fn parse_userid(userid: &str) -> Result<(String, String), Error> {
    let data: Vec<&str> = userid.rsplitn(2, '@').collect();

    if data.len() != 2 {
        bail!("userid '{}' has no realm", userid);
    }
    Ok((data[1].to_owned(), data[0].to_owned()))
}

/// Lookup the authenticator for the specified realm
pub fn lookup_authenticator(realm: &str) -> Result<Box<dyn ProxmoxAuthenticator>, Error> {
    match realm {
        "pam" => Ok(Box::new(PAM())),
        "pbs" => Ok(Box::new(PBS())),
        _ => bail!("unknown realm '{}'", realm),
    }
}

/// Authenticate users
pub fn authenticate_user(userid: &str, password: &str) -> Result<(), Error> {
    let (username, realm) = parse_userid(userid)?;

    lookup_authenticator(&realm)?
        .authenticate_user(&username, password)
}
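A short usage sketch of the flow above; the "backup@pbs" userid and the passwords are placeholders. parse_userid splits on the last '@', so user names may themselves contain '@':

fn auth_demo() -> Result<(), anyhow::Error> {
    // Split "backup@pbs" into user "backup" and realm "pbs".
    let (username, realm) = parse_userid("backup@pbs")?;
    assert_eq!((username.as_str(), realm.as_str()), ("backup", "pbs"));

    // Hash and verify a password without going through PAM: encrypt_pw
    // produces a salted SHA-256 crypt ("$5$...") string, and verify_crypt_pw
    // re-hashes the candidate with the embedded salt for comparison.
    let enc = encrypt_pw("correct horse battery staple")?;
    verify_crypt_pw("correct horse battery staple", &enc)?;
    assert!(verify_crypt_pw("wrong password", &enc).is_err());

    // The realm dispatch ties it together; for the "pbs" realm this reads
    // /etc/proxmox-backup/shadow.json, so it only succeeds on a configured server.
    authenticate_user("backup@pbs", "correct horse battery staple")?;
    Ok(())
}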
@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, format_err, Error};
 use lazy_static::lazy_static;

 use openssl::rsa::{Rsa};
@@ -10,6 +10,8 @@ use std::path::PathBuf;
 use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
 use proxmox::try_block;

+use crate::tools::epoch_now_u64;
+
 fn compute_csrf_secret_digest(
     timestamp: i64,
     secret: &[u8],
@@ -29,8 +31,7 @@ pub fn assemble_csrf_prevention_token(
     username: &str,
 ) -> String {

-    let epoch = std::time::SystemTime::now().duration_since(
-        std::time::SystemTime::UNIX_EPOCH).unwrap().as_secs() as i64;
+    let epoch = epoch_now_u64().unwrap() as i64;

     let digest = compute_csrf_secret_digest(epoch, secret, username);

@@ -67,8 +68,7 @@ pub fn verify_csrf_prevention_token(
         bail!("invalid signature.");
     }

-    let now = std::time::SystemTime::now().duration_since(
-        std::time::SystemTime::UNIX_EPOCH)?.as_secs() as i64;
+    let now = epoch_now_u64()? as i64;

     let age = now - ttime;
     if age < min_age {
@@ -103,7 +103,7 @@
 //!
 //! Not sure if this is better. TODO

-use failure::*;
+use anyhow::{bail, Error};

 // Note: .pcat1 => Proxmox Catalog Format version 1
 pub const CATALOG_NAME: &str = "catalog.pcat1.didx";
@@ -198,5 +198,11 @@ pub use prune::*;
 mod datastore;
 pub use datastore::*;

+mod verify;
+pub use verify::*;
+
 mod catalog_shell;
 pub use catalog_shell::*;
+
+mod async_index_reader;
+pub use async_index_reader::*;
127  src/backup/async_index_reader.rs  Normal file
@@ -0,0 +1,127 @@
use std::future::Future;
use std::task::{Poll, Context};
use std::pin::Pin;

use anyhow::Error;
use futures::future::FutureExt;
use futures::ready;
use tokio::io::AsyncRead;

use proxmox::sys::error::io_err_other;
use proxmox::io_format_err;

use super::IndexFile;
use super::read_chunk::AsyncReadChunk;

enum AsyncIndexReaderState<S> {
    NoData,
    WaitForData(Pin<Box<dyn Future<Output = Result<(S, Vec<u8>), Error>> + Send + 'static>>),
    HaveData(usize),
}

pub struct AsyncIndexReader<S, I: IndexFile> {
    store: Option<S>,
    index: I,
    read_buffer: Vec<u8>,
    current_chunk_idx: usize,
    current_chunk_digest: [u8; 32],
    state: AsyncIndexReaderState<S>,
}

// ok because the only public interface operates on &mut Self
unsafe impl<S: Sync, I: IndexFile + Sync> Sync for AsyncIndexReader<S, I> {}

impl<S: AsyncReadChunk, I: IndexFile> AsyncIndexReader<S, I> {
    pub fn new(index: I, store: S) -> Self {
        Self {
            store: Some(store),
            index,
            read_buffer: Vec::with_capacity(1024*1024),
            current_chunk_idx: 0,
            current_chunk_digest: [0u8; 32],
            state: AsyncIndexReaderState::NoData,
        }
    }
}

impl<S, I> AsyncRead for AsyncIndexReader<S, I> where
    S: AsyncReadChunk + Unpin + 'static,
    I: IndexFile + Unpin
{
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context,
        buf: &mut [u8],
    ) -> Poll<tokio::io::Result<usize>> {
        let this = Pin::get_mut(self);
        loop {
            match &mut this.state {
                AsyncIndexReaderState::NoData => {
                    if this.current_chunk_idx >= this.index.index_count() {
                        return Poll::Ready(Ok(0));
                    }

                    let digest = this
                        .index
                        .index_digest(this.current_chunk_idx)
                        .ok_or(io_format_err!("could not get digest"))?
                        .clone();

                    if digest == this.current_chunk_digest {
                        this.state = AsyncIndexReaderState::HaveData(0);
                        continue;
                    }

                    this.current_chunk_digest = digest;

                    let mut store = match this.store.take() {
                        Some(store) => store,
                        None => {
                            return Poll::Ready(Err(io_format_err!("could not find store")));
                        },
                    };

                    let future = async move {
                        store.read_chunk(&digest)
                            .await
                            .map(move |x| (store, x))
                    };

                    this.state = AsyncIndexReaderState::WaitForData(future.boxed());
                },
                AsyncIndexReaderState::WaitForData(ref mut future) => {
                    match ready!(future.as_mut().poll(cx)) {
                        Ok((store, mut chunk_data)) => {
                            this.read_buffer.clear();
                            this.read_buffer.append(&mut chunk_data);
                            this.state = AsyncIndexReaderState::HaveData(0);
                            this.store = Some(store);
                        },
                        Err(err) => {
                            return Poll::Ready(Err(io_err_other(err)));
                        },
                    };
                },
                AsyncIndexReaderState::HaveData(offset) => {
                    let offset = *offset;
                    let len = this.read_buffer.len();
                    let n = if len - offset < buf.len() {
                        len - offset
                    } else {
                        buf.len()
                    };

                    buf[0..n].copy_from_slice(&this.read_buffer[offset..offset+n]);
                    if offset + n == len {
                        this.state = AsyncIndexReaderState::NoData;
                        this.current_chunk_idx += 1;
                    } else {
                        this.state = AsyncIndexReaderState::HaveData(offset + n);
                    }

                    return Poll::Ready(Ok(n));
                },
            }
        }
    }
}
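A hedged sketch of driving this state machine through the standard AsyncRead interface; the helper name and buffer size are illustrative. Each read() call steps poll_read through the NoData -> WaitForData -> HaveData cycle, so chunks are fetched lazily and at most one chunk is buffered at a time:

use tokio::io::AsyncReadExt;

// Sketch only: drain an AsyncIndexReader into a Vec.
async fn read_all<S, I>(mut reader: AsyncIndexReader<S, I>) -> Result<Vec<u8>, anyhow::Error>
where
    S: AsyncReadChunk + Unpin + 'static,
    I: IndexFile + Unpin,
{
    let mut out = Vec::new();
    let mut buf = [0u8; 4096];
    loop {
        let n = reader.read(&mut buf).await?;
        if n == 0 {
            break; // index exhausted: current_chunk_idx reached index_count()
        }
        out.extend_from_slice(&buf[..n]);
    }
    Ok(out)
}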
@@ -1,6 +1,6 @@
 use crate::tools;

-use failure::*;
+use anyhow::{bail, format_err, Error};
 use regex::Regex;
 use std::os::unix::io::RawFd;

@@ -59,17 +59,6 @@ impl BackupGroup {
         &self.backup_id
     }

-    pub fn parse(path: &str) -> Result<Self, Error> {
-
-        let cap = GROUP_PATH_REGEX.captures(path)
-            .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
-
-        Ok(Self {
-            backup_type: cap.get(1).unwrap().as_str().to_owned(),
-            backup_id: cap.get(2).unwrap().as_str().to_owned(),
-        })
-    }
-
     pub fn group_path(&self) -> PathBuf {

         let mut relative_path = PathBuf::new();
@@ -152,6 +141,31 @@ impl BackupGroup {
     }
 }

+impl std::fmt::Display for BackupGroup {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let backup_type = self.backup_type();
+        let id = self.backup_id();
+        write!(f, "{}/{}", backup_type, id)
+    }
+}
+
+impl std::str::FromStr for BackupGroup {
+    type Err = Error;
+
+    /// Parse a backup group path
+    ///
+    /// This parses strings like `vm/100`.
+    fn from_str(path: &str) -> Result<Self, Self::Err> {
+        let cap = GROUP_PATH_REGEX.captures(path)
+            .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
+
+        Ok(Self {
+            backup_type: cap.get(1).unwrap().as_str().to_owned(),
+            backup_id: cap.get(2).unwrap().as_str().to_owned(),
+        })
+    }
+}
+
 /// Uniquely identify a Backup (relative to data store)
 ///
 /// We also call this a backup snapshot.
@@ -188,16 +202,6 @@ impl BackupDir {
         self.backup_time
     }

-    pub fn parse(path: &str) -> Result<Self, Error> {
-
-        let cap = SNAPSHOT_PATH_REGEX.captures(path)
-            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
-
-        let group = BackupGroup::new(cap.get(1).unwrap().as_str(), cap.get(2).unwrap().as_str());
-        let backup_time = cap.get(3).unwrap().as_str().parse::<DateTime<Utc>>()?;
-        Ok(BackupDir::from((group, backup_time.timestamp())))
-    }
-
     pub fn relative_path(&self) -> PathBuf {

         let mut relative_path = self.group.group_path();
@@ -212,6 +216,31 @@ impl BackupDir {
     }
 }

+impl std::str::FromStr for BackupDir {
+    type Err = Error;
+
+    /// Parse a snapshot path
+    ///
+    /// This parses strings like `host/elsa/2020-06-15T05:18:33Z`.
+    fn from_str(path: &str) -> Result<Self, Self::Err> {
+        let cap = SNAPSHOT_PATH_REGEX.captures(path)
+            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
+
+        let group = BackupGroup::new(cap.get(1).unwrap().as_str(), cap.get(2).unwrap().as_str());
+        let backup_time = cap.get(3).unwrap().as_str().parse::<DateTime<Utc>>()?;
+        Ok(BackupDir::from((group, backup_time.timestamp())))
+    }
+}
+
+impl std::fmt::Display for BackupDir {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let backup_type = self.group.backup_type();
+        let id = self.group.backup_id();
+        let time = Self::backup_time_to_string(self.backup_time);
+        write!(f, "{}/{}/{}", backup_type, id, time)
+    }
+}
+
 impl From<(BackupGroup, i64)> for BackupDir {
     fn from((group, timestamp): (BackupGroup, i64)) -> Self {
         Self { group, backup_time: Utc.timestamp(timestamp, 0) }
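With parse() replaced by standard FromStr impls, callers can use str::parse directly. A quick sketch, assuming backup_time_to_string renders the canonical second-precision RFC 3339 form used in these paths:

fn parse_demo() -> Result<(), anyhow::Error> {
    // Display and FromStr round-trip for well-formed paths.
    let group: BackupGroup = "vm/100".parse()?;
    assert_eq!(group.to_string(), "vm/100");

    let snapshot: BackupDir = "host/elsa/2020-06-15T05:18:33Z".parse()?;
    assert_eq!(snapshot.to_string(), "host/elsa/2020-06-15T05:18:33Z");

    // Malformed input surfaces as an error instead of a panic.
    assert!("not-a-group-path".parse::<BackupGroup>().is_err());
    Ok(())
}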
@@ -1,23 +1,21 @@
-use failure::*;
-use std::fmt;
-use std::ffi::{CStr, CString, OsStr};
-use std::os::unix::ffi::OsStrExt;
-use std::io::{Read, Write, Seek, SeekFrom};
 use std::convert::TryFrom;
+use std::ffi::{CStr, CString, OsStr};
+use std::fmt;
+use std::io::{Read, Write, Seek, SeekFrom};
+use std::os::unix::ffi::OsStrExt;

+use anyhow::{bail, format_err, Error};
 use chrono::offset::{TimeZone, Local};

+use pathpatterns::{MatchList, MatchType};
 use proxmox::tools::io::ReadExt;
-use proxmox::sys::error::io_err_other;

-use crate::pxar::catalog::BackupCatalogWriter;
-use crate::pxar::{MatchPattern, MatchPatternSlice, MatchType};
 use crate::backup::file_formats::PROXMOX_CATALOG_FILE_MAGIC_1_0;
-use crate::tools::runtime::block_on;
+use crate::pxar::catalog::BackupCatalogWriter;

 #[repr(u8)]
 #[derive(Copy,Clone,PartialEq)]
-enum CatalogEntryType {
+pub(crate) enum CatalogEntryType {
     Directory = b'd',
     File = b'f',
     Symlink = b'l',
@@ -46,6 +44,21 @@ impl TryFrom<u8> for CatalogEntryType {
     }
 }

+impl From<&DirEntryAttribute> for CatalogEntryType {
+    fn from(value: &DirEntryAttribute) -> Self {
+        match value {
+            DirEntryAttribute::Directory { .. } => CatalogEntryType::Directory,
+            DirEntryAttribute::File { .. } => CatalogEntryType::File,
+            DirEntryAttribute::Symlink => CatalogEntryType::Symlink,
+            DirEntryAttribute::Hardlink => CatalogEntryType::Hardlink,
+            DirEntryAttribute::BlockDevice => CatalogEntryType::BlockDevice,
+            DirEntryAttribute::CharDevice => CatalogEntryType::CharDevice,
+            DirEntryAttribute::Fifo => CatalogEntryType::Fifo,
+            DirEntryAttribute::Socket => CatalogEntryType::Socket,
+        }
+    }
+}
+
 impl fmt::Display for CatalogEntryType {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "{}", char::from(*self as u8))
@@ -63,7 +76,7 @@ pub struct DirEntry {
 }

 /// Used to specify additional attributes inside DirEntry
-#[derive(Clone, PartialEq)]
+#[derive(Clone, Debug, PartialEq)]
 pub enum DirEntryAttribute {
     Directory { start: u64 },
     File { size: u64, mtime: u64 },
@@ -106,6 +119,23 @@ impl DirEntry {
         }
     }

+    /// Get file mode bits for this entry to be used with the `MatchList` api.
+    pub fn get_file_mode(&self) -> Option<u32> {
+        Some(
+            match self.attr {
+                DirEntryAttribute::Directory { .. } => pxar::mode::IFDIR,
+                DirEntryAttribute::File { .. } => pxar::mode::IFREG,
+                DirEntryAttribute::Symlink => pxar::mode::IFLNK,
+                DirEntryAttribute::Hardlink => return None,
+                DirEntryAttribute::BlockDevice => pxar::mode::IFBLK,
+                DirEntryAttribute::CharDevice => pxar::mode::IFCHR,
+                DirEntryAttribute::Fifo => pxar::mode::IFIFO,
+                DirEntryAttribute::Socket => pxar::mode::IFSOCK,
+            }
+            as u32
+        )
+    }
+
     /// Check if DirEntry is a directory
     pub fn is_directory(&self) -> bool {
         match self.attr {
@@ -383,32 +413,6 @@ impl <W: Write> BackupCatalogWriter for CatalogWriter<W> {
     }
 }

-// fixme: move to somewhere else?
-/// Implement Write to tokio mpsc channel Sender
-pub struct SenderWriter(tokio::sync::mpsc::Sender<Result<Vec<u8>, Error>>);
-
-impl SenderWriter {
-    pub fn new(sender: tokio::sync::mpsc::Sender<Result<Vec<u8>, Error>>) -> Self {
-        Self(sender)
-    }
-}
-
-impl Write for SenderWriter {
-    fn write(&mut self, buf: &[u8]) -> Result<usize, std::io::Error> {
-        block_on(async move {
-            self.0
-                .send(Ok(buf.to_vec()))
-                .await
-                .map_err(io_err_other)
-                .and(Ok(buf.len()))
-        })
-    }
-
-    fn flush(&mut self) -> Result<(), std::io::Error> {
-        Ok(())
-    }
-}
-
 /// Read Catalog files
 pub struct CatalogReader<R> {
     reader: R,
@@ -476,7 +480,7 @@ impl <R: Read + Seek> CatalogReader<R> {
         &mut self,
         parent: &DirEntry,
         filename: &[u8],
-    ) -> Result<DirEntry, Error> {
+    ) -> Result<Option<DirEntry>, Error> {

         let start = match parent.attr {
             DirEntryAttribute::Directory { start } => start,
@@ -496,10 +500,7 @@ impl <R: Read + Seek> CatalogReader<R> {
             Ok(false) // stop parsing
         })?;

-        match item {
-            None => bail!("no such file"),
-            Some(entry) => Ok(entry),
-        }
+        Ok(item)
     }

     /// Read the raw directory info block from current reader position.
@@ -532,7 +533,10 @@ impl <R: Read + Seek> CatalogReader<R> {
                 self.dump_dir(&path, pos)?;
             }
             CatalogEntryType::File => {
-                let dt = Local.timestamp(mtime as i64, 0);
+                let dt = Local
+                    .timestamp_opt(mtime as i64, 0)
+                    .single() // chrono docs say timestamp_opt can only be None or Single!
+                    .unwrap_or_else(|| Local.timestamp(0, 0));

                 println!(
                     "{} {:?} {} {}",
@@ -555,38 +559,30 @@ impl <R: Read + Seek> CatalogReader<R> {
     /// provided callback on them.
     pub fn find(
         &mut self,
-        mut entry: &mut Vec<DirEntry>,
-        pattern: &[MatchPatternSlice],
-        callback: &Box<fn(&[DirEntry])>,
+        parent: &DirEntry,
+        file_path: &mut Vec<u8>,
+        match_list: &impl MatchList, //&[MatchEntry],
+        callback: &mut dyn FnMut(&[u8]) -> Result<(), Error>,
     ) -> Result<(), Error> {
-        let parent = entry.last().unwrap();
         if !parent.is_directory() {
             return Ok(())
         }

+        let file_len = file_path.len();
         for e in self.read_dir(parent)? {
-            match MatchPatternSlice::match_filename_include(
-                &CString::new(e.name.clone())?,
-                e.is_directory(),
-                pattern,
-            )? {
-                (MatchType::Positive, _) => {
-                    entry.push(e);
-                    callback(&entry);
-                    let pattern = MatchPattern::from_line(b"**/*").unwrap().unwrap();
-                    let child_pattern = vec![pattern.as_slice()];
-                    self.find(&mut entry, &child_pattern, callback)?;
-                    entry.pop();
-                }
-                (MatchType::PartialPositive, child_pattern)
-                | (MatchType::PartialNegative, child_pattern) => {
-                    entry.push(e);
-                    self.find(&mut entry, &child_pattern, callback)?;
-                    entry.pop();
-                }
-                _ => {}
+            let is_dir = e.is_directory();
+            file_path.truncate(file_len);
+            if !e.name.starts_with(b"/") {
+                file_path.reserve(e.name.len() + 1);
+                file_path.push(b'/');
+            }
+            file_path.extend(&e.name);
+            match match_list.matches(&file_path, e.get_file_mode()) {
+                Some(MatchType::Exclude) => continue,
+                Some(MatchType::Include) => callback(&file_path)?,
+                None => (),
+            }
+            if is_dir {
+                self.find(&e, file_path, match_list, callback)?;
             }
         }
+        file_path.truncate(file_len);

         Ok(())
     }
(File diff suppressed because it is too large.)
@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};
 use std::sync::Arc;
 use std::io::Read;

@@ -1,7 +1,7 @@
 use std::sync::Arc;
 use std::io::Write;

-use failure::*;
+use anyhow::{Error};

 use super::CryptConfig;
 use crate::tools::borrow::Tied;
@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, format_err, Error};

 use std::path::{Path, PathBuf};
 use std::io::Write;
@@ -157,8 +157,8 @@ impl ChunkStore {

         let (chunk_path, _digest_str) = self.chunk_path(digest);

-        const UTIME_NOW: i64 = ((1 << 30) - 1);
-        const UTIME_OMIT: i64 = ((1 << 30) - 2);
+        const UTIME_NOW: i64 = (1 << 30) - 1;
+        const UTIME_OMIT: i64 = (1 << 30) - 2;

         let times: [libc::timespec; 2] = [
             libc::timespec { tv_sec: 0, tv_nsec: UTIME_NOW },
@@ -289,9 +289,9 @@ impl ChunkStore {

     pub fn sweep_unused_chunks(
         &self,
-        oldest_writer: Option<i64>,
+        oldest_writer: i64,
         status: &mut GarbageCollectionStatus,
-        worker: Arc<WorkerTask>,
+        worker: &WorkerTask,
     ) -> Result<(), Error> {
         use nix::sys::stat::fstatat;

@@ -299,10 +299,8 @@ impl ChunkStore {

         let mut min_atime = now - 3600*24; // at least 24h (see mount option relatime)

-        if let Some(stamp) = oldest_writer {
-            if stamp < min_atime {
-                min_atime = stamp;
-            }
+        if oldest_writer < min_atime {
+            min_atime = oldest_writer;
         }

         min_atime -= 300; // add 5 mins gap for safety
@@ -316,6 +314,7 @@ impl ChunkStore {
                 worker.log(format!("percentage done: {}, chunk count: {}", percentage, chunk_count));
             }

+            worker.fail_on_abort()?;
             tools::fail_on_shutdown()?;

             let (dirfd, entry) = match entry {
@@ -338,10 +337,9 @@ impl ChunkStore {
             let lock = self.mutex.lock();

             if let Ok(stat) = fstatat(dirfd, filename, nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW) {
-                let age = now - stat.st_atime;
-                //println!("FOUND {} {:?}", age/(3600*24), filename);
                 if stat.st_atime < min_atime {
-                    println!("UNLINK {} {:?}", age/(3600*24), filename);
+                    //let age = now - stat.st_atime;
+                    //println!("UNLINK {} {:?}", age/(3600*24), filename);
                     let res = unsafe { libc::unlinkat(dirfd, filename.as_ptr(), 0) };
                     if res != 0 {
                         let err = nix::Error::last();
@@ -354,9 +352,14 @@ impl ChunkStore {
                     }
                     status.removed_chunks += 1;
                     status.removed_bytes += stat.st_size as u64;
-                } else {
-                    status.disk_chunks += 1;
-                    status.disk_bytes += stat.st_size as u64;
+                } else {
+                    if stat.st_atime < oldest_writer {
+                        status.pending_chunks += 1;
+                        status.pending_bytes += stat.st_size as u64;
+                    } else {
+                        status.disk_chunks += 1;
+                        status.disk_bytes += stat.st_size as u64;
+                    }
                 }
             }
             drop(lock);
@@ -426,6 +429,10 @@ impl ChunkStore {
         full_path
     }

+    pub fn name(&self) -> &str {
+        &self.name
+    }
+
     pub fn base_path(&self) -> PathBuf {
         self.base.clone()
     }
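The sweep logic above classifies each chunk into three buckets by access time. A condensed sketch of that decision, using the same thresholds (now, oldest_writer and atime are epoch seconds as in the diff):

// Sketch of the garbage-collection age classification in sweep_unused_chunks.
fn classify_chunk(now: i64, oldest_writer: i64, atime: i64) -> &'static str {
    let mut min_atime = now - 3600 * 24; // respect relatime: at least 24h old
    if oldest_writer < min_atime {
        min_atime = oldest_writer;       // never remove chunks a running writer may still reference
    }
    min_atime -= 300;                    // 5 minute safety gap
    if atime < min_atime {
        "remove"                         // counted as removed_chunks / removed_bytes
    } else if atime < oldest_writer {
        "pending"                        // removable next run, kept for safety
    } else {
        "keep"                           // counted as disk_chunks / disk_bytes
    }
}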
@@ -2,7 +2,7 @@ use std::pin::Pin;
 use std::task::{Context, Poll};

 use bytes::BytesMut;
-use failure::*;
+use anyhow::{Error};
 use futures::ready;
 use futures::stream::{Stream, TryStream};

@@ -6,7 +6,7 @@
 //! See the Wikipedia article for [Authenticated
 //! encryption](https://en.wikipedia.org/wiki/Authenticated_encryption)
 //! for a short introduction.
-use failure::*;
+use anyhow::{bail, Error};
 use openssl::pkcs5::pbkdf2_hmac;
 use openssl::hash::MessageDigest;
 use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};
@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, Error};
 use std::sync::Arc;
 use std::io::{Read, BufRead};

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};
 use std::sync::Arc;
 use std::io::Write;
@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, Error};
 use std::convert::TryInto;

 use proxmox::tools::io::{ReadExt, WriteExt};
@@ -167,7 +167,7 @@ impl DataBlob {
     }

     /// Decode blob data
-    pub fn decode(self, config: Option<&CryptConfig>) -> Result<Vec<u8>, Error> {
+    pub fn decode(&self, config: Option<&CryptConfig>) -> Result<Vec<u8>, Error> {

         let magic = self.magic();

@@ -311,7 +311,9 @@ impl DataBlob {
     /// Verify digest and data length for unencrypted chunks.
     ///
     /// To do that, we need to decompress data first. Please note that
-    /// this is noth possible for encrypted chunks.
+    /// this is not possible for encrypted chunks. This function simply returns Ok
+    /// for encrypted chunks.
+    /// Note: This does not call verify_crc
     pub fn verify_unencrypted(
         &self,
         expected_chunk_size: usize,
@@ -320,22 +322,18 @@ impl DataBlob {

         let magic = self.magic();

-        let verify_raw_data = |data: &[u8]| {
-            if expected_chunk_size != data.len() {
-                bail!("detected chunk with wrong length ({} != {})", expected_chunk_size, data.len());
-            }
-            let digest = openssl::sha::sha256(data);
-            if &digest != expected_digest {
-                bail!("detected chunk with wrong digest.");
-            }
-            Ok(())
-        };
+        if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
+            return Ok(());
+        }

-        if magic == &COMPRESSED_BLOB_MAGIC_1_0 {
-            let data = zstd::block::decompress(&self.raw_data[12..], 16*1024*1024)?;
-            verify_raw_data(&data)?;
-        } else if magic == &UNCOMPRESSED_BLOB_MAGIC_1_0 {
-            verify_raw_data(&self.raw_data[12..])?;
+        let data = self.decode(None)?;
+
+        if expected_chunk_size != data.len() {
+            bail!("detected chunk with wrong length ({} != {})", expected_chunk_size, data.len());
+        }
+        let digest = openssl::sha::sha256(&data);
+        if &digest != expected_digest {
+            bail!("detected chunk with wrong digest.");
         }

         Ok(())
@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, Error};
 use std::sync::Arc;
 use std::io::{Read, BufReader};
 use proxmox::tools::io::ReadExt;
@@ -19,6 +19,10 @@ pub struct DataBlobReader<R: Read> {
     state: BlobReaderState<R>,
 }
 
+// zstd_safe::DCtx is not sync but we are, since
+// the only public interface is on mutable reference
+unsafe impl<R: Read> Sync for DataBlobReader<R> {}
+
 impl <R: Read> DataBlobReader<R> {
 
     pub fn new(mut reader: R, config: Option<Arc<CryptConfig>>) -> Result<Self, Error> {
@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};
 use std::sync::Arc;
 use std::io::{Write, Seek, SeekFrom};
 use proxmox::tools::io::WriteExt;
@@ -1,9 +1,10 @@
 use std::collections::{HashSet, HashMap};
-use std::io;
+use std::io::{self, Write};
 use std::path::{Path, PathBuf};
 use std::sync::{Arc, Mutex};
 use std::convert::TryFrom;
 
-use failure::*;
+use anyhow::{bail, format_err, Error};
 use lazy_static::lazy_static;
 use chrono::{DateTime, Utc};
 
@@ -11,7 +12,7 @@ use super::backup_info::{BackupGroup, BackupDir};
 use super::chunk_store::ChunkStore;
 use super::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
 use super::fixed_index::{FixedIndexReader, FixedIndexWriter};
-use super::manifest::{MANIFEST_BLOB_NAME, BackupManifest};
+use super::manifest::{MANIFEST_BLOB_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
 use super::index::*;
 use super::{DataBlob, ArchiveType, archive_type};
 use crate::config::datastore;
@@ -134,6 +135,10 @@ impl DataStore {
         Ok(out)
     }
 
+    pub fn name(&self) -> &str {
+        self.chunk_store.name()
+    }
+
     pub fn base_path(&self) -> PathBuf {
         self.chunk_store.base_path()
     }
@@ -149,6 +154,7 @@ impl DataStore {
 
         let mut wanted_files = HashSet::new();
         wanted_files.insert(MANIFEST_BLOB_NAME.to_string());
+        wanted_files.insert(CLIENT_LOG_BLOB_NAME.to_string());
         manifest.files().iter().for_each(|item| { wanted_files.insert(item.filename.clone()); });
 
         for item in tools::fs::read_subdir(libc::AT_FDCWD, &full_path)? {
@@ -236,18 +242,80 @@ impl DataStore {
         }
     }
 
-    pub fn create_backup_dir(&self, backup_dir: &BackupDir) -> Result<(PathBuf, bool), io::Error> {
+    /// Returns the backup owner.
+    ///
+    /// The backup owner is the user who first created the backup group.
+    pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<String, Error> {
+        let mut full_path = self.base_path();
+        full_path.push(backup_group.group_path());
+        full_path.push("owner");
+        let owner = proxmox::tools::fs::file_read_firstline(full_path)?;
+        Ok(owner.trim_end().to_string()) // remove trailing newline
+    }
+
+    /// Set the backup owner.
+    pub fn set_owner(&self, backup_group: &BackupGroup, userid: &str, force: bool) -> Result<(), Error> {
+        let mut path = self.base_path();
+        path.push(backup_group.group_path());
+        path.push("owner");
+
+        let mut open_options = std::fs::OpenOptions::new();
+        open_options.write(true);
+        open_options.truncate(true);
+
+        if force {
+            open_options.create(true);
+        } else {
+            open_options.create_new(true);
+        }
+
+        let mut file = open_options.open(&path)
+            .map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?;
+
+        write!(file, "{}\n", userid)
+            .map_err(|err| format_err!("unable to write owner file {:?} - {}", path, err))?;
+
+        Ok(())
+    }
+
+    /// Create a backup group if it does not already exist.
+    ///
+    /// And set the owner to 'userid'. If the group already exists, it returns the
+    /// current owner (instead of setting the owner).
+    pub fn create_backup_group(&self, backup_group: &BackupGroup, userid: &str) -> Result<String, Error> {
 
         // create intermediate path first:
-        let mut full_path = self.base_path();
-        full_path.push(backup_dir.group().group_path());
+        let base_path = self.base_path();
+
+        let mut full_path = base_path.clone();
+        full_path.push(backup_group.backup_type());
         std::fs::create_dir_all(&full_path)?;
 
+        full_path.push(backup_group.backup_id());
+
+        // create the last component now
+        match std::fs::create_dir(&full_path) {
+            Ok(_) => {
+                self.set_owner(backup_group, userid, false)?;
+                let owner = self.get_owner(backup_group)?; // just to be sure
+                Ok(owner)
+            }
+            Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
+                let owner = self.get_owner(backup_group)?; // just to be sure
+                Ok(owner)
+            }
+            Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
+        }
+    }
+
+    /// Creates a new backup snapshot inside a BackupGroup
+    ///
+    /// The BackupGroup directory needs to exist.
+    pub fn create_backup_dir(&self, backup_dir: &BackupDir) -> Result<(PathBuf, bool), io::Error> {
         let relative_path = backup_dir.relative_path();
         let mut full_path = self.base_path();
         full_path.push(&relative_path);
 
         // create the last component now
         match std::fs::create_dir(&full_path) {
             Ok(_) => Ok((relative_path, true)),
             Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false)),
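Taken together, the owner file gives each backup group a stable creator. A hedged usage sketch (the user ids and the helper function are invented for illustration):

    // Hypothetical helper showing the intended call order; `store` and
    // `group` come from the surrounding application code.
    fn ensure_group(store: &DataStore, group: &BackupGroup) -> Result<(), anyhow::Error> {
        // The first creation writes the owner file; later calls just return
        // the existing owner instead of overwriting it.
        let owner = store.create_backup_group(group, "alice@pbs")?;
        assert_eq!(owner, store.get_owner(group)?);

        // Changing the owner needs force=true: without it the owner file is
        // opened with create_new(true) and fails with AlreadyExists.
        store.set_owner(group, "bob@pbs", true)?;
        Ok(())
    }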
@@ -290,12 +358,14 @@ impl DataStore {
         index: I,
         file_name: &Path, // only used for error reporting
         status: &mut GarbageCollectionStatus,
+        worker: &WorkerTask,
     ) -> Result<(), Error> {
 
         status.index_file_count += 1;
         status.index_data_bytes += index.index_bytes();
 
         for pos in 0..index.index_count() {
+            worker.fail_on_abort()?;
+            tools::fail_on_shutdown()?;
             let digest = index.index_digest(pos).unwrap();
             if let Err(err) = self.chunk_store.touch_chunk(digest) {
@@ -306,21 +376,22 @@ impl DataStore {
         Ok(())
     }
 
-    fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus) -> Result<(), Error> {
+    fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus, worker: &WorkerTask) -> Result<(), Error> {
 
         let image_list = self.list_images()?;
 
         for path in image_list {
 
+            worker.fail_on_abort()?;
+            tools::fail_on_shutdown()?;
+
             if let Ok(archive_type) = archive_type(&path) {
                 if archive_type == ArchiveType::FixedIndex {
                     let index = self.open_fixed_reader(&path)?;
-                    self.index_mark_used_chunks(index, &path, status)?;
+                    self.index_mark_used_chunks(index, &path, status, worker)?;
                 } else if archive_type == ArchiveType::DynamicIndex {
                     let index = self.open_dynamic_reader(&path)?;
-                    self.index_mark_used_chunks(index, &path, status)?;
+                    self.index_mark_used_chunks(index, &path, status, worker)?;
                 }
             }
         }
@@ -332,26 +403,36 @@ impl DataStore {
         self.last_gc_status.lock().unwrap().clone()
     }
 
-    pub fn garbage_collection(&self, worker: Arc<WorkerTask>) -> Result<(), Error> {
+    pub fn garbage_collection_running(&self) -> bool {
+        if let Ok(_) = self.gc_mutex.try_lock() { false } else { true }
+    }
+
+    pub fn garbage_collection(&self, worker: &WorkerTask) -> Result<(), Error> {
 
         if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {
 
             let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;
 
-            let oldest_writer = self.chunk_store.oldest_writer();
+            let now = unsafe { libc::time(std::ptr::null_mut()) };
+
+            let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(now);
 
             let mut gc_status = GarbageCollectionStatus::default();
+            gc_status.upid = Some(worker.to_string());
 
             worker.log("Start GC phase1 (mark used chunks)");
 
-            self.mark_used_chunks(&mut gc_status)?;
+            self.mark_used_chunks(&mut gc_status, &worker)?;
 
             worker.log("Start GC phase2 (sweep unused chunks)");
-            self.chunk_store.sweep_unused_chunks(oldest_writer, &mut gc_status, worker.clone())?;
+            self.chunk_store.sweep_unused_chunks(oldest_writer, &mut gc_status, &worker)?;
 
             worker.log(&format!("Removed bytes: {}", gc_status.removed_bytes));
             worker.log(&format!("Removed chunks: {}", gc_status.removed_chunks));
+            if gc_status.pending_bytes > 0 {
+                worker.log(&format!("Pending removals: {} bytes ({} chunks)", gc_status.pending_bytes, gc_status.pending_chunks));
+            }
 
             worker.log(&format!("Original data bytes: {}", gc_status.index_data_bytes));
 
             if gc_status.index_data_bytes > 0 {
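garbage_collection_running just probes the guard mutex; the same idiom, written against a plain std::sync::Mutex for illustration:

    use std::sync::Mutex;

    fn gc_running(gc_mutex: &Mutex<()>) -> bool {
        // If try_lock fails, another thread holds the lock, i.e. GC is running.
        gc_mutex.try_lock().is_err()
    }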
@@ -394,4 +475,28 @@ impl DataStore {
     ) -> Result<(bool, u64), Error> {
         self.chunk_store.insert_chunk(chunk, digest)
     }
+
+    pub fn verify_stored_chunk(&self, digest: &[u8; 32], expected_chunk_size: u64) -> Result<(), Error> {
+        let blob = self.chunk_store.read_chunk(digest)?;
+        blob.verify_crc()?;
+        blob.verify_unencrypted(expected_chunk_size as usize, digest)?;
+        Ok(())
+    }
+
+    pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<(DataBlob, u64), Error> {
+        let mut path = self.base_path();
+        path.push(backup_dir.relative_path());
+        path.push(filename);
+
+        let raw_data = proxmox::tools::fs::file_get_contents(&path)?;
+        let raw_size = raw_data.len() as u64;
+        let blob = DataBlob::from_raw(raw_data)?;
+        Ok((blob, raw_size))
+    }
+
+    pub fn load_manifest(&self, backup_dir: &BackupDir) -> Result<(BackupManifest, u64), Error> {
+        let (blob, raw_size) = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
+        let manifest = BackupManifest::try_from(blob)?;
+        Ok((manifest, raw_size))
+    }
 }
@@ -1,23 +1,28 @@
-use std::convert::TryInto;
 use std::fs::File;
-use std::io::{BufWriter, Seek, SeekFrom, Write};
+use std::io::{self, BufWriter, Seek, SeekFrom, Write};
+use std::ops::Range;
 use std::os::unix::io::AsRawFd;
 use std::path::{Path, PathBuf};
-use std::sync::Arc;
+use std::sync::{Arc, Mutex};
+use std::task::Context;
+use std::pin::Pin;
 
-use failure::*;
+use anyhow::{bail, format_err, Error};
 
 use proxmox::tools::io::ReadExt;
 use proxmox::tools::uuid::Uuid;
 use proxmox::tools::vec;
+use proxmox::tools::mmap::Mmap;
+use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
 
 use super::chunk_stat::ChunkStat;
 use super::chunk_store::ChunkStore;
+use super::index::ChunkReadInfo;
 use super::read_chunk::ReadChunk;
 use super::Chunker;
 use super::IndexFile;
 use super::{DataBlob, DataChunkBuilder};
-use crate::tools;
+use crate::tools::{self, epoch_now_u64};
 /// Header format definition for dynamic index files (`.didx`)
 #[repr(C)]
@@ -36,34 +41,34 @@ proxmox::static_assert_size!(DynamicIndexHeader, 4096);
 // pub data: DynamicIndexHeaderData,
 // }
 
+#[derive(Clone, Debug)]
+#[repr(C)]
+pub struct DynamicEntry {
+    end_le: u64,
+    digest: [u8; 32],
+}
+
+impl DynamicEntry {
+    #[inline]
+    pub fn end(&self) -> u64 {
+        u64::from_le(self.end_le)
+    }
+}
+
 pub struct DynamicIndexReader {
     _file: File,
     pub size: usize,
-    index: *const u8,
-    index_entries: usize,
+    index: Mmap<DynamicEntry>,
     pub uuid: [u8; 16],
     pub ctime: u64,
     pub index_csum: [u8; 32],
 }
 
-// `index` is mmap()ed which cannot be thread-local so should be sendable
-// FIXME: Introduce an mmap wrapper type for this?
-unsafe impl Send for DynamicIndexReader {}
-unsafe impl Sync for DynamicIndexReader {}
-
-impl Drop for DynamicIndexReader {
-    fn drop(&mut self) {
-        if let Err(err) = self.unmap() {
-            eprintln!("Unable to unmap dynamic index - {}", err);
-        }
-    }
-}
-
 impl DynamicIndexReader {
     pub fn open(path: &Path) -> Result<Self, Error> {
         File::open(path)
             .map_err(Error::from)
-            .and_then(|file| Self::new(file))
+            .and_then(Self::new)
             .map_err(|err| format_err!("Unable to open dynamic index {:?} - {}", path, err))
     }
@@ -74,6 +79,7 @@ impl DynamicIndexReader {
             bail!("unable to get shared lock - {}", err);
         }
 
+        // FIXME: This is NOT OUR job! Check the callers of this method and remove this!
         file.seek(SeekFrom::Start(0))?;
 
         let header_size = std::mem::size_of::<DynamicIndexHeader>();
@@ -93,123 +99,49 @@ impl DynamicIndexReader {
         let size = stat.st_size as usize;
 
         let index_size = size - header_size;
-        if (index_size % 40) != 0 {
+        let index_count = index_size / 40;
+        if index_count * 40 != index_size {
             bail!("got unexpected file size");
         }
 
-        let data = unsafe {
-            nix::sys::mman::mmap(
-                std::ptr::null_mut(),
-                index_size,
+        let index = unsafe {
+            Mmap::map_fd(
+                rawfd,
+                header_size as u64,
+                index_count,
                 nix::sys::mman::ProtFlags::PROT_READ,
                 nix::sys::mman::MapFlags::MAP_PRIVATE,
-                rawfd,
-                header_size as i64,
-            )
-        }? as *const u8;
+            )?
+        };
 
         Ok(Self {
             _file: file,
             size,
-            index: data,
-            index_entries: index_size / 40,
+            index,
             ctime,
             uuid: header.uuid,
             index_csum: header.index_csum,
         })
     }
 
-    fn unmap(&mut self) -> Result<(), Error> {
-        if self.index == std::ptr::null_mut() {
-            return Ok(());
-        }
-
-        if let Err(err) = unsafe {
-            nix::sys::mman::munmap(self.index as *mut std::ffi::c_void, self.index_entries * 40)
-        } {
-            bail!("unmap dynamic index failed - {}", err);
-        }
-
-        self.index = std::ptr::null_mut();
-
-        Ok(())
-    }
-
-    #[allow(clippy::cast_ptr_alignment)]
-    pub fn chunk_info(&self, pos: usize) -> Result<(u64, u64, [u8; 32]), Error> {
-        if pos >= self.index_entries {
-            bail!("chunk index out of range");
-        }
-        let start = if pos == 0 {
-            0
-        } else {
-            unsafe { *(self.index.add((pos - 1) * 40) as *const u64) }
-        };
-
-        let end = unsafe { *(self.index.add(pos * 40) as *const u64) };
-
-        let mut digest = std::mem::MaybeUninit::<[u8; 32]>::uninit();
-        unsafe {
-            std::ptr::copy_nonoverlapping(
-                self.index.add(pos * 40 + 8),
-                (*digest.as_mut_ptr()).as_mut_ptr(),
-                32,
-            );
-        }
-
-        Ok((start, end, unsafe { digest.assume_init() }))
-    }
-
     #[inline]
-    #[allow(clippy::cast_ptr_alignment)]
     fn chunk_end(&self, pos: usize) -> u64 {
-        if pos >= self.index_entries {
+        if pos >= self.index.len() {
             panic!("chunk index out of range");
         }
-        unsafe { *(self.index.add(pos * 40) as *const u64) }
+        self.index[pos].end()
     }
 
     #[inline]
     fn chunk_digest(&self, pos: usize) -> &[u8; 32] {
-        if pos >= self.index_entries {
+        if pos >= self.index.len() {
            panic!("chunk index out of range");
         }
-        let slice = unsafe { std::slice::from_raw_parts(self.index.add(pos * 40 + 8), 32) };
-        slice.try_into().unwrap()
+        &self.index[pos].digest
     }
 
-    /// Compute checksum and data size
-    pub fn compute_csum(&self) -> ([u8; 32], u64) {
-        let mut csum = openssl::sha::Sha256::new();
-        let mut chunk_end = 0;
-        for pos in 0..self.index_entries {
-            chunk_end = self.chunk_end(pos);
-            let digest = self.chunk_digest(pos);
-            csum.update(&chunk_end.to_le_bytes());
-            csum.update(digest);
-        }
-        let csum = csum.finish();
-
-        (csum, chunk_end)
-    }
-
-    /*
-    pub fn dump_pxar(&self, mut writer: Box<dyn Write>) -> Result<(), Error> {
-
-        for pos in 0..self.index_entries {
-            let _end = self.chunk_end(pos);
-            let digest = self.chunk_digest(pos);
-            //println!("Dump {:08x}", end );
-            let chunk = self.store.read_chunk(digest)?;
-            // fimxe: handle encrypted chunks
-            let data = chunk.decode(None)?;
-            writer.write_all(&data)?;
-        }
-
-        Ok(())
-    }
-    */
-
+    // TODO: can we use std::slice::binary_search with Mmap now?
     fn binary_search(
         &self,
         start_idx: usize,
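Each on-disk index entry is 40 bytes: a little-endian u64 end offset followed by a 32-byte digest, which is where the division by 40 above comes from. A small sanity sketch (the concrete sizes are invented):

    use std::mem::size_of;

    #[repr(C)]
    struct DynamicEntry {
        end_le: u64,
        digest: [u8; 32],
    }

    fn main() {
        // The mmap'ed index is a flat array of these entries.
        assert_eq!(size_of::<DynamicEntry>(), 40);
        let index_size = 4000usize; // bytes following the 4096-byte header
        let index_count = index_size / 40;
        assert_eq!(index_count * 40, index_size); // same check as in new() above
    }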
@@ -238,11 +170,11 @@ impl DynamicIndexReader {
 
 impl IndexFile for DynamicIndexReader {
     fn index_count(&self) -> usize {
-        self.index_entries
+        self.index.len()
     }
 
     fn index_digest(&self, pos: usize) -> Option<&[u8; 32]> {
-        if pos >= self.index_entries {
+        if pos >= self.index.len() {
             None
         } else {
             Some(unsafe { std::mem::transmute(self.chunk_digest(pos).as_ptr()) })
@@ -250,12 +182,59 @@ impl IndexFile for DynamicIndexReader {
         }
     }
 
     fn index_bytes(&self) -> u64 {
-        if self.index_entries == 0 {
+        if self.index.is_empty() {
             0
         } else {
-            self.chunk_end((self.index_entries - 1) as usize)
+            self.chunk_end(self.index.len() - 1)
         }
     }
+
+    fn compute_csum(&self) -> ([u8; 32], u64) {
+        let mut csum = openssl::sha::Sha256::new();
+        let mut chunk_end = 0;
+        for pos in 0..self.index_count() {
+            let info = self.chunk_info(pos).unwrap();
+            chunk_end = info.range.end;
+            csum.update(&chunk_end.to_le_bytes());
+            csum.update(&info.digest);
+        }
+        let csum = csum.finish();
+        (csum, chunk_end)
+    }
+
+    #[allow(clippy::cast_ptr_alignment)]
+    fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo> {
+        if pos >= self.index.len() {
+            return None;
+        }
+        let start = if pos == 0 { 0 } else { self.index[pos - 1].end() };
+
+        let end = self.index[pos].end();
+
+        Some(ChunkReadInfo {
+            range: start..end,
+            digest: self.index[pos].digest.clone(),
+        })
+    }
 }
 
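The index checksum hashes, for every entry in order, the little-endian end offset followed by the digest. The same computation written out over plain (end, digest) pairs (the helper name is ours):

    // Compute the index checksum exactly as compute_csum() does above.
    fn index_csum(entries: &[(u64, [u8; 32])]) -> ([u8; 32], u64) {
        let mut csum = openssl::sha::Sha256::new();
        let mut chunk_end = 0u64;
        for (end, digest) in entries {
            chunk_end = *end;
            csum.update(&chunk_end.to_le_bytes());
            csum.update(digest);
        }
        (csum.finish(), chunk_end)
    }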
+struct CachedChunk {
+    range: Range<u64>,
+    data: Vec<u8>,
+}
+
+impl CachedChunk {
+    /// Perform sanity checks on the range and data size:
+    pub fn new(range: Range<u64>, data: Vec<u8>) -> Result<Self, Error> {
+        if data.len() as u64 != range.end - range.start {
+            bail!(
+                "read chunk with wrong size ({} != {})",
+                data.len(),
+                range.end - range.start,
+            );
+        }
+        Ok(Self { range, data })
+    }
+}
+
 pub struct BufferedDynamicReader<S> {
@@ -266,7 +245,7 @@ pub struct BufferedDynamicReader<S> {
     buffered_chunk_idx: usize,
     buffered_chunk_start: u64,
     read_offset: u64,
-    lru_cache: crate::tools::lru_cache::LruCache<usize, (u64, u64, Vec<u8>)>,
+    lru_cache: crate::tools::lru_cache::LruCache<usize, CachedChunk>,
 }
 
 struct ChunkCacher<'a, S> {
@@ -274,16 +253,21 @@ struct ChunkCacher<'a, S> {
     index: &'a DynamicIndexReader,
 }
 
-impl<'a, S: ReadChunk> crate::tools::lru_cache::Cacher<usize, (u64, u64, Vec<u8>)> for ChunkCacher<'a, S> {
-    fn fetch(&mut self, index: usize) -> Result<Option<(u64, u64, Vec<u8>)>, failure::Error> {
-        let (start, end, digest) = self.index.chunk_info(index)?;
-        self.store.read_chunk(&digest).and_then(|data| Ok(Some((start, end, data))))
+impl<'a, S: ReadChunk> crate::tools::lru_cache::Cacher<usize, CachedChunk> for ChunkCacher<'a, S> {
+    fn fetch(&mut self, index: usize) -> Result<Option<CachedChunk>, Error> {
+        let info = match self.index.chunk_info(index) {
+            Some(info) => info,
+            None => bail!("chunk index out of range"),
+        };
+        let range = info.range;
+        let data = self.store.read_chunk(&info.digest)?;
+        CachedChunk::new(range, data).map(Some)
     }
 }
 
 impl<S: ReadChunk> BufferedDynamicReader<S> {
     pub fn new(index: DynamicIndexReader, store: S) -> Self {
-        let archive_size = index.chunk_end(index.index_entries - 1);
+        let archive_size = index.index_bytes();
         Self {
             store,
             index,
@@ -301,7 +285,8 @@ impl<S: ReadChunk> BufferedDynamicReader<S> {
     }
 
     fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
-        let (start, end, data) = self.lru_cache.access(
+        //let (start, end, data) = self.lru_cache.access(
+        let cached_chunk = self.lru_cache.access(
             idx,
             &mut ChunkCacher {
                 store: &mut self.store,
@@ -309,21 +294,13 @@ impl<S: ReadChunk> BufferedDynamicReader<S> {
             },
         )?.ok_or_else(|| format_err!("chunk not found by cacher"))?;
 
-        if (*end - *start) != data.len() as u64 {
-            bail!(
-                "read chunk with wrong size ({} != {}",
-                (*end - *start),
-                data.len()
-            );
-        }
-
         // fixme: avoid copy
         self.read_buffer.clear();
-        self.read_buffer.extend_from_slice(&data);
+        self.read_buffer.extend_from_slice(&cached_chunk.data);
 
         self.buffered_chunk_idx = idx;
 
-        self.buffered_chunk_start = *start;
+        self.buffered_chunk_start = cached_chunk.range.start;
         //println!("BUFFER {} {}", self.buffered_chunk_start, end);
         Ok(())
     }
@@ -340,7 +317,7 @@ impl<S: ReadChunk> crate::tools::BufferedRead for BufferedDynamicReader<S> {
 
         // optimization for sequential read
         if buffer_len > 0
-            && ((self.buffered_chunk_idx + 1) < index.index_entries)
+            && ((self.buffered_chunk_idx + 1) < index.index.len())
             && (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
         {
             let next_idx = self.buffered_chunk_idx + 1;
@@ -356,7 +333,7 @@ impl<S: ReadChunk> crate::tools::BufferedRead for BufferedDynamicReader<S> {
             || (offset < self.buffered_chunk_start)
             || (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
         {
-            let end_idx = index.index_entries - 1;
+            let end_idx = index.index.len() - 1;
             let end = index.chunk_end(end_idx);
             let idx = index.binary_search(0, 0, end_idx, end, offset)?;
             self.buffer_chunk(idx)?;
@@ -383,9 +360,7 @@ impl<S: ReadChunk> std::io::Read for BufferedDynamicReader<S> {
             data.len()
         };
 
-        unsafe {
-            std::ptr::copy_nonoverlapping(data.as_ptr(), buf.as_mut_ptr(), n);
-        }
+        buf[0..n].copy_from_slice(&data[0..n]);
 
         self.read_offset += n as u64;
 
@@ -417,6 +392,49 @@ impl<S: ReadChunk> std::io::Seek for BufferedDynamicReader<S> {
     }
 }
 
+/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
+/// async use!
+///
+/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
+/// so that we can properly access it from multiple threads simultaneously while not issuing
+/// duplicate simultaneous reads over http.
+#[derive(Clone)]
+pub struct LocalDynamicReadAt<R: ReadChunk> {
+    inner: Arc<Mutex<BufferedDynamicReader<R>>>,
+}
+
+impl<R: ReadChunk> LocalDynamicReadAt<R> {
+    pub fn new(inner: BufferedDynamicReader<R>) -> Self {
+        Self {
+            inner: Arc::new(Mutex::new(inner)),
+        }
+    }
+}
+
+impl<R: ReadChunk> ReadAt for LocalDynamicReadAt<R> {
+    fn start_read_at<'a>(
+        self: Pin<&'a Self>,
+        _cx: &mut Context,
+        buf: &'a mut [u8],
+        offset: u64,
+    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
+        use std::io::Read;
+        MaybeReady::Ready(tokio::task::block_in_place(move || {
+            let mut reader = self.inner.lock().unwrap();
+            reader.seek(SeekFrom::Start(offset))?;
+            Ok(reader.read(buf)?)
+        }))
+    }
+
+    fn poll_complete<'a>(
+        self: Pin<&'a Self>,
+        _op: ReadAtOperation<'a>,
+    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
+        panic!("LocalDynamicReadAt::start_read_at returned Pending");
+    }
+}
+
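A hedged usage sketch of the new adapter (index and chunk reader construction elided; assumes a multi-threaded tokio runtime, since block_in_place requires one):

    // Wrap the synchronous BufferedDynamicReader so pxar's async accessor can
    // issue ReadAt requests against it; block_in_place bridges the blocking
    // seek+read into the async context.
    let reader = BufferedDynamicReader::new(index, chunk_reader);
    let read_at = LocalDynamicReadAt::new(reader);
    // `read_at` now implements pxar::accessor::ReadAt.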
 /// Create dynamic index files (`.didx`)
 pub struct DynamicIndexWriter {
     store: Arc<ChunkStore>,
@@ -460,9 +478,7 @@ impl DynamicIndexWriter {
             panic!("got unexpected header size");
         }
 
-        let ctime = std::time::SystemTime::now()
-            .duration_since(std::time::SystemTime::UNIX_EPOCH)?
-            .as_secs();
+        let ctime = epoch_now_u64()?;
 
         let uuid = Uuid::generate();
 
@@ -1,11 +1,10 @@
-use failure::*;
-use std::convert::TryInto;
+use anyhow::{bail, format_err, Error};
 use std::io::{Seek, SeekFrom};
 
 use super::chunk_stat::*;
 use super::chunk_store::*;
-use super::IndexFile;
-use crate::tools;
+use super::{IndexFile, ChunkReadInfo};
+use crate::tools::{self, epoch_now_u64};
 
 use chrono::{Local, TimeZone};
 use std::fs::File;
@@ -147,38 +146,6 @@ impl FixedIndexReader {
         Ok(())
     }
 
-    pub fn chunk_info(&self, pos: usize) -> Result<(u64, u64, [u8; 32]), Error> {
-        if pos >= self.index_length {
-            bail!("chunk index out of range");
-        }
-        let start = (pos * self.chunk_size) as u64;
-        let mut end = start + self.chunk_size as u64;
-
-        if end > self.size {
-            end = self.size;
-        }
-
-        let mut digest = std::mem::MaybeUninit::<[u8; 32]>::uninit();
-        unsafe {
-            std::ptr::copy_nonoverlapping(
-                self.index.add(pos * 32),
-                (*digest.as_mut_ptr()).as_mut_ptr(),
-                32,
-            );
-        }
-
-        Ok((start, end, unsafe { digest.assume_init() }))
-    }
-
-    #[inline]
-    fn chunk_digest(&self, pos: usize) -> &[u8; 32] {
-        if pos >= self.index_length {
-            panic!("chunk index out of range");
-        }
-        let slice = unsafe { std::slice::from_raw_parts(self.index.add(pos * 32), 32) };
-        slice.try_into().unwrap()
-    }
-
     #[inline]
     fn chunk_end(&self, pos: usize) -> u64 {
         if pos >= self.index_length {
@@ -193,20 +160,6 @@ impl FixedIndexReader {
         }
     }
 
-    /// Compute checksum and data size
-    pub fn compute_csum(&self) -> ([u8; 32], u64) {
-        let mut csum = openssl::sha::Sha256::new();
-        let mut chunk_end = 0;
-        for pos in 0..self.index_length {
-            chunk_end = ((pos + 1) * self.chunk_size) as u64;
-            let digest = self.chunk_digest(pos);
-            csum.update(digest);
-        }
-        let csum = csum.finish();
-
-        (csum, chunk_end)
-    }
-
     pub fn print_info(&self) {
         println!("Size: {}", self.size);
         println!("ChunkSize: {}", self.chunk_size);
@@ -234,6 +187,38 @@ impl IndexFile for FixedIndexReader {
     fn index_bytes(&self) -> u64 {
         self.size
     }
+
+    fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo> {
+        if pos >= self.index_length {
+            return None;
+        }
+
+        let start = (pos * self.chunk_size) as u64;
+        let mut end = start + self.chunk_size as u64;
+
+        if end > self.size {
+            end = self.size;
+        }
+
+        let digest = self.index_digest(pos).unwrap();
+        Some(ChunkReadInfo {
+            range: start..end,
+            digest: *digest,
+        })
+    }
+
+    fn compute_csum(&self) -> ([u8; 32], u64) {
+        let mut csum = openssl::sha::Sha256::new();
+        let mut chunk_end = 0;
+        for pos in 0..self.index_count() {
+            let info = self.chunk_info(pos).unwrap();
+            chunk_end = info.range.end;
+            csum.update(&info.digest);
+        }
+        let csum = csum.finish();
+
+        (csum, chunk_end)
+    }
 }
 
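For a fixed index the chunk range follows directly from the position; only the final chunk may be shorter. A tiny illustration of the clamping (numbers invented):

    fn fixed_chunk_range(pos: usize, chunk_size: usize, total: u64) -> std::ops::Range<u64> {
        let start = (pos * chunk_size) as u64;
        let end = (start + chunk_size as u64).min(total); // last chunk may be partial
        start..end
    }

    fn main() {
        // A 10 MiB image with 4 MiB chunks yields chunks of 4, 4 and 2 MiB.
        let total = 10 * 1024 * 1024;
        let cs = 4 * 1024 * 1024;
        assert_eq!(fixed_chunk_range(2, cs, total), (8 * 1024 * 1024)..total);
    }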
 pub struct FixedIndexWriter {
@@ -290,9 +275,7 @@ impl FixedIndexWriter {
             panic!("got unexpected header size");
         }
 
-        let ctime = std::time::SystemTime::now()
-            .duration_since(std::time::SystemTime::UNIX_EPOCH)?
-            .as_secs();
+        let ctime = epoch_now_u64()?;
 
         let uuid = Uuid::generate();
 
@@ -469,6 +452,18 @@ impl FixedIndexWriter {
 
         Ok(())
     }
+
+    pub fn clone_data_from(&mut self, reader: &FixedIndexReader) -> Result<(), Error> {
+        if self.index_length != reader.index_count() {
+            bail!("clone_data_from failed - index sizes not equal");
+        }
+
+        for i in 0..self.index_length {
+            self.add_digest(i, reader.index_digest(i).unwrap())?;
+        }
+
+        Ok(())
+    }
 }
 
 pub struct BufferedFixedReader<S> {
@@ -501,18 +496,17 @@ impl<S: ReadChunk> BufferedFixedReader<S> {
 
     fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
         let index = &self.index;
-        let (start, end, digest) = index.chunk_info(idx)?;
+        let info = match index.chunk_info(idx) {
+            Some(info) => info,
+            None => bail!("chunk index out of range"),
+        };
 
         // fixme: avoid copy
-
-        let data = self.store.read_chunk(&digest)?;
-
-        if (end - start) != data.len() as u64 {
-            bail!(
-                "read chunk with wrong size ({} != {}",
-                (end - start),
-                data.len()
-            );
+        let data = self.store.read_chunk(&info.digest)?;
+        let size = info.range.end - info.range.start;
+        if size != data.len() as u64 {
+            bail!("read chunk with wrong size ({} != {})", size, data.len());
         }
 
         self.read_buffer.clear();
@@ -520,8 +514,7 @@ impl<S: ReadChunk> BufferedFixedReader<S> {
 
         self.buffered_chunk_idx = idx;
 
-        self.buffered_chunk_start = start as u64;
-        //println!("BUFFER {} {}", self.buffered_chunk_start, end);
+        self.buffered_chunk_start = info.range.start as u64;
         Ok(())
     }
 }
@@ -1,10 +1,17 @@
 use std::collections::HashMap;
 use std::pin::Pin;
 use std::task::{Context, Poll};
+use std::ops::Range;
 
 use bytes::{Bytes, BytesMut};
-use failure::*;
+use anyhow::{format_err, Error};
 use futures::*;
+
+pub struct ChunkReadInfo {
+    pub range: Range<u64>,
+    pub digest: [u8; 32],
+}
+
+impl ChunkReadInfo {
+    #[inline]
+    pub fn size(&self) -> u64 {
+        self.range.end - self.range.start
+    }
+}
 
 /// Trait to get digest list from index files
 ///
@@ -13,6 +20,10 @@ pub trait IndexFile {
     fn index_count(&self) -> usize;
     fn index_digest(&self, pos: usize) -> Option<&[u8; 32]>;
     fn index_bytes(&self) -> u64;
+    fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo>;
+
+    /// Compute index checksum and size
+    fn compute_csum(&self) -> ([u8; 32], u64);
 
     /// Returns most often used chunks
     fn find_most_used_chunks(&self, max: usize) -> HashMap<[u8; 32], usize> {
@@ -46,111 +57,3 @@ pub trait IndexFile {
         map
     }
 }
-
-/// Encode digest list from an `IndexFile` into a binary stream
-///
-/// The reader simply returns a birary stream of 32 byte digest values.
-pub struct DigestListEncoder {
-    index: Box<dyn IndexFile + Send + Sync>,
-    pos: usize,
-    count: usize,
-}
-
-impl DigestListEncoder {
-
-    pub fn new(index: Box<dyn IndexFile + Send + Sync>) -> Self {
-        let count = index.index_count();
-        Self { index, pos: 0, count }
-    }
-}
-
-impl std::io::Read for DigestListEncoder {
-    fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
-        if buf.len() < 32 {
-            panic!("read buffer too small");
-        }
-
-        if self.pos < self.count {
-            let mut written = 0;
-            loop {
-                let digest = self.index.index_digest(self.pos).unwrap();
-                buf[written..(written + 32)].copy_from_slice(digest);
-                self.pos += 1;
-                written += 32;
-                if self.pos >= self.count {
-                    break;
-                }
-                if (written + 32) >= buf.len() {
-                    break;
-                }
-            }
-            Ok(written)
-        } else {
-            Ok(0)
-        }
-    }
-}
-
-/// Decodes a Stream<Item=Bytes> into Stream<Item=<[u8;32]>
-///
-/// The reader simply returns a birary stream of 32 byte digest values.
-
-pub struct DigestListDecoder<S: Unpin> {
-    input: S,
-    buffer: BytesMut,
-}
-
-impl<S: Unpin> DigestListDecoder<S> {
-    pub fn new(input: S) -> Self {
-        Self { input, buffer: BytesMut::new() }
-    }
-}
-
-impl<S: Unpin> Unpin for DigestListDecoder<S> {}
-
-impl<S: Unpin, E> Stream for DigestListDecoder<S>
-where
-    S: Stream<Item=Result<Bytes, E>>,
-    E: Into<Error>,
-{
-    type Item = Result<[u8; 32], Error>;
-
-    fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
-        let this = self.get_mut();
-
-        loop {
-            if this.buffer.len() >= 32 {
-                let left = this.buffer.split_to(32);
-
-                let mut digest = std::mem::MaybeUninit::<[u8; 32]>::uninit();
-                unsafe {
-                    (*digest.as_mut_ptr()).copy_from_slice(&left[..]);
-                    return Poll::Ready(Some(Ok(digest.assume_init())));
-                }
-            }
-
-            match Pin::new(&mut this.input).poll_next(cx) {
-                Poll::Pending => {
-                    return Poll::Pending;
-                }
-                Poll::Ready(Some(Err(err))) => {
-                    return Poll::Ready(Some(Err(err.into())));
-                }
-                Poll::Ready(Some(Ok(data))) => {
-                    this.buffer.extend_from_slice(&data);
-                    // continue
-                }
-                Poll::Ready(None) => {
-                    let rest = this.buffer.len();
-                    if rest == 0 {
-                        return Poll::Ready(None);
-                    }
-                    return Poll::Ready(Some(Err(format_err!(
-                        "got small digest ({} != 32).",
-                        rest,
-                    ))));
-                }
-            }
-        }
-    }
-}
@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, format_err, Error};
 
 use serde::{Deserialize, Serialize};
 use chrono::{Local, TimeZone, DateTime};
@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, format_err, Error};
 use std::convert::TryFrom;
 use std::path::Path;
 
@@ -7,9 +7,11 @@ use serde_json::{json, Value};
 use crate::backup::BackupDir;
 
 pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
+pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";
 
 pub struct FileInfo {
     pub filename: String,
+    pub encrypted: Option<bool>,
     pub size: u64,
     pub csum: [u8; 32],
 }
@@ -47,9 +49,9 @@ impl BackupManifest {
         Self { files: Vec::new(), snapshot }
     }
 
-    pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32]) -> Result<(), Error> {
+    pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32], encrypted: Option<bool>) -> Result<(), Error> {
         let _archive_type = archive_type(&filename)?; // check type
-        self.files.push(FileInfo { filename, size, csum });
+        self.files.push(FileInfo { filename, size, csum, encrypted });
         Ok(())
     }
 
@@ -72,7 +74,7 @@ impl BackupManifest {
         let info = self.lookup_file_info(name)?;
 
         if size != info.size {
-            bail!("wrong size for file '{}' ({} != {}", name, info.size, size);
+            bail!("wrong size for file '{}' ({} != {})", name, info.size, size);
         }
 
         if csum != &info.csum {
@@ -89,11 +91,18 @@ impl BackupManifest {
             "backup-time": self.snapshot.backup_time().timestamp(),
             "files": self.files.iter()
                 .fold(Vec::new(), |mut acc, info| {
-                    acc.push(json!({
+                    let mut value = json!({
                         "filename": info.filename,
+                        "encrypted": info.encrypted,
                         "size": info.size,
                         "csum": proxmox::tools::digest_to_hex(&info.csum),
-                    }));
+                    });
+
+                    if let Some(encrypted) = info.encrypted {
+                        value["encrypted"] = encrypted.into();
+                    }
+
+                    acc.push(value);
                     acc
                 })
@@ -133,7 +142,8 @@ impl TryFrom<Value> for BackupManifest {
             let csum = required_string_property(item, "csum")?;
             let csum = proxmox::tools::hex_to_digest(csum)?;
             let size = required_integer_property(item, "size")? as u64;
-            manifest.add_file(filename, size, csum)?;
+            let encrypted = item["encrypted"].as_bool();
+            manifest.add_file(filename, size, csum, encrypted)?;
         }
 
         if manifest.files().is_empty() {
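With the optional flag in place, a manifest entry serializes roughly as sketched below (all concrete values are invented for illustration):

    use serde_json::json;

    fn main() {
        // Shape of one entry in the "files" array produced by into_json():
        let entry = json!({
            "filename": "root.pxar.didx",
            "size": 12345u64,
            "csum": "b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c",
            "encrypted": false, // set via FileInfo.encrypted when it is Some(_)
        });
        assert_eq!(entry["encrypted"], false);
    }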
@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};
 use std::collections::{HashMap, HashSet};
 use std::path::PathBuf;
@@ -1,38 +1,39 @@
-use failure::*;
 use std::future::Future;
 use std::pin::Pin;
 use std::sync::Arc;
 
-use super::datastore::*;
-use super::crypt_config::*;
-use super::data_blob::*;
+use anyhow::Error;
+
+use super::crypt_config::CryptConfig;
+use super::data_blob::DataBlob;
+use super::datastore::DataStore;
 
 /// The ReadChunk trait allows reading backup data chunks (local or remote)
 pub trait ReadChunk {
     /// Returns the encoded chunk data
-    fn read_raw_chunk(&mut self, digest:&[u8; 32]) -> Result<DataBlob, Error>;
+    fn read_raw_chunk(&mut self, digest: &[u8; 32]) -> Result<DataBlob, Error>;
 
     /// Returns the decoded chunk data
-    fn read_chunk(&mut self, digest:&[u8; 32]) -> Result<Vec<u8>, Error>;
+    fn read_chunk(&mut self, digest: &[u8; 32]) -> Result<Vec<u8>, Error>;
 }
 
 #[derive(Clone)]
 pub struct LocalChunkReader {
     store: Arc<DataStore>,
     crypt_config: Option<Arc<CryptConfig>>,
 }
 
 impl LocalChunkReader {
 
     pub fn new(store: Arc<DataStore>, crypt_config: Option<Arc<CryptConfig>>) -> Self {
-        Self { store, crypt_config }
+        Self {
+            store,
+            crypt_config,
+        }
     }
 }
 
 impl ReadChunk for LocalChunkReader {
-
-    fn read_raw_chunk(&mut self, digest:&[u8; 32]) -> Result<DataBlob, Error> {
-
-        let digest_str = proxmox::tools::digest_to_hex(digest);
-        println!("READ CHUNK {}", digest_str);
-
+    fn read_raw_chunk(&mut self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
         let (path, _) = self.store.chunk_path(digest);
         let raw_data = proxmox::tools::fs::file_get_contents(&path)?;
         let chunk = DataBlob::from_raw(raw_data)?;
@@ -41,13 +42,59 @@ impl ReadChunk for LocalChunkReader {
         Ok(chunk)
     }
 
-    fn read_chunk(&mut self, digest:&[u8; 32]) -> Result<Vec<u8>, Error> {
-        let chunk = self.read_raw_chunk(digest)?;
+    fn read_chunk(&mut self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
+        let chunk = ReadChunk::read_raw_chunk(self, digest)?;
 
         let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
 
         // fixme: verify digest?
 
         Ok(raw_data)
     }
 }
+
+pub trait AsyncReadChunk: Send {
+    /// Returns the encoded chunk data
+    fn read_raw_chunk<'a>(
+        &'a mut self,
+        digest: &'a [u8; 32],
+    ) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>>;
+
+    /// Returns the decoded chunk data
+    fn read_chunk<'a>(
+        &'a mut self,
+        digest: &'a [u8; 32],
+    ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>>;
+}
+
+impl AsyncReadChunk for LocalChunkReader {
+    fn read_raw_chunk<'a>(
+        &'a mut self,
+        digest: &'a [u8; 32],
+    ) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>> {
+        Box::pin(async move {
+            let (path, _) = self.store.chunk_path(digest);
+
+            let raw_data = tokio::fs::read(&path).await?;
+            let chunk = DataBlob::from_raw(raw_data)?;
+            chunk.verify_crc()?;
+
+            Ok(chunk)
+        })
+    }
+
+    fn read_chunk<'a>(
+        &'a mut self,
+        digest: &'a [u8; 32],
+    ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>> {
+        Box::pin(async move {
+            let chunk = AsyncReadChunk::read_raw_chunk(self, digest).await?;
+
+            let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
+
+            // fixme: verify digest?
+
+            Ok(raw_data)
+        })
+    }
+}
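A short usage sketch of the async trait (assumes a tokio context; the helper function is ours):

    // Read and decode one chunk through the new async interface. The fully
    // qualified call disambiguates between ReadChunk and AsyncReadChunk,
    // just like read_chunk does above.
    async fn fetch_chunk(reader: &mut LocalChunkReader, digest: &[u8; 32]) -> Result<Vec<u8>, anyhow::Error> {
        AsyncReadChunk::read_chunk(reader, digest).await
    }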
src/backup/verify.rs (new file, 196 lines)
@@ -0,0 +1,196 @@
+use anyhow::{bail, Error};
+
+use crate::server::WorkerTask;
+
+use super::{
+    DataStore, BackupGroup, BackupDir, BackupInfo, IndexFile,
+    ENCR_COMPR_BLOB_MAGIC_1_0, ENCRYPTED_BLOB_MAGIC_1_0,
+    FileInfo, ArchiveType, archive_type,
+};
+
+fn verify_blob(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
+
+    let (blob, raw_size) = datastore.load_blob(backup_dir, &info.filename)?;
+
+    let csum = openssl::sha::sha256(blob.raw_data());
+    if raw_size != info.size {
+        bail!("wrong size ({} != {})", info.size, raw_size);
+    }
+
+    if csum != info.csum {
+        bail!("wrong index checksum");
+    }
+
+    blob.verify_crc()?;
+
+    let magic = blob.magic();
+
+    if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
+        return Ok(());
+    }
+
+    blob.decode(None)?;
+
+    Ok(())
+}
+
+fn verify_index_chunks(
+    datastore: &DataStore,
+    index: Box<dyn IndexFile>,
+    worker: &WorkerTask,
+) -> Result<(), Error> {
+
+    for pos in 0..index.index_count() {
+
+        worker.fail_on_abort()?;
+
+        let info = index.chunk_info(pos).unwrap();
+        let size = info.range.end - info.range.start;
+        datastore.verify_stored_chunk(&info.digest, size)?;
+    }
+
+    Ok(())
+}
+
+fn verify_fixed_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {
+
+    let mut path = backup_dir.relative_path();
+    path.push(&info.filename);
+
+    let index = datastore.open_fixed_reader(&path)?;
+
+    let (csum, size) = index.compute_csum();
+    if size != info.size {
+        bail!("wrong size ({} != {})", info.size, size);
+    }
+
+    if csum != info.csum {
+        bail!("wrong index checksum");
+    }
+
+    verify_index_chunks(datastore, Box::new(index), worker)
+}
+
+fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {
+    let mut path = backup_dir.relative_path();
+    path.push(&info.filename);
+
+    let index = datastore.open_dynamic_reader(&path)?;
+
+    let (csum, size) = index.compute_csum();
+    if size != info.size {
+        bail!("wrong size ({} != {})", info.size, size);
+    }
+
+    if csum != info.csum {
+        bail!("wrong index checksum");
+    }
+
+    verify_index_chunks(datastore, Box::new(index), worker)
+}
+
+/// Verify a single backup snapshot
+///
+/// This checks all archives inside a backup snapshot.
+/// Errors are logged to the worker log.
+///
+/// Returns
+/// - Ok(true) if verify is successful
+/// - Ok(false) if there were verification errors
+/// - Err(_) if task was aborted
+pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker: &WorkerTask) -> Result<bool, Error> {
+
+    let manifest = match datastore.load_manifest(&backup_dir) {
+        Ok((manifest, _)) => manifest,
+        Err(err) => {
+            worker.log(format!("verify {}:{} - manifest load error: {}", datastore.name(), backup_dir, err));
+            return Ok(false);
+        }
+    };
+
+    worker.log(format!("verify {}:{}", datastore.name(), backup_dir));
+
+    let mut error_count = 0;
+
+    for info in manifest.files() {
+        let result = proxmox::try_block!({
+            worker.log(format!("  check {}", info.filename));
+            match archive_type(&info.filename)? {
+                ArchiveType::FixedIndex => verify_fixed_index(&datastore, &backup_dir, info, worker),
+                ArchiveType::DynamicIndex => verify_dynamic_index(&datastore, &backup_dir, info, worker),
+                ArchiveType::Blob => verify_blob(&datastore, &backup_dir, info),
+            }
+        });
+
+        worker.fail_on_abort()?;
+
+        if let Err(err) = result {
+            worker.log(format!("verify {}:{}/{} failed: {}", datastore.name(), backup_dir, info.filename, err));
+            error_count += 1;
+        }
+    }
+
+    Ok(error_count == 0)
+}
+
+/// Verify all backups inside a backup group
+///
+/// Errors are logged to the worker log.
+///
+/// Returns
+/// - Ok(true) if verify is successful
+/// - Ok(false) if there were verification errors
+/// - Err(_) if task was aborted
+pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &WorkerTask) -> Result<bool, Error> {
+
+    let mut list = match group.list_backups(&datastore.base_path()) {
+        Ok(list) => list,
+        Err(err) => {
+            worker.log(format!("verify group {}:{} - unable to list backups: {}", datastore.name(), group, err));
+            return Ok(false);
+        }
+    };
+
+    worker.log(format!("verify group {}:{}", datastore.name(), group));
+
+    let mut error_count = 0;
+
+    BackupInfo::sort_list(&mut list, false); // newest first
+    for info in list {
+        if !verify_backup_dir(datastore, &info.backup_dir, worker)? {
+            error_count += 1;
+        }
+    }
+
+    Ok(error_count == 0)
+}
+
+/// Verify all backups inside a datastore
+///
+/// Errors are logged to the worker log.
+///
+/// Returns
+/// - Ok(true) if verify is successful
+/// - Ok(false) if there were verification errors
+/// - Err(_) if task was aborted
+pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> Result<bool, Error> {
+
+    let list = match BackupGroup::list_groups(&datastore.base_path()) {
+        Ok(list) => list,
+        Err(err) => {
+            worker.log(format!("verify datastore {} - unable to list backups: {}", datastore.name(), err));
+            return Ok(false);
+        }
+    };
+
+    worker.log(format!("verify datastore {}", datastore.name()));
+
+    let mut error_count = 0;
+    for group in list {
+        if !verify_backup_group(datastore, &group, worker)? {
+            error_count += 1;
+        }
+    }
+
+    Ok(error_count == 0)
+}
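The three entry points nest (datastore → group → snapshot) and only report failure through their boolean result. A sketch of driving a full verification from a worker task (the helper name is ours):

    // Inside a WorkerTask body: verify everything and turn the boolean
    // summary into a hard error for the task result.
    fn run_verify(datastore: &DataStore, worker: &WorkerTask) -> Result<(), anyhow::Error> {
        let all_ok = verify_all_backups(datastore, worker)?;
        if !all_ok {
            anyhow::bail!("verification failed - please check the log for details");
        }
        Ok(())
    }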
@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};
 
 use proxmox::api::format::*;
@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};
 
 use proxmox::api::format::*;
 use proxmox::api::cli::*;
@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};
 
 use proxmox::api::format::dump_api;
@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, Error};
 use futures::*;
 
 use proxmox::try_block;
@@ -14,6 +14,8 @@ use proxmox_backup::config;
 use proxmox_backup::buildcfg;
 
 fn main() {
+    proxmox_backup::tools::setup_safe_path_env();
+
     if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
         eprintln!("Error: {}", err);
         std::process::exit(-1);
@@ -34,6 +36,8 @@ async fn run() -> Result<(), Error> {
 
     config::update_self_signed_cert(false)?;
 
+    proxmox_backup::rrd::create_rrdb_dir()?;
+
     if let Err(err) = generate_auth_key() {
         bail!("unable to generate auth key - {}", err);
     }
@@ -45,7 +49,7 @@ async fn run() -> Result<(), Error> {
     let _ = csrf_secret(); // load with lazy_static
 
     let config = server::ApiConfig::new(
-        buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PRIVILEGED);
+        buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PRIVILEGED)?;
 
     let rest_server = RestServer::new(config);
 
@@ -1,13 +1,25 @@
-use failure::*;
-use nix::unistd::{fork, ForkResult, pipe};
-use std::os::unix::io::RawFd;
-use chrono::{Local, DateTime, Utc, TimeZone};
-use std::path::{Path, PathBuf};
 use std::collections::{HashSet, HashMap};
 use std::ffi::OsStr;
-use std::io::{Write, Seek, SeekFrom};
+use std::io::{self, Write, Seek, SeekFrom};
 use std::os::unix::fs::OpenOptionsExt;
+use std::os::unix::io::RawFd;
+use std::path::{Path, PathBuf};
+use std::pin::Pin;
+use std::sync::{Arc, Mutex};
+use std::task::Context;
+
+use anyhow::{bail, format_err, Error};
+use chrono::{Local, DateTime, Utc, TimeZone};
+use futures::future::FutureExt;
+use futures::select;
+use futures::stream::{StreamExt, TryStreamExt};
+use nix::unistd::{fork, ForkResult, pipe};
+use serde_json::{json, Value};
+use tokio::signal::unix::{signal, SignalKind};
+use tokio::sync::mpsc;
+use xdg::BaseDirectories;
 
+use pathpatterns::{MatchEntry, MatchType, PatternFlag};
 use proxmox::{sortable, identity};
 use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
 use proxmox::sys::linux::tty;
@@ -15,44 +27,48 @@ use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
 use proxmox::api::schema::*;
 use proxmox::api::cli::*;
 use proxmox::api::api;
+use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
 
 use proxmox_backup::tools;
 use proxmox_backup::api2::types::*;
 use proxmox_backup::client::*;
-use proxmox_backup::backup::*;
-use proxmox_backup::pxar::{ self, catalog::* };
-
-//use proxmox_backup::backup::image_index::*;
-//use proxmox_backup::config::datastore;
-//use proxmox_backup::pxar::encoder::*;
-//use proxmox_backup::backup::datastore::*;
-
-use serde_json::{json, Value};
-//use hyper::Body;
-use std::sync::{Arc, Mutex};
-//use regex::Regex;
-use xdg::BaseDirectories;
-
-use futures::*;
-use tokio::sync::mpsc;
+use proxmox_backup::pxar::catalog::*;
+use proxmox_backup::backup::{
+    archive_type,
+    encrypt_key_with_passphrase,
+    load_and_decrypt_key,
+    store_key_config,
+    verify_chunk_size,
+    ArchiveType,
+    AsyncReadChunk,
+    BackupDir,
+    BackupGroup,
+    BackupManifest,
+    BufferedDynamicReader,
+    CatalogReader,
+    CatalogWriter,
+    CATALOG_NAME,
+    ChunkStream,
+    CryptConfig,
+    DataBlob,
+    DynamicIndexReader,
+    FixedChunkStream,
+    FixedIndexReader,
+    IndexFile,
+    KeyConfig,
+    MANIFEST_BLOB_NAME,
+    Shell,
+};
 
 const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
 const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
 
 proxmox::const_regex! {
     BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf|log)):(.+)$";
 }
 
 const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
     .format(&BACKUP_REPO_URL)
     .max_length(256)
     .schema();
 
 const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
     "Backup source specification ([<label>:<path>]).")
     .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
     .schema();
 
 const KEYFILE_SCHEMA: Schema = StringSchema::new(
     "Path to encryption key. All data will be encrypted using this key.")
     .schema();
@@ -245,18 +261,17 @@ async fn api_datastore_latest_snapshot(
     Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
 }
 
-
 async fn backup_directory<P: AsRef<Path>>(
     client: &BackupWriter,
+    previous_manifest: Option<Arc<BackupManifest>>,
     dir_path: P,
     archive_name: &str,
     chunk_size: Option<usize>,
     device_set: Option<HashSet<u64>>,
     verbose: bool,
     skip_lost_and_found: bool,
-    crypt_config: Option<Arc<CryptConfig>>,
     catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
-    exclude_pattern: Vec<pxar::MatchPattern>,
+    exclude_pattern: Vec<MatchEntry>,
     entries_max: usize,
 ) -> Result<BackupStats, Error> {
 
@@ -284,7 +299,7 @@ async fn backup_directory<P: AsRef<Path>>(
     });
 
     let stats = client
-        .upload_stream(archive_name, stream, "dynamic", None, crypt_config)
+        .upload_stream(previous_manifest, archive_name, stream, "dynamic", None)
         .await?;
 
     Ok(stats)
@@ -292,12 +307,12 @@ async fn backup_directory<P: AsRef<Path>>(
 
 async fn backup_image<P: AsRef<Path>>(
     client: &BackupWriter,
+    previous_manifest: Option<Arc<BackupManifest>>,
     image_path: P,
     archive_name: &str,
     image_size: u64,
     chunk_size: Option<usize>,
     _verbose: bool,
-    crypt_config: Option<Arc<CryptConfig>>,
 ) -> Result<BackupStats, Error> {
 
     let path = image_path.as_ref().to_owned();
@@ -310,7 +325,7 @@ async fn backup_image<P: AsRef<Path>>(
     let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));
 
     let stats = client
-        .upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config)
+        .upload_stream(previous_manifest, archive_name, stream, "fixed", Some(image_size))
         .await?;
 
     Ok(stats)
@@ -412,8 +427,8 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
 
     let client = connect(repo.host(), repo.user())?;
 
-    let group = if let Some(path) = param["group"].as_str() {
-        Some(BackupGroup::parse(path)?)
+    let group: Option<BackupGroup> = if let Some(path) = param["group"].as_str() {
+        Some(path.parse()?)
     } else {
         None
     };
@@ -430,7 +445,11 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
 
     let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
-        Ok(tools::format::render_backup_file_list(&item.files))
+        let mut filenames = Vec::new();
+        for file in &item.files {
+            filenames.push(file.filename.to_string());
+        }
+        Ok(tools::format::render_backup_file_list(&filenames[..]))
     };
 
     let options = default_table_format_options()
@@ -469,7 +488,7 @@ async fn forget_snapshots(param: Value) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;
 
     let path = tools::required_string_param(&param, "snapshot")?;
-    let snapshot = BackupDir::parse(path)?;
+    let snapshot: BackupDir = path.parse()?;
 
     let mut client = connect(repo.host(), repo.user())?;
 
@@ -549,7 +568,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;
 
     let path = tools::required_string_param(&param, "snapshot")?;
-    let snapshot = BackupDir::parse(path)?;
+    let snapshot: BackupDir = path.parse()?;
 
     let keyfile = param["keyfile"].as_str().map(PathBuf::from);
 
@@ -627,7 +646,7 @@ async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;
 
     let path = tools::required_string_param(&param, "snapshot")?;
-    let snapshot = BackupDir::parse(path)?;
+    let snapshot: BackupDir = path.parse()?;
 
     let output_format = get_output_format(&param);
 
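All of these call sites move from the old BackupDir::parse() helper to the standard FromStr machinery; a sketch (the snapshot path is invented, and we assume the FromStr impl reports anyhow::Error):

    fn parse_snapshot() -> Result<BackupDir, anyhow::Error> {
        // "host/<id>/<RFC3339 time>" is the usual snapshot path shape.
        let snapshot: BackupDir = "host/myhost/2020-07-09T10:28:24Z".parse()?;
        Ok(snapshot)
    }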
@@ -688,17 +707,8 @@ async fn start_garbage_collection(param: Value) -> Result<Value, Error> {
     Ok(Value::Null)
 }
 
-fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {
-
-    if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
-        return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
-    }
-    bail!("unable to parse directory specification '{}'", value);
-}
-
 fn spawn_catalog_upload(
-    client: Arc<BackupWriter>,
-    crypt_config: Option<Arc<CryptConfig>>,
+    client: Arc<BackupWriter>
 ) -> Result<
         (
             Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
@@ -716,7 +726,7 @@ fn spawn_catalog_upload(
 
     tokio::spawn(async move {
         let catalog_upload_result = client
-            .upload_stream(CATALOG_NAME, catalog_chunk_stream, "dynamic", None, crypt_config)
+            .upload_stream(None, CATALOG_NAME, catalog_chunk_stream, "dynamic", None)
             .await;
 
         if let Err(ref err) = catalog_upload_result {
@@ -790,7 +800,7 @@ fn spawn_catalog_upload(
                 type: Integer,
                 description: "Max number of entries to hold in memory.",
                 optional: true,
-                default: pxar::ENCODER_MAX_ENTRIES as isize,
+                default: proxmox_backup::pxar::ENCODER_MAX_ENTRIES as isize,
             },
             "verbose": {
                 type: Boolean,
@@ -833,17 +843,19 @@ async fn create_backup(
 
     let include_dev = param["include-dev"].as_array();
 
-    let entries_max = param["entries-max"].as_u64().unwrap_or(pxar::ENCODER_MAX_ENTRIES as u64);
+    let entries_max = param["entries-max"].as_u64()
+        .unwrap_or(proxmox_backup::pxar::ENCODER_MAX_ENTRIES as u64);
 
     let empty = Vec::new();
-    let arg_pattern = param["exclude"].as_array().unwrap_or(&empty);
+    let exclude_args = param["exclude"].as_array().unwrap_or(&empty);
 
-    let mut pattern_list = Vec::with_capacity(arg_pattern.len());
-    for s in arg_pattern {
-        let l = s.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
-        let p = pxar::MatchPattern::from_line(l.as_bytes())?
-            .ok_or_else(|| format_err!("Invalid match pattern in arguments"))?;
-        pattern_list.push(p);
+    let mut pattern_list = Vec::with_capacity(exclude_args.len());
+    for entry in exclude_args {
+        let entry = entry.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
+        pattern_list.push(
+            MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
+                .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?
+        );
     }
 
     let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };
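Note: `--exclude` arguments are now parsed into `MatchEntry` values (with `PatternFlag::PATH_NAME` and `MatchType::Exclude`) instead of the old `pxar::MatchPattern`. A standalone sketch of the parsing step, assuming the `pathpatterns`-style types named in the hunk are in scope:

    // hedged sketch; "*.tmp" is an illustrative pattern, not from the diff
    let entry = MatchEntry::parse_pattern("*.tmp", PatternFlag::PATH_NAME, MatchType::Exclude)
        .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?;
    pattern_list.push(entry);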
@@ -865,12 +877,10 @@ async fn create_backup(
 
     let mut upload_list = vec![];
 
-    enum BackupType { PXAR, IMAGE, CONFIG, LOGFILE };
-
-    let mut upload_catalog = false;
-
     for backupspec in backupspec_list {
-        let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;
+        let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
+        let filename = &spec.config_string;
+        let target = &spec.archive_name;
 
         use std::os::unix::fs::FileTypeExt;
 
@@ -878,19 +888,14 @@ async fn create_backup(
             .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
         let file_type = metadata.file_type();
 
-        let extension = target.rsplit('.').next()
-            .ok_or_else(|| format_err!("missing target file extenion '{}'", target))?;
-
-        match extension {
-            "pxar" => {
+        match spec.spec_type {
+            BackupSpecificationType::PXAR => {
                 if !file_type.is_dir() {
                     bail!("got unexpected file type (expected directory)");
                 }
-                upload_list.push((BackupType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
-                upload_catalog = true;
+                upload_list.push((BackupSpecificationType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
             }
-            "img" => {
+            BackupSpecificationType::IMAGE => {
                 if !(file_type.is_file() || file_type.is_block_device()) {
                     bail!("got unexpected file type (expected file or block device)");
                 }
@@ -899,22 +904,19 @@ async fn create_backup(
 
                 if size == 0 { bail!("got zero-sized file '{}'", filename); }
 
-                upload_list.push((BackupType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
+                upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
             }
-            "conf" => {
+            BackupSpecificationType::CONFIG => {
                 if !file_type.is_file() {
                     bail!("got unexpected file type (expected regular file)");
                 }
-                upload_list.push((BackupType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
+                upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
             }
-            "log" => {
+            BackupSpecificationType::LOGFILE => {
                 if !file_type.is_file() {
                     bail!("got unexpected file type (expected regular file)");
                 }
-                upload_list.push((BackupType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
-            }
-            _ => {
-                bail!("got unknown archive extension '{}'", extension);
+                upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
             }
         }
     }
@@ -951,8 +953,11 @@ async fn create_backup(
         }
     };
 
+    let is_encrypted = Some(crypt_config.is_some());
+
     let client = BackupWriter::start(
         client,
+        crypt_config.clone(),
         repo.store(),
         backup_type,
         &backup_id,
@@ -960,64 +965,79 @@ async fn create_backup(
         verbose,
     ).await?;
 
+    let previous_manifest = if let Ok(previous_manifest) = client.download_previous_manifest().await {
+        Some(Arc::new(previous_manifest))
+    } else {
+        None
+    };
+
     let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
     let mut manifest = BackupManifest::new(snapshot);
 
-    let (catalog, catalog_result_rx) = spawn_catalog_upload(client.clone(), crypt_config.clone())?;
+    let mut catalog = None;
+    let mut catalog_result_tx = None;
 
     for (backup_type, filename, target, size) in upload_list {
         match backup_type {
-            BackupType::CONFIG => {
+            BackupSpecificationType::CONFIG => {
                 println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                 let stats = client
-                    .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
+                    .upload_blob_from_file(&filename, &target, true, Some(true))
                     .await?;
-                manifest.add_file(target, stats.size, stats.csum)?;
+                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
             }
-            BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
+            BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
                 println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                 let stats = client
-                    .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
+                    .upload_blob_from_file(&filename, &target, true, Some(true))
                     .await?;
-                manifest.add_file(target, stats.size, stats.csum)?;
+                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
             }
-            BackupType::PXAR => {
+            BackupSpecificationType::PXAR => {
+                // start catalog upload on first use
+                if catalog.is_none() {
+                    let (cat, res) = spawn_catalog_upload(client.clone())?;
+                    catalog = Some(cat);
+                    catalog_result_tx = Some(res);
+                }
+                let catalog = catalog.as_ref().unwrap();
+
                 println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                 catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
                 let stats = backup_directory(
                     &client,
+                    previous_manifest.clone(),
                     &filename,
                     &target,
                     chunk_size_opt,
                     devices.clone(),
                     verbose,
                     skip_lost_and_found,
-                    crypt_config.clone(),
                     catalog.clone(),
                     pattern_list.clone(),
                     entries_max as usize,
                 ).await?;
-                manifest.add_file(target, stats.size, stats.csum)?;
+                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
                 catalog.lock().unwrap().end_directory()?;
             }
-            BackupType::IMAGE => {
+            BackupSpecificationType::IMAGE => {
                 println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                 let stats = backup_image(
                     &client,
+                    previous_manifest.clone(),
                     &filename,
                     &target,
                     size,
                     chunk_size_opt,
                     verbose,
-                    crypt_config.clone(),
                 ).await?;
-                manifest.add_file(target, stats.size, stats.csum)?;
+                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
             }
         }
     }
 
     // finalize and upload catalog
-    if upload_catalog {
+    if let Some(catalog) = catalog {
         let mutex = Arc::try_unwrap(catalog)
             .map_err(|_| format_err!("unable to get catalog (still used)"))?;
         let mut catalog = mutex.into_inner().unwrap();
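Note: two behavioral changes are folded into the hunk above. First, the catalog uploader is no longer spawned up front; it starts lazily when the first `.pxar` archive is encountered, so image- or config-only backups never open a catalog stream. Second, every `manifest.add_file` call now records `is_encrypted` (derived once from `crypt_config.is_some()`). A reduced, self-contained sketch of the spawn-on-first-use pattern:

    // hedged sketch; the String stands in for the spawned uploader handle
    let mut catalog: Option<String> = None;
    for archive in &["a.pxar", "b.pxar"] {          // illustrative input
        if catalog.is_none() {
            // stands in for spawn_catalog_upload(client.clone())?
            catalog = Some(format!("uploader started for {}", archive));
        }
        let _catalog = catalog.as_ref().unwrap();
        // ... feed `archive` entries into the catalog ...
    }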
@@ -1026,18 +1046,19 @@ async fn create_backup(
 
         drop(catalog); // close upload stream
 
-        let stats = catalog_result_rx.await??;
-
-        manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum)?;
+        if let Some(catalog_result_rx) = catalog_result_tx {
+            let stats = catalog_result_rx.await??;
+            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, is_encrypted)?;
+        }
     }
 
     if let Some(rsa_encrypted_key) = rsa_encrypted_key {
         let target = "rsa-encrypted.key";
         println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
         let stats = client
-            .upload_blob_from_data(rsa_encrypted_key, target, None, false, false)
+            .upload_blob_from_data(rsa_encrypted_key, target, false, None)
             .await?;
-        manifest.add_file(format!("{}.blob", target), stats.size, stats.csum)?;
+        manifest.add_file(format!("{}.blob", target), stats.size, stats.csum, is_encrypted)?;
 
         // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
         /*
@@ -1055,7 +1076,7 @@ async fn create_backup(
     println!("Upload index.json to '{:?}'", repo);
     let manifest = serde_json::to_string_pretty(&manifest)?.into();
     client
-        .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, crypt_config.clone(), true, true)
+        .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, true, Some(true))
         .await?;
 
     client.finish().await?;
@@ -1090,7 +1111,7 @@ fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
     result
 }
 
-fn dump_image<W: Write>(
+async fn dump_image<W: Write>(
     client: Arc<BackupReader>,
     crypt_config: Option<Arc<CryptConfig>>,
     index: FixedIndexReader,
@@ -1110,7 +1131,7 @@ fn dump_image<W: Write>(
 
     for pos in 0..index.index_count() {
         let digest = index.index_digest(pos).unwrap();
-        let raw_data = chunk_reader.read_chunk(&digest)?;
+        let raw_data = chunk_reader.read_chunk(&digest).await?;
         writer.write_all(&raw_data)?;
         bytes += raw_data.len();
         if verbose {
@@ -1135,6 +1156,18 @@ fn dump_image<W: Write>(
     Ok(())
 }
 
+fn parse_archive_type(name: &str) -> (String, ArchiveType) {
+    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
+        (name.into(), archive_type(name).unwrap())
+    } else if name.ends_with(".pxar") {
+        (format!("{}.didx", name), ArchiveType::DynamicIndex)
+    } else if name.ends_with(".img") {
+        (format!("{}.fidx", name), ArchiveType::FixedIndex)
+    } else {
+        (format!("{}.blob", name), ArchiveType::Blob)
+    }
+}
+
 #[api(
     input: {
         properties: {
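Note: the new `parse_archive_type` helper centralizes the extension-to-server-name mapping that `restore` previously did inline (removed in a hunk below): `.pxar` maps to a dynamic index, `.img` to a fixed index, anything else to a blob, and names that already carry a `.didx`/`.fidx`/`.blob` suffix pass through unchanged. A usage sketch with illustrative archive names:

    // hedged sketch; relies only on the helper added above
    let (name, kind) = parse_archive_type("root.pxar");
    assert_eq!(name, "root.pxar.didx");
    assert!(matches!(kind, ArchiveType::DynamicIndex));

    let (name, kind) = parse_archive_type("disk.img");
    assert_eq!(name, "disk.img.fidx");
    assert!(matches!(kind, ArchiveType::FixedIndex));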
@@ -1187,10 +1220,10 @@ async fn restore(param: Value) -> Result<Value, Error> {
     let path = tools::required_string_param(&param, "snapshot")?;
 
     let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
-        let group = BackupGroup::parse(path)?;
+        let group: BackupGroup = path.parse()?;
         api_datastore_latest_snapshot(&client, repo.store(), group).await?
     } else {
-        let snapshot = BackupDir::parse(path)?;
+        let snapshot: BackupDir = path.parse()?;
         (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
     };
 
@@ -1207,14 +1240,6 @@ async fn restore(param: Value) -> Result<Value, Error> {
         }
     };
 
-    let server_archive_name = if archive_name.ends_with(".pxar") {
-        format!("{}.didx", archive_name)
-    } else if archive_name.ends_with(".img") {
-        format!("{}.fidx", archive_name)
-    } else {
-        format!("{}.blob", archive_name)
-    };
-
     let client = BackupReader::start(
         client,
         crypt_config.clone(),
@@ -1227,7 +1252,9 @@ async fn restore(param: Value) -> Result<Value, Error> {
 
     let manifest = client.download_manifest().await?;
 
-    if server_archive_name == MANIFEST_BLOB_NAME {
+    let (archive_name, archive_type) = parse_archive_type(archive_name);
+
+    if archive_name == MANIFEST_BLOB_NAME {
         let backup_index_data = manifest.into_json().to_string();
         if let Some(target) = target {
             replace_file(target, backup_index_data.as_bytes(), CreateOptions::new())?;
@@ -1238,9 +1265,9 @@ async fn restore(param: Value) -> Result<Value, Error> {
                 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
         }
 
-    } else if server_archive_name.ends_with(".blob") {
+    } else if archive_type == ArchiveType::Blob {
 
-        let mut reader = client.download_blob(&manifest, &server_archive_name).await?;
+        let mut reader = client.download_blob(&manifest, &archive_name).await?;
 
         if let Some(target) = target {
             let mut writer = std::fs::OpenOptions::new()
@@ -1257,9 +1284,9 @@ async fn restore(param: Value) -> Result<Value, Error> {
                 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
         }
 
-    } else if server_archive_name.ends_with(".didx") {
+    } else if archive_type == ArchiveType::DynamicIndex {
 
-        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
+        let index = client.download_dynamic_index(&manifest, &archive_name).await?;
 
         let most_used = index.find_most_used_chunks(8);
 
@@ -1268,18 +1295,19 @@ async fn restore(param: Value) -> Result<Value, Error> {
         let mut reader = BufferedDynamicReader::new(index, chunk_reader);
 
         if let Some(target) = target {
-
-            let feature_flags = pxar::flags::DEFAULT;
-            let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags);
-            decoder.set_callback(move |path| {
-                if verbose {
-                    eprintln!("{:?}", path);
-                }
-                Ok(())
-            });
-            decoder.set_allow_existing_dirs(allow_existing_dirs);
-
-            decoder.restore(Path::new(target), &Vec::new())?;
+            proxmox_backup::pxar::extract_archive(
+                pxar::decoder::Decoder::from_std(reader)?,
+                Path::new(target),
+                &[],
+                proxmox_backup::pxar::Flags::DEFAULT,
+                allow_existing_dirs,
+                |path| {
+                    if verbose {
+                        println!("{:?}", path);
+                    }
+                },
+            )
+            .map_err(|err| format_err!("error extracting archive - {}", err))?;
         } else {
             let mut writer = std::fs::OpenOptions::new()
                 .write(true)
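Note: restoring a `.pxar` archive no longer drives `pxar::SequentialDecoder` by hand; it delegates to `proxmox_backup::pxar::extract_archive`, building the decoder from any blocking reader via `pxar::decoder::Decoder::from_std`, passing an empty match list (`&[]`) and default feature flags, and moving the verbose path printing into a plain closure. A reduced sketch of just that callback:

    // hedged sketch: the shape of the verbose callback handed to extract_archive
    let verbose = true;
    let on_entry = |path: &std::path::Path| {
        if verbose {
            println!("{:?}", path);
        }
    };
    on_entry(std::path::Path::new("/etc/hostname")); // illustrative call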
@@ -1289,9 +1317,9 @@ async fn restore(param: Value) -> Result<Value, Error> {
             std::io::copy(&mut reader, &mut writer)
                 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
         }
-    } else if server_archive_name.ends_with(".fidx") {
+    } else if archive_type == ArchiveType::FixedIndex {
 
-        let index = client.download_fixed_index(&manifest, &server_archive_name).await?;
+        let index = client.download_fixed_index(&manifest, &archive_name).await?;
 
         let mut writer = if let Some(target) = target {
             std::fs::OpenOptions::new()
@@ -1307,10 +1335,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
                 .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
         };
 
-        dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose)?;
-
-    } else {
-        bail!("unknown archive file extension (expected .pxar of .img)");
+        dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose).await?;
     }
 
     Ok(Value::Null)
@@ -1345,7 +1370,7 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;
 
     let snapshot = tools::required_string_param(&param, "snapshot")?;
-    let snapshot = BackupDir::parse(snapshot)?;
+    let snapshot: BackupDir = snapshot.parse()?;
 
     let mut client = connect(repo.host(), repo.user())?;
 
@@ -1390,6 +1415,12 @@ const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
             ("group", false, &StringSchema::new("Backup group.").schema()),
         ], [
             ("output-format", true, &OUTPUT_FORMAT),
+            (
+                "quiet",
+                true,
+                &BooleanSchema::new("Minimal output - only show removals.")
+                    .schema()
+            ),
             ("repository", true, &REPO_URL_SCHEMA),
         ])
     )
@@ -1413,22 +1444,59 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {
     let path = format!("api2/json/admin/datastore/{}/prune", repo.store());
 
     let group = tools::required_string_param(&param, "group")?;
-    let group = BackupGroup::parse(group)?;
+    let group: BackupGroup = group.parse()?;
 
     let output_format = get_output_format(&param);
 
+    let quiet = param["quiet"].as_bool().unwrap_or(false);
+
     param.as_object_mut().unwrap().remove("repository");
     param.as_object_mut().unwrap().remove("group");
     param.as_object_mut().unwrap().remove("output-format");
+    param.as_object_mut().unwrap().remove("quiet");
 
     param["backup-type"] = group.backup_type().into();
     param["backup-id"] = group.backup_id().into();
 
-    let result = client.post(&path, Some(param)).await?;
+    let mut result = client.post(&path, Some(param)).await?;
 
     record_repository(&repo);
 
-    view_task_result(client, result, &output_format).await?;
+    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
+        let item: PruneListItem = serde_json::from_value(record.to_owned())?;
+        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
+        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
+    };
+
+    let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
+        Ok(match v.as_bool() {
+            Some(true) => "keep",
+            Some(false) => "remove",
+            None => "unknown",
+        }.to_string())
+    };
+
+    let options = default_table_format_options()
+        .sortby("backup-type", false)
+        .sortby("backup-id", false)
+        .sortby("backup-time", false)
+        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
+        .column(ColumnConfig::new("backup-time").renderer(tools::format::render_epoch).header("date"))
+        .column(ColumnConfig::new("keep").renderer(render_prune_action).header("action"))
+        ;
+
+    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_PRUNE;
+
+    let mut data = result["data"].take();
+
+    if quiet {
+        let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
+            item["keep"].as_bool() == Some(false)
+        }).map(|v| v.clone()).collect();
+        data = list.into();
+    }
+
+    format_and_print_result_full(&mut data, info, &output_format, &options);
 
     Ok(Value::Null)
 }
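Note: `prune` now renders the API result client-side instead of following it as a task: snapshots are shown with a keep/remove action column, and with `--quiet` the table is first filtered down to the entries the server decided to remove. A standalone sketch of that filter over the returned JSON:

    // hedged sketch; `data` stands in for result["data"] from the hunk above
    let data = serde_json::json!([
        { "backup-id": "vm/100", "keep": true },
        { "backup-id": "vm/101", "keep": false },
    ]);
    let removals: Vec<serde_json::Value> = data
        .as_array()
        .unwrap()
        .iter()
        .filter(|item| item["keep"].as_bool() == Some(false))
        .cloned()
        .collect();
    assert_eq!(removals.len(), 1);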
@@ -1610,9 +1678,9 @@ async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {
         _ => return result,
     };
 
-    let snapshot = match param.get("snapshot") {
+    let snapshot: BackupDir = match param.get("snapshot") {
         Some(path) => {
-            match BackupDir::parse(path) {
+            match path.parse() {
                 Ok(v) => v,
                 _ => return result,
             }
@@ -1852,7 +1920,9 @@ fn key_mgmt_cli() -> CliCommandMap {
 
     const KDF_SCHEMA: Schema =
         StringSchema::new("Key derivation function. Choose 'none' to store the key unecrypted.")
-            .format(&ApiStringFormat::Enum(&["scrypt", "none"]))
+            .format(&ApiStringFormat::Enum(&[
+                EnumEntry::new("scrypt", "SCrypt"),
+                EnumEntry::new("none", "Do not encrypt the key")]))
             .default("scrypt")
             .schema();
 
@@ -1946,6 +2016,48 @@ fn mount(
     }
 }
 
+use proxmox_backup::client::RemoteChunkReader;
+
+/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
+/// async use!
+///
+/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
+/// so that we can properly access it from multiple threads simultaneously while not issuing
+/// duplicate simultaneous reads over http.
+struct BufferedDynamicReadAt {
+    inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
+}
+
+impl BufferedDynamicReadAt {
+    fn new(inner: BufferedDynamicReader<RemoteChunkReader>) -> Self {
+        Self {
+            inner: Mutex::new(inner),
+        }
+    }
+}
+
+impl ReadAt for BufferedDynamicReadAt {
+    fn start_read_at<'a>(
+        self: Pin<&'a Self>,
+        _cx: &mut Context,
+        buf: &'a mut [u8],
+        offset: u64,
+    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
+        use std::io::Read;
+        MaybeReady::Ready(tokio::task::block_in_place(move || {
+            let mut reader = self.inner.lock().unwrap();
+            reader.seek(SeekFrom::Start(offset))?;
+            Ok(reader.read(buf)?)
+        }))
+    }
+
+    fn poll_complete<'a>(
+        self: Pin<&'a Self>,
+        _op: ReadAtOperation<'a>,
+    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
+        panic!("LocalDynamicReadAt::start_read_at returned Pending");
+    }
+}
+
 async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;
     let archive_name = tools::required_string_param(&param, "archive-name")?;
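Note: `BufferedDynamicReadAt` adapts the seek-based `BufferedDynamicReader` to the positional `ReadAt` interface the new fuse code expects. Every read locks the inner reader, seeks, and reads, and the whole operation runs under `tokio::task::block_in_place` so the blocking HTTP chunk fetches do not stall the async executor; `poll_complete` may panic because `start_read_at` always returns `Ready`. The underlying lock-seek-read step, reduced to a free function:

    // hedged, self-contained sketch of the blocking bridge
    use std::io::{Read, Seek, SeekFrom};
    use std::sync::Mutex;

    fn read_at_blocking<R: Read + Seek>(
        inner: &Mutex<R>,
        buf: &mut [u8],
        offset: u64,
    ) -> std::io::Result<usize> {
        let mut reader = inner.lock().unwrap();
        reader.seek(SeekFrom::Start(offset))?;
        reader.read(buf)
    }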
@@ -1956,10 +2068,10 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
 
     let path = tools::required_string_param(&param, "snapshot")?;
     let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
-        let group = BackupGroup::parse(path)?;
+        let group: BackupGroup = path.parse()?;
         api_datastore_latest_snapshot(&client, repo.store(), group).await?
     } else {
-        let snapshot = BackupDir::parse(path)?;
+        let snapshot: BackupDir = path.parse()?;
         (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
     };
 
@@ -1995,19 +2107,23 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
         let most_used = index.find_most_used_chunks(8);
         let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
         let reader = BufferedDynamicReader::new(index, chunk_reader);
-        let decoder = pxar::Decoder::new(reader)?;
+        let archive_size = reader.archive_size();
+        let reader: proxmox_backup::pxar::fuse::Reader =
+            Arc::new(BufferedDynamicReadAt::new(reader));
+        let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
         let options = OsStr::new("ro,default_permissions");
-        let mut session = pxar::fuse::Session::new(decoder, &options, pipe.is_none())
-            .map_err(|err| format_err!("pxar mount failed: {}", err))?;
 
-        // Mount the session but not call fuse deamonize as this will cause
-        // issues with the runtime after the fork
-        let deamonize = false;
-        session.mount(&Path::new(target), deamonize)?;
+        let session = proxmox_backup::pxar::fuse::Session::mount(
+            decoder,
+            &options,
+            false,
+            Path::new(target),
+        )
+        .map_err(|err| format_err!("pxar mount failed: {}", err))?;
 
         if let Some(pipe) = pipe {
             nix::unistd::chdir(Path::new("/")).unwrap();
-            // Finish creation of deamon by redirecting filedescriptors.
+            // Finish creation of daemon by redirecting filedescriptors.
             let nullfd = nix::fcntl::open(
                 "/dev/null",
                 nix::fcntl::OFlag::O_RDWR,
@@ -2025,8 +2141,13 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
             nix::unistd::close(pipe).unwrap();
         }
 
-        let multithreaded = true;
-        session.run_loop(multithreaded)?;
+        let mut interrupt = signal(SignalKind::interrupt())?;
+        select! {
+            res = session.fuse() => res?,
+            _ = interrupt.recv().fuse() => {
+                // exit on interrupted
+            }
+        }
     } else {
         bail!("unknown archive file extension (expected .pxar)");
    }
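Note: instead of handing control to the old fuse run loop, the mount future is now awaited directly and raced against SIGINT, so Ctrl+C cleanly drops (and thereby unmounts) the session. The same shutdown pattern, sketched with tokio primitives rather than the `futures::select!` used above:

    // hedged sketch; `work` stands in for the fuse session future
    use tokio::signal::unix::{signal, SignalKind};

    async fn run_until_interrupt<F>(work: F) -> Result<(), anyhow::Error>
    where
        F: std::future::Future<Output = Result<(), anyhow::Error>>,
    {
        let mut interrupt = signal(SignalKind::interrupt())?;
        tokio::select! {
            res = work => res?,
            _ = interrupt.recv() => {
                // interrupted: fall through so `work` is dropped
            }
        }
        Ok(())
    }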
@@ -2065,10 +2186,10 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     let archive_name = tools::required_string_param(&param, "archive-name")?;
 
     let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
-        let group = BackupGroup::parse(path)?;
+        let group: BackupGroup = path.parse()?;
         api_datastore_latest_snapshot(&client, repo.store(), group).await?
     } else {
-        let snapshot = BackupDir::parse(path)?;
+        let snapshot: BackupDir = path.parse()?;
         (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
     };
 
@@ -2097,7 +2218,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
         true,
     ).await?;
 
-    let tmpfile = std::fs::OpenOptions::new()
+    let mut tmpfile = std::fs::OpenOptions::new()
         .write(true)
         .read(true)
         .custom_flags(libc::O_TMPFILE)
@@ -2109,13 +2230,12 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     let most_used = index.find_most_used_chunks(8);
     let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
     let reader = BufferedDynamicReader::new(index, chunk_reader);
-    let mut decoder = pxar::Decoder::new(reader)?;
-    decoder.set_callback(|path| {
-        println!("{:?}", path);
-        Ok(())
-    });
+    let archive_size = reader.archive_size();
+    let reader: proxmox_backup::pxar::fuse::Reader =
+        Arc::new(BufferedDynamicReadAt::new(reader));
+    let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
 
-    let tmpfile = client.download(CATALOG_NAME, tmpfile).await?;
+    client.download(CATALOG_NAME, &mut tmpfile).await?;
     let index = DynamicIndexReader::new(tmpfile)
         .map_err(|err| format_err!("unable to read catalog index - {}", err))?;
 
@@ -2141,10 +2261,10 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
         catalog_reader,
         &server_archive_name,
         decoder,
-    )?;
+    ).await?;
 
     println!("Starting interactive shell");
-    state.shell()?;
+    state.shell().await?;
 
     record_repository(&repo);
 
@@ -2400,7 +2520,8 @@ fn main() {
         .insert("catalog", catalog_mgmt_cli())
         .insert("task", task_mgmt_cli());
 
-    run_cli_command(cmd_def, Some(|future| {
+    let rpcenv = CliEnvironment::new();
+    run_cli_command(cmd_def, rpcenv, Some(|future| {
        proxmox_backup::tools::runtime::main(future)
    }));
 }
|
@ -1,19 +1,20 @@
|
||||
use std::path::PathBuf;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use failure::*;
|
||||
use anyhow::{format_err, Error};
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};
|
||||
use proxmox::api::{api, cli::*, RpcEnvironment};
|
||||
|
||||
use proxmox_backup::configdir;
|
||||
use proxmox_backup::tools;
|
||||
use proxmox_backup::config::{self, remote::{self, Remote}};
|
||||
use proxmox_backup::config;
|
||||
use proxmox_backup::api2::{self, types::* };
|
||||
use proxmox_backup::client::*;
|
||||
use proxmox_backup::tools::ticket::*;
|
||||
use proxmox_backup::auth_helpers::*;
|
||||
|
||||
mod proxmox_backup_manager;
|
||||
use proxmox_backup_manager::*;
|
||||
|
||||
async fn view_task_result(
|
||||
client: HttpClient,
|
||||
result: Value,
|
||||
@@ -31,6 +32,24 @@ async fn view_task_result(
     Ok(())
 }
 
+// Note: local workers should print logs to stdout, so there is no need
+// to fetch/display logs. We just wait for the worker to finish.
+pub async fn wait_for_local_worker(upid_str: &str) -> Result<(), Error> {
+
+    let upid: proxmox_backup::server::UPID = upid_str.parse()?;
+
+    let sleep_duration = core::time::Duration::new(0, 100_000_000);
+
+    loop {
+        if proxmox_backup::server::worker_is_active_local(&upid) {
+            tokio::time::delay_for(sleep_duration).await;
+        } else {
+            break;
+        }
+    }
+    Ok(())
+}
+
 fn connect() -> Result<HttpClient, Error> {
 
     let uid = nix::unistd::Uid::current();
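Note: `wait_for_local_worker` polls `worker_is_active_local` in a loop, sleeping 100 ms between checks (`Duration::new(0, 100_000_000)` is 100 million nanoseconds); since local workers log to stdout there is nothing to fetch, so waiting is enough. A usage sketch, assuming a UPID string from a previously started worker:

    // hypothetical caller; `upid_str` must be a valid UPID string
    wait_for_local_worker(upid_str).await?;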
@@ -51,88 +70,6 @@ fn connect() -> Result<HttpClient, Error> {
     Ok(client)
 }
 
-#[api(
-    input: {
-        properties: {
-            "output-format": {
-                schema: OUTPUT_FORMAT,
-                optional: true,
-            },
-        }
-    }
-)]
-/// List configured remotes.
-fn list_remotes(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
-
-    let output_format = get_output_format(&param);
-
-    let info = &api2::config::remote::API_METHOD_LIST_REMOTES;
-    let mut data = match info.handler {
-        ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
-        _ => unreachable!(),
-    };
-
-    let options = default_table_format_options()
-        .column(ColumnConfig::new("name"))
-        .column(ColumnConfig::new("host"))
-        .column(ColumnConfig::new("userid"))
-        .column(ColumnConfig::new("fingerprint"))
-        .column(ColumnConfig::new("comment"));
-
-    format_and_print_result_full(&mut data, info.returns, &output_format, &options);
-
-    Ok(Value::Null)
-}
-
-fn remote_commands() -> CommandLineInterface {
-
-    let cmd_def = CliCommandMap::new()
-        .insert("list", CliCommand::new(&&API_METHOD_LIST_REMOTES))
-        .insert(
-            "create",
-            // fixme: howto handle password parameter?
-            CliCommand::new(&api2::config::remote::API_METHOD_CREATE_REMOTE)
-                .arg_param(&["name"])
-        )
-        .insert(
-            "update",
-            CliCommand::new(&api2::config::remote::API_METHOD_UPDATE_REMOTE)
-                .arg_param(&["name"])
-                .completion_cb("name", config::remote::complete_remote_name)
-        )
-        .insert(
-            "remove",
-            CliCommand::new(&api2::config::remote::API_METHOD_DELETE_REMOTE)
-                .arg_param(&["name"])
-                .completion_cb("name", config::remote::complete_remote_name)
-        );
-
-    cmd_def.into()
-}
-
-fn datastore_commands() -> CommandLineInterface {
-
-    let cmd_def = CliCommandMap::new()
-        .insert("list", CliCommand::new(&api2::config::datastore::API_METHOD_LIST_DATASTORES))
-        .insert("create",
-            CliCommand::new(&api2::config::datastore::API_METHOD_CREATE_DATASTORE)
-                .arg_param(&["name", "path"])
-        )
-        .insert("update",
-            CliCommand::new(&api2::config::datastore::API_METHOD_UPDATE_DATASTORE)
-                .arg_param(&["name"])
-                .completion_cb("name", config::datastore::complete_datastore_name)
-        )
-        .insert("remove",
-            CliCommand::new(&api2::config::datastore::API_METHOD_DELETE_DATASTORE)
-                .arg_param(&["name"])
-                .completion_cb("name", config::datastore::complete_datastore_name)
-        );
-
-    cmd_def.into()
-}
-
 
 #[api(
     input: {
         properties: {
@@ -328,97 +265,6 @@ fn task_mgmt_cli() -> CommandLineInterface {
     cmd_def.into()
 }
 
-fn x509name_to_string(name: &openssl::x509::X509NameRef) -> Result<String, Error> {
-    let mut parts = Vec::new();
-    for entry in name.entries() {
-        parts.push(format!("{} = {}", entry.object().nid().short_name()?, entry.data().as_utf8()?));
-    }
-    Ok(parts.join(", "))
-}
-
-#[api]
-/// Diplay node certificate information.
-fn cert_info() -> Result<(), Error> {
-
-    let cert_path = PathBuf::from(configdir!("/proxy.pem"));
-
-    let cert_pem = proxmox::tools::fs::file_get_contents(&cert_path)?;
-
-    let cert = openssl::x509::X509::from_pem(&cert_pem)?;
-
-    println!("Subject: {}", x509name_to_string(cert.subject_name())?);
-
-    if let Some(san) = cert.subject_alt_names() {
-        for name in san.iter() {
-            if let Some(v) = name.dnsname() {
-                println!("    DNS:{}", v);
-            } else if let Some(v) = name.ipaddress() {
-                println!("    IP:{:?}", v);
-            } else if let Some(v) = name.email() {
-                println!("    EMAIL:{}", v);
-            } else if let Some(v) = name.uri() {
-                println!("    URI:{}", v);
-            }
-        }
-    }
-
-    println!("Issuer: {}", x509name_to_string(cert.issuer_name())?);
-    println!("Validity:");
-    println!("    Not Before: {}", cert.not_before());
-    println!("    Not After : {}", cert.not_after());
-
-    let fp = cert.digest(openssl::hash::MessageDigest::sha256())?;
-    let fp_string = proxmox::tools::digest_to_hex(&fp);
-    let fp_string = fp_string.as_bytes().chunks(2).map(|v| std::str::from_utf8(v).unwrap())
-        .collect::<Vec<&str>>().join(":");
-
-    println!("Fingerprint (sha256): {}", fp_string);
-
-    let pubkey = cert.public_key()?;
-    println!("Public key type: {}", openssl::nid::Nid::from_raw(pubkey.id().as_raw()).long_name()?);
-    println!("Public key bits: {}", pubkey.bits());
-
-    Ok(())
-}
-
-#[api(
-    input: {
-        properties: {
-            force: {
-                description: "Force generation of new SSL certifate.",
-                type: Boolean,
-                optional:true,
-            },
-        }
-    },
-)]
-/// Update node certificates and generate all needed files/directories.
-fn update_certs(force: Option<bool>) -> Result<(), Error> {
-
-    config::create_configdir()?;
-
-    if let Err(err) = generate_auth_key() {
-        bail!("unable to generate auth key - {}", err);
-    }
-
-    if let Err(err) = generate_csrf_key() {
-        bail!("unable to generate csrf key - {}", err);
-    }
-
-    config::update_self_signed_cert(force.unwrap_or(false))?;
-
-    Ok(())
-}
-
-fn cert_mgmt_cli() -> CommandLineInterface {
-
-    let cmd_def = CliCommandMap::new()
-        .insert("info", CliCommand::new(&API_METHOD_CERT_INFO))
-        .insert("update", CliCommand::new(&API_METHOD_UPDATE_CERTS));
-
-    cmd_def.into()
-}
-
 // fixme: avoid API redefinition
 #[api(
     input: {
|
||||
"remote-store": {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
delete: {
|
||||
description: "Delete vanished backups. This remove the local copy if the remote backup was deleted.",
|
||||
type: Boolean,
|
||||
"remove-vanished": {
|
||||
schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
|
||||
optional: true,
|
||||
default: true,
|
||||
},
|
||||
"output-format": {
|
||||
schema: OUTPUT_FORMAT,
|
||||
@ -450,7 +294,7 @@ async fn pull_datastore(
|
||||
remote: String,
|
||||
remote_store: String,
|
||||
local_store: String,
|
||||
delete: Option<bool>,
|
||||
remove_vanished: Option<bool>,
|
||||
param: Value,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
@@ -464,8 +308,8 @@ async fn pull_datastore(
         "remote-store": remote_store,
     });
 
-    if let Some(delete) = delete {
-        args["delete"] = delete.into();
+    if let Some(remove_vanished) = remove_vanished {
+        args["remove-vanished"] = Value::from(remove_vanished);
     }
 
     let result = client.post("api2/json/pull", Some(args)).await?;
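Note: the `delete` flag is renamed to `remove-vanished` and forwarded to the pull API under that key, matching the shared `REMOVE_VANISHED_BACKUPS_SCHEMA`; the old `default: true` is dropped, so the server-side default applies when the flag is omitted. A sketch of the forwarded body, restricted to keys visible in the hunks above:

    // hedged sketch; values are illustrative, not from the diff
    let mut args = serde_json::json!({
        "remote-store": "store2",
    });
    let remove_vanished = Some(true); // stands in for the CLI option
    if let Some(remove_vanished) = remove_vanished {
        args["remove-vanished"] = serde_json::Value::from(remove_vanished);
    }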
@@ -475,13 +319,55 @@ async fn pull_datastore(
     Ok(Value::Null)
 }
 
+#[api(
+    input: {
+        properties: {
+            "store": {
+                schema: DATASTORE_SCHEMA,
+            },
+            "output-format": {
+                schema: OUTPUT_FORMAT,
+                optional: true,
+            },
+        }
+    }
+)]
+/// Verify backups
+async fn verify(
+    store: String,
+    param: Value,
+) -> Result<Value, Error> {
+
+    let output_format = get_output_format(&param);
+
+    let mut client = connect()?;
+
+    let args = json!({});
+
+    let path = format!("api2/json/admin/datastore/{}/verify", store);
+
+    let result = client.post(&path, Some(args)).await?;
+
+    view_task_result(client, result, &output_format).await?;
+
+    Ok(Value::Null)
+}
+
 fn main() {
 
     proxmox_backup::tools::setup_safe_path_env();
 
     let cmd_def = CliCommandMap::new()
         .insert("acl", acl_commands())
         .insert("datastore", datastore_commands())
         .insert("disk", disk_commands())
         .insert("dns", dns_commands())
         .insert("network", network_commands())
         .insert("user", user_commands())
         .insert("remote", remote_commands())
         .insert("garbage-collection", garbage_collection_commands())
         .insert("cert", cert_mgmt_cli())
         .insert("sync-job", sync_job_commands())
         .insert("task", task_mgmt_cli())
         .insert(
             "pull",
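Note: the new `verify` subcommand is a thin wrapper: it POSTs an empty JSON object to the datastore's verify endpoint and then follows the spawned task via `view_task_result`. Building the endpoint path, as in the handler above:

    // hedged sketch; "store1" is an illustrative datastore name
    let store = "store1";
    let path = format!("api2/json/admin/datastore/{}/verify", store);
    assert_eq!(path, "api2/json/admin/datastore/store1/verify");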
@@ -490,9 +376,20 @@ fn main() {
             .completion_cb("local-store", config::datastore::complete_datastore_name)
             .completion_cb("remote", config::remote::complete_remote_name)
             .completion_cb("remote-store", complete_remote_datastore_name)
+        )
+        .insert(
+            "verify",
+            CliCommand::new(&API_METHOD_VERIFY)
+                .arg_param(&["store"])
+                .completion_cb("store", config::datastore::complete_datastore_name)
         );
 
-    proxmox_backup::tools::runtime::main(run_async_cli_command(cmd_def));
+
+    let mut rpcenv = CliEnvironment::new();
+    rpcenv.set_user(Some(String::from("root@pam")));
+
+    proxmox_backup::tools::runtime::main(run_async_cli_command(cmd_def, rpcenv));
 }
 
 // shell completion helper
// shell completion helper
|
||||
@ -502,9 +399,9 @@ pub fn complete_remote_datastore_name(_arg: &str, param: &HashMap<String, String
|
||||
|
||||
let _ = proxmox::try_block!({
|
||||
let remote = param.get("remote").ok_or_else(|| format_err!("no remote"))?;
|
||||
let (remote_config, _digest) = remote::config()?;
|
||||
let (remote_config, _digest) = config::remote::config()?;
|
||||
|
||||
let remote: Remote = remote_config.lookup("remote", &remote)?;
|
||||
let remote: config::remote::Remote = remote_config.lookup("remote", &remote)?;
|
||||
|
||||
let options = HttpClientOptions::new()
|
||||
.password(Some(remote.password.clone()))
|
||||
|
Some files were not shown because too many files have changed in this diff.