Compare commits
454 Commits
Author | SHA1 | Date | |
---|---|---|---|
a417c8a93e | |||
79e58a903e | |||
9f40e09d0a | |||
553e57f914 | |||
2200a38671 | |||
ba39ab20fb | |||
ff8945fd2f | |||
4876393562 | |||
971bc6f94b | |||
cab92acb3c | |||
a1d90719e4 | |||
eeff085d9d | |||
d43c407a00 | |||
6bc87d3952 | |||
04c1c68f31 | |||
94b17c804a | |||
94352256b7 | |||
b3bed7e41f | |||
a4672dd0b1 | |||
17bbcb57d7 | |||
843146479a | |||
cf1e117fc7 | |||
03eac20b87 | |||
11f5d59396 | |||
6f63c29306 | |||
c0e365fd49 | |||
93fb2e0d21 | |||
c553407e98 | |||
4830de408b | |||
7f78528308 | |||
2843ba9017 | |||
e244b9d03d | |||
657c47db35 | |||
a32bb86df9 | |||
654c56e05d | |||
589c4dad9e | |||
0320deb0a9 | |||
4c4e5c2b1e | |||
924373d2df | |||
3b60b5098f | |||
4abb3edd9f | |||
932e69a837 | |||
ef6d49670b | |||
52ea00e9df | |||
870681013a | |||
c046739461 | |||
8b1289f3e4 | |||
f1d76ecf6c | |||
074503f288 | |||
c6f55139f8 | |||
20cc25d749 | |||
30316192b3 | |||
e93263be1e | |||
2ab2ca9c24 | |||
54fcb7f5d8 | |||
4abd4dbe38 | |||
eac1beef3c | |||
166a48f903 | |||
82775c4764 | |||
88bc9635aa | |||
1037f2bc2d | |||
f24cbee77d | |||
25b4d52dce | |||
2729d134bd | |||
32b75d36a8 | |||
c4430a937d | |||
237314ad0d | |||
caf76ec592 | |||
0af8c26b74 | |||
825dfe7e0d | |||
30a0809553 | |||
6ee3035523 | |||
b627ebbf40 | |||
ef4bdf6b8b | |||
54722acada | |||
0e2bf3aa1d | |||
365126efa9 | |||
03d4c9217d | |||
8498290848 | |||
654db565cb | |||
51f83548ed | |||
5847a6bdb5 | |||
313e5e2047 | |||
7914e62b10 | |||
84d3284609 | |||
70fab5b46e | |||
e36135031d | |||
5a5ee0326e | |||
776dabfb2e | |||
5c4755ad08 | |||
7c1666289d | |||
cded320e92 | |||
b31cdec225 | |||
591b120d35 | |||
e8913fea12 | |||
355a41a763 | |||
5bd4825432 | |||
8f7e5b028a | |||
2a29d9a1ee | |||
e056966bc7 | |||
ef0ea4ba05 | |||
2892624783 | |||
2c10410b0d | |||
d1d74c4367 | |||
8b7f3b8f1d | |||
3f6c2efb8d | |||
227f36497a | |||
5ef4c7bcd3 | |||
70d00e0149 | |||
dcf155dac9 | |||
3c5b523631 | |||
6396bace3d | |||
713a128adf | |||
affc224aca | |||
6f82d32977 | |||
2a06e08618 | |||
1057b1f5a5 | |||
af76234112 | |||
1825c1a9b7 | |||
9a8bf2cac9 | |||
cc5ef79bec | |||
3725d95c65 | |||
4fb068019e | |||
6446a078a0 | |||
1d7fcbece8 | |||
8703a68a31 | |||
9bcdade85f | |||
b0156179b9 | |||
d0a0bad9d6 | |||
a4003d9078 | |||
3f4a62de2f | |||
bf23f63aa5 | |||
fd641b99c3 | |||
225affc9ca | |||
9ce2481a69 | |||
d95c74c6e7 | |||
218ee3269f | |||
5ca5f8daf3 | |||
98cdee781a | |||
9cf4504909 | |||
5f846a3fc1 | |||
c9793d47f9 | |||
be8adca115 | |||
9152a0077f | |||
0b90c67fb4 | |||
b4975d3102 | |||
ee33795b72 | |||
90e16be3ae | |||
cf90a369e2 | |||
6b303323be | |||
1576c7a0d9 | |||
cd5d6103ea | |||
207f763d1a | |||
1bed3aedc8 | |||
ab77d660cc | |||
b74a1daae9 | |||
bec357e2cb | |||
78593b5b5c | |||
7d6f03a7fe | |||
f46573f8c3 | |||
b83e136fb6 | |||
5c4203b20c | |||
7f9eef1d47 | |||
a8a0132766 | |||
831c43c91b | |||
b452e2df74 | |||
7f37cacfac | |||
3bb7e62e88 | |||
3b060167f6 | |||
8a76e71129 | |||
396fd747a6 | |||
16bd08b297 | |||
ccdf327ac8 | |||
8cd63df0dc | |||
b90cb34fd6 | |||
d6c1e12c06 | |||
d33d1c880b | |||
985e84e369 | |||
cc2c5c7762 | |||
40bf636b47 | |||
347cde827b | |||
ac4a1fb35c | |||
6f3714b9aa | |||
d810014eeb | |||
e0f6892625 | |||
9d5b426a6d | |||
8bf5769382 | |||
2970cd3d6d | |||
d41114c5a8 | |||
6c92449702 | |||
db04d10d14 | |||
5a4233f07b | |||
3c715edd07 | |||
bbe05d7fe9 | |||
2af8b8ef91 | |||
d4bfdfe749 | |||
1d14c31658 | |||
9bd81bb384 | |||
d64226efee | |||
2440eaa2df | |||
e8bf4f31f2 | |||
6682461d88 | |||
41f1132e0e | |||
d938c9337a | |||
9896a75caf | |||
7eefd0c3d7 | |||
2e268e311c | |||
3e182fd828 | |||
7b60850334 | |||
1552d9699c | |||
7507b19cd2 | |||
16f9ea6708 | |||
d984a9acf0 | |||
955f4aefcd | |||
858bbfbbd1 | |||
c1570b373f | |||
d336363771 | |||
e57aa36d3e | |||
b488f850aa | |||
ec07a280ba | |||
5006632550 | |||
7eb9f48485 | |||
31cba7098d | |||
f4571b0b50 | |||
3832911d50 | |||
28c86760da | |||
c4604ca468 | |||
464c409aa3 | |||
08ec39be0c | |||
25350f3370 | |||
0023cfa385 | |||
ed24142767 | |||
917230e4f8 | |||
05228f17f5 | |||
e8653b96be | |||
1cf191c597 | |||
3d3e31b7f8 | |||
8730cfcc3e | |||
5830e5620d | |||
46d53e3e90 | |||
3554fe6480 | |||
0dadf66dc7 | |||
a941bbd0c9 | |||
21e3ed3449 | |||
81678129fb | |||
52d8db7925 | |||
875d375d7a | |||
cba167b874 | |||
e68c0e68bd | |||
ff2bc2d21f | |||
4961404c7c | |||
3fbf2311e4 | |||
41685061f7 | |||
35a7ab5778 | |||
e1beaae4a2 | |||
965bd58693 | |||
00fdaaf12b | |||
60473d234a | |||
4f688e09a4 | |||
24e84128e4 | |||
e63457b6b2 | |||
a83cedc2ac | |||
076afa6197 | |||
423e3cbd18 | |||
0263396187 | |||
043018cfbe | |||
2037d9af03 | |||
7f07991035 | |||
18ce01caff | |||
5bc8e80a99 | |||
6252df4c18 | |||
451856d21d | |||
aa30663ca5 | |||
8616a4afe5 | |||
bc2358319b | |||
0bf4b81370 | |||
c9dd5a2452 | |||
cf95f616c5 | |||
1adbc7c13c | |||
9d28974c27 | |||
3dbc35b5c1 | |||
fee0fe5422 | |||
86d9f4e733 | |||
3f16f1b006 | |||
cbd9899389 | |||
cd44fb8d84 | |||
aca4c2b5a9 | |||
85eedfb78b | |||
f26276bc4e | |||
6d62e69f9a | |||
4188fd59a0 | |||
5b9f575648 | |||
0d890ec414 | |||
926d05ef0b | |||
8be48ddfc7 | |||
41e66bfaf6 | |||
47a7241410 | |||
54c77b3d62 | |||
a1c5575308 | |||
a44c934b5d | |||
546d2653ee | |||
33c06b3388 | |||
1917ea3ce1 | |||
70842b9ef2 | |||
e6122a657e | |||
9e860ac01a | |||
7690a8e7bd | |||
1860208560 | |||
1689296d46 | |||
7aa4851b77 | |||
6ef8e2902f | |||
aa16b7b284 | |||
9bbd83b1f2 | |||
65535670f9 | |||
9d42fe4d3b | |||
918a367258 | |||
970a70b41e | |||
4094fe5a31 | |||
dea8e2cb54 | |||
0514a4308f | |||
d0647e5a02 | |||
bbe06f97be | |||
f1a83e9759 | |||
38a81c6b46 | |||
6afb60abf5 | |||
a42212fc1e | |||
2e21948156 | |||
5279ee745f | |||
227501c063 | |||
89d25b1931 | |||
b57c0dbe30 | |||
8b910bb6bc | |||
dfde34e612 | |||
2530811e22 | |||
85205bc253 | |||
3cdd1a3424 | |||
002865405c | |||
8a73ef897a | |||
be61c56c21 | |||
dbaef7d04d | |||
2048073355 | |||
a585e1f696 | |||
415737b2b8 | |||
54f7007cc5 | |||
b0338178d7 | |||
159100b944 | |||
41a8db3576 | |||
fe291ab794 | |||
adb65b9889 | |||
8513626b9f | |||
7ca0ba4515 | |||
42200c405a | |||
be327dbccd | |||
c724dc3892 | |||
70dc2ff3ab | |||
81f5d03e8d | |||
e50c6b94c1 | |||
28eaff20bd | |||
8d1a1b2976 | |||
92eaec53db | |||
b3c2c57897 | |||
f458e97fda | |||
80bf9ae99b | |||
bebd4a7ca4 | |||
9468e94412 | |||
6b66c8507f | |||
167e5406c3 | |||
c111c9a931 | |||
bb71e3a023 | |||
7b1bf4c098 | |||
32b88d928a | |||
f8e1932337 | |||
7c9fb570cc | |||
56d22c66c0 | |||
85cdc4f371 | |||
96bcfb9b1f | |||
4a874665eb | |||
6f6b69946e | |||
5b7f44555e | |||
2ca396c015 | |||
d8dae16035 | |||
8f02db04f9 | |||
9f35e44681 | |||
6279b8f5a5 | |||
3084232cb5 | |||
67cc79ec52 | |||
b9a09a9501 | |||
4a0d3a3e3f | |||
2322a980d0 | |||
c19f5b85a3 | |||
7f9d8438ab | |||
51c80c5a52 | |||
6477ebcf6f | |||
bc02c2789c | |||
c0b3d09236 | |||
3ddbab6193 | |||
befd95a90a | |||
ab6cd4229b | |||
9213744ecb | |||
41c0333814 | |||
afcf8b3ed6 | |||
69ebbec40b | |||
b22a9c14a4 | |||
54067d8225 | |||
d64c4eeab0 | |||
15d2c7786e | |||
73a1da5ed6 | |||
fbf8779388 | |||
3231c35fb8 | |||
ced7838de4 | |||
2f26b8668a | |||
9432838914 | |||
1a89a7794e | |||
c0a87c12fb | |||
c6a7ea0a2f | |||
5bb057e5a2 | |||
2924b37d6d | |||
42c0f784e2 | |||
05f17d1ec4 | |||
777690a121 | |||
a98e228766 | |||
4c9174ce26 | |||
1d70e3812c | |||
e2225aa882 | |||
99dd709f3e | |||
f197c286d5 | |||
b121711baa | |||
085655b21b | |||
4c209d6b10 | |||
8dc45e291a | |||
ec1ae7e631 | |||
25aa55b5f5 | |||
b5c6088130 | |||
a65eb0ec29 | |||
42eef1451c | |||
11ecf058e4 | |||
5f1f7ef564 | |||
2e4e698633 | |||
02dce8cad0 | |||
8aa4842fa8 | |||
efc09f63cc | |||
3253d8a2e4 | |||
1531185dd0 | |||
baf9c3704e | |||
cdf39e62b3 | |||
b81e37f6ab | |||
ddebbb52fd | |||
983e929e25 | |||
f47e035721 | |||
a80d72f999 | |||
8de9a9917f | |||
fa016c1697 | |||
7d2c156eb1 | |||
04cec92e8d |
10
.gitignore
vendored
10
.gitignore
vendored
@ -1,6 +1,16 @@
|
|||||||
local.mak
|
local.mak
|
||||||
/target
|
/target
|
||||||
**/*.rs.bk
|
**/*.rs.bk
|
||||||
|
*~
|
||||||
|
*.backup
|
||||||
|
*.backup[0-9]
|
||||||
|
*.backup[0-9][0-9]
|
||||||
|
*.old
|
||||||
|
*.old[0-9]
|
||||||
|
*.old[0-9][0-9]
|
||||||
|
*.5
|
||||||
|
*.7
|
||||||
|
__pycache__/
|
||||||
/etc/proxmox-backup.service
|
/etc/proxmox-backup.service
|
||||||
/etc/proxmox-backup-proxy.service
|
/etc/proxmox-backup-proxy.service
|
||||||
build/
|
build/
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "proxmox-backup"
|
name = "proxmox-backup"
|
||||||
version = "1.0.7"
|
version = "1.0.13"
|
||||||
authors = [
|
authors = [
|
||||||
"Dietmar Maurer <dietmar@proxmox.com>",
|
"Dietmar Maurer <dietmar@proxmox.com>",
|
||||||
"Dominik Csapak <d.csapak@proxmox.com>",
|
"Dominik Csapak <d.csapak@proxmox.com>",
|
||||||
@ -48,11 +48,11 @@ percent-encoding = "2.1"
|
|||||||
pin-utils = "0.1.0"
|
pin-utils = "0.1.0"
|
||||||
pin-project = "1.0"
|
pin-project = "1.0"
|
||||||
pathpatterns = "0.1.2"
|
pathpatterns = "0.1.2"
|
||||||
proxmox = { version = "0.10.1", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
proxmox = { version = "0.11.0", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
||||||
#proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
|
#proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
|
||||||
#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
||||||
proxmox-fuse = "0.1.1"
|
proxmox-fuse = "0.1.1"
|
||||||
pxar = { version = "0.8.0", features = [ "tokio-io" ] }
|
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
|
||||||
#pxar = { path = "../pxar", features = [ "tokio-io" ] }
|
#pxar = { path = "../pxar", features = [ "tokio-io" ] }
|
||||||
regex = "1.2"
|
regex = "1.2"
|
||||||
rustyline = "7"
|
rustyline = "7"
|
||||||
|
4
Makefile
4
Makefile
@ -10,7 +10,9 @@ SUBDIRS := etc www docs
|
|||||||
USR_BIN := \
|
USR_BIN := \
|
||||||
proxmox-backup-client \
|
proxmox-backup-client \
|
||||||
pxar \
|
pxar \
|
||||||
pmtx
|
proxmox-tape \
|
||||||
|
pmtx \
|
||||||
|
pmt
|
||||||
|
|
||||||
# Binaries usable by admins
|
# Binaries usable by admins
|
||||||
USR_SBIN := \
|
USR_SBIN := \
|
||||||
|
101
debian/changelog
vendored
101
debian/changelog
vendored
@ -1,3 +1,104 @@
|
|||||||
|
rust-proxmox-backup (1.0.13-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* pxar: improve handling ACL entries on create and restore
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Fri, 02 Apr 2021 15:32:01 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.0.12-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* tape: write catalogs to tape (speedup catalog restore)
|
||||||
|
|
||||||
|
* tape: add --scan option for catalog restore
|
||||||
|
|
||||||
|
* tape: improve locking (lock media-sets)
|
||||||
|
|
||||||
|
* tape: ui: enable datastore mappings
|
||||||
|
|
||||||
|
* fix #3359: fix blocking writes in async code during pxar create
|
||||||
|
|
||||||
|
* api2/tape/backup: wait indefinitely for lock in scheduled backup jobs
|
||||||
|
|
||||||
|
* docu improvements
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Fri, 26 Mar 2021 14:08:47 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.0.11-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* fix feature flag logic in pxar create
|
||||||
|
|
||||||
|
* tools/zip: add missing start_disk field for zip64 extension to improve
|
||||||
|
compatibility with some strict archive tools
|
||||||
|
|
||||||
|
* tape: speedup backup by doing read/write in parallel
|
||||||
|
|
||||||
|
* tape: store datastore name in tape archives and media catalog
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 18 Mar 2021 12:36:01 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.0.10-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* tape: improve MediaPool allocation by sorting tapes by creation time and
|
||||||
|
label text
|
||||||
|
|
||||||
|
* api: tape backup: continue on vanishing snapshots, as a prune during long
|
||||||
|
running tape backup jobs is OK
|
||||||
|
|
||||||
|
* tape: fix scsi volume_statistics and cartridge_memory for quantum drives
|
||||||
|
|
||||||
|
* typo fixes all over the place
|
||||||
|
|
||||||
|
* d/postinst: restart, not reload, when updating from a to old version
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 11 Mar 2021 08:24:31 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.0.9-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* client: track key source, print when used
|
||||||
|
|
||||||
|
* fix #3026: pxar: metadata: apply flags _after_ updating mtime
|
||||||
|
|
||||||
|
* docs: add acl.cfg, datastore.cfg, remote.cfg, sync.cfg, user.cfg and
|
||||||
|
verification.cfg manual page pages
|
||||||
|
|
||||||
|
* docs: add API viewer
|
||||||
|
|
||||||
|
* proxmox-backup-manger: add verify-job command group with various sub
|
||||||
|
commands
|
||||||
|
|
||||||
|
* add experimental opt-in tape backup support
|
||||||
|
|
||||||
|
* lto-barcode: fix page offset calibration
|
||||||
|
|
||||||
|
* lto-barcode: fix avery 3420 paper format properties
|
||||||
|
|
||||||
|
* asyncify pxar create archive
|
||||||
|
|
||||||
|
* client: raise HTTP_TIMEOUT for simple requests to 120s
|
||||||
|
|
||||||
|
* docs: depend on mathjax library package from debian instead of CDN
|
||||||
|
|
||||||
|
* fix #3321: docs: client: fix interactive restore command explanation
|
||||||
|
|
||||||
|
* ui: use shorter datetime format for encryption key creation time
|
||||||
|
|
||||||
|
* docs: TFA: improve language
|
||||||
|
|
||||||
|
* config/TFA: webauthn: disallow registering the same token more than once,
|
||||||
|
that can lead to buggy behavior in some token/browser combinations.
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Mon, 08 Mar 2021 15:54:47 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.0.8-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* Https Connector: use hostname instead of URL again to avoid certificate
|
||||||
|
verification issues.
|
||||||
|
|
||||||
|
* ui: task summary: add verification jobs to count
|
||||||
|
|
||||||
|
* docs: explain some technical details about datastores/chunks
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 04 Feb 2021 12:39:49 +0100
|
||||||
|
|
||||||
rust-proxmox-backup (1.0.7-1) unstable; urgency=medium
|
rust-proxmox-backup (1.0.7-1) unstable; urgency=medium
|
||||||
|
|
||||||
* fix #3197: skip fingerprint check when restoring key
|
* fix #3197: skip fingerprint check when restoring key
|
||||||
|
13
debian/control
vendored
13
debian/control
vendored
@ -36,13 +36,13 @@ Build-Depends: debhelper (>= 11),
|
|||||||
librust-percent-encoding-2+default-dev (>= 2.1-~~),
|
librust-percent-encoding-2+default-dev (>= 2.1-~~),
|
||||||
librust-pin-project-1+default-dev,
|
librust-pin-project-1+default-dev,
|
||||||
librust-pin-utils-0.1+default-dev,
|
librust-pin-utils-0.1+default-dev,
|
||||||
librust-proxmox-0.10+api-macro-dev (>= 0.10.1-~~),
|
librust-proxmox-0.11+api-macro-dev,
|
||||||
librust-proxmox-0.10+default-dev (>= 0.10.1-~~),
|
librust-proxmox-0.11+default-dev,
|
||||||
librust-proxmox-0.10+sortable-macro-dev (>= 0.10.1-~~),
|
librust-proxmox-0.11+sortable-macro-dev,
|
||||||
librust-proxmox-0.10+websocket-dev (>= 0.10.1-~~),
|
librust-proxmox-0.11+websocket-dev,
|
||||||
librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
|
librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
|
||||||
librust-pxar-0.8+default-dev,
|
librust-pxar-0.10+default-dev (>= 0.10.1-~~),
|
||||||
librust-pxar-0.8+tokio-io-dev,
|
librust-pxar-0.10+tokio-io-dev (>= 0.10.1-~~),
|
||||||
librust-regex-1+default-dev (>= 1.2-~~),
|
librust-regex-1+default-dev (>= 1.2-~~),
|
||||||
librust-rustyline-7+default-dev,
|
librust-rustyline-7+default-dev,
|
||||||
librust-serde-1+default-dev,
|
librust-serde-1+default-dev,
|
||||||
@ -141,6 +141,7 @@ Package: proxmox-backup-docs
|
|||||||
Build-Profiles: <!nodoc>
|
Build-Profiles: <!nodoc>
|
||||||
Section: doc
|
Section: doc
|
||||||
Depends: libjs-extjs,
|
Depends: libjs-extjs,
|
||||||
|
libjs-mathjax,
|
||||||
${misc:Depends},
|
${misc:Depends},
|
||||||
Architecture: all
|
Architecture: all
|
||||||
Description: Proxmox Backup Documentation
|
Description: Proxmox Backup Documentation
|
||||||
|
1
debian/control.in
vendored
1
debian/control.in
vendored
@ -38,6 +38,7 @@ Package: proxmox-backup-docs
|
|||||||
Build-Profiles: <!nodoc>
|
Build-Profiles: <!nodoc>
|
||||||
Section: doc
|
Section: doc
|
||||||
Depends: libjs-extjs,
|
Depends: libjs-extjs,
|
||||||
|
libjs-mathjax,
|
||||||
${misc:Depends},
|
${misc:Depends},
|
||||||
Architecture: all
|
Architecture: all
|
||||||
Description: Proxmox Backup Documentation
|
Description: Proxmox Backup Documentation
|
||||||
|
2
debian/copyright
vendored
2
debian/copyright
vendored
@ -1,4 +1,4 @@
|
|||||||
Copyright (C) 2019 Proxmox Server Solutions GmbH
|
Copyright (C) 2019 - 2021 Proxmox Server Solutions GmbH
|
||||||
|
|
||||||
This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>
|
This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>
|
||||||
|
|
||||||
|
3
debian/pmt.bc
vendored
Normal file
3
debian/pmt.bc
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
# pmt bash completion
|
||||||
|
|
||||||
|
complete -C 'pmt bashcomplete' pmt
|
15
debian/postinst
vendored
15
debian/postinst
vendored
@ -6,13 +6,21 @@ set -e
|
|||||||
|
|
||||||
case "$1" in
|
case "$1" in
|
||||||
configure)
|
configure)
|
||||||
# need to have user backup in the tapoe group
|
# need to have user backup in the tape group
|
||||||
usermod -a -G tape backup
|
usermod -a -G tape backup
|
||||||
|
|
||||||
# modeled after dh_systemd_start output
|
# modeled after dh_systemd_start output
|
||||||
systemctl --system daemon-reload >/dev/null || true
|
systemctl --system daemon-reload >/dev/null || true
|
||||||
if [ -n "$2" ]; then
|
if [ -n "$2" ]; then
|
||||||
|
if dpkg --compare-versions "$2" 'lt' '1.0.7-1'; then
|
||||||
|
# there was an issue with reloading and systemd being confused in older daemon versions
|
||||||
|
# so restart instead of reload if upgrading from there, see commit 0ec79339f7aebf9
|
||||||
|
# FIXME: remove with PBS 2.1
|
||||||
|
echo "Upgrading from older proxmox-backup-server: restart (not reload) daemons"
|
||||||
|
_dh_action=try-restart
|
||||||
|
else
|
||||||
_dh_action=try-reload-or-restart
|
_dh_action=try-reload-or-restart
|
||||||
|
fi
|
||||||
else
|
else
|
||||||
_dh_action=start
|
_dh_action=start
|
||||||
fi
|
fi
|
||||||
@ -40,12 +48,17 @@ case "$1" in
|
|||||||
/etc/proxmox-backup/remote.cfg || true
|
/etc/proxmox-backup/remote.cfg || true
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
# FIXME: remove with 2.0
|
||||||
|
if [ -d "/var/lib/proxmox-backup/tape" ] &&
|
||||||
|
[ "$(stat --printf '%a' '/var/lib/proxmox-backup/tape')" != "750" ]; then
|
||||||
|
chmod 0750 /var/lib/proxmox-backup/tape || true
|
||||||
fi
|
fi
|
||||||
# FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
|
# FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
|
||||||
if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
|
if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
|
||||||
echo "Fixing up termproxy user id in task log..."
|
echo "Fixing up termproxy user id in task log..."
|
||||||
flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active || true
|
flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active || true
|
||||||
fi
|
fi
|
||||||
|
fi
|
||||||
;;
|
;;
|
||||||
|
|
||||||
abort-upgrade|abort-remove|abort-deconfigure)
|
abort-upgrade|abort-remove|abort-deconfigure)
|
||||||
|
2
debian/proxmox-backup-docs.links
vendored
2
debian/proxmox-backup-docs.links
vendored
@ -1,3 +1,5 @@
|
|||||||
/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/html/proxmox-backup.pdf
|
/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/html/proxmox-backup.pdf
|
||||||
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/prune-simulator/extjs
|
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/prune-simulator/extjs
|
||||||
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/lto-barcode/extjs
|
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/lto-barcode/extjs
|
||||||
|
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/api-viewer/extjs
|
||||||
|
/usr/share/javascript/mathjax /usr/share/doc/proxmox-backup/html/_static/mathjax
|
||||||
|
2
debian/proxmox-backup-server.bash-completion
vendored
2
debian/proxmox-backup-server.bash-completion
vendored
@ -1,2 +1,4 @@
|
|||||||
debian/proxmox-backup-manager.bc proxmox-backup-manager
|
debian/proxmox-backup-manager.bc proxmox-backup-manager
|
||||||
|
debian/proxmox-tape.bc proxmox-tape
|
||||||
debian/pmtx.bc pmtx
|
debian/pmtx.bc pmtx
|
||||||
|
debian/pmt.bc pmt
|
||||||
|
15
debian/proxmox-backup-server.install
vendored
15
debian/proxmox-backup-server.install
vendored
@ -11,12 +11,27 @@ usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-daily-update
|
|||||||
usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd
|
usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd
|
||||||
usr/sbin/proxmox-backup-manager
|
usr/sbin/proxmox-backup-manager
|
||||||
usr/bin/pmtx
|
usr/bin/pmtx
|
||||||
|
usr/bin/pmt
|
||||||
|
usr/bin/proxmox-tape
|
||||||
usr/share/javascript/proxmox-backup/index.hbs
|
usr/share/javascript/proxmox-backup/index.hbs
|
||||||
usr/share/javascript/proxmox-backup/css/ext6-pbs.css
|
usr/share/javascript/proxmox-backup/css/ext6-pbs.css
|
||||||
usr/share/javascript/proxmox-backup/images
|
usr/share/javascript/proxmox-backup/images
|
||||||
usr/share/javascript/proxmox-backup/js/proxmox-backup-gui.js
|
usr/share/javascript/proxmox-backup/js/proxmox-backup-gui.js
|
||||||
usr/share/man/man1/proxmox-backup-manager.1
|
usr/share/man/man1/proxmox-backup-manager.1
|
||||||
usr/share/man/man1/proxmox-backup-proxy.1
|
usr/share/man/man1/proxmox-backup-proxy.1
|
||||||
|
usr/share/man/man1/proxmox-tape.1
|
||||||
usr/share/man/man1/pmtx.1
|
usr/share/man/man1/pmtx.1
|
||||||
|
usr/share/man/man1/pmt.1
|
||||||
|
usr/share/man/man5/acl.cfg.5
|
||||||
|
usr/share/man/man5/datastore.cfg.5
|
||||||
|
usr/share/man/man5/user.cfg.5
|
||||||
|
usr/share/man/man5/remote.cfg.5
|
||||||
|
usr/share/man/man5/sync.cfg.5
|
||||||
|
usr/share/man/man5/verification.cfg.5
|
||||||
|
usr/share/man/man5/media-pool.cfg.5
|
||||||
|
usr/share/man/man5/tape.cfg.5
|
||||||
|
usr/share/man/man5/tape-job.cfg.5
|
||||||
usr/share/zsh/vendor-completions/_proxmox-backup-manager
|
usr/share/zsh/vendor-completions/_proxmox-backup-manager
|
||||||
|
usr/share/zsh/vendor-completions/_proxmox-tape
|
||||||
usr/share/zsh/vendor-completions/_pmtx
|
usr/share/zsh/vendor-completions/_pmtx
|
||||||
|
usr/share/zsh/vendor-completions/_pmt
|
||||||
|
3
debian/proxmox-tape.bc
vendored
Normal file
3
debian/proxmox-tape.bc
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
# proxmox-tape bash completion
|
||||||
|
|
||||||
|
complete -C 'proxmox-tape bashcomplete' proxmox-tape
|
@ -5,6 +5,7 @@ LIBDIR = $(PREFIX)/lib
|
|||||||
LIBEXECDIR = $(LIBDIR)
|
LIBEXECDIR = $(LIBDIR)
|
||||||
DATAROOTDIR = $(PREFIX)/share
|
DATAROOTDIR = $(PREFIX)/share
|
||||||
MAN1DIR = $(PREFIX)/share/man/man1
|
MAN1DIR = $(PREFIX)/share/man/man1
|
||||||
|
MAN5DIR = $(PREFIX)/share/man/man5
|
||||||
DOCDIR = $(PREFIX)/share/doc/proxmox-backup
|
DOCDIR = $(PREFIX)/share/doc/proxmox-backup
|
||||||
JSDIR = $(DATAROOTDIR)/javascript/proxmox-backup
|
JSDIR = $(DATAROOTDIR)/javascript/proxmox-backup
|
||||||
SYSCONFDIR = /etc
|
SYSCONFDIR = /etc
|
||||||
|
126
docs/Makefile
126
docs/Makefile
@ -1,21 +1,43 @@
|
|||||||
include ../defines.mk
|
include ../defines.mk
|
||||||
|
|
||||||
GENERATED_SYNOPSIS := \
|
GENERATED_SYNOPSIS := \
|
||||||
|
proxmox-tape/synopsis.rst \
|
||||||
proxmox-backup-client/synopsis.rst \
|
proxmox-backup-client/synopsis.rst \
|
||||||
proxmox-backup-client/catalog-shell-synopsis.rst \
|
proxmox-backup-client/catalog-shell-synopsis.rst \
|
||||||
proxmox-backup-manager/synopsis.rst \
|
proxmox-backup-manager/synopsis.rst \
|
||||||
pxar/synopsis.rst \
|
pxar/synopsis.rst \
|
||||||
pmtx/synopsis.rst \
|
pmtx/synopsis.rst \
|
||||||
backup-protocol-api.rst \
|
pmt/synopsis.rst \
|
||||||
reader-protocol-api.rst
|
config/media-pool/config.rst \
|
||||||
|
config/tape/config.rst \
|
||||||
|
config/tape-job/config.rst \
|
||||||
|
config/user/config.rst \
|
||||||
|
config/remote/config.rst \
|
||||||
|
config/sync/config.rst \
|
||||||
|
config/verification/config.rst \
|
||||||
|
config/acl/roles.rst \
|
||||||
|
config/datastore/config.rst
|
||||||
|
|
||||||
MANUAL_PAGES := \
|
MAN1_PAGES := \
|
||||||
pxar.1 \
|
pxar.1 \
|
||||||
pmtx.1 \
|
pmtx.1 \
|
||||||
|
pmt.1 \
|
||||||
|
proxmox-tape.1 \
|
||||||
proxmox-backup-proxy.1 \
|
proxmox-backup-proxy.1 \
|
||||||
proxmox-backup-client.1 \
|
proxmox-backup-client.1 \
|
||||||
proxmox-backup-manager.1
|
proxmox-backup-manager.1
|
||||||
|
|
||||||
|
MAN5_PAGES := \
|
||||||
|
media-pool.cfg.5 \
|
||||||
|
tape.cfg.5 \
|
||||||
|
tape-job.cfg.5 \
|
||||||
|
acl.cfg.5 \
|
||||||
|
user.cfg.5 \
|
||||||
|
remote.cfg.5 \
|
||||||
|
sync.cfg.5 \
|
||||||
|
verification.cfg.5 \
|
||||||
|
datastore.cfg.5
|
||||||
|
|
||||||
PRUNE_SIMULATOR_FILES := \
|
PRUNE_SIMULATOR_FILES := \
|
||||||
prune-simulator/index.html \
|
prune-simulator/index.html \
|
||||||
prune-simulator/documentation.html \
|
prune-simulator/documentation.html \
|
||||||
@ -35,6 +57,10 @@ LTO_BARCODE_FILES := \
|
|||||||
lto-barcode/label-setup.js \
|
lto-barcode/label-setup.js \
|
||||||
lto-barcode/lto-barcode.js
|
lto-barcode/lto-barcode.js
|
||||||
|
|
||||||
|
API_VIEWER_SOURCES= \
|
||||||
|
api-viewer/index.html \
|
||||||
|
api-viewer/apidoc.js
|
||||||
|
|
||||||
# Sphinx documentation setup
|
# Sphinx documentation setup
|
||||||
SPHINXOPTS =
|
SPHINXOPTS =
|
||||||
SPHINXBUILD = sphinx-build
|
SPHINXBUILD = sphinx-build
|
||||||
@ -51,15 +77,7 @@ endif
|
|||||||
# Sphinx internal variables.
|
# Sphinx internal variables.
|
||||||
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(SPHINXOPTS) .
|
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(SPHINXOPTS) .
|
||||||
|
|
||||||
all: ${MANUAL_PAGES}
|
all: ${MAN1_PAGES} ${MAN5_PAGES}
|
||||||
|
|
||||||
# Extract backup protocol docs
|
|
||||||
backup-protocol-api.rst: ${COMPILEDIR}/dump-backup-api
|
|
||||||
${COMPILEDIR}/dump-backup-api >$@
|
|
||||||
|
|
||||||
# Extract reader protocol docs
|
|
||||||
reader-protocol-api.rst: ${COMPILEDIR}/dump-reader-api
|
|
||||||
${COMPILEDIR}/dump-backup-api >$@
|
|
||||||
|
|
||||||
# Build manual pages using rst2man
|
# Build manual pages using rst2man
|
||||||
|
|
||||||
@ -77,6 +95,72 @@ pmtx.1: pmtx/man1.rst pmtx/description.rst pmtx/synopsis.rst
|
|||||||
rst2man $< >$@
|
rst2man $< >$@
|
||||||
|
|
||||||
|
|
||||||
|
pmt/synopsis.rst: ${COMPILEDIR}/pmt
|
||||||
|
${COMPILEDIR}/pmt printdoc > pmt/synopsis.rst
|
||||||
|
|
||||||
|
pmt.1: pmt/man1.rst pmt/description.rst pmt/options.rst pmt/synopsis.rst
|
||||||
|
rst2man $< >$@
|
||||||
|
|
||||||
|
config/datastore/config.rst: ${COMPILEDIR}/docgen
|
||||||
|
${COMPILEDIR}/docgen datastore.cfg >$@
|
||||||
|
|
||||||
|
datastore.cfg.5: config/datastore/man5.rst config/datastore/config.rst config/datastore/format.rst
|
||||||
|
rst2man $< >$@
|
||||||
|
|
||||||
|
config/user/config.rst: ${COMPILEDIR}/docgen
|
||||||
|
${COMPILEDIR}/docgen user.cfg >$@
|
||||||
|
|
||||||
|
user.cfg.5: config/user/man5.rst config/user/config.rst config/user/format.rst
|
||||||
|
rst2man $< >$@
|
||||||
|
|
||||||
|
config/remote/config.rst: ${COMPILEDIR}/docgen
|
||||||
|
${COMPILEDIR}/docgen remote.cfg >$@
|
||||||
|
|
||||||
|
remote.cfg.5: config/remote/man5.rst config/remote/config.rst config/remote/format.rst
|
||||||
|
rst2man $< >$@
|
||||||
|
|
||||||
|
config/sync/config.rst: ${COMPILEDIR}/docgen
|
||||||
|
${COMPILEDIR}/docgen sync.cfg >$@
|
||||||
|
|
||||||
|
sync.cfg.5: config/sync/man5.rst config/sync/config.rst config/sync/format.rst
|
||||||
|
rst2man $< >$@
|
||||||
|
|
||||||
|
config/verification/config.rst: ${COMPILEDIR}/docgen
|
||||||
|
${COMPILEDIR}/docgen verification.cfg >$@
|
||||||
|
|
||||||
|
verification.cfg.5: config/verification/man5.rst config/verification/config.rst config/verification/format.rst
|
||||||
|
rst2man $< >$@
|
||||||
|
|
||||||
|
config/acl/roles.rst: ${COMPILEDIR}/docgen
|
||||||
|
${COMPILEDIR}/docgen "config::acl::Role" >$@
|
||||||
|
|
||||||
|
acl.cfg.5: config/acl/man5.rst config/acl/roles.rst config/acl/format.rst
|
||||||
|
rst2man $< >$@
|
||||||
|
|
||||||
|
config/media-pool/config.rst: ${COMPILEDIR}/docgen
|
||||||
|
${COMPILEDIR}/docgen media-pool.cfg >$@
|
||||||
|
|
||||||
|
media-pool.cfg.5: config/media-pool/man5.rst config/media-pool/config.rst config/media-pool/format.rst
|
||||||
|
rst2man $< >$@
|
||||||
|
|
||||||
|
config/tape/config.rst: ${COMPILEDIR}/docgen
|
||||||
|
${COMPILEDIR}/docgen tape.cfg >$@
|
||||||
|
|
||||||
|
tape.cfg.5: config/tape/man5.rst config/tape/config.rst config/tape/format.rst
|
||||||
|
rst2man $< >$@
|
||||||
|
|
||||||
|
config/tape-job/config.rst: ${COMPILEDIR}/docgen
|
||||||
|
${COMPILEDIR}/docgen tape-job.cfg >$@
|
||||||
|
|
||||||
|
tape-job.cfg.5: config/tape-job/man5.rst config/tape-job/config.rst config/tape-job/format.rst
|
||||||
|
rst2man $< >$@
|
||||||
|
|
||||||
|
proxmox-tape/synopsis.rst: ${COMPILEDIR}/proxmox-tape
|
||||||
|
${COMPILEDIR}/proxmox-tape printdoc > proxmox-tape/synopsis.rst
|
||||||
|
|
||||||
|
proxmox-tape.1: proxmox-tape/man1.rst proxmox-tape/description.rst proxmox-tape/synopsis.rst
|
||||||
|
rst2man $< >$@
|
||||||
|
|
||||||
proxmox-backup-client/synopsis.rst: ${COMPILEDIR}/proxmox-backup-client
|
proxmox-backup-client/synopsis.rst: ${COMPILEDIR}/proxmox-backup-client
|
||||||
${COMPILEDIR}/proxmox-backup-client printdoc > proxmox-backup-client/synopsis.rst
|
${COMPILEDIR}/proxmox-backup-client printdoc > proxmox-backup-client/synopsis.rst
|
||||||
|
|
||||||
@ -101,14 +185,22 @@ onlinehelpinfo:
|
|||||||
$(SPHINXBUILD) -b proxmox-scanrefs $(ALLSPHINXOPTS) $(BUILDDIR)/scanrefs
|
$(SPHINXBUILD) -b proxmox-scanrefs $(ALLSPHINXOPTS) $(BUILDDIR)/scanrefs
|
||||||
@echo "Build finished. OnlineHelpInfo.js is in $(BUILDDIR)/scanrefs."
|
@echo "Build finished. OnlineHelpInfo.js is in $(BUILDDIR)/scanrefs."
|
||||||
|
|
||||||
|
api-viewer/apidata.js: ${COMPILEDIR}/docgen
|
||||||
|
${COMPILEDIR}/docgen apidata.js >$@
|
||||||
|
|
||||||
|
api-viewer/apidoc.js: api-viewer/apidata.js api-viewer/PBSAPI.js
|
||||||
|
cat api-viewer/apidata.js api-viewer/PBSAPI.js >$@
|
||||||
|
|
||||||
.PHONY: html
|
.PHONY: html
|
||||||
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES}
|
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES} ${API_VIEWER_SOURCES}
|
||||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||||
install -m 0644 custom.js custom.css images/proxmox-logo.svg $(BUILDDIR)/html/_static/
|
install -m 0644 custom.js custom.css images/proxmox-logo.svg $(BUILDDIR)/html/_static/
|
||||||
install -dm 0755 $(BUILDDIR)/html/prune-simulator
|
install -dm 0755 $(BUILDDIR)/html/prune-simulator
|
||||||
install -m 0644 ${PRUNE_SIMULATOR_FILES} $(BUILDDIR)/html/prune-simulator
|
install -m 0644 ${PRUNE_SIMULATOR_FILES} $(BUILDDIR)/html/prune-simulator
|
||||||
install -dm 0755 $(BUILDDIR)/html/lto-barcode
|
install -dm 0755 $(BUILDDIR)/html/lto-barcode
|
||||||
install -m 0644 ${LTO_BARCODE_FILES} $(BUILDDIR)/html/lto-barcode
|
install -m 0644 ${LTO_BARCODE_FILES} $(BUILDDIR)/html/lto-barcode
|
||||||
|
install -dm 0755 $(BUILDDIR)/html/api-viewer
|
||||||
|
install -m 0644 ${API_VIEWER_SOURCES} $(BUILDDIR)/html/api-viewer
|
||||||
@echo
|
@echo
|
||||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
||||||
|
|
||||||
@ -127,12 +219,14 @@ epub3: ${GENERATED_SYNOPSIS}
|
|||||||
@echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3."
|
@echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3."
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -r -f *~ *.1 ${BUILDDIR} ${GENERATED_SYNOPSIS}
|
rm -r -f *~ *.1 ${BUILDDIR} ${GENERATED_SYNOPSIS} api-viewer/apidata.js
|
||||||
|
|
||||||
|
|
||||||
install_manual_pages: ${MANUAL_PAGES}
|
install_manual_pages: ${MAN1_PAGES} ${MAN5_PAGES}
|
||||||
install -dm755 $(DESTDIR)$(MAN1DIR)
|
install -dm755 $(DESTDIR)$(MAN1DIR)
|
||||||
for i in ${MANUAL_PAGES}; do install -m755 $$i $(DESTDIR)$(MAN1DIR)/ ; done
|
for i in ${MAN1_PAGES}; do install -m755 $$i $(DESTDIR)$(MAN1DIR)/ ; done
|
||||||
|
install -dm755 $(DESTDIR)$(MAN5DIR)
|
||||||
|
for i in ${MAN5_PAGES}; do install -m755 $$i $(DESTDIR)$(MAN5DIR)/ ; done
|
||||||
|
|
||||||
install_html: html
|
install_html: html
|
||||||
install -dm755 $(DESTDIR)$(DOCDIR)
|
install -dm755 $(DESTDIR)$(DOCDIR)
|
||||||
|
@ -90,7 +90,18 @@ class ReflabelMapper(Builder):
|
|||||||
if hasattr(node, 'expect_referenced_by_id') and len(node['ids']) > 1: # explicit labels
|
if hasattr(node, 'expect_referenced_by_id') and len(node['ids']) > 1: # explicit labels
|
||||||
filename = self.env.doc2path(docname)
|
filename = self.env.doc2path(docname)
|
||||||
filename_html = re.sub('.rst', '.html', filename)
|
filename_html = re.sub('.rst', '.html', filename)
|
||||||
labelid = node['ids'][1] # [0] is predefined by sphinx, we need [1] for explicit ones
|
|
||||||
|
# node['ids'][0] contains a normalized version of the
|
||||||
|
# headline. If the ref and headline are the same
|
||||||
|
# (normalized) sphinx will set the node['ids'][1] to a
|
||||||
|
# generic id in the format `idX` where X is numeric. If the
|
||||||
|
# ref and headline are not the same, the ref name will be
|
||||||
|
# stored in node['ids'][1]
|
||||||
|
if re.match('^id[0-9]*$', node['ids'][1]):
|
||||||
|
labelid = node['ids'][0]
|
||||||
|
else:
|
||||||
|
labelid = node['ids'][1]
|
||||||
|
|
||||||
title = cast(nodes.title, node[0])
|
title = cast(nodes.title, node[0])
|
||||||
logger.info('traversing section {}'.format(title.astext()))
|
logger.info('traversing section {}'.format(title.astext()))
|
||||||
ref_name = getattr(title, 'rawsource', title.astext())
|
ref_name = getattr(title, 'rawsource', title.astext())
|
||||||
|
511
docs/api-viewer/PBSAPI.js
Normal file
511
docs/api-viewer/PBSAPI.js
Normal file
@ -0,0 +1,511 @@
|
|||||||
|
// avoid errors when running without development tools
|
||||||
|
if (!Ext.isDefined(Ext.global.console)) {
|
||||||
|
var console = {
|
||||||
|
dir: function() {},
|
||||||
|
log: function() {}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
Ext.onReady(function() {
|
||||||
|
|
||||||
|
Ext.define('pve-param-schema', {
|
||||||
|
extend: 'Ext.data.Model',
|
||||||
|
fields: [
|
||||||
|
'name', 'type', 'typetext', 'description', 'verbose_description',
|
||||||
|
'enum', 'minimum', 'maximum', 'minLength', 'maxLength',
|
||||||
|
'pattern', 'title', 'requires', 'format', 'default',
|
||||||
|
'disallow', 'extends', 'links',
|
||||||
|
{
|
||||||
|
name: 'optional',
|
||||||
|
type: 'boolean'
|
||||||
|
}
|
||||||
|
]
|
||||||
|
});
|
||||||
|
|
||||||
|
var store = Ext.define('pve-updated-treestore', {
|
||||||
|
extend: 'Ext.data.TreeStore',
|
||||||
|
model: Ext.define('pve-api-doc', {
|
||||||
|
extend: 'Ext.data.Model',
|
||||||
|
fields: [
|
||||||
|
'path', 'info', 'text',
|
||||||
|
]
|
||||||
|
}),
|
||||||
|
proxy: {
|
||||||
|
type: 'memory',
|
||||||
|
data: pbsapi
|
||||||
|
},
|
||||||
|
sorters: [{
|
||||||
|
property: 'leaf',
|
||||||
|
direction: 'ASC'
|
||||||
|
}, {
|
||||||
|
property: 'text',
|
||||||
|
direction: 'ASC'
|
||||||
|
}],
|
||||||
|
filterer: 'bottomup',
|
||||||
|
doFilter: function(node) {
|
||||||
|
this.filterNodes(node, this.getFilters().getFilterFn(), true);
|
||||||
|
},
|
||||||
|
|
||||||
|
filterNodes: function(node, filterFn, parentVisible) {
|
||||||
|
var me = this,
|
||||||
|
bottomUpFiltering = me.filterer === 'bottomup',
|
||||||
|
match = filterFn(node) && parentVisible || (node.isRoot() && !me.getRootVisible()),
|
||||||
|
childNodes = node.childNodes,
|
||||||
|
len = childNodes && childNodes.length, i, matchingChildren;
|
||||||
|
|
||||||
|
if (len) {
|
||||||
|
for (i = 0; i < len; ++i) {
|
||||||
|
matchingChildren = me.filterNodes(childNodes[i], filterFn, match || bottomUpFiltering) || matchingChildren;
|
||||||
|
}
|
||||||
|
if (bottomUpFiltering) {
|
||||||
|
match = matchingChildren || match;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
node.set("visible", match, me._silentOptions);
|
||||||
|
return match;
|
||||||
|
},
|
||||||
|
|
||||||
|
}).create();
|
||||||
|
|
||||||
|
var render_description = function(value, metaData, record) {
|
||||||
|
var pdef = record.data;
|
||||||
|
|
||||||
|
value = pdef.verbose_description || value;
|
||||||
|
|
||||||
|
// TODO: try to render asciidoc correctly
|
||||||
|
|
||||||
|
metaData.style = 'white-space:pre-wrap;'
|
||||||
|
|
||||||
|
return Ext.htmlEncode(value);
|
||||||
|
};
|
||||||
|
|
||||||
|
var render_type = function(value, metaData, record) {
|
||||||
|
var pdef = record.data;
|
||||||
|
|
||||||
|
return pdef['enum'] ? 'enum' : (pdef.type || 'string');
|
||||||
|
};
|
||||||
|
|
||||||
|
var render_format = function(value, metaData, record) {
|
||||||
|
var pdef = record.data;
|
||||||
|
|
||||||
|
metaData.style = 'white-space:normal;'
|
||||||
|
|
||||||
|
if (pdef.typetext)
|
||||||
|
return Ext.htmlEncode(pdef.typetext);
|
||||||
|
|
||||||
|
if (pdef['enum'])
|
||||||
|
return pdef['enum'].join(' | ');
|
||||||
|
|
||||||
|
if (pdef.format)
|
||||||
|
return pdef.format;
|
||||||
|
|
||||||
|
if (pdef.pattern)
|
||||||
|
return Ext.htmlEncode(pdef.pattern);
|
||||||
|
|
||||||
|
return '';
|
||||||
|
};
|
||||||
|
|
||||||
|
var real_path = function(path) {
|
||||||
|
return path.replace(/^.*\/_upgrade_(\/)?/, "/");
|
||||||
|
};
|
||||||
|
|
||||||
|
var permission_text = function(permission) {
|
||||||
|
let permhtml = "";
|
||||||
|
|
||||||
|
if (permission.user) {
|
||||||
|
if (!permission.description) {
|
||||||
|
if (permission.user === 'world') {
|
||||||
|
permhtml += "Accessible without any authentication.";
|
||||||
|
} else if (permission.user === 'all') {
|
||||||
|
permhtml += "Accessible by all authenticated users.";
|
||||||
|
} else {
|
||||||
|
permhtml += 'Onyl accessible by user "' +
|
||||||
|
permission.user + '"';
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if (permission.check) {
|
||||||
|
permhtml += "<pre>Check: " +
|
||||||
|
Ext.htmlEncode(Ext.JSON.encode(permission.check)) + "</pre>";
|
||||||
|
} else if (permission.userParam) {
|
||||||
|
permhtml += `<div>Check if user matches parameter '${permission.userParam}'`;
|
||||||
|
} else if (permission.or) {
|
||||||
|
permhtml += "<div>Or<div style='padding-left: 10px;'>";
|
||||||
|
Ext.Array.each(permission.or, function(sub_permission) {
|
||||||
|
permhtml += permission_text(sub_permission);
|
||||||
|
})
|
||||||
|
permhtml += "</div></div>";
|
||||||
|
} else if (permission.and) {
|
||||||
|
permhtml += "<div>And<div style='padding-left: 10px;'>";
|
||||||
|
Ext.Array.each(permission.and, function(sub_permission) {
|
||||||
|
permhtml += permission_text(sub_permission);
|
||||||
|
})
|
||||||
|
permhtml += "</div></div>";
|
||||||
|
} else {
|
||||||
|
//console.log(permission);
|
||||||
|
permhtml += "Unknown systax!";
|
||||||
|
}
|
||||||
|
|
||||||
|
return permhtml;
|
||||||
|
};
|
||||||
|
|
||||||
|
var render_docu = function(data) {
|
||||||
|
var md = data.info;
|
||||||
|
|
||||||
|
// console.dir(data);
|
||||||
|
|
||||||
|
var items = [];
|
||||||
|
|
||||||
|
var clicmdhash = {
|
||||||
|
GET: 'get',
|
||||||
|
POST: 'create',
|
||||||
|
PUT: 'set',
|
||||||
|
DELETE: 'delete'
|
||||||
|
};
|
||||||
|
|
||||||
|
Ext.Array.each(['GET', 'POST', 'PUT', 'DELETE'], function(method) {
|
||||||
|
var info = md[method];
|
||||||
|
if (info) {
|
||||||
|
|
||||||
|
var usage = "";
|
||||||
|
|
||||||
|
usage += "<table><tr><td>HTTP: </td><td>"
|
||||||
|
+ method + " " + real_path("/api2/json" + data.path) + "</td></tr>";
|
||||||
|
|
||||||
|
var sections = [
|
||||||
|
{
|
||||||
|
title: 'Description',
|
||||||
|
html: Ext.htmlEncode(info.description),
|
||||||
|
bodyPadding: 10
|
||||||
|
},
|
||||||
|
{
|
||||||
|
title: 'Usage',
|
||||||
|
html: usage,
|
||||||
|
bodyPadding: 10
|
||||||
|
}
|
||||||
|
];
|
||||||
|
|
||||||
|
if (info.parameters && info.parameters.properties) {
|
||||||
|
|
||||||
|
var pstore = Ext.create('Ext.data.Store', {
|
||||||
|
model: 'pve-param-schema',
|
||||||
|
proxy: {
|
||||||
|
type: 'memory'
|
||||||
|
},
|
||||||
|
groupField: 'optional',
|
||||||
|
sorters: [
|
||||||
|
{
|
||||||
|
property: 'name',
|
||||||
|
direction: 'ASC'
|
||||||
|
}
|
||||||
|
]
|
||||||
|
});
|
||||||
|
|
||||||
|
Ext.Object.each(info.parameters.properties, function(name, pdef) {
|
||||||
|
pdef.name = name;
|
||||||
|
pstore.add(pdef);
|
||||||
|
});
|
||||||
|
|
||||||
|
pstore.sort();
|
||||||
|
|
||||||
|
var groupingFeature = Ext.create('Ext.grid.feature.Grouping',{
|
||||||
|
enableGroupingMenu: false,
|
||||||
|
groupHeaderTpl: '<tpl if="groupValue">Optional</tpl><tpl if="!groupValue">Required</tpl>'
|
||||||
|
});
|
||||||
|
|
||||||
|
sections.push({
|
||||||
|
xtype: 'gridpanel',
|
||||||
|
title: 'Parameters',
|
||||||
|
features: [groupingFeature],
|
||||||
|
store: pstore,
|
||||||
|
viewConfig: {
|
||||||
|
trackOver: false,
|
||||||
|
stripeRows: true
|
||||||
|
},
|
||||||
|
columns: [
|
||||||
|
{
|
||||||
|
header: 'Name',
|
||||||
|
dataIndex: 'name',
|
||||||
|
flex: 1
|
||||||
|
},
|
||||||
|
{
|
||||||
|
header: 'Type',
|
||||||
|
dataIndex: 'type',
|
||||||
|
renderer: render_type,
|
||||||
|
flex: 1
|
||||||
|
},
|
||||||
|
{
|
||||||
|
header: 'Default',
|
||||||
|
dataIndex: 'default',
|
||||||
|
flex: 1
|
||||||
|
},
|
||||||
|
{
|
||||||
|
header: 'Format',
|
||||||
|
dataIndex: 'type',
|
||||||
|
renderer: render_format,
|
||||||
|
flex: 2
|
||||||
|
},
|
||||||
|
{
|
||||||
|
header: 'Description',
|
||||||
|
dataIndex: 'description',
|
||||||
|
renderer: render_description,
|
||||||
|
flex: 6
|
||||||
|
}
|
||||||
|
]
|
||||||
|
});
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if (info.returns) {
|
||||||
|
|
||||||
|
var retinf = info.returns;
|
||||||
|
var rtype = retinf.type;
|
||||||
|
if (!rtype && retinf.items)
|
||||||
|
rtype = 'array';
|
||||||
|
if (!rtype)
|
||||||
|
rtype = 'object';
|
||||||
|
|
||||||
|
var rpstore = Ext.create('Ext.data.Store', {
|
||||||
|
model: 'pve-param-schema',
|
||||||
|
proxy: {
|
||||||
|
type: 'memory'
|
||||||
|
},
|
||||||
|
groupField: 'optional',
|
||||||
|
sorters: [
|
||||||
|
{
|
||||||
|
property: 'name',
|
||||||
|
direction: 'ASC'
|
||||||
|
}
|
||||||
|
]
|
||||||
|
});
|
||||||
|
|
||||||
|
var properties;
|
||||||
|
if (rtype === 'array' && retinf.items.properties) {
|
||||||
|
properties = retinf.items.properties;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (rtype === 'object' && retinf.properties) {
|
||||||
|
properties = retinf.properties;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ext.Object.each(properties, function(name, pdef) {
|
||||||
|
pdef.name = name;
|
||||||
|
rpstore.add(pdef);
|
||||||
|
});
|
||||||
|
|
||||||
|
rpstore.sort();
|
||||||
|
|
||||||
|
var groupingFeature = Ext.create('Ext.grid.feature.Grouping',{
|
||||||
|
enableGroupingMenu: false,
|
||||||
|
groupHeaderTpl: '<tpl if="groupValue">Optional</tpl><tpl if="!groupValue">Obligatory</tpl>'
|
||||||
|
});
|
||||||
|
var returnhtml;
|
||||||
|
if (retinf.items) {
|
||||||
|
returnhtml = '<pre>items: ' + Ext.htmlEncode(JSON.stringify(retinf.items, null, 4)) + '</pre>';
|
||||||
|
}
|
||||||
|
|
||||||
|
if (retinf.properties) {
|
||||||
|
returnhtml = returnhtml || '';
|
||||||
|
returnhtml += '<pre>properties:' + Ext.htmlEncode(JSON.stringify(retinf.properties, null, 4)) + '</pre>';
|
||||||
|
}
|
||||||
|
|
||||||
|
var rawSection = Ext.create('Ext.panel.Panel', {
|
||||||
|
bodyPadding: '0px 10px 10px 10px',
|
||||||
|
html: returnhtml,
|
||||||
|
hidden: true
|
||||||
|
});
|
||||||
|
|
||||||
|
sections.push({
|
||||||
|
xtype: 'gridpanel',
|
||||||
|
title: 'Returns: ' + rtype,
|
||||||
|
features: [groupingFeature],
|
||||||
|
store: rpstore,
|
||||||
|
viewConfig: {
|
||||||
|
trackOver: false,
|
||||||
|
stripeRows: true
|
||||||
|
},
|
||||||
|
columns: [
|
||||||
|
{
|
||||||
|
header: 'Name',
|
||||||
|
dataIndex: 'name',
|
||||||
|
flex: 1
|
||||||
|
},
|
||||||
|
{
|
||||||
|
header: 'Type',
|
||||||
|
dataIndex: 'type',
|
||||||
|
renderer: render_type,
|
||||||
|
flex: 1
|
||||||
|
},
|
||||||
|
{
|
||||||
|
header: 'Default',
|
||||||
|
dataIndex: 'default',
|
||||||
|
flex: 1
|
||||||
|
},
|
||||||
|
{
|
||||||
|
header: 'Format',
|
||||||
|
dataIndex: 'type',
|
||||||
|
renderer: render_format,
|
||||||
|
flex: 2
|
||||||
|
},
|
||||||
|
{
|
||||||
|
header: 'Description',
|
||||||
|
dataIndex: 'description',
|
||||||
|
renderer: render_description,
|
||||||
|
flex: 6
|
||||||
|
}
|
||||||
|
],
|
||||||
|
bbar: [
|
||||||
|
{
|
||||||
|
xtype: 'button',
|
||||||
|
text: 'Show RAW',
|
||||||
|
handler: function(btn) {
|
||||||
|
rawSection.setVisible(!rawSection.isVisible());
|
||||||
|
btn.setText(rawSection.isVisible() ? 'Hide RAW' : 'Show RAW');
|
||||||
|
}}
|
||||||
|
]
|
||||||
|
});
|
||||||
|
|
||||||
|
sections.push(rawSection);
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!data.path.match(/\/_upgrade_/)) {
|
||||||
|
var permhtml = '';
|
||||||
|
|
||||||
|
if (!info.permissions) {
|
||||||
|
permhtml = "Root only.";
|
||||||
|
} else {
|
||||||
|
if (info.permissions.description) {
|
||||||
|
permhtml += "<div style='white-space:pre-wrap;padding-bottom:10px;'>" +
|
||||||
|
Ext.htmlEncode(info.permissions.description) + "</div>";
|
||||||
|
}
|
||||||
|
permhtml += permission_text(info.permissions);
|
||||||
|
}
|
||||||
|
|
||||||
|
// we do not have this information for PBS api
|
||||||
|
//if (!info.allowtoken) {
|
||||||
|
// permhtml += "<br />This API endpoint is not available for API tokens."
|
||||||
|
//}
|
||||||
|
|
||||||
|
sections.push({
|
||||||
|
title: 'Required permissions',
|
||||||
|
bodyPadding: 10,
|
||||||
|
html: permhtml
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
items.push({
|
||||||
|
title: method,
|
||||||
|
autoScroll: true,
|
||||||
|
defaults: {
|
||||||
|
border: false
|
||||||
|
},
|
||||||
|
items: sections
|
||||||
|
});
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
var ct = Ext.getCmp('docview');
|
||||||
|
ct.setTitle("Path: " + real_path(data.path));
|
||||||
|
ct.removeAll(true);
|
||||||
|
ct.add(items);
|
||||||
|
ct.setActiveTab(0);
|
||||||
|
};
|
||||||
|
|
||||||
|
Ext.define('Ext.form.SearchField', {
|
||||||
|
extend: 'Ext.form.field.Text',
|
||||||
|
alias: 'widget.searchfield',
|
||||||
|
|
||||||
|
emptyText: 'Search...',
|
||||||
|
|
||||||
|
flex: 1,
|
||||||
|
|
||||||
|
inputType: 'search',
|
||||||
|
listeners: {
|
||||||
|
'change': function(){
|
||||||
|
|
||||||
|
var value = this.getValue();
|
||||||
|
if (!Ext.isEmpty(value)) {
|
||||||
|
store.filter({
|
||||||
|
property: 'path',
|
||||||
|
value: value,
|
||||||
|
anyMatch: true
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
store.clearFilter();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
var tree = Ext.create('Ext.tree.Panel', {
|
||||||
|
title: 'Resource Tree',
|
||||||
|
tbar: [
|
||||||
|
{
|
||||||
|
xtype: 'searchfield',
|
||||||
|
}
|
||||||
|
],
|
||||||
|
tools: [
|
||||||
|
{
|
||||||
|
type: 'expand',
|
||||||
|
tooltip: 'Expand all',
|
||||||
|
tooltipType: 'title',
|
||||||
|
callback: (tree) => tree.expandAll(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type: 'collapse',
|
||||||
|
tooltip: 'Collapse all',
|
||||||
|
tooltipType: 'title',
|
||||||
|
callback: (tree) => tree.collapseAll(),
|
||||||
|
},
|
||||||
|
],
|
||||||
|
store: store,
|
||||||
|
width: 200,
|
||||||
|
region: 'west',
|
||||||
|
split: true,
|
||||||
|
margins: '5 0 5 5',
|
||||||
|
rootVisible: false,
|
||||||
|
listeners: {
|
||||||
|
selectionchange: function(v, selections) {
|
||||||
|
if (!selections[0])
|
||||||
|
return;
|
||||||
|
var rec = selections[0];
|
||||||
|
render_docu(rec.data);
|
||||||
|
location.hash = '#' + rec.data.path;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
Ext.create('Ext.container.Viewport', {
|
||||||
|
layout: 'border',
|
||||||
|
renderTo: Ext.getBody(),
|
||||||
|
items: [
|
||||||
|
tree,
|
||||||
|
{
|
||||||
|
xtype: 'tabpanel',
|
||||||
|
title: 'Documentation',
|
||||||
|
id: 'docview',
|
||||||
|
region: 'center',
|
||||||
|
margins: '5 5 5 0',
|
||||||
|
layout: 'fit',
|
||||||
|
items: []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
});
|
||||||
|
|
||||||
|
var deepLink = function() {
|
||||||
|
var path = window.location.hash.substring(1).replace(/\/\s*$/, '')
|
||||||
|
var endpoint = store.findNode('path', path);
|
||||||
|
|
||||||
|
if (endpoint) {
|
||||||
|
tree.getSelectionModel().select(endpoint);
|
||||||
|
tree.expandPath(endpoint.getPath());
|
||||||
|
render_docu(endpoint.data);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
window.onhashchange = deepLink;
|
||||||
|
|
||||||
|
deepLink();
|
||||||
|
|
||||||
|
});
|
13
docs/api-viewer/index.html
Normal file
13
docs/api-viewer/index.html
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no">
|
||||||
|
<title>Proxmox Backup Server API Documentation</title>
|
||||||
|
|
||||||
|
<link rel="stylesheet" type="text/css" href="extjs/theme-crisp/resources/theme-crisp-all.css">
|
||||||
|
<script type="text/javascript" src="extjs/ext-all.js"></script>
|
||||||
|
<script type="text/javascript" src="apidoc.js"></script>
|
||||||
|
</head>
|
||||||
|
<body></body>
|
||||||
|
</html>
|
@ -3,6 +3,7 @@ Backup Client Usage
|
|||||||
|
|
||||||
The command line client is called :command:`proxmox-backup-client`.
|
The command line client is called :command:`proxmox-backup-client`.
|
||||||
|
|
||||||
|
.. _client_repository:
|
||||||
|
|
||||||
Repository Locations
|
Repository Locations
|
||||||
--------------------
|
--------------------
|
||||||
@ -60,33 +61,10 @@ Environment Variables
|
|||||||
Output Format
|
Output Format
|
||||||
-------------
|
-------------
|
||||||
|
|
||||||
Most commands support the ``--output-format`` parameter. It accepts
|
.. include:: output-format.rst
|
||||||
the following values:
|
|
||||||
|
|
||||||
:``text``: Text format (default). Structured data is rendered as a table.
|
|
||||||
|
|
||||||
:``json``: JSON (single line).
|
|
||||||
|
|
||||||
:``json-pretty``: JSON (multiple lines, nicely formatted).
|
|
||||||
|
|
||||||
|
|
||||||
Please use the following environment variables to modify output behavior:
|
.. _client_creating_backups:
|
||||||
|
|
||||||
``PROXMOX_OUTPUT_FORMAT``
|
|
||||||
Defines the default output format.
|
|
||||||
|
|
||||||
``PROXMOX_OUTPUT_NO_BORDER``
|
|
||||||
If set (to any value), do not render table borders.
|
|
||||||
|
|
||||||
``PROXMOX_OUTPUT_NO_HEADER``
|
|
||||||
If set (to any value), do not render table headers.
|
|
||||||
|
|
||||||
.. note:: The ``text`` format is designed to be human readable, and
|
|
||||||
not meant to be parsed by automation tools. Please use the ``json``
|
|
||||||
format if you need to process the output.
|
|
||||||
|
|
||||||
|
|
||||||
.. _creating-backups:
|
|
||||||
|
|
||||||
Creating Backups
|
Creating Backups
|
||||||
----------------
|
----------------
|
||||||
@ -246,7 +224,7 @@ Restoring this backup will result in:
|
|||||||
. .. file2
|
. .. file2
|
||||||
|
|
||||||
|
|
||||||
.. _encryption:
|
.. _client_encryption:
|
||||||
|
|
||||||
Encryption
|
Encryption
|
||||||
----------
|
----------
|
||||||
@ -483,16 +461,15 @@ subdirectory and add the corresponding pattern to the list for subsequent restor
|
|||||||
all files in the archive matching the patterns to ``/target/path`` on the local
|
all files in the archive matching the patterns to ``/target/path`` on the local
|
||||||
host. This will scan the whole archive.
|
host. This will scan the whole archive.
|
||||||
|
|
||||||
With ``restore /target/path`` you can restore the sub-archive given by the current
|
The ``restore`` command can be used to restore all the files contained within
|
||||||
working directory to the local target path ``/target/path`` on your host.
|
the backup archive. This is most helpful when paired with the ``--pattern
|
||||||
By additionally passing a glob pattern with ``--pattern <glob>``, the restore is
|
<glob>`` option, as it allows you to restore all files matching a specific
|
||||||
further limited to files matching the pattern.
|
pattern. For example, if you wanted to restore configuration files
|
||||||
For example:
|
located in ``/etc``, you could do the following:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
pxar:/ > cd /etc/
|
pxar:/ > restore target/ --pattern etc/**/*.conf
|
||||||
pxar:/etc/ > restore /target/ --pattern **/*.conf
|
|
||||||
...
|
...
|
||||||
|
|
||||||
The above will scan trough all the directories below ``/etc`` and restore all
|
The above will scan trough all the directories below ``/etc`` and restore all
|
||||||
@ -657,10 +634,10 @@ shows the list of existing snapshots and what actions prune would take.
|
|||||||
|
|
||||||
.. note:: Neither the ``prune`` command nor the ``forget`` command free space
|
.. note:: Neither the ``prune`` command nor the ``forget`` command free space
|
||||||
in the chunk-store. The chunk-store still contains the data blocks. To free
|
in the chunk-store. The chunk-store still contains the data blocks. To free
|
||||||
space you need to perform :ref:`garbage-collection`.
|
space you need to perform :ref:`client_garbage-collection`.
|
||||||
|
|
||||||
|
|
||||||
.. _garbage-collection:
|
.. _client_garbage-collection:
|
||||||
|
|
||||||
Garbage Collection
|
Garbage Collection
|
||||||
------------------
|
------------------
|
||||||
@ -715,38 +692,46 @@ Benchmarking
|
|||||||
------------
|
------------
|
||||||
|
|
||||||
The backup client also comes with a benchmarking tool. This tool measures
|
The backup client also comes with a benchmarking tool. This tool measures
|
||||||
various metrics relating to compression and encryption speeds. You can run a
|
various metrics relating to compression and encryption speeds. If a Proxmox
|
||||||
benchmark using the ``benchmark`` subcommand of ``proxmox-backup-client``:
|
Backup repository (remote or local) is specified, the TLS upload speed will get
|
||||||
|
measured too.
|
||||||
|
|
||||||
|
You can run a benchmark using the ``benchmark`` subcommand of
|
||||||
|
``proxmox-backup-client``:
|
||||||
|
|
||||||
|
.. note:: The TLS speed test is only included if a :ref:`backup server
|
||||||
|
repository is specified <client_repository>`.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-client benchmark
|
# proxmox-backup-client benchmark
|
||||||
Uploaded 656 chunks in 5 seconds.
|
Uploaded 1517 chunks in 5 seconds.
|
||||||
Time per request: 7659 microseconds.
|
Time per request: 3309 microseconds.
|
||||||
TLS speed: 547.60 MB/s
|
TLS speed: 1267.41 MB/s
|
||||||
SHA256 speed: 585.76 MB/s
|
SHA256 speed: 2066.73 MB/s
|
||||||
Compression speed: 1923.96 MB/s
|
Compression speed: 775.11 MB/s
|
||||||
Decompress speed: 7885.24 MB/s
|
Decompress speed: 1233.35 MB/s
|
||||||
AES256/GCM speed: 3974.03 MB/s
|
AES256/GCM speed: 3688.27 MB/s
|
||||||
|
Verify speed: 783.43 MB/s
|
||||||
┌───────────────────────────────────┬─────────────────────┐
|
┌───────────────────────────────────┬─────────────────────┐
|
||||||
│ Name │ Value │
|
│ Name │ Value │
|
||||||
╞═══════════════════════════════════╪═════════════════════╡
|
╞═══════════════════════════════════╪═════════════════════╡
|
||||||
│ TLS (maximal backup upload speed) │ 547.60 MB/s (93%) │
|
│ TLS (maximal backup upload speed) │ 1267.41 MB/s (103%) │
|
||||||
├───────────────────────────────────┼─────────────────────┤
|
├───────────────────────────────────┼─────────────────────┤
|
||||||
│ SHA256 checksum computation speed │ 585.76 MB/s (28%) │
|
│ SHA256 checksum computation speed │ 2066.73 MB/s (102%) │
|
||||||
├───────────────────────────────────┼─────────────────────┤
|
├───────────────────────────────────┼─────────────────────┤
|
||||||
│ ZStd level 1 compression speed │ 1923.96 MB/s (89%) │
|
│ ZStd level 1 compression speed │ 775.11 MB/s (103%) │
|
||||||
├───────────────────────────────────┼─────────────────────┤
|
├───────────────────────────────────┼─────────────────────┤
|
||||||
│ ZStd level 1 decompression speed │ 7885.24 MB/s (98%) │
|
│ ZStd level 1 decompression speed │ 1233.35 MB/s (103%) │
|
||||||
├───────────────────────────────────┼─────────────────────┤
|
├───────────────────────────────────┼─────────────────────┤
|
||||||
│ AES256 GCM encryption speed │ 3974.03 MB/s (104%) │
|
│ Chunk verification speed │ 783.43 MB/s (103%) │
|
||||||
|
├───────────────────────────────────┼─────────────────────┤
|
||||||
|
│ AES256 GCM encryption speed │ 3688.27 MB/s (101%) │
|
||||||
└───────────────────────────────────┴─────────────────────┘
|
└───────────────────────────────────┴─────────────────────┘
|
||||||
|
|
||||||
|
|
||||||
.. note:: The percentages given in the output table correspond to a
|
.. note:: The percentages given in the output table correspond to a
|
||||||
comparison against a Ryzen 7 2700X. The TLS test connects to the
|
comparison against a Ryzen 7 2700X.
|
||||||
local host, so there is no network involved.
|
|
||||||
|
|
||||||
You can also pass the ``--output-format`` parameter to output stats in ``json``,
|
You can also pass the ``--output-format`` parameter to output stats in ``json``,
|
||||||
rather than the default table format.
|
rather than the default table format.
|
||||||
|
|
||||||
|
|
||||||
|
@ -1,19 +1,140 @@
|
|||||||
Backup Protocol
|
Backup Protocol
|
||||||
===============
|
===============
|
||||||
|
|
||||||
.. todo:: add introduction to HTTP2 based backup protocols
|
Proxmox Backup Server uses a REST based API. While the management
|
||||||
|
interface use normal HTTP, the actual backup and restore interface use
|
||||||
|
HTTP/2 for improved performance. Both HTTP and HTTP/2 are well known
|
||||||
|
standards, so the following section assumes that you are familiar on
|
||||||
|
how to use them.
|
||||||
|
|
||||||
|
|
||||||
Backup Protocol API
|
Backup Protocol API
|
||||||
-------------------
|
-------------------
|
||||||
|
|
||||||
.. todo:: describe backup writer protocol
|
To start a new backup, the API call ``GET /api2/json/backup`` needs to
|
||||||
|
be upgraded to a HTTP/2 connection using
|
||||||
|
``proxmox-backup-protocol-v1`` as protocol name::
|
||||||
|
|
||||||
.. include:: backup-protocol-api.rst
|
GET /api2/json/backup HTTP/1.1
|
||||||
|
UPGRADE: proxmox-backup-protocol-v1
|
||||||
|
|
||||||
|
The server replies with HTTP 101 Switching Protocol status code,
|
||||||
|
and you can then issue REST commands on that updated HTTP/2 connection.
|
||||||
|
|
||||||
|
The backup protocol allows you to upload three different kind of files:
|
||||||
|
|
||||||
|
- Chunks and blobs (binary data)
|
||||||
|
|
||||||
|
- Fixed Indexes (List of chunks with fixed size)
|
||||||
|
|
||||||
|
- Dynamic Indexes (List of chunk with variable size)
|
||||||
|
|
||||||
|
The following section gives a short introduction how to upload such
|
||||||
|
files. Please use the `API Viewer <api-viewer/index.html>`_ for
|
||||||
|
details about available REST commands.
|
||||||
|
|
||||||
|
|
||||||
Reader Protocol API
|
Upload Blobs
|
||||||
-------------------
|
~~~~~~~~~~~~
|
||||||
|
|
||||||
.. todo:: describe backup reader protocol
|
Uploading blobs is done using ``POST /blob``. The HTTP body contains the
|
||||||
|
data encoded as :ref:`Data Blob <data-blob-format>`).
|
||||||
|
|
||||||
.. include:: reader-protocol-api.rst
|
The file name needs to end with ``.blob``, and is automatically added
|
||||||
|
to the backup manifest.
|
||||||
|
|
||||||
|
|
||||||
|
Upload Chunks
|
||||||
|
~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Chunks belong to an index, so you first need to open an index (see
|
||||||
|
below). After that, you can upload chunks using ``POST /fixed_chunk``
|
||||||
|
and ``POST /dynamic_chunk``. The HTTP body contains the chunk data
|
||||||
|
encoded as :ref:`Data Blob <data-blob-format>`).
|
||||||
|
|
||||||
|
|
||||||
|
Upload Fixed Indexes
|
||||||
|
~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Fixed indexes are use to store VM image data. The VM image is split
|
||||||
|
into equally sized chunks, which are uploaded individually. The index
|
||||||
|
file simply contains a list to chunk digests.
|
||||||
|
|
||||||
|
You create a fixed index with ``POST /fixed_index``. Then upload
|
||||||
|
chunks with ``POST /fixed_chunk``, and append them to the index with
|
||||||
|
``PUT /fixed_index``. When finished, you need to close the index using
|
||||||
|
``POST /fixed_close``.
|
||||||
|
|
||||||
|
The file name needs to end with ``.fidx``, and is automatically added
|
||||||
|
to the backup manifest.
|
||||||
|
|
||||||
|
|
||||||
|
Upload Dynamic Indexes
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Dynamic indexes are use to store file archive data. The archive data
|
||||||
|
is split into dynamically sized chunks, which are uploaded
|
||||||
|
individually. The index file simply contains a list to chunk digests
|
||||||
|
and offsets.
|
||||||
|
|
||||||
|
You create a dynamic sized index with ``POST /dynamic_index``. Then
|
||||||
|
upload chunks with ``POST /dynamic_chunk``, and append them to the index with
|
||||||
|
``PUT /dynamic_index``. When finished, you need to close the index using
|
||||||
|
``POST /dynamic_close``.
|
||||||
|
|
||||||
|
The file name needs to end with ``.didx``, and is automatically added
|
||||||
|
to the backup manifest.
|
||||||
|
|
||||||
|
Finish Backup
|
||||||
|
~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Once you have uploaded all data, you need to call ``POST
|
||||||
|
/finish``. This commits all data and ends the backup protocol.
|
||||||
|
|
||||||
|
|
||||||
|
Restore/Reader Protocol API
|
||||||
|
---------------------------
|
||||||
|
|
||||||
|
To start a new reader, the API call ``GET /api2/json/reader`` needs to
|
||||||
|
be upgraded to a HTTP/2 connection using
|
||||||
|
``proxmox-backup-reader-protocol-v1`` as protocol name::
|
||||||
|
|
||||||
|
GET /api2/json/reader HTTP/1.1
|
||||||
|
UPGRADE: proxmox-backup-reader-protocol-v1
|
||||||
|
|
||||||
|
The server replies with HTTP 101 Switching Protocol status code,
|
||||||
|
and you can then issue REST commands on that updated HTTP/2 connection.
|
||||||
|
|
||||||
|
The reader protocol allows you to download three different kind of files:
|
||||||
|
|
||||||
|
- Chunks and blobs (binary data)
|
||||||
|
|
||||||
|
- Fixed Indexes (List of chunks with fixed size)
|
||||||
|
|
||||||
|
- Dynamic Indexes (List of chunk with variable size)
|
||||||
|
|
||||||
|
The following section gives a short introduction how to download such
|
||||||
|
files. Please use the `API Viewer <api-viewer/index.html>`_ for details about
|
||||||
|
available REST commands.
|
||||||
|
|
||||||
|
|
||||||
|
Download Blobs
|
||||||
|
~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Downloading blobs is done using ``GET /download``. The HTTP body contains the
|
||||||
|
data encoded as :ref:`Data Blob <data-blob-format>`.
|
||||||
|
|
||||||
|
|
||||||
|
Download Chunks
|
||||||
|
~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Downloading chunks is done using ``GET /chunk``. The HTTP body contains the
|
||||||
|
data encoded as :ref:`Data Blob <data-blob-format>`).
|
||||||
|
|
||||||
|
|
||||||
|
Download Index Files
|
||||||
|
~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Downloading index files is done using ``GET /download``. The HTTP body
|
||||||
|
contains the data encoded as :ref:`Fixed Index <fixed-index-format>`
|
||||||
|
or :ref:`Dynamic Index <dynamic-index-format>`.
|
||||||
|
@ -1,5 +1,4 @@
|
|||||||
|
.. _calendar-event-scheduling:
|
||||||
.. _calendar-events:
|
|
||||||
|
|
||||||
Calendar Events
|
Calendar Events
|
||||||
===============
|
===============
|
||||||
|
18
docs/conf.py
18
docs/conf.py
@ -74,7 +74,7 @@ rst_epilog = epilog_file.read()
|
|||||||
|
|
||||||
# General information about the project.
|
# General information about the project.
|
||||||
project = 'Proxmox Backup'
|
project = 'Proxmox Backup'
|
||||||
copyright = '2019-2020, Proxmox Server Solutions GmbH'
|
copyright = '2019-2021, Proxmox Server Solutions GmbH'
|
||||||
author = 'Proxmox Support Team'
|
author = 'Proxmox Support Team'
|
||||||
|
|
||||||
# The version info for the project you're documenting, acts as replacement for
|
# The version info for the project you're documenting, acts as replacement for
|
||||||
@ -107,10 +107,8 @@ today_fmt = '%A, %d %B %Y'
|
|||||||
# This patterns also effect to html_static_path and html_extra_path
|
# This patterns also effect to html_static_path and html_extra_path
|
||||||
exclude_patterns = [
|
exclude_patterns = [
|
||||||
'_build', 'Thumbs.db', '.DS_Store',
|
'_build', 'Thumbs.db', '.DS_Store',
|
||||||
'proxmox-backup-client/man1.rst',
|
'*/man1.rst',
|
||||||
'proxmox-backup-manager/man1.rst',
|
'config/*/man5.rst',
|
||||||
'proxmox-backup-proxy/man1.rst',
|
|
||||||
'pxar/man1.rst',
|
|
||||||
'epilog.rst',
|
'epilog.rst',
|
||||||
'pbs-copyright.rst',
|
'pbs-copyright.rst',
|
||||||
'local-zfs.rst'
|
'local-zfs.rst'
|
||||||
@ -171,6 +169,7 @@ html_theme_options = {
|
|||||||
'extra_nav_links': {
|
'extra_nav_links': {
|
||||||
'Proxmox Homepage': 'https://proxmox.com',
|
'Proxmox Homepage': 'https://proxmox.com',
|
||||||
'PDF': 'proxmox-backup.pdf',
|
'PDF': 'proxmox-backup.pdf',
|
||||||
|
'API Viewer' : 'api-viewer/index.html',
|
||||||
'Prune Simulator' : 'prune-simulator/index.html',
|
'Prune Simulator' : 'prune-simulator/index.html',
|
||||||
'LTO Barcode Generator' : 'lto-barcode/index.html',
|
'LTO Barcode Generator' : 'lto-barcode/index.html',
|
||||||
},
|
},
|
||||||
@ -246,10 +245,8 @@ html_js_files = [
|
|||||||
#
|
#
|
||||||
# html_last_updated_fmt = None
|
# html_last_updated_fmt = None
|
||||||
|
|
||||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
# We need to disable smatquotes, else Option Lists do not display long options
|
||||||
# typographically correct entities.
|
smartquotes = False
|
||||||
#
|
|
||||||
# html_use_smartypants = True
|
|
||||||
|
|
||||||
# Additional templates that should be rendered to pages, maps page names to
|
# Additional templates that should be rendered to pages, maps page names to
|
||||||
# template names.
|
# template names.
|
||||||
@ -467,3 +464,6 @@ epub_exclude_files = ['search.html']
|
|||||||
# If false, no index is generated.
|
# If false, no index is generated.
|
||||||
#
|
#
|
||||||
# epub_use_index = True
|
# epub_use_index = True
|
||||||
|
|
||||||
|
# use local mathjax package, symlink comes from debian/proxmox-backup-docs.links
|
||||||
|
mathjax_path = "mathjax/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
|
||||||
|
22
docs/config/acl/format.rst
Normal file
22
docs/config/acl/format.rst
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
This file contains the access control list for the Proxmox Backup
|
||||||
|
Server API.
|
||||||
|
|
||||||
|
Each line starts with ``acl:``, followed by 4 additional values
|
||||||
|
separated by colons.
|
||||||
|
|
||||||
|
:propagate: Propagate permissions down the hierarchy
|
||||||
|
|
||||||
|
:path: The object path
|
||||||
|
|
||||||
|
:User/Token: List of users and tokens
|
||||||
|
|
||||||
|
:Role: List of assigned roles
|
||||||
|
|
||||||
|
Here is an example list::
|
||||||
|
|
||||||
|
acl:1:/:root@pam!test:Admin
|
||||||
|
acl:1:/datastore/store1:user1@pbs:DatastoreAdmin
|
||||||
|
|
||||||
|
|
||||||
|
You can use the ``proxmox-backup-manager acl`` command to manipulate
|
||||||
|
this file.
|
35
docs/config/acl/man5.rst
Normal file
35
docs/config/acl/man5.rst
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
==========================
|
||||||
|
acl.cfg
|
||||||
|
==========================
|
||||||
|
|
||||||
|
.. include:: ../../epilog.rst
|
||||||
|
|
||||||
|
-------------------------------------------------------------
|
||||||
|
Access Control Configuration
|
||||||
|
-------------------------------------------------------------
|
||||||
|
|
||||||
|
:Author: |AUTHOR|
|
||||||
|
:Version: Version |VERSION|
|
||||||
|
:Manual section: 5
|
||||||
|
|
||||||
|
Description
|
||||||
|
===========
|
||||||
|
|
||||||
|
The file /etc/proxmox-backup/acl.cfg is a configuration file for Proxmox
|
||||||
|
Backup Server. It contains the access control configuration for the API.
|
||||||
|
|
||||||
|
File Format
|
||||||
|
===========
|
||||||
|
|
||||||
|
.. include:: format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Roles
|
||||||
|
=====
|
||||||
|
|
||||||
|
The following roles exist:
|
||||||
|
|
||||||
|
.. include:: roles.rst
|
||||||
|
|
||||||
|
|
||||||
|
.. include:: ../../pbs-copyright.rst
|
18
docs/config/datastore/format.rst
Normal file
18
docs/config/datastore/format.rst
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
The file contains a list of datastore configuration sections. Each
|
||||||
|
section starts with a header ``datastore: <name>``, followed by the
|
||||||
|
datastore configuration options.
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
datastore: <name1>
|
||||||
|
path <path1>
|
||||||
|
<option1> <value1>
|
||||||
|
...
|
||||||
|
|
||||||
|
datastore: <name2>
|
||||||
|
path <path2>
|
||||||
|
...
|
||||||
|
|
||||||
|
|
||||||
|
You can use the ``proxmox-backup-manager datastore`` command to manipulate
|
||||||
|
this file.
|
33
docs/config/datastore/man5.rst
Normal file
33
docs/config/datastore/man5.rst
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
==========================
|
||||||
|
datastore.cfg
|
||||||
|
==========================
|
||||||
|
|
||||||
|
.. include:: ../../epilog.rst
|
||||||
|
|
||||||
|
-------------------------------------------------------------
|
||||||
|
Datastore Configuration
|
||||||
|
-------------------------------------------------------------
|
||||||
|
|
||||||
|
:Author: |AUTHOR|
|
||||||
|
:Version: Version |VERSION|
|
||||||
|
:Manual section: 5
|
||||||
|
|
||||||
|
Description
|
||||||
|
===========
|
||||||
|
|
||||||
|
The file /etc/proxmox-backup/datastore.cfg is a configuration file for Proxmox
|
||||||
|
Backup Server. It contains the Datastore configuration.
|
||||||
|
|
||||||
|
File Format
|
||||||
|
===========
|
||||||
|
|
||||||
|
.. include:: format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
=======
|
||||||
|
|
||||||
|
.. include:: config.rst
|
||||||
|
|
||||||
|
|
||||||
|
.. include:: ../../pbs-copyright.rst
|
13
docs/config/media-pool/format.rst
Normal file
13
docs/config/media-pool/format.rst
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
Each entry starts with a header ``pool: <name>``, followed by the
|
||||||
|
media pool configuration options.
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
pool: company1
|
||||||
|
allocation always
|
||||||
|
retention overwrite
|
||||||
|
|
||||||
|
pool: ...
|
||||||
|
|
||||||
|
|
||||||
|
You can use the ``proxmox-tape pool`` command to manipulate this file.
|
35
docs/config/media-pool/man5.rst
Normal file
35
docs/config/media-pool/man5.rst
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
==========================
|
||||||
|
media-pool.cfg
|
||||||
|
==========================
|
||||||
|
|
||||||
|
.. include:: ../../epilog.rst
|
||||||
|
|
||||||
|
-------------------------------------------------------------
|
||||||
|
Media Pool Configuration
|
||||||
|
-------------------------------------------------------------
|
||||||
|
|
||||||
|
:Author: |AUTHOR|
|
||||||
|
:Version: Version |VERSION|
|
||||||
|
:Manual section: 5
|
||||||
|
|
||||||
|
Description
|
||||||
|
===========
|
||||||
|
|
||||||
|
The file /etc/proxmox-backup/media-pool.cfg is a configuration file
|
||||||
|
for Proxmox Backup Server. It contains the media pool configuration
|
||||||
|
for tape backups.
|
||||||
|
|
||||||
|
|
||||||
|
File Format
|
||||||
|
===========
|
||||||
|
|
||||||
|
.. include:: format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
=======
|
||||||
|
|
||||||
|
.. include:: config.rst
|
||||||
|
|
||||||
|
|
||||||
|
.. include:: ../../pbs-copyright.rst
|
17
docs/config/remote/format.rst
Normal file
17
docs/config/remote/format.rst
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
This file contains information used to access remote servers.
|
||||||
|
|
||||||
|
Each entry starts with a header ``remote: <name>``, followed by the
|
||||||
|
remote configuration options.
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
remote: server1
|
||||||
|
host server1.local
|
||||||
|
auth-id sync@pbs
|
||||||
|
...
|
||||||
|
|
||||||
|
remote: ...
|
||||||
|
|
||||||
|
|
||||||
|
You can use the ``proxmox-backup-manager remote`` command to manipulate
|
||||||
|
this file.
|
35
docs/config/remote/man5.rst
Normal file
35
docs/config/remote/man5.rst
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
==========================
|
||||||
|
remote.cfg
|
||||||
|
==========================
|
||||||
|
|
||||||
|
.. include:: ../../epilog.rst
|
||||||
|
|
||||||
|
-------------------------------------------------------------
|
||||||
|
Remote Server Configuration
|
||||||
|
-------------------------------------------------------------
|
||||||
|
|
||||||
|
:Author: |AUTHOR|
|
||||||
|
:Version: Version |VERSION|
|
||||||
|
:Manual section: 5
|
||||||
|
|
||||||
|
Description
|
||||||
|
===========
|
||||||
|
|
||||||
|
The file /etc/proxmox-backup/remote.cfg is a configuration file for
|
||||||
|
Proxmox Backup Server. It contains information about remote servers,
|
||||||
|
usable for synchronization jobs.
|
||||||
|
|
||||||
|
|
||||||
|
File Format
|
||||||
|
===========
|
||||||
|
|
||||||
|
.. include:: format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
=======
|
||||||
|
|
||||||
|
.. include:: config.rst
|
||||||
|
|
||||||
|
|
||||||
|
.. include:: ../../pbs-copyright.rst
|
15
docs/config/sync/format.rst
Normal file
15
docs/config/sync/format.rst
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
Each entry starts with a header ``sync: <name>``, followed by the
|
||||||
|
job configuration options.
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
sync: job1
|
||||||
|
store store1
|
||||||
|
remote-store store1
|
||||||
|
remote lina
|
||||||
|
|
||||||
|
sync: ...
|
||||||
|
|
||||||
|
|
||||||
|
You can use the ``proxmox-backup-manager sync-job`` command to manipulate
|
||||||
|
this file.
|
35
docs/config/sync/man5.rst
Normal file
35
docs/config/sync/man5.rst
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
==========================
|
||||||
|
sync.cfg
|
||||||
|
==========================
|
||||||
|
|
||||||
|
.. include:: ../../epilog.rst
|
||||||
|
|
||||||
|
-------------------------------------------------------------
|
||||||
|
Synchronization Job Configuration
|
||||||
|
-------------------------------------------------------------
|
||||||
|
|
||||||
|
:Author: |AUTHOR|
|
||||||
|
:Version: Version |VERSION|
|
||||||
|
:Manual section: 5
|
||||||
|
|
||||||
|
Description
|
||||||
|
===========
|
||||||
|
|
||||||
|
The file /etc/proxmox-backup/sync.cfg is a configuration file for
|
||||||
|
Proxmox Backup Server. It contains the synchronization job
|
||||||
|
configuration.
|
||||||
|
|
||||||
|
|
||||||
|
File Format
|
||||||
|
===========
|
||||||
|
|
||||||
|
.. include:: format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
=======
|
||||||
|
|
||||||
|
.. include:: config.rst
|
||||||
|
|
||||||
|
|
||||||
|
.. include:: ../../pbs-copyright.rst
|
16
docs/config/tape-job/format.rst
Normal file
16
docs/config/tape-job/format.rst
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
Each entry starts with a header ``backup: <name>``, followed by the
|
||||||
|
job configuration options.
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
backup: job1
|
||||||
|
drive hh8
|
||||||
|
pool p4
|
||||||
|
store store3
|
||||||
|
schedule daily
|
||||||
|
|
||||||
|
backup: ...
|
||||||
|
|
||||||
|
|
||||||
|
You can use the ``proxmox-tape backup-job`` command to manipulate
|
||||||
|
this file.
|
34
docs/config/tape-job/man5.rst
Normal file
34
docs/config/tape-job/man5.rst
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
==========================
|
||||||
|
tape-job.cfg
|
||||||
|
==========================
|
||||||
|
|
||||||
|
.. include:: ../../epilog.rst
|
||||||
|
|
||||||
|
-------------------------------------------------------------
|
||||||
|
Tape Job Configuration
|
||||||
|
-------------------------------------------------------------
|
||||||
|
|
||||||
|
:Author: |AUTHOR|
|
||||||
|
:Version: Version |VERSION|
|
||||||
|
:Manual section: 5
|
||||||
|
|
||||||
|
Description
|
||||||
|
===========
|
||||||
|
|
||||||
|
The file ``/etc/proxmox-backup/tape-job.cfg`` is a configuration file for
|
||||||
|
Proxmox Backup Server. It contains the tape job configuration.
|
||||||
|
|
||||||
|
|
||||||
|
File Format
|
||||||
|
===========
|
||||||
|
|
||||||
|
.. include:: format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
=======
|
||||||
|
|
||||||
|
.. include:: config.rst
|
||||||
|
|
||||||
|
|
||||||
|
.. include:: ../../pbs-copyright.rst
|
22
docs/config/tape/format.rst
Normal file
22
docs/config/tape/format.rst
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
Each drive configuration section starts with a header ``linux: <name>``,
|
||||||
|
followed by the drive configuration options.
|
||||||
|
|
||||||
|
Tape changer configurations starts with ``changer: <name>``,
|
||||||
|
followed by the changer configuration options.
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
linux: hh8
|
||||||
|
changer sl3
|
||||||
|
path /dev/tape/by-id/scsi-10WT065325-nst
|
||||||
|
|
||||||
|
changer: sl3
|
||||||
|
export-slots 14,15,16
|
||||||
|
path /dev/tape/by-id/scsi-CJ0JBE0059
|
||||||
|
|
||||||
|
|
||||||
|
You can use the ``proxmox-tape drive`` and ``proxmox-tape changer``
|
||||||
|
commands to manipulate this file.
|
||||||
|
|
||||||
|
.. NOTE:: The ``virtual:`` drive type is experimental and only used
|
||||||
|
for debugging.
|
33
docs/config/tape/man5.rst
Normal file
33
docs/config/tape/man5.rst
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
==========================
|
||||||
|
tape.cfg
|
||||||
|
==========================
|
||||||
|
|
||||||
|
.. include:: ../../epilog.rst
|
||||||
|
|
||||||
|
-------------------------------------------------------------
|
||||||
|
Tape Drive and Changer Configuration
|
||||||
|
-------------------------------------------------------------
|
||||||
|
|
||||||
|
:Author: |AUTHOR|
|
||||||
|
:Version: Version |VERSION|
|
||||||
|
:Manual section: 5
|
||||||
|
|
||||||
|
Description
|
||||||
|
===========
|
||||||
|
|
||||||
|
The file /etc/proxmox-backup/tape.cfg is a configuration file for Proxmox
|
||||||
|
Backup Server. It contains the tape drive and changer configuration.
|
||||||
|
|
||||||
|
File Format
|
||||||
|
===========
|
||||||
|
|
||||||
|
.. include:: format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
=======
|
||||||
|
|
||||||
|
.. include:: config.rst
|
||||||
|
|
||||||
|
|
||||||
|
.. include:: ../../pbs-copyright.rst
|
28
docs/config/user/format.rst
Normal file
28
docs/config/user/format.rst
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
This file contains the list of API users and API tokens.
|
||||||
|
|
||||||
|
Each user configuration section starts with a header ``user: <name>``,
|
||||||
|
followed by the user configuration options.
|
||||||
|
|
||||||
|
API token configuration starts with a header ``token:
|
||||||
|
<userid!token_name>``, followed by the token configuration. The data
|
||||||
|
used to authenticate tokens is stored in a separate file
|
||||||
|
(``token.shadow``).
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
user: root@pam
|
||||||
|
comment Superuser
|
||||||
|
email test@example.local
|
||||||
|
...
|
||||||
|
|
||||||
|
token: root@pam!token1
|
||||||
|
comment API test token
|
||||||
|
enable true
|
||||||
|
expire 0
|
||||||
|
|
||||||
|
user: ...
|
||||||
|
|
||||||
|
|
||||||
|
You can use the ``proxmox-backup-manager user`` command to manipulate
|
||||||
|
this file.
|
33
docs/config/user/man5.rst
Normal file
33
docs/config/user/man5.rst
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
==========================
|
||||||
|
user.cfg
|
||||||
|
==========================
|
||||||
|
|
||||||
|
.. include:: ../../epilog.rst
|
||||||
|
|
||||||
|
-------------------------------------------------------------
|
||||||
|
User Configuration
|
||||||
|
-------------------------------------------------------------
|
||||||
|
|
||||||
|
:Author: |AUTHOR|
|
||||||
|
:Version: Version |VERSION|
|
||||||
|
:Manual section: 5
|
||||||
|
|
||||||
|
Description
|
||||||
|
===========
|
||||||
|
|
||||||
|
The file /etc/proxmox-backup/user.cfg is a configuration file for Proxmox
|
||||||
|
Backup Server. It contains the user configuration.
|
||||||
|
|
||||||
|
File Format
|
||||||
|
===========
|
||||||
|
|
||||||
|
.. include:: format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
=======
|
||||||
|
|
||||||
|
.. include:: config.rst
|
||||||
|
|
||||||
|
|
||||||
|
.. include:: ../../pbs-copyright.rst
|
16
docs/config/verification/format.rst
Normal file
16
docs/config/verification/format.rst
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
Each entry starts with a header ``verification: <name>``, followed by the
|
||||||
|
job configuration options.
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
verification: verify-store2
|
||||||
|
ignore-verified true
|
||||||
|
outdated-after 7
|
||||||
|
schedule daily
|
||||||
|
store store2
|
||||||
|
|
||||||
|
verification: ...
|
||||||
|
|
||||||
|
|
||||||
|
You can use the ``proxmox-backup-manager verify-job`` command to manipulate
|
||||||
|
this file.
|
35
docs/config/verification/man5.rst
Normal file
35
docs/config/verification/man5.rst
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
==========================
|
||||||
|
verification.cfg
|
||||||
|
==========================
|
||||||
|
|
||||||
|
.. include:: ../../epilog.rst
|
||||||
|
|
||||||
|
-------------------------------------------------------------
|
||||||
|
Verification Job Configuration
|
||||||
|
-------------------------------------------------------------
|
||||||
|
|
||||||
|
:Author: |AUTHOR|
|
||||||
|
:Version: Version |VERSION|
|
||||||
|
:Manual section: 5
|
||||||
|
|
||||||
|
Description
|
||||||
|
===========
|
||||||
|
|
||||||
|
The file /etc/proxmox-backup/verification.cfg is a configuration file for
|
||||||
|
Proxmox Backup Server. It contains the verification job
|
||||||
|
configuration.
|
||||||
|
|
||||||
|
|
||||||
|
File Format
|
||||||
|
===========
|
||||||
|
|
||||||
|
.. include:: format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
=======
|
||||||
|
|
||||||
|
.. include:: config.rst
|
||||||
|
|
||||||
|
|
||||||
|
.. include:: ../../pbs-copyright.rst
|
97
docs/configuration-files.rst
Normal file
97
docs/configuration-files.rst
Normal file
@ -0,0 +1,97 @@
|
|||||||
|
Configuration Files
|
||||||
|
===================
|
||||||
|
|
||||||
|
All Proxmox Backup Server configuration files reside inside the directory
|
||||||
|
``/etc/proxmox-backup/``.
|
||||||
|
|
||||||
|
|
||||||
|
``acl.cfg``
|
||||||
|
~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
File Format
|
||||||
|
^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/acl/format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Roles
|
||||||
|
^^^^^
|
||||||
|
|
||||||
|
The following roles exist:
|
||||||
|
|
||||||
|
.. include:: config/acl/roles.rst
|
||||||
|
|
||||||
|
|
||||||
|
``datastore.cfg``
|
||||||
|
~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
File Format
|
||||||
|
^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/datastore/format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/datastore/config.rst
|
||||||
|
|
||||||
|
|
||||||
|
``user.cfg``
|
||||||
|
~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
File Format
|
||||||
|
^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/user/format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/user/config.rst
|
||||||
|
|
||||||
|
|
||||||
|
``remote.cfg``
|
||||||
|
~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
File Format
|
||||||
|
^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/remote/format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/remote/config.rst
|
||||||
|
|
||||||
|
|
||||||
|
``sync.cfg``
|
||||||
|
~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
File Format
|
||||||
|
^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/sync/format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/sync/config.rst
|
||||||
|
|
||||||
|
|
||||||
|
``verification.cfg``
|
||||||
|
~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
File Format
|
||||||
|
^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/verification/format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/verification/config.rst
|
@ -14,6 +14,10 @@ pre {
|
|||||||
padding: 5px 10px;
|
padding: 5px 10px;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
div.topic {
|
||||||
|
background-color: #FAFAFA;
|
||||||
|
}
|
||||||
|
|
||||||
li a.current {
|
li a.current {
|
||||||
font-weight: bold;
|
font-weight: bold;
|
||||||
border-bottom: 1px solid #000;
|
border-bottom: 1px solid #000;
|
||||||
@ -25,6 +29,23 @@ ul li.toctree-l1 > a {
|
|||||||
color: #000;
|
color: #000;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
div.sphinxsidebar ul {
|
||||||
|
color: #444;
|
||||||
|
}
|
||||||
|
div.sphinxsidebar ul ul {
|
||||||
|
list-style: circle;
|
||||||
|
}
|
||||||
|
div.sphinxsidebar ul ul ul {
|
||||||
|
list-style: square;
|
||||||
|
}
|
||||||
|
|
||||||
|
div.sphinxsidebar ul a code {
|
||||||
|
font-weight: normal;
|
||||||
|
}
|
||||||
|
div.sphinxsidebar ul ul a {
|
||||||
|
border-bottom: 1px dotted #CCC;
|
||||||
|
}
|
||||||
|
|
||||||
div.sphinxsidebar form.search {
|
div.sphinxsidebar form.search {
|
||||||
margin-bottom: 5px;
|
margin-bottom: 5px;
|
||||||
}
|
}
|
||||||
@ -36,6 +57,11 @@ div.sphinxsidebar h3 {
|
|||||||
div.sphinxsidebar h1.logo-name {
|
div.sphinxsidebar h1.logo-name {
|
||||||
display: none;
|
display: none;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
div.document, div.footer {
|
||||||
|
width: min(100%, 1320px);
|
||||||
|
}
|
||||||
|
|
||||||
@media screen and (max-width: 875px) {
|
@media screen and (max-width: 875px) {
|
||||||
div.sphinxsidebar p.logo {
|
div.sphinxsidebar p.logo {
|
||||||
display: initial;
|
display: initial;
|
||||||
@ -44,9 +70,19 @@ div.sphinxsidebar h1.logo-name {
|
|||||||
display: block;
|
display: block;
|
||||||
}
|
}
|
||||||
div.sphinxsidebar span {
|
div.sphinxsidebar span {
|
||||||
color: #AAA;
|
color: #EEE;
|
||||||
}
|
}
|
||||||
ul li.toctree-l1 > a {
|
.sphinxsidebar ul li.toctree-l1 > a, div.sphinxsidebar a {
|
||||||
color: #FFF;
|
color: #FFF;
|
||||||
}
|
}
|
||||||
|
div.sphinxsidebar {
|
||||||
|
background-color: #555;
|
||||||
|
}
|
||||||
|
div.body {
|
||||||
|
min-width: 300px;
|
||||||
|
}
|
||||||
|
div.footer {
|
||||||
|
display: block;
|
||||||
|
margin: 15px auto 0px auto;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -6,7 +6,113 @@ File Formats
|
|||||||
Proxmox File Archive Format (``.pxar``)
|
Proxmox File Archive Format (``.pxar``)
|
||||||
---------------------------------------
|
---------------------------------------
|
||||||
|
|
||||||
|
|
||||||
.. graphviz:: pxar-format-overview.dot
|
.. graphviz:: pxar-format-overview.dot
|
||||||
|
|
||||||
|
|
||||||
|
.. _data-blob-format:
|
||||||
|
|
||||||
|
Data Blob Format (``.blob``)
|
||||||
|
----------------------------
|
||||||
|
|
||||||
|
The data blob format is used to store small binary data. The magic number decides the exact format:
|
||||||
|
|
||||||
|
.. list-table::
|
||||||
|
:widths: auto
|
||||||
|
|
||||||
|
* - ``[66, 171, 56, 7, 190, 131, 112, 161]``
|
||||||
|
- unencrypted
|
||||||
|
- uncompressed
|
||||||
|
* - ``[49, 185, 88, 66, 111, 182, 163, 127]``
|
||||||
|
- unencrypted
|
||||||
|
- compressed
|
||||||
|
* - ``[123, 103, 133, 190, 34, 45, 76, 240]``
|
||||||
|
- encrypted
|
||||||
|
- uncompressed
|
||||||
|
* - ``[230, 89, 27, 191, 11, 191, 216, 11]``
|
||||||
|
- encrypted
|
||||||
|
- compressed
|
||||||
|
|
||||||
|
Compression algorithm is ``zstd``. Encryption cipher is ``AES_256_GCM``.
|
||||||
|
|
||||||
|
Unencrypted blobs use the following format:
|
||||||
|
|
||||||
|
.. list-table::
|
||||||
|
:widths: auto
|
||||||
|
|
||||||
|
* - ``MAGIC: [u8; 8]``
|
||||||
|
* - ``CRC32: [u8; 4]``
|
||||||
|
* - ``Data: (max 16MiB)``
|
||||||
|
|
||||||
|
Encrypted blobs additionally contain a 16 byte IV, followed by a 16
|
||||||
|
byte Authenticated Encryption (AE) tag, followed by the encrypted
|
||||||
|
data:
|
||||||
|
|
||||||
|
.. list-table::
|
||||||
|
|
||||||
|
* - ``MAGIC: [u8; 8]``
|
||||||
|
* - ``CRC32: [u8; 4]``
|
||||||
|
* - ``IV: [u8; 16]``
|
||||||
|
* - ``TAG: [u8; 16]``
|
||||||
|
* - ``Data: (max 16MiB)``
|
||||||
|
|
||||||
|
|
||||||
|
.. _fixed-index-format:
|
||||||
|
|
||||||
|
Fixed Index Format (``.fidx``)
|
||||||
|
-------------------------------
|
||||||
|
|
||||||
|
All numbers are stored as little-endian.
|
||||||
|
|
||||||
|
.. list-table::
|
||||||
|
|
||||||
|
* - ``MAGIC: [u8; 8]``
|
||||||
|
- ``[47, 127, 65, 237, 145, 253, 15, 205]``
|
||||||
|
* - ``uuid: [u8; 16]``,
|
||||||
|
- Unique ID
|
||||||
|
* - ``ctime: i64``,
|
||||||
|
- Creation Time (epoch)
|
||||||
|
* - ``index_csum: [u8; 32]``,
|
||||||
|
- Sha256 over the index (without header) ``SHA256(digest1||digest2||...)``
|
||||||
|
* - ``size: u64``,
|
||||||
|
- Image size
|
||||||
|
* - ``chunk_size: u64``,
|
||||||
|
- Chunk size
|
||||||
|
* - ``reserved: [u8; 4016]``,
|
||||||
|
- overall header size is one page (4096 bytes)
|
||||||
|
* - ``digest1: [u8; 32]``
|
||||||
|
- first chunk digest
|
||||||
|
* - ``digest2: [u8; 32]``
|
||||||
|
- next chunk
|
||||||
|
* - ...
|
||||||
|
- next chunk ...
|
||||||
|
|
||||||
|
|
||||||
|
.. _dynamic-index-format:
|
||||||
|
|
||||||
|
Dynamic Index Format (``.didx``)
|
||||||
|
--------------------------------
|
||||||
|
|
||||||
|
All numbers are stored as little-endian.
|
||||||
|
|
||||||
|
.. list-table::
|
||||||
|
|
||||||
|
* - ``MAGIC: [u8; 8]``
|
||||||
|
- ``[28, 145, 78, 165, 25, 186, 179, 205]``
|
||||||
|
* - ``uuid: [u8; 16]``,
|
||||||
|
- Unique ID
|
||||||
|
* - ``ctime: i64``,
|
||||||
|
- Creation Time (epoch)
|
||||||
|
* - ``index_csum: [u8; 32]``,
|
||||||
|
- Sha256 over the index (without header) ``SHA256(offset1||digest1||offset2||digest2||...)``
|
||||||
|
* - ``reserved: [u8; 4032]``,
|
||||||
|
- Overall header size is one page (4096 bytes)
|
||||||
|
* - ``offset1: u64``
|
||||||
|
- End of first chunk
|
||||||
|
* - ``digest1: [u8; 32]``
|
||||||
|
- first chunk digest
|
||||||
|
* - ``offset2: u64``
|
||||||
|
- End of second chunk
|
||||||
|
* - ``digest2: [u8; 32]``
|
||||||
|
- second chunk digest
|
||||||
|
* - ...
|
||||||
|
- next chunk offset/digest
|
||||||
|
@ -129,7 +129,7 @@ top panel to view:
|
|||||||
* **Content**: Information on the datastore's backup groups and their respective
|
* **Content**: Information on the datastore's backup groups and their respective
|
||||||
contents
|
contents
|
||||||
* **Prune & GC**: Schedule :ref:`pruning <backup-pruning>` and :ref:`garbage
|
* **Prune & GC**: Schedule :ref:`pruning <backup-pruning>` and :ref:`garbage
|
||||||
collection <garbage-collection>` operations, and run garbage collection
|
collection <client_garbage-collection>` operations, and run garbage collection
|
||||||
manually
|
manually
|
||||||
* **Sync Jobs**: Create, manage and run :ref:`syncjobs` from remote servers
|
* **Sync Jobs**: Create, manage and run :ref:`syncjobs` from remote servers
|
||||||
* **Verify Jobs**: Create, manage and run :ref:`maintenance_verification` jobs on the
|
* **Verify Jobs**: Create, manage and run :ref:`maintenance_verification` jobs on the
|
||||||
|
@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
Welcome to the Proxmox Backup documentation!
|
Welcome to the Proxmox Backup documentation!
|
||||||
============================================
|
============================================
|
||||||
| Copyright (C) 2019-2020 Proxmox Server Solutions GmbH
|
| Copyright (C) 2019-2021 Proxmox Server Solutions GmbH
|
||||||
| Version |version| -- |today|
|
| Version |version| -- |today|
|
||||||
|
|
||||||
Permission is granted to copy, distribute and/or modify this document under the
|
Permission is granted to copy, distribute and/or modify this document under the
|
||||||
@ -33,6 +33,7 @@ in the section entitled "GNU Free Documentation License".
|
|||||||
pve-integration.rst
|
pve-integration.rst
|
||||||
pxar-tool.rst
|
pxar-tool.rst
|
||||||
sysadmin.rst
|
sysadmin.rst
|
||||||
|
technical-overview.rst
|
||||||
faq.rst
|
faq.rst
|
||||||
|
|
||||||
.. raw:: latex
|
.. raw:: latex
|
||||||
@ -44,6 +45,7 @@ in the section entitled "GNU Free Documentation License".
|
|||||||
:caption: Appendix
|
:caption: Appendix
|
||||||
|
|
||||||
command-syntax.rst
|
command-syntax.rst
|
||||||
|
configuration-files.rst
|
||||||
file-formats.rst
|
file-formats.rst
|
||||||
backup-protocol.rst
|
backup-protocol.rst
|
||||||
calendarevents.rst
|
calendarevents.rst
|
||||||
|
@ -15,7 +15,7 @@ encryption (AE_). Using :term:`Rust` as the implementation language guarantees h
|
|||||||
performance, low resource usage, and a safe, high-quality codebase.
|
performance, low resource usage, and a safe, high-quality codebase.
|
||||||
|
|
||||||
Proxmox Backup uses state of the art cryptography for both client-server
|
Proxmox Backup uses state of the art cryptography for both client-server
|
||||||
communication and backup content :ref:`encryption <encryption>`. All
|
communication and backup content :ref:`encryption <client_encryption>`. All
|
||||||
client-server communication uses `TLS
|
client-server communication uses `TLS
|
||||||
<https://en.wikipedia.org/wiki/Transport_Layer_Security>`_, and backup data can
|
<https://en.wikipedia.org/wiki/Transport_Layer_Security>`_, and backup data can
|
||||||
be encrypted on the client-side before sending, making it safer to back up data
|
be encrypted on the client-side before sending, making it safer to back up data
|
||||||
@ -65,10 +65,10 @@ Main Features
|
|||||||
:Compression: The ultra-fast Zstandard_ compression is able to compress
|
:Compression: The ultra-fast Zstandard_ compression is able to compress
|
||||||
several gigabytes of data per second.
|
several gigabytes of data per second.
|
||||||
|
|
||||||
:Encryption: Backups can be encrypted on the client-side, using AES-256 in
|
:Encryption: Backups can be encrypted on the client-side, using AES-256 GCM_.
|
||||||
Galois/Counter Mode (GCM_). This authenticated encryption (AE_) mode
|
This authenticated encryption (AE_) mode provides very high performance on
|
||||||
provides very high performance on modern hardware. In addition to client-side
|
modern hardware. In addition to client-side encryption, all data is
|
||||||
encryption, all data is transferred via a secure TLS connection.
|
transferred via a secure TLS connection.
|
||||||
|
|
||||||
:Web interface: Manage the Proxmox Backup Server with the integrated, web-based
|
:Web interface: Manage the Proxmox Backup Server with the integrated, web-based
|
||||||
user interface.
|
user interface.
|
||||||
@ -76,8 +76,16 @@ Main Features
|
|||||||
:Open Source: No secrets. Proxmox Backup Server is free and open-source
|
:Open Source: No secrets. Proxmox Backup Server is free and open-source
|
||||||
software. The source code is licensed under AGPL, v3.
|
software. The source code is licensed under AGPL, v3.
|
||||||
|
|
||||||
:Support: Enterprise support will be available from `Proxmox`_ once the beta
|
:No Limits: Proxmox Backup Server has no artifical limits for backup storage or
|
||||||
phase is over.
|
backup-clients.
|
||||||
|
|
||||||
|
:Enterprise Support: Proxmox Server Solutions GmbH offers enterprise support in
|
||||||
|
form of `Proxmox Backup Server Subscription Plans
|
||||||
|
<https://www.proxmox.com/en/proxmox-backup-server/pricing>`_. Users at every
|
||||||
|
subscription level get access to the Proxmox Backup :ref:`Enterprise
|
||||||
|
Repository <sysadmin_package_repos_enterprise>`. In addition, with a Basic,
|
||||||
|
Standard or Premium subscription, users have access to the :ref:`Proxmox
|
||||||
|
Customer Portal <get_help_enterprise_support>`.
|
||||||
|
|
||||||
|
|
||||||
Reasons for Data Backup?
|
Reasons for Data Backup?
|
||||||
@ -117,8 +125,8 @@ Proxmox Backup Server consists of multiple components:
|
|||||||
* A client CLI tool (`proxmox-backup-client`) to access the server easily from
|
* A client CLI tool (`proxmox-backup-client`) to access the server easily from
|
||||||
any `Linux amd64` environment
|
any `Linux amd64` environment
|
||||||
|
|
||||||
Aside from the web interface, everything is written in the Rust programming
|
Aside from the web interface, most parts of Proxmox Backup Server are written in
|
||||||
language.
|
the Rust programming language.
|
||||||
|
|
||||||
"The Rust programming language helps you write faster, more reliable software.
|
"The Rust programming language helps you write faster, more reliable software.
|
||||||
High-level ergonomics and low-level control are often at odds in programming
|
High-level ergonomics and low-level control are often at odds in programming
|
||||||
@ -134,6 +142,17 @@ language.
|
|||||||
Getting Help
|
Getting Help
|
||||||
------------
|
------------
|
||||||
|
|
||||||
|
.. _get_help_enterprise_support:
|
||||||
|
|
||||||
|
Enterprise Support
|
||||||
|
~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Users with a `Proxmox Backup Server Basic, Standard or Premium Subscription Plan
|
||||||
|
<https://www.proxmox.com/en/proxmox-backup-server/pricing>`_ have access to the
|
||||||
|
Proxmox Customer Portal. The Customer Portal provides support with guaranteed
|
||||||
|
response times from the Proxmox developers.
|
||||||
|
For more information or for volume discounts, please contact office@proxmox.com.
|
||||||
|
|
||||||
Community Support Forum
|
Community Support Forum
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
@ -161,7 +180,7 @@ of the issue and will send a notification once it has been solved.
|
|||||||
License
|
License
|
||||||
-------
|
-------
|
||||||
|
|
||||||
Copyright (C) 2019-2020 Proxmox Server Solutions GmbH
|
Copyright (C) 2019-2021 Proxmox Server Solutions GmbH
|
||||||
|
|
||||||
This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>
|
This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>
|
||||||
|
|
||||||
|
@ -4,7 +4,7 @@
|
|||||||
// IBM LTO Ultrium Cartridge Label Specification
|
// IBM LTO Ultrium Cartridge Label Specification
|
||||||
// http://www-01.ibm.com/support/docview.wss?uid=ssg1S7000429
|
// http://www-01.ibm.com/support/docview.wss?uid=ssg1S7000429
|
||||||
|
|
||||||
let code39_codes = {
|
const code39_codes = {
|
||||||
"1": ['B', 's', 'b', 'S', 'b', 's', 'b', 's', 'B'],
|
"1": ['B', 's', 'b', 'S', 'b', 's', 'b', 's', 'B'],
|
||||||
"A": ['B', 's', 'b', 's', 'b', 'S', 'b', 's', 'B'],
|
"A": ['B', 's', 'b', 's', 'b', 'S', 'b', 's', 'B'],
|
||||||
"K": ['B', 's', 'b', 's', 'b', 's', 'b', 'S', 'B'],
|
"K": ['B', 's', 'b', 's', 'b', 's', 'b', 'S', 'B'],
|
||||||
@ -53,10 +53,10 @@ let code39_codes = {
|
|||||||
"0": ['b', 's', 'b', 'S', 'B', 's', 'B', 's', 'b'],
|
"0": ['b', 's', 'b', 'S', 'B', 's', 'B', 's', 'b'],
|
||||||
"J": ['b', 's', 'b', 's', 'B', 'S', 'B', 's', 'b'],
|
"J": ['b', 's', 'b', 's', 'B', 'S', 'B', 's', 'b'],
|
||||||
"T": ['b', 's', 'b', 's', 'B', 's', 'B', 'S', 'b'],
|
"T": ['b', 's', 'b', 's', 'B', 's', 'B', 'S', 'b'],
|
||||||
"*": ['b', 'S', 'b', 's', 'B', 's', 'B', 's', 'b']
|
"*": ['b', 'S', 'b', 's', 'B', 's', 'B', 's', 'b'],
|
||||||
};
|
};
|
||||||
|
|
||||||
let colors = [
|
const colors = [
|
||||||
'#BB282E',
|
'#BB282E',
|
||||||
'#FAE54A',
|
'#FAE54A',
|
||||||
'#9AC653',
|
'#9AC653',
|
||||||
@ -66,25 +66,22 @@ let colors = [
|
|||||||
'#E27B99',
|
'#E27B99',
|
||||||
'#67A945',
|
'#67A945',
|
||||||
'#F6B855',
|
'#F6B855',
|
||||||
'#705A81'
|
'#705A81',
|
||||||
];
|
];
|
||||||
|
|
||||||
let lto_label_width = 70;
|
const lto_label_width = 70;
|
||||||
let lto_label_height = 17;
|
const lto_label_height = 16.9;
|
||||||
|
|
||||||
function foreach_label(page_layout, callback) {
|
function foreach_label(page_layout, callback) {
|
||||||
|
|
||||||
let count = 0;
|
let count = 0;
|
||||||
let row = 0;
|
let row = 0;
|
||||||
let height = page_layout.margin_top;
|
let height = page_layout.margin_top;
|
||||||
|
|
||||||
while ((height + page_layout.label_height) <= page_layout.page_height) {
|
while ((height + page_layout.label_height) <= page_layout.page_height) {
|
||||||
|
|
||||||
let column = 0;
|
let column = 0;
|
||||||
let width = page_layout.margin_left;
|
let width = page_layout.margin_left;
|
||||||
|
|
||||||
while ((width + page_layout.label_width) <= page_layout.page_width) {
|
while ((width + page_layout.label_width) <= page_layout.page_width) {
|
||||||
|
|
||||||
callback(column, row, count, width, height);
|
callback(column, row, count, width, height);
|
||||||
count += 1;
|
count += 1;
|
||||||
|
|
||||||
@ -97,11 +94,9 @@ function foreach_label(page_layout, callback) {
|
|||||||
height += page_layout.label_height;
|
height += page_layout.label_height;
|
||||||
height += page_layout.row_spacing;
|
height += page_layout.row_spacing;
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function compute_max_labels(page_layout) {
|
function compute_max_labels(page_layout) {
|
||||||
|
|
||||||
let max_labels = 0;
|
let max_labels = 0;
|
||||||
foreach_label(page_layout, function() { max_labels += 1; });
|
foreach_label(page_layout, function() { max_labels += 1; });
|
||||||
return max_labels;
|
return max_labels;
|
||||||
@ -110,10 +105,10 @@ function compute_max_labels(page_layout) {
|
|||||||
function svg_label(mode, label, label_type, pagex, pagey, label_borders) {
|
function svg_label(mode, label, label_type, pagex, pagey, label_borders) {
|
||||||
let svg = "";
|
let svg = "";
|
||||||
|
|
||||||
if (label.length != 6) {
|
if (label.length !== 6) {
|
||||||
throw "wrong label length";
|
throw "wrong label length";
|
||||||
}
|
}
|
||||||
if (label_type.length != 2) {
|
if (label_type.length !== 2) {
|
||||||
throw "wrong label_type length";
|
throw "wrong label_type length";
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -126,20 +121,22 @@ function svg_label(mode, label, label_type, pagex, pagey, label_borders) {
|
|||||||
let xpos = pagex + code_width;
|
let xpos = pagex + code_width;
|
||||||
let height = 12;
|
let height = 12;
|
||||||
|
|
||||||
|
let label_rect = `x='${pagex}' y='${pagey}' width='${lto_label_width}' height='${lto_label_height}'`;
|
||||||
|
|
||||||
if (mode === 'placeholder') {
|
if (mode === 'placeholder') {
|
||||||
if (label_borders) {
|
if (label_borders) {
|
||||||
svg += `<rect class='unprintable' x='${pagex}' y='${pagey}' width='${lto_label_width}' height='${lto_label_height}' fill='none' style='stroke:black;stroke-width:0.1;'/>`;
|
svg += `<rect class='unprintable' ${label_rect} fill='none' style='stroke:black;stroke-width:0.1;'/>`;
|
||||||
}
|
}
|
||||||
return svg;
|
return svg;
|
||||||
}
|
}
|
||||||
if (label_borders) {
|
if (label_borders) {
|
||||||
svg += `<rect x='${pagex}' y='${pagey}' width='${lto_label_width}' height='${lto_label_height}' fill='none' style='stroke:black;stroke-width:0.1;'/>`;
|
svg += `<rect ${label_rect} fill='none' style='stroke:black;stroke-width:0.1;'/>`;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (mode === "color" || mode == "frame") {
|
if (mode === "color" || mode === "frame") {
|
||||||
let w = lto_label_width/8;
|
let w = lto_label_width/8;
|
||||||
let h = lto_label_height - height;
|
let h = lto_label_height - height;
|
||||||
for (var i = 0; i < 7; i++) {
|
for (let i = 0; i < 7; i++) {
|
||||||
let textx = w/2 + pagex + i*w;
|
let textx = w/2 + pagex + i*w;
|
||||||
let texty = pagey;
|
let texty = pagey;
|
||||||
|
|
||||||
@ -168,7 +165,7 @@ function svg_label(mode, label, label_type, pagex, pagey, label_borders) {
|
|||||||
|
|
||||||
let raw_label = `*${label}${label_type}*`;
|
let raw_label = `*${label}${label_type}*`;
|
||||||
|
|
||||||
for (var i = 0; i < raw_label.length; i++) {
|
for (let i = 0; i < raw_label.length; i++) {
|
||||||
let letter = raw_label.charAt(i);
|
let letter = raw_label.charAt(i);
|
||||||
|
|
||||||
let code = code39_codes[letter];
|
let code = code39_codes[letter];
|
||||||
@ -186,7 +183,6 @@ function svg_label(mode, label, label_type, pagex, pagey, label_borders) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for (let c of code) {
|
for (let c of code) {
|
||||||
|
|
||||||
if (c === 's') {
|
if (c === 's') {
|
||||||
xpos += small;
|
xpos += small;
|
||||||
continue;
|
continue;
|
||||||
@ -241,7 +237,6 @@ function printBarcodePage() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
function generate_barcode_page(target_id, page_layout, label_list, calibration) {
|
function generate_barcode_page(target_id, page_layout, label_list, calibration) {
|
||||||
|
|
||||||
let svg = svg_page_header(page_layout.page_width, page_layout.page_height);
|
let svg = svg_page_header(page_layout.page_width, page_layout.page_height);
|
||||||
|
|
||||||
let c = calibration;
|
let c = calibration;
|
||||||
@ -255,7 +250,6 @@ function generate_barcode_page(target_id, page_layout, label_list, calibration)
|
|||||||
svg += '>';
|
svg += '>';
|
||||||
|
|
||||||
foreach_label(page_layout, function(column, row, count, xpos, ypos) {
|
foreach_label(page_layout, function(column, row, count, xpos, ypos) {
|
||||||
|
|
||||||
if (count >= label_list.length) { return; }
|
if (count >= label_list.length) { return; }
|
||||||
|
|
||||||
let item = label_list[count];
|
let item = label_list[count];
|
||||||
@ -297,7 +291,6 @@ function setupPrintFrame(frame, page_width, page_height) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
function generate_calibration_page(target_id, page_layout, calibration) {
|
function generate_calibration_page(target_id, page_layout, calibration) {
|
||||||
|
|
||||||
let frame = document.getElementById(target_id);
|
let frame = document.getElementById(target_id);
|
||||||
|
|
||||||
setupPrintFrame(frame, page_layout.page_width, page_layout.page_height);
|
setupPrintFrame(frame, page_layout.page_width, page_layout.page_height);
|
||||||
|
@ -4,7 +4,7 @@ Ext.define('LabelList', {
|
|||||||
|
|
||||||
plugins: {
|
plugins: {
|
||||||
ptype: 'cellediting',
|
ptype: 'cellediting',
|
||||||
clicksToEdit: 1
|
clicksToEdit: 1,
|
||||||
},
|
},
|
||||||
|
|
||||||
selModel: 'cellmodel',
|
selModel: 'cellmodel',
|
||||||
@ -133,7 +133,7 @@ Ext.define('LabelList', {
|
|||||||
handler: function(grid, rowIndex) {
|
handler: function(grid, rowIndex) {
|
||||||
grid.getStore().removeAt(rowIndex);
|
grid.getStore().removeAt(rowIndex);
|
||||||
},
|
},
|
||||||
}
|
},
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
|
@ -4,7 +4,6 @@ if (Ext.isFirefox) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
function draw_labels(target_id, label_list, page_layout, calibration) {
|
function draw_labels(target_id, label_list, page_layout, calibration) {
|
||||||
|
|
||||||
let max_labels = compute_max_labels(page_layout);
|
let max_labels = compute_max_labels(page_layout);
|
||||||
|
|
||||||
let count_fixed = 0;
|
let count_fixed = 0;
|
||||||
@ -44,20 +43,16 @@ function draw_labels(target_id, label_list, page_layout, calibration) {
|
|||||||
count = fill_size;
|
count = fill_size;
|
||||||
}
|
}
|
||||||
rest -= count;
|
rest -= count;
|
||||||
} else {
|
} else if (item.end <= item.start) {
|
||||||
if (item.end <= item.start) {
|
|
||||||
count = 1;
|
count = 1;
|
||||||
} else {
|
} else {
|
||||||
count = (item.end - item.start) + 1;
|
count = (item.end - item.start) + 1;
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
for (j = 0; j < count; j++) {
|
for (j = 0; j < count; j++) {
|
||||||
|
|
||||||
let id = item.start + j;
|
let id = item.start + j;
|
||||||
|
|
||||||
if (item.prefix.length == 6) {
|
if (item.prefix.length == 6) {
|
||||||
|
|
||||||
list.push({
|
list.push({
|
||||||
label: item.prefix,
|
label: item.prefix,
|
||||||
tape_type: item.tape_type,
|
tape_type: item.tape_type,
|
||||||
@ -66,9 +61,7 @@ function draw_labels(target_id, label_list, page_layout, calibration) {
|
|||||||
});
|
});
|
||||||
rest += count - j - 1;
|
rest += count - j - 1;
|
||||||
break;
|
break;
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
|
|
||||||
let pad_len = 6-item.prefix.length;
|
let pad_len = 6-item.prefix.length;
|
||||||
let label = item.prefix + id.toString().padStart(pad_len, 0);
|
let label = item.prefix + id.toString().padStart(pad_len, 0);
|
||||||
|
|
||||||
@ -200,14 +193,13 @@ Ext.define('MainView', {
|
|||||||
tooltip: 'Open Print Dialog',
|
tooltip: 'Open Print Dialog',
|
||||||
handler: function(event, toolEl, panelHeader) {
|
handler: function(event, toolEl, panelHeader) {
|
||||||
printBarcodePage();
|
printBarcodePage();
|
||||||
}
|
},
|
||||||
}],
|
}],
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
});
|
});
|
||||||
|
|
||||||
Ext.onReady(function() {
|
Ext.onReady(function() {
|
||||||
|
|
||||||
Ext.create('MainView', {
|
Ext.create('MainView', {
|
||||||
renderTo: Ext.getBody(),
|
renderTo: Ext.getBody(),
|
||||||
});
|
});
|
||||||
|
@ -31,8 +31,8 @@ Ext.define('PageCalibration', {
|
|||||||
scalex = 100/values.d_x;
|
scalex = 100/values.d_x;
|
||||||
scaley = 100/values.d_y;
|
scaley = 100/values.d_y;
|
||||||
|
|
||||||
let offsetx = ((50*scalex) - values.s_x)/scalex;
|
let offsetx = ((50 - values.s_x) - (50*scalex - 50))/scalex;
|
||||||
let offsety = ((50*scaley) - values.s_y)/scaley;
|
let offsety = ((50 - values.s_y) - (50*scaley - 50))/scaley;
|
||||||
|
|
||||||
return {
|
return {
|
||||||
scalex: scalex,
|
scalex: scalex,
|
||||||
@ -139,4 +139,4 @@ Ext.define('PageCalibration', {
|
|||||||
],
|
],
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
})
|
});
|
||||||
|
@ -106,7 +106,7 @@ Ext.define('PageLayoutPanel', {
|
|||||||
xtype: 'numberfield',
|
xtype: 'numberfield',
|
||||||
name: 'label_height',
|
name: 'label_height',
|
||||||
fieldLabel: 'Label height',
|
fieldLabel: 'Label height',
|
||||||
minValue: 17,
|
minValue: 15,
|
||||||
allowBlank: false,
|
allowBlank: false,
|
||||||
value: 17,
|
value: 17,
|
||||||
},
|
},
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
let paper_sizes = {
|
const paper_sizes = {
|
||||||
a4: {
|
a4: {
|
||||||
comment: 'A4 (plain)',
|
comment: 'A4 (plain)',
|
||||||
page_width: 210,
|
page_width: 210,
|
||||||
@ -15,13 +15,13 @@ let paper_sizes = {
|
|||||||
page_width: 210,
|
page_width: 210,
|
||||||
page_height: 297,
|
page_height: 297,
|
||||||
label_width: 70,
|
label_width: 70,
|
||||||
label_height: 17,
|
label_height: 16.9,
|
||||||
margin_left: 0,
|
margin_left: 0,
|
||||||
margin_top: 4,
|
margin_top: 5,
|
||||||
column_spacing: 0,
|
column_spacing: 0,
|
||||||
row_spacing: 0,
|
row_spacing: 0,
|
||||||
},
|
},
|
||||||
}
|
};
|
||||||
|
|
||||||
function paper_size_combo_data() {
|
function paper_size_combo_data() {
|
||||||
let data = [];
|
let data = [];
|
||||||
|
@ -118,11 +118,11 @@ high, but you cannot recreate backup snapshots from the past.
|
|||||||
Garbage Collection
|
Garbage Collection
|
||||||
------------------
|
------------------
|
||||||
|
|
||||||
You can monitor and run :ref:`garbage collection <garbage-collection>` on the
|
You can monitor and run :ref:`garbage collection <client_garbage-collection>` on the
|
||||||
Proxmox Backup Server using the ``garbage-collection`` subcommand of
|
Proxmox Backup Server using the ``garbage-collection`` subcommand of
|
||||||
``proxmox-backup-manager``. You can use the ``start`` subcommand to manually
|
``proxmox-backup-manager``. You can use the ``start`` subcommand to manually
|
||||||
start garbage collection on an entire datastore and the ``status`` subcommand to
|
start garbage collection on an entire datastore and the ``status`` subcommand to
|
||||||
see attributes relating to the :ref:`garbage collection <garbage-collection>`.
|
see attributes relating to the :ref:`garbage collection <client_garbage-collection>`.
|
||||||
|
|
||||||
This functionality can also be accessed in the GUI, by navigating to **Prune &
|
This functionality can also be accessed in the GUI, by navigating to **Prune &
|
||||||
GC** from the top panel. From here, you can edit the schedule at which garbage
|
GC** from the top panel. From here, you can edit the schedule at which garbage
|
||||||
@ -142,7 +142,7 @@ Verification
|
|||||||
Proxmox Backup offers various verification options to ensure that backup data is
|
Proxmox Backup offers various verification options to ensure that backup data is
|
||||||
intact. Verification is generally carried out through the creation of verify
|
intact. Verification is generally carried out through the creation of verify
|
||||||
jobs. These are scheduled tasks that run verification at a given interval (see
|
jobs. These are scheduled tasks that run verification at a given interval (see
|
||||||
:ref:`calendar-events`). With these, you can set whether already verified
|
:ref:`calendar-event-scheduling`). With these, you can set whether already verified
|
||||||
snapshots are ignored, as well as set a time period, after which verified jobs
|
snapshots are ignored, as well as set a time period, after which verified jobs
|
||||||
are checked again. The interface for creating verify jobs can be found under the
|
are checked again. The interface for creating verify jobs can be found under the
|
||||||
**Verify Jobs** tab of the datastore.
|
**Verify Jobs** tab of the datastore.
|
||||||
|
@ -65,7 +65,7 @@ the ``proxmox-backup-manager sync-job`` command. The configuration information
|
|||||||
for sync jobs is stored at ``/etc/proxmox-backup/sync.cfg``. To create a new
|
for sync jobs is stored at ``/etc/proxmox-backup/sync.cfg``. To create a new
|
||||||
sync job, click the add button in the GUI, or use the ``create`` subcommand.
|
sync job, click the add button in the GUI, or use the ``create`` subcommand.
|
||||||
After creating a sync job, you can either start it manually from the GUI or
|
After creating a sync job, you can either start it manually from the GUI or
|
||||||
provide it with a schedule (see :ref:`calendar-events`) to run regularly.
|
provide it with a schedule (see :ref:`calendar-event-scheduling`) to run regularly.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
|
24
docs/output-format.rst
Normal file
24
docs/output-format.rst
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
Most commands producing output supports the ``--output-format``
|
||||||
|
parameter. It accepts the following values:
|
||||||
|
|
||||||
|
:``text``: Text format (default). Structured data is rendered as a table.
|
||||||
|
|
||||||
|
:``json``: JSON (single line).
|
||||||
|
|
||||||
|
:``json-pretty``: JSON (multiple lines, nicely formatted).
|
||||||
|
|
||||||
|
|
||||||
|
Also, the following environment variables can modify output behavior:
|
||||||
|
|
||||||
|
``PROXMOX_OUTPUT_FORMAT``
|
||||||
|
Defines the default output format.
|
||||||
|
|
||||||
|
``PROXMOX_OUTPUT_NO_BORDER``
|
||||||
|
If set (to any value), do not render table borders.
|
||||||
|
|
||||||
|
``PROXMOX_OUTPUT_NO_HEADER``
|
||||||
|
If set (to any value), do not render table headers.
|
||||||
|
|
||||||
|
.. note:: The ``text`` format is designed to be human readable, and
|
||||||
|
not meant to be parsed by automation tools. Please use the ``json``
|
||||||
|
format if you need to process the output.
|
@ -69,10 +69,12 @@ Here, the output should be:
|
|||||||
|
|
||||||
f3f6c5a3a67baf38ad178e5ff1ee270c /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
|
f3f6c5a3a67baf38ad178e5ff1ee270c /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
|
||||||
|
|
||||||
|
.. _sysadmin_package_repos_enterprise:
|
||||||
|
|
||||||
`Proxmox Backup`_ Enterprise Repository
|
`Proxmox Backup`_ Enterprise Repository
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
This will be the default, stable, and recommended repository. It is available for
|
This is the stable, recommended repository. It is available for
|
||||||
all `Proxmox Backup`_ subscription users. It contains the most stable packages,
|
all `Proxmox Backup`_ subscription users. It contains the most stable packages,
|
||||||
and is suitable for production use. The ``pbs-enterprise`` repository is
|
and is suitable for production use. The ``pbs-enterprise`` repository is
|
||||||
enabled by default:
|
enabled by default:
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
Copyright and Disclaimer
|
Copyright and Disclaimer
|
||||||
========================
|
========================
|
||||||
|
|
||||||
Copyright (C) 2007-2019 Proxmox Server Solutions GmbH
|
Copyright (C) 2007-2021 Proxmox Server Solutions GmbH
|
||||||
|
|
||||||
This program is free software: you can redistribute it and/or modify
|
This program is free software: you can redistribute it and/or modify
|
||||||
it under the terms of the GNU Affero General Public License as
|
it under the terms of the GNU Affero General Public License as
|
||||||
|
2
docs/pmt/description.rst
Normal file
2
docs/pmt/description.rst
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
The ``pmt`` command controls Linux tape devices.
|
||||||
|
|
42
docs/pmt/man1.rst
Normal file
42
docs/pmt/man1.rst
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
==========================
|
||||||
|
pmt
|
||||||
|
==========================
|
||||||
|
|
||||||
|
.. include:: ../epilog.rst
|
||||||
|
|
||||||
|
-------------------------------------------------------------
|
||||||
|
Control Linux Tape Devices
|
||||||
|
-------------------------------------------------------------
|
||||||
|
|
||||||
|
:Author: |AUTHOR|
|
||||||
|
:Version: Version |VERSION|
|
||||||
|
:Manual section: 1
|
||||||
|
|
||||||
|
|
||||||
|
Synopsis
|
||||||
|
========
|
||||||
|
|
||||||
|
.. include:: synopsis.rst
|
||||||
|
|
||||||
|
|
||||||
|
Common Options
|
||||||
|
==============
|
||||||
|
|
||||||
|
.. include:: options.rst
|
||||||
|
|
||||||
|
|
||||||
|
Description
|
||||||
|
===========
|
||||||
|
|
||||||
|
.. include:: description.rst
|
||||||
|
|
||||||
|
|
||||||
|
ENVIRONMENT
|
||||||
|
===========
|
||||||
|
|
||||||
|
:TAPE: If set, replaces the `--device` option.
|
||||||
|
|
||||||
|
:PROXMOX_TAPE_DRIVE: If set, replaces the `--drive` option.
|
||||||
|
|
||||||
|
|
||||||
|
.. include:: ../pbs-copyright.rst
|
51
docs/pmt/options.rst
Normal file
51
docs/pmt/options.rst
Normal file
@ -0,0 +1,51 @@
|
|||||||
|
All commands support the following parameters to specify the tape device:
|
||||||
|
|
||||||
|
--device <path> Path to the Linux tape device
|
||||||
|
|
||||||
|
--drive <name> Use drive from Proxmox Backup Server configuration.
|
||||||
|
|
||||||
|
|
||||||
|
Commands which generate output support the ``--output-format``
|
||||||
|
parameter. It accepts the following values:
|
||||||
|
|
||||||
|
:``text``: Text format (default). Human readable.
|
||||||
|
|
||||||
|
:``json``: JSON (single line).
|
||||||
|
|
||||||
|
:``json-pretty``: JSON (multiple lines, nicely formatted).
|
||||||
|
|
||||||
|
|
||||||
|
Device driver options can be specified as integer numbers (see
|
||||||
|
``/usr/include/linux/mtio.h``), or using symbolic names:
|
||||||
|
|
||||||
|
:``buffer-writes``: Enable buffered writes
|
||||||
|
|
||||||
|
:``async-writes``: Enable async writes
|
||||||
|
|
||||||
|
:``read-ahead``: Use read-ahead for fixed block size
|
||||||
|
|
||||||
|
:``debugging``: Enable debugging if compiled into the driver
|
||||||
|
|
||||||
|
:``two-fm``: Write two file marks when closing the file
|
||||||
|
|
||||||
|
:``fast-mteom``: Space directly to eod (and lose file number)
|
||||||
|
|
||||||
|
:``auto-lock``: Automatically lock/unlock drive door
|
||||||
|
|
||||||
|
:``def-writes``: Defaults are meant only for writes
|
||||||
|
|
||||||
|
:``can-bsr``: Indicates that the drive can space backwards
|
||||||
|
|
||||||
|
:``no-blklims``: Drive does not support read block limits
|
||||||
|
|
||||||
|
:``can-partitions``: Drive can handle partitioned tapes
|
||||||
|
|
||||||
|
:``scsi2locical``: Seek and tell use SCSI-2 logical block addresses
|
||||||
|
|
||||||
|
:``sysv``: Enable the System V semantics
|
||||||
|
|
||||||
|
:``nowait``: Do not wait for rewind, etc. to complete
|
||||||
|
|
||||||
|
:``sili``: Enables setting the SILI bit in SCSI commands when reading
|
||||||
|
in variable block mode to enhance performance when reading blocks
|
||||||
|
shorter than the byte count
|
@ -1,6 +1,3 @@
|
|||||||
Description
|
|
||||||
^^^^^^^^^^^
|
|
||||||
|
|
||||||
The ``pmtx`` command controls SCSI media changer devices (tape
|
The ``pmtx`` command controls SCSI media changer devices (tape
|
||||||
autoloader).
|
autoloader).
|
||||||
|
|
||||||
|
@ -18,11 +18,40 @@ Synopsis
|
|||||||
|
|
||||||
.. include:: synopsis.rst
|
.. include:: synopsis.rst
|
||||||
|
|
||||||
|
|
||||||
|
Common Options
|
||||||
|
==============
|
||||||
|
|
||||||
|
All command supports the following parameters to specify the changer device:
|
||||||
|
|
||||||
|
--device <path> Path to Linux generic SCSI device (e.g. '/dev/sg4')
|
||||||
|
|
||||||
|
--changer <name> Use changer from Proxmox Backup Server configuration.
|
||||||
|
|
||||||
|
|
||||||
|
Commands generating output supports the ``--output-format``
|
||||||
|
parameter. It accepts the following values:
|
||||||
|
|
||||||
|
:``text``: Text format (default). Human readable.
|
||||||
|
|
||||||
|
:``json``: JSON (single line).
|
||||||
|
|
||||||
|
:``json-pretty``: JSON (multiple lines, nicely formatted).
|
||||||
|
|
||||||
|
|
||||||
Description
|
Description
|
||||||
============
|
============
|
||||||
|
|
||||||
.. include:: description.rst
|
.. include:: description.rst
|
||||||
|
|
||||||
|
|
||||||
.. include:: ../pbs-copyright.rst
|
ENVIRONMENT
|
||||||
|
===========
|
||||||
|
|
||||||
|
:CHANGER: If set, replaces the `--device` option
|
||||||
|
|
||||||
|
:PROXMOX_TAPE_DRIVE: If set, use the Proxmox Backup Server
|
||||||
|
configuration to find the associcated changer device.
|
||||||
|
|
||||||
|
|
||||||
|
.. include:: ../pbs-copyright.rst
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
This is just a test.
|
This tool implements a backup server client, i.e. it can connect to a
|
||||||
|
backup servers to issue management commands and to create or restore
|
||||||
.. NOTE:: No further info.
|
backups.
|
||||||
|
|
||||||
|
@ -31,6 +31,12 @@ Those command are available when you start an intercative restore shell:
|
|||||||
.. include:: catalog-shell-synopsis.rst
|
.. include:: catalog-shell-synopsis.rst
|
||||||
|
|
||||||
|
|
||||||
|
Common Options
|
||||||
|
==============
|
||||||
|
|
||||||
|
.. include:: ../output-format.rst
|
||||||
|
|
||||||
|
|
||||||
Description
|
Description
|
||||||
============
|
============
|
||||||
|
|
||||||
|
@ -1,4 +1,2 @@
|
|||||||
This is just a test.
|
This tool exposes the whole backup server management API on the
|
||||||
|
command line.
|
||||||
.. NOTE:: No further info.
|
|
||||||
|
|
||||||
|
@ -1,4 +1,5 @@
|
|||||||
This is just a test.
|
This daemon exposes the whole Proxmox Backup Server API on TCP port
|
||||||
|
8007 using HTTPS. It runs as user ``backup`` and has very limited
|
||||||
.. NOTE:: No further info.
|
permissions. Operation requiring more permissions are forwarded to
|
||||||
|
the local ``proxmox-backup`` service.
|
||||||
|
|
||||||
|
7
docs/proxmox-backup/description.rst
Normal file
7
docs/proxmox-backup/description.rst
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
This daemon exposes the Proxmox Backup Server management API on
|
||||||
|
``127.0.0.1:82``. It runs as ``root`` and has permission to do all
|
||||||
|
privileged operations.
|
||||||
|
|
||||||
|
NOTE: The daemon listens to a local address only, so you cannot access
|
||||||
|
it from outside. The ``proxmox-backup-proxy`` daemon exposes the API
|
||||||
|
to the outside world.
|
41
docs/proxmox-backup/man1.rst
Normal file
41
docs/proxmox-backup/man1.rst
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
==========================
|
||||||
|
proxmox-backup
|
||||||
|
==========================
|
||||||
|
|
||||||
|
.. include:: ../epilog.rst
|
||||||
|
|
||||||
|
-------------------------------------------------------------
|
||||||
|
Proxmox Backup Local API Server
|
||||||
|
-------------------------------------------------------------
|
||||||
|
|
||||||
|
:Author: |AUTHOR|
|
||||||
|
:Version: Version |VERSION|
|
||||||
|
:Manual section: 1
|
||||||
|
|
||||||
|
|
||||||
|
Synopsis
|
||||||
|
==========
|
||||||
|
|
||||||
|
This daemon is normally started and managed as ``systemd`` service::
|
||||||
|
|
||||||
|
systemctl start proxmox-backup
|
||||||
|
|
||||||
|
systemctl stop proxmox-backup
|
||||||
|
|
||||||
|
systemctl status proxmox-backup
|
||||||
|
|
||||||
|
For debugging, you can start the daemon in foreground using::
|
||||||
|
|
||||||
|
proxmox-backup-api
|
||||||
|
|
||||||
|
.. NOTE:: You need to stop the service before starting the daemon in
|
||||||
|
foreground.
|
||||||
|
|
||||||
|
|
||||||
|
Description
|
||||||
|
============
|
||||||
|
|
||||||
|
.. include:: description.rst
|
||||||
|
|
||||||
|
|
||||||
|
.. include:: ../pbs-copyright.rst
|
1
docs/proxmox-tape/description.rst
Normal file
1
docs/proxmox-tape/description.rst
Normal file
@ -0,0 +1 @@
|
|||||||
|
This tool can configure and manage tape backups.
|
28
docs/proxmox-tape/man1.rst
Normal file
28
docs/proxmox-tape/man1.rst
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
==========================
|
||||||
|
proxmox-tape
|
||||||
|
==========================
|
||||||
|
|
||||||
|
.. include:: ../epilog.rst
|
||||||
|
|
||||||
|
-------------------------------------------------------------
|
||||||
|
Proxmox Tape Backup Command Line Tool
|
||||||
|
-------------------------------------------------------------
|
||||||
|
|
||||||
|
:Author: |AUTHOR|
|
||||||
|
:Version: Version |VERSION|
|
||||||
|
:Manual section: 1
|
||||||
|
|
||||||
|
|
||||||
|
Synopsis
|
||||||
|
========
|
||||||
|
|
||||||
|
.. include:: synopsis.rst
|
||||||
|
|
||||||
|
Description
|
||||||
|
===========
|
||||||
|
|
||||||
|
.. include:: description.rst
|
||||||
|
|
||||||
|
|
||||||
|
.. include:: ../pbs-copyright.rst
|
||||||
|
|
@ -1,6 +1,3 @@
|
|||||||
Description
|
|
||||||
^^^^^^^^^^^
|
|
||||||
|
|
||||||
``pxar`` is a command line utility to create and manipulate archives in the
|
``pxar`` is a command line utility to create and manipulate archives in the
|
||||||
:ref:`pxar-format`.
|
:ref:`pxar-format`.
|
||||||
It is inspired by `casync file archive format
|
It is inspired by `casync file archive format
|
||||||
@ -80,7 +77,7 @@ These files must contain one pattern per line, again later patterns win over
|
|||||||
previous ones.
|
previous ones.
|
||||||
The patterns control file exclusions of files present within the given directory
|
The patterns control file exclusions of files present within the given directory
|
||||||
or further below it in the tree.
|
or further below it in the tree.
|
||||||
The behavior is the same as described in :ref:`creating-backups`.
|
The behavior is the same as described in :ref:`client_creating_backups`.
|
||||||
|
|
||||||
Extracting an Archive
|
Extracting an Archive
|
||||||
^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
@ -4,6 +4,9 @@ pxar
|
|||||||
|
|
||||||
.. include:: ../epilog.rst
|
.. include:: ../epilog.rst
|
||||||
|
|
||||||
|
.. Avoid errors with sphinx ref role
|
||||||
|
.. role:: ref(emphasis)
|
||||||
|
|
||||||
-------------------------------------------------------------
|
-------------------------------------------------------------
|
||||||
Proxmox File Archive Command Line Tool
|
Proxmox File Archive Command Line Tool
|
||||||
-------------------------------------------------------------
|
-------------------------------------------------------------
|
||||||
@ -25,4 +28,3 @@ Description
|
|||||||
|
|
||||||
|
|
||||||
.. include:: ../pbs-copyright.rst
|
.. include:: ../pbs-copyright.rst
|
||||||
|
|
||||||
|
@ -6,3 +6,9 @@ Service Daemons
|
|||||||
|
|
||||||
.. include:: proxmox-backup-proxy/description.rst
|
.. include:: proxmox-backup-proxy/description.rst
|
||||||
|
|
||||||
|
|
||||||
|
``proxmox-backup``
|
||||||
|
~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
.. include:: proxmox-backup/description.rst
|
||||||
|
|
||||||
|
@ -119,8 +119,8 @@ directory on the filesystem. Each datastore also has associated retention
|
|||||||
settings of how many backup snapshots for each interval of ``hourly``,
|
settings of how many backup snapshots for each interval of ``hourly``,
|
||||||
``daily``, ``weekly``, ``monthly``, ``yearly`` as well as a time-independent
|
``daily``, ``weekly``, ``monthly``, ``yearly`` as well as a time-independent
|
||||||
number of backups to keep in that store. :ref:`backup-pruning` and
|
number of backups to keep in that store. :ref:`backup-pruning` and
|
||||||
:ref:`garbage collection <garbage-collection>` can also be configured to run
|
:ref:`garbage collection <client_garbage-collection>` can also be configured to run
|
||||||
periodically based on a configured schedule (see :ref:`calendar-events`) per datastore.
|
periodically based on a configured schedule (see :ref:`calendar-event-scheduling`) per datastore.
|
||||||
|
|
||||||
|
|
||||||
.. _storage_datastore_create:
|
.. _storage_datastore_create:
|
||||||
|
@ -25,4 +25,7 @@ either explain things which are different on `Proxmox Backup`_, or
|
|||||||
tasks which are commonly used on `Proxmox Backup`_. For other topics,
|
tasks which are commonly used on `Proxmox Backup`_. For other topics,
|
||||||
please refer to the standard Debian documentation.
|
please refer to the standard Debian documentation.
|
||||||
|
|
||||||
|
|
||||||
.. include:: local-zfs.rst
|
.. include:: local-zfs.rst
|
||||||
|
|
||||||
|
.. include:: services.rst
|
||||||
|
@ -1,39 +1,49 @@
|
|||||||
|
.. _tape_backup:
|
||||||
|
|
||||||
Tape Backup
|
Tape Backup
|
||||||
===========
|
===========
|
||||||
|
|
||||||
|
.. CAUTION:: Tape Backup is a technical preview feature, not meant for
|
||||||
|
production use. To enable it in the GUI, you need to issue the
|
||||||
|
following command (as root user on the console):
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# touch /etc/proxmox-backup/tape.cfg
|
||||||
|
|
||||||
Proxmox tape backup provides an easy way to store datastore content
|
Proxmox tape backup provides an easy way to store datastore content
|
||||||
onto magnetic tapes. This increases data safety because you get:
|
onto magnetic tapes. This increases data safety because you get:
|
||||||
|
|
||||||
- an additional copy of the data
|
- an additional copy of the data,
|
||||||
- to a different media type (tape)
|
- on a different media type (tape),
|
||||||
- to an additional location (you can move tapes offsite)
|
- to an additional location (you can move tapes off-site)
|
||||||
|
|
||||||
In most restore jobs, only data from the last backup job is restored.
|
In most restore jobs, only data from the last backup job is restored.
|
||||||
Restore requests further decline the older the data
|
Restore requests further decline, the older the data
|
||||||
gets. Considering this, tape backup may also help to reduce disk
|
gets. Considering this, tape backup may also help to reduce disk
|
||||||
usage, because you can safely remove data from disk once archived on
|
usage, because you can safely remove data from disk, once it's archived on
|
||||||
tape. This is especially true if you need to keep data for several
|
tape. This is especially true if you need to retain data for several
|
||||||
years.
|
years.
|
||||||
|
|
||||||
Tape backups do not provide random access to the stored data. Instead,
|
Tape backups do not provide random access to the stored data. Instead,
|
||||||
you need to restore the data to disk before you can access it
|
you need to restore the data to disk, before you can access it
|
||||||
again. Also, if you store your tapes offsite (using some kind of tape
|
again. Also, if you store your tapes off-site (using some kind of tape
|
||||||
vaulting service), you need to bring them onsite before you can do any
|
vaulting service), you need to bring them back on-site, before you can do any
|
||||||
restore. So please consider that restores from tapes can take much
|
restores. So please consider that restoring from tape can take much
|
||||||
longer than restores from disk.
|
longer than restoring from disk.
|
||||||
|
|
||||||
|
|
||||||
Tape Technology Primer
|
Tape Technology Primer
|
||||||
----------------------
|
----------------------
|
||||||
|
|
||||||
.. _Linear Tape Open: https://en.wikipedia.org/wiki/Linear_Tape-Open
|
.. _Linear Tape-Open: https://en.wikipedia.org/wiki/Linear_Tape-Open
|
||||||
|
|
||||||
As of 2021, the only broadly available tape technology standard is
|
As of 2021, the only widely available tape technology standard is
|
||||||
`Linear Tape Open`_, and different vendors offers LTO Ultrium tape
|
`Linear Tape-Open`_ (LTO). Different vendors offer LTO Ultrium tape
|
||||||
drives, autoloaders and LTO tape cartridges.
|
drives, auto-loaders, and LTO tape cartridges.
|
||||||
|
|
||||||
There are a few vendors offering proprietary drives with
|
There are a few vendors that offer proprietary drives with
|
||||||
slight advantages in performance and capacity, but they have
|
slight advantages in performance and capacity. Nevertheless, they have
|
||||||
significant disadvantages:
|
significant disadvantages:
|
||||||
|
|
||||||
- proprietary (single vendor)
|
- proprietary (single vendor)
|
||||||
@ -43,56 +53,55 @@ So we currently do not test such drives.
|
|||||||
|
|
||||||
In general, LTO tapes offer the following advantages:
|
In general, LTO tapes offer the following advantages:
|
||||||
|
|
||||||
- Durable (30 years)
|
- Durability (30 year lifespan)
|
||||||
- High Capacity (12 TB)
|
- High Capacity (12 TB)
|
||||||
- Relatively low cost per TB
|
- Relatively low cost per TB
|
||||||
- Cold Media
|
- Cold Media
|
||||||
- Movable (storable inside vault)
|
- Movable (storable inside vault)
|
||||||
- Multiple vendors (for both media and drives)
|
- Multiple vendors (for both media and drives)
|
||||||
- Build in AES-CGM Encryption engine
|
- Built-in AES-GCM Encryption engine
|
||||||
|
|
||||||
Please note that `Proxmox Backup Server` already stores compressed
|
Note that `Proxmox Backup Server` already stores compressed data, so using the
|
||||||
data, so we do not need/use the tape compression feature.
|
tape compression feature has no advantage.
|
||||||
|
|
||||||
|
|
||||||
Supported Hardware
|
Supported Hardware
|
||||||
------------------
|
------------------
|
||||||
|
|
||||||
Proxmox Backup Server supports `Linear Tape Open`_ genertion 4 (LTO4)
|
Proxmox Backup Server supports `Linear Tape-Open`_ generation 4 (LTO-4)
|
||||||
or later. In general, all SCSI2 tape drives supported by the Linux
|
or later. In general, all SCSI-2 tape drives supported by the Linux
|
||||||
kernel should work, but feature like hardware encryptions needs LTO4
|
kernel should work, but features like hardware encryption need LTO-4
|
||||||
or later.
|
or later.
|
||||||
|
|
||||||
Tape changer support is done using the Linux 'mtx' command line
|
Tape changing is carried out using the Linux 'mtx' command line
|
||||||
tool. So any changer device supported by that tool should work.
|
tool, so any changer device supported by this tool should work.
|
||||||
|
|
||||||
|
|
||||||
Drive Performance
|
Drive Performance
|
||||||
~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Current LTO-8 tapes provide read/write speeds up to 360MB/s. This means,
|
Current LTO-8 tapes provide read/write speeds of up to 360 MB/s. This means,
|
||||||
that it still takes a minimum of 9 hours to completely write or
|
that it still takes a minimum of 9 hours to completely write or
|
||||||
read a single tape (even at maximum speed).
|
read a single tape (even at maximum speed).
|
||||||
|
|
||||||
The only way to speed up that data rate is to use more than one
|
The only way to speed up that data rate is to use more than one
|
||||||
drive. That way you can run several backup jobs in parallel, or run
|
drive. That way, you can run several backup jobs in parallel, or run
|
||||||
restore jobs while the other drives are used for backups.
|
restore jobs while the other drives are used for backups.
|
||||||
|
|
||||||
Also consider that you need to read data first from your datastore
|
Also consider that you first need to read data from your datastore
|
||||||
(disk). But a single spinning disk is unable to deliver data at this
|
(disk). However, a single spinning disk is unable to deliver data at this
|
||||||
rate. We measured a maximum rate of about 60MB/s to 100MB/s in practice,
|
rate. We measured a maximum rate of about 60MB/s to 100MB/s in practice,
|
||||||
so it takes 33 hours to read 12TB to fill up an LTO-8 tape. If you want
|
so it takes 33 hours to read the 12TB needed to fill up an LTO-8 tape. If you want
|
||||||
to run your tape at full speed, please make sure that the source
|
to write to your tape at full speed, please make sure that the source
|
||||||
datastore is able to deliver that performance (e.g., by using SSDs).
|
datastore is able to deliver that performance (e.g., by using SSDs).
|
||||||
|
|
||||||
|
|
||||||
Terminology
|
Terminology
|
||||||
-----------
|
-----------
|
||||||
|
|
||||||
:Tape Labels: are used to uniquely indentify a tape. You normally use
|
:Tape Labels: are used to uniquely identify a tape. You would normally apply a
|
||||||
some sticky paper labels and apply them on the front of the
|
sticky paper label to the front of the cartridge. We additionally store the
|
||||||
cartridge. We additionally store the label text magnetically on the
|
label text magnetically on the tape (first file on tape).
|
||||||
tape (first file on tape).
|
|
||||||
|
|
||||||
.. _Code 39: https://en.wikipedia.org/wiki/Code_39
|
.. _Code 39: https://en.wikipedia.org/wiki/Code_39
|
||||||
|
|
||||||
@ -102,14 +111,14 @@ Terminology
|
|||||||
|
|
||||||
:Barcodes: are a special form of tape labels, which are electronically
|
:Barcodes: are a special form of tape labels, which are electronically
|
||||||
readable. Most LTO tape robots use an 8 character string encoded as
|
readable. Most LTO tape robots use an 8 character string encoded as
|
||||||
`Code 39`_, as definded in the `LTO Ultrium Cartridge Label
|
`Code 39`_, as defined in the `LTO Ultrium Cartridge Label
|
||||||
Specification`_.
|
Specification`_.
|
||||||
|
|
||||||
You can either buy such barcode labels from your cartridge vendor,
|
You can either buy such barcode labels from your cartridge vendor,
|
||||||
or print them yourself. You can use our `LTO Barcode Generator`_ App
|
or print them yourself. You can use our `LTO Barcode Generator`_
|
||||||
for that.
|
app, if you would like to print them yourself.
|
||||||
|
|
||||||
.. Note:: Physical labels and the associated adhesive shall have an
|
.. Note:: Physical labels and the associated adhesive should have an
|
||||||
environmental performance to match or exceed the environmental
|
environmental performance to match or exceed the environmental
|
||||||
specifications of the cartridge to which it is applied.
|
specifications of the cartridge to which it is applied.
|
||||||
|
|
||||||
@ -122,8 +131,8 @@ Terminology
|
|||||||
:Media Set: A group of continuously written tapes (all from the same
|
:Media Set: A group of continuously written tapes (all from the same
|
||||||
media pool).
|
media pool).
|
||||||
|
|
||||||
:Tape drive: The decive used to read and write data to the tape. There
|
:Tape drive: The device used to read and write data to the tape. There
|
||||||
are standalone drives, but drives often ship within tape libraries.
|
are standalone drives, but drives are usually shipped within tape libraries.
|
||||||
|
|
||||||
:Tape changer: A device which can change the tapes inside a tape drive
|
:Tape changer: A device which can change the tapes inside a tape drive
|
||||||
(tape robot). They are usually part of a tape library.
|
(tape robot). They are usually part of a tape library.
|
||||||
@ -132,10 +141,10 @@ Terminology
|
|||||||
|
|
||||||
:`Tape library`_: A storage device that contains one or more tape drives,
|
:`Tape library`_: A storage device that contains one or more tape drives,
|
||||||
a number of slots to hold tape cartridges, a barcode reader to
|
a number of slots to hold tape cartridges, a barcode reader to
|
||||||
identify tape cartridges and an automated method for loading tapes
|
identify tape cartridges, and an automated method for loading tapes
|
||||||
(a robot).
|
(a robot).
|
||||||
|
|
||||||
People als call this 'autoloader', 'tape robot' or 'tape jukebox'.
|
This is also commonly known as an 'autoloader', 'tape robot' or 'tape jukebox'.
|
||||||
|
|
||||||
:Inventory: The inventory stores the list of known tapes (with
|
:Inventory: The inventory stores the list of known tapes (with
|
||||||
additional status information).
|
additional status information).
|
||||||
@ -143,14 +152,14 @@ Terminology
|
|||||||
:Catalog: A media catalog stores information about the media content.
|
:Catalog: A media catalog stores information about the media content.
|
||||||
|
|
||||||
|
|
||||||
Tape Quickstart
|
Tape Quick Start
|
||||||
---------------
|
----------------
|
||||||
|
|
||||||
1. Configure your tape hardware (drives and changers)
|
1. Configure your tape hardware (drives and changers)
|
||||||
|
|
||||||
2. Configure one or more media pools
|
2. Configure one or more media pools
|
||||||
|
|
||||||
3. Label your tape cartridges.
|
3. Label your tape cartridges
|
||||||
|
|
||||||
4. Start your first tape backup job ...
|
4. Start your first tape backup job ...
|
||||||
|
|
||||||
@ -159,9 +168,10 @@ Configuration
|
|||||||
-------------
|
-------------
|
||||||
|
|
||||||
Please note that you can configure anything using the graphical user
|
Please note that you can configure anything using the graphical user
|
||||||
interface or the command line interface. Both methods results in the
|
interface or the command line interface. Both methods result in the
|
||||||
same configuration.
|
same configuration.
|
||||||
|
|
||||||
|
.. _tape_changer_config:
|
||||||
|
|
||||||
Tape changers
|
Tape changers
|
||||||
~~~~~~~~~~~~~
|
~~~~~~~~~~~~~
|
||||||
@ -169,8 +179,10 @@ Tape changers
|
|||||||
Tape changers (robots) are part of a `Tape Library`_. You can skip
|
Tape changers (robots) are part of a `Tape Library`_. You can skip
|
||||||
this step if you are using a standalone drive.
|
this step if you are using a standalone drive.
|
||||||
|
|
||||||
Linux is able to auto detect those devices, and you can get a list
|
Linux is able to auto detect these devices, and you can get a list
|
||||||
of available devices using::
|
of available devices using:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape changer scan
|
# proxmox-tape changer scan
|
||||||
┌─────────────────────────────┬─────────┬──────────────┬────────┐
|
┌─────────────────────────────┬─────────┬──────────────┬────────┐
|
||||||
@ -179,18 +191,22 @@ of available devices using::
|
|||||||
│ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
|
│ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
|
||||||
└─────────────────────────────┴─────────┴──────────────┴────────┘
|
└─────────────────────────────┴─────────┴──────────────┴────────┘
|
||||||
|
|
||||||
In order to use that device with Proxmox, you need to create a
|
In order to use a device with Proxmox Backup Server, you need to create a
|
||||||
configuration entry::
|
configuration entry:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape changer create sl3 --path /dev/tape/by-id/scsi-CC2C52
|
# proxmox-tape changer create sl3 --path /dev/tape/by-id/scsi-CC2C52
|
||||||
|
|
||||||
Where ``sl3`` is an arbitrary name you can choose.
|
Where ``sl3`` is an arbitrary name you can choose.
|
||||||
|
|
||||||
.. Note:: Please use stable device path names from inside
|
.. Note:: Please use the persistent device path names from inside
|
||||||
``/dev/tape/by-id/``. Names like ``/dev/sg0`` may point to a
|
``/dev/tape/by-id/``. Names like ``/dev/sg0`` may point to a
|
||||||
different device after reboot, and that is not what you want.
|
different device after reboot, and that is not what you want.
|
||||||
|
|
||||||
You can show the final configuration with::
|
You can display the final configuration with:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape changer config sl3
|
# proxmox-tape changer config sl3
|
||||||
┌──────┬─────────────────────────────┐
|
┌──────┬─────────────────────────────┐
|
||||||
@ -201,7 +217,9 @@ You can show the final configuration with::
|
|||||||
│ path │ /dev/tape/by-id/scsi-CC2C52 │
|
│ path │ /dev/tape/by-id/scsi-CC2C52 │
|
||||||
└──────┴─────────────────────────────┘
|
└──────┴─────────────────────────────┘
|
||||||
|
|
||||||
Or simply list all configured changer devices::
|
Or simply list all configured changer devices:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape changer list
|
# proxmox-tape changer list
|
||||||
┌──────┬─────────────────────────────┬─────────┬──────────────┬────────────┐
|
┌──────┬─────────────────────────────┬─────────┬──────────────┬────────────┐
|
||||||
@ -213,7 +231,9 @@ Or simply list all configured changer devices::
|
|||||||
The Vendor, Model and Serial number are auto detected, but only shown
|
The Vendor, Model and Serial number are auto detected, but only shown
|
||||||
if the device is online.
|
if the device is online.
|
||||||
|
|
||||||
To test your setup, please query the status of the changer device with::
|
To test your setup, please query the status of the changer device with:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape changer status sl3
|
# proxmox-tape changer status sl3
|
||||||
┌───────────────┬──────────┬────────────┬─────────────┐
|
┌───────────────┬──────────┬────────────┬─────────────┐
|
||||||
@ -231,15 +251,15 @@ To test your setup, please query the status of the changer device with::
|
|||||||
└───────────────┴──────────┴────────────┴─────────────┘
|
└───────────────┴──────────┴────────────┴─────────────┘
|
||||||
|
|
||||||
Tape libraries usually provide some special import/export slots (also
|
Tape libraries usually provide some special import/export slots (also
|
||||||
called "mail slots"). Tapes inside those slots are acessible from
|
called "mail slots"). Tapes inside those slots are accessible from
|
||||||
outside, making it easy to add/remove tapes to/from the library. Those
|
outside, making it easy to add/remove tapes to/from the library. Those
|
||||||
tapes are considered to be "offline", so backup jobs will not use
|
tapes are considered to be "offline", so backup jobs will not use
|
||||||
them. Those special slots are auto-detected and marked as
|
them. Those special slots are auto-detected and marked as an
|
||||||
``import-export`` slot in the status command.
|
``import-export`` slot in the status command.
|
||||||
|
|
||||||
It's worth noting that some of the smaller tape libraries don't have
|
It's worth noting that some of the smaller tape libraries don't have
|
||||||
such slots. While they have something called "Mail Slot", that slot
|
such slots. While they have something called a "Mail Slot", that slot
|
||||||
is just a way to grab the tape from the gripper. But they are unable
|
is just a way to grab the tape from the gripper. They are unable
|
||||||
to hold media while the robot does other things. They also do not
|
to hold media while the robot does other things. They also do not
|
||||||
expose that "Mail Slot" over the SCSI interface, so you won't see them in
|
expose that "Mail Slot" over the SCSI interface, so you won't see them in
|
||||||
the status output.
|
the status output.
|
||||||
@ -247,12 +267,16 @@ the status output.
|
|||||||
As a workaround, you can mark some of the normal slots as export
|
As a workaround, you can mark some of the normal slots as export
|
||||||
slot. The software treats those slots like real ``import-export``
|
slot. The software treats those slots like real ``import-export``
|
||||||
slots, and the media inside those slots is considered to be 'offline'
|
slots, and the media inside those slots is considered to be 'offline'
|
||||||
(not available for backup)::
|
(not available for backup):
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape changer update sl3 --export-slots 15,16
|
# proxmox-tape changer update sl3 --export-slots 15,16
|
||||||
|
|
||||||
After that, you can see those artificial ``import-export`` slots in
|
After that, you can see those artificial ``import-export`` slots in
|
||||||
the status output::
|
the status output:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape changer status sl3
|
# proxmox-tape changer status sl3
|
||||||
┌───────────────┬──────────┬────────────┬─────────────┐
|
┌───────────────┬──────────┬────────────┬─────────────┐
|
||||||
@ -273,12 +297,15 @@ the status output::
|
|||||||
│ slot │ 14 │ │ │
|
│ slot │ 14 │ │ │
|
||||||
└───────────────┴──────────┴────────────┴─────────────┘
|
└───────────────┴──────────┴────────────┴─────────────┘
|
||||||
|
|
||||||
|
.. _tape_drive_config:
|
||||||
|
|
||||||
Tape drives
|
Tape drives
|
||||||
~~~~~~~~~~~
|
~~~~~~~~~~~
|
||||||
|
|
||||||
Linux is able to auto detect tape drives, and you can get a list
|
Linux is able to auto detect tape drives, and you can get a list
|
||||||
of available tape drives using::
|
of available tape drives using:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape drive scan
|
# proxmox-tape drive scan
|
||||||
┌────────────────────────────────┬────────┬─────────────┬────────┐
|
┌────────────────────────────────┬────────┬─────────────┬────────┐
|
||||||
@ -288,24 +315,30 @@ of available tape drives using::
|
|||||||
└────────────────────────────────┴────────┴─────────────┴────────┘
|
└────────────────────────────────┴────────┴─────────────┴────────┘
|
||||||
|
|
||||||
In order to use that drive with Proxmox, you need to create a
|
In order to use that drive with Proxmox, you need to create a
|
||||||
configuration entry::
|
configuration entry:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape drive create mydrive --path /dev/tape/by-id/scsi-12345-nst
|
# proxmox-tape drive create mydrive --path /dev/tape/by-id/scsi-12345-nst
|
||||||
|
|
||||||
.. Note:: Please use stable device path names from inside
|
.. Note:: Please use the persistent device path names from inside
|
||||||
``/dev/tape/by-id/``. Names like ``/dev/nst0`` may point to a
|
``/dev/tape/by-id/``. Names like ``/dev/nst0`` may point to a
|
||||||
different device after reboot, and that is not what you want.
|
different device after reboot, and that is not what you want.
|
||||||
|
|
||||||
If you have a tape library, you also need to set the associated
|
If you have a tape library, you also need to set the associated
|
||||||
changer device::
|
changer device:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape drive update mydrive --changer sl3 --changer-drivenum 0
|
# proxmox-tape drive update mydrive --changer sl3 --changer-drivenum 0
|
||||||
|
|
||||||
The ``--changer-drivenum`` is only necessary if the tape library
|
The ``--changer-drivenum`` is only necessary if the tape library
|
||||||
includes more than one drive (The changer status command lists all
|
includes more than one drive (the changer status command lists all
|
||||||
drivenums).
|
drive numbers).
|
||||||
|
|
||||||
You can show the final configuration with::
|
You can display the final configuration with:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape drive config mydrive
|
# proxmox-tape drive config mydrive
|
||||||
┌─────────┬────────────────────────────────┐
|
┌─────────┬────────────────────────────────┐
|
||||||
@ -319,9 +352,11 @@ You can show the final configuration with::
|
|||||||
└─────────┴────────────────────────────────┘
|
└─────────┴────────────────────────────────┘
|
||||||
|
|
||||||
.. NOTE:: The ``changer-drivenum`` value 0 is not stored in the
|
.. NOTE:: The ``changer-drivenum`` value 0 is not stored in the
|
||||||
configuration, because that is the default.
|
configuration, because it is the default.
|
||||||
|
|
||||||
To list all configured drives use::
|
To list all configured drives use:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape drive list
|
# proxmox-tape drive list
|
||||||
┌──────────┬────────────────────────────────┬─────────┬────────┬─────────────┬────────┐
|
┌──────────┬────────────────────────────────┬─────────┬────────┬─────────────┬────────┐
|
||||||
@ -333,7 +368,9 @@ To list all configured drives use::
|
|||||||
The Vendor, Model and Serial number are auto detected, but only shown
|
The Vendor, Model and Serial number are auto detected, but only shown
|
||||||
if the device is online.
|
if the device is online.
|
||||||
|
|
||||||
For testing, you can simply query the drive status with::
|
For testing, you can simply query the drive status with:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape status --drive mydrive
|
# proxmox-tape status --drive mydrive
|
||||||
┌───────────┬────────────────────────┐
|
┌───────────┬────────────────────────┐
|
||||||
@ -345,9 +382,11 @@ For testing, you can simply query the drive status with::
|
|||||||
└───────────┴────────────────────────┘
|
└───────────┴────────────────────────┘
|
||||||
|
|
||||||
.. NOTE:: Blocksize should always be 0 (variable block size
|
.. NOTE:: Blocksize should always be 0 (variable block size
|
||||||
mode). This is the default anyways.
|
mode). This is the default anyway.
|
||||||
|
|
||||||
|
|
||||||
|
.. _tape_media_pool_config:
|
||||||
|
|
||||||
Media Pools
|
Media Pools
|
||||||
~~~~~~~~~~~
|
~~~~~~~~~~~
|
||||||
|
|
||||||
@ -359,11 +398,11 @@ one media pool, so a job only uses tapes from that pool.
|
|||||||
A media set is a group of continuously written tapes, used to split
|
A media set is a group of continuously written tapes, used to split
|
||||||
the larger pool into smaller, restorable units. One or more backup
|
the larger pool into smaller, restorable units. One or more backup
|
||||||
jobs write to a media set, producing an ordered group of
|
jobs write to a media set, producing an ordered group of
|
||||||
tapes. Media sets are identified by an unique ID. That ID and the
|
tapes. Media sets are identified by a unique ID. That ID and the
|
||||||
sequence number is stored on each tape of that set (tape label).
|
sequence number are stored on each tape of that set (tape label).
|
||||||
|
|
||||||
Media sets are the basic unit for restore tasks, i.e. you need all
|
Media sets are the basic unit for restore tasks. This means that you need
|
||||||
tapes in the set to restore the media set content. Data is fully
|
every tape in the set to restore the media set contents. Data is fully
|
||||||
deduplicated inside a media set.
|
deduplicated inside a media set.
|
||||||
|
|
||||||
|
|
||||||
@ -372,37 +411,37 @@ one media pool, so a job only uses tapes from that pool.
|
|||||||
The pool additionally defines how long backup jobs can append data
|
The pool additionally defines how long backup jobs can append data
|
||||||
to a media set. The following settings are possible:
|
to a media set. The following settings are possible:
|
||||||
|
|
||||||
- Try to use the current media set.
|
- Try to use the current media set (``continue``).
|
||||||
|
|
||||||
This setting produce one large media set. While this is very
|
This setting produces one large media set. While this is very
|
||||||
space efficient (deduplication, no unused space), it can lead to
|
space efficient (deduplication, no unused space), it can lead to
|
||||||
long restore times, because restore jobs needs to read all tapes in the
|
long restore times, because restore jobs need to read all tapes in the
|
||||||
set.
|
set.
|
||||||
|
|
||||||
.. NOTE:: Data is fully deduplicated inside a media set. That
|
.. NOTE:: Data is fully deduplicated inside a media set. This
|
||||||
also means that data is randomly distributed over the tapes in
|
also means that data is randomly distributed over the tapes in
|
||||||
the set. So even if you restore a single VM, this may have to
|
the set. Thus, even if you restore a single VM, data may have to be
|
||||||
read data from all tapes inside the media set.
|
read from all tapes inside the media set.
|
||||||
|
|
||||||
Larger media sets are also more error prone, because a single
|
Larger media sets are also more error-prone, because a single
|
||||||
damaged media makes the restore fail.
|
damaged tape makes the restore fail.
|
||||||
|
|
||||||
Usage scenario: Mostly used with tape libraries, and you manually
|
Usage scenario: Mostly used with tape libraries. You manually
|
||||||
trigger new set creation by running a backup job with the
|
trigger new set creation by running a backup job with the
|
||||||
``--export`` option.
|
``--export`` option.
|
||||||
|
|
||||||
.. NOTE:: Retention period starts with the existence of a newer
|
.. NOTE:: Retention period starts with the existence of a newer
|
||||||
media set.
|
media set.
|
||||||
|
|
||||||
- Always create a new media set.
|
- Always create a new media set (``always``).
|
||||||
|
|
||||||
With this setting each backup job creates a new media set. This
|
With this setting, each backup job creates a new media set. This
|
||||||
is less space efficient, because the last media from the last set
|
is less space efficient, because the media from the last set
|
||||||
may not be fully written, leaving the remaining space unused.
|
may not be fully written, leaving the remaining space unused.
|
||||||
|
|
||||||
The advantage is that this produces media sets of minimal
|
The advantage is that this produces media sets of minimal
|
||||||
size. Small set are easier to handle, you can move sets to an
|
size. Small sets are easier to handle, can be moved more conveniently
|
||||||
off-site vault, and restore is much faster.
|
to an off-site vault, and can be restored much faster.
|
||||||
|
|
||||||
.. NOTE:: Retention period starts with the creation time of the
|
.. NOTE:: Retention period starts with the creation time of the
|
||||||
media set.
|
media set.
|
||||||
@ -417,7 +456,7 @@ one media pool, so a job only uses tapes from that pool.
|
|||||||
For example, the value ``weekly`` (or ``Mon *-*-* 00:00:00``)
|
For example, the value ``weekly`` (or ``Mon *-*-* 00:00:00``)
|
||||||
will create a new set each week.
|
will create a new set each week.
|
||||||
|
|
||||||
This balances between space efficency and media count.
|
This balances between space efficiency and media count.
|
||||||
|
|
||||||
.. NOTE:: Retention period starts when the calendar event
|
.. NOTE:: Retention period starts when the calendar event
|
||||||
triggers.
|
triggers.
|
||||||
@ -426,13 +465,13 @@ one media pool, so a job only uses tapes from that pool.
|
|||||||
|
|
||||||
- Required tape is offline (and you use a tape library).
|
- Required tape is offline (and you use a tape library).
|
||||||
|
|
||||||
- Current set contains damaged of retired tapes.
|
- Current set contains damaged or retired tapes.
|
||||||
|
|
||||||
- Media pool encryption changed
|
- Media pool encryption has changed
|
||||||
|
|
||||||
- Database consistency errors, e.g. if the inventory does not
|
- Database consistency errors, for example, if the inventory does not
|
||||||
contain required media info, or contain conflicting infos
|
contain the required media information, or it contains conflicting
|
||||||
(outdated data).
|
information (outdated data).
|
||||||
|
|
||||||
.. topic:: Retention Policy
|
.. topic:: Retention Policy
|
||||||
|
|
||||||
@ -449,40 +488,48 @@ one media pool, so a job only uses tapes from that pool.
|
|||||||
|
|
||||||
.. topic:: Hardware Encryption
|
.. topic:: Hardware Encryption
|
||||||
|
|
||||||
LTO4 (or later) tape drives support hardware encryption. If you
|
LTO-4 (or later) tape drives support hardware encryption. If you
|
||||||
configure the media pool to use encryption, all data written to the
|
configure the media pool to use encryption, all data written to the
|
||||||
tapes is encrypted using the configured key.
|
tapes is encrypted using the configured key.
|
||||||
|
|
||||||
That way, unauthorized users cannot read data from the media,
|
This way, unauthorized users cannot read data from the media,
|
||||||
e.g. if you lose a medium while shipping to an offsite location.
|
for example, if you lose a tape while shipping to an offsite location.
|
||||||
|
|
||||||
.. Note:: If the backup client also encrypts data, data on tape
|
.. Note:: If the backup client also encrypts data, data on the tape
|
||||||
will be double encrypted.
|
will be double encrypted.
|
||||||
|
|
||||||
The password protected key is stored on each media, so it is
|
The password protected key is stored on each medium, so that it is
|
||||||
possible to `restore the key <restore_encryption_key_>`_ using the password. Please make sure
|
possible to `restore the key <tape_restore_encryption_key_>`_ using
|
||||||
you remember the password in case you need to restore the key.
|
the password. Please make sure to remember the password, in case
|
||||||
|
you need to restore the key.
|
||||||
|
|
||||||
|
|
||||||
.. NOTE:: FIXME: Add note about global content namespace. (We do not store
|
.. NOTE:: We use global content namespace, meaning we do not store the
|
||||||
the source datastore, so it is impossible to distinguish
|
source datastore name. Because of this, it is impossible to distinguish
|
||||||
store1:/vm/100 from store2:/vm/100. Please use different media
|
store1:/vm/100 from store2:/vm/100. Please use different media pools
|
||||||
pools if the source is from a different name space)
|
if the sources are from different namespaces with conflicting names
|
||||||
|
(for example, if the sources are from different Proxmox VE clusters).
|
||||||
|
|
||||||
|
|
||||||
The following command creates a new media pool::
|
The following command creates a new media pool:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
// proxmox-tape pool create <name> --drive <string> [OPTIONS]
|
// proxmox-tape pool create <name> --drive <string> [OPTIONS]
|
||||||
|
|
||||||
# proxmox-tape pool create daily --drive mydrive
|
# proxmox-tape pool create daily --drive mydrive
|
||||||
|
|
||||||
|
|
||||||
Additional options can be set later using the update command::
|
Additional options can be set later, using the update command:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape pool update daily --allocation daily --retention 7days
|
# proxmox-tape pool update daily --allocation daily --retention 7days
|
||||||
|
|
||||||
|
|
||||||
To list all configured pools use::
|
To list all configured pools use:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape pool list
|
# proxmox-tape pool list
|
||||||
┌───────┬──────────┬────────────┬───────────┬──────────┐
|
┌───────┬──────────┬────────────┬───────────┬──────────┐
|
||||||
@ -491,58 +538,142 @@ To list all configured pools use::
|
|||||||
│ daily │ mydrive │ daily │ 7days │ │
|
│ daily │ mydrive │ daily │ 7days │ │
|
||||||
└───────┴──────────┴────────────┴───────────┴──────────┘
|
└───────┴──────────┴────────────┴───────────┴──────────┘
|
||||||
|
|
||||||
|
.. _tape_backup_job_config:
|
||||||
|
|
||||||
Tape Jobs
|
Tape Backup Jobs
|
||||||
~~~~~~~~~
|
~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
To automate tape backup, you can configure tape backup jobs which
|
||||||
|
write datastore content to a media pool, based on a specific time schedule.
|
||||||
|
The required settings are:
|
||||||
|
|
||||||
|
- ``store``: The datastore you want to backup
|
||||||
|
|
||||||
|
- ``pool``: The media pool - only tape cartridges from that pool are
|
||||||
|
used.
|
||||||
|
|
||||||
|
- ``drive``: The tape drive.
|
||||||
|
|
||||||
|
- ``schedule``: Job schedule (see :ref:`calendar-event-scheduling`)
|
||||||
|
|
||||||
|
For example, to configure a tape backup job for datastore ``vmstore1``
|
||||||
|
use:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-tape backup-job create job2 --store vmstore1 \
|
||||||
|
--pool yourpool --drive yourdrive --schedule daily
|
||||||
|
|
||||||
|
The backup includes all snapshots from a backup group by default. You can
|
||||||
|
set the ``latest-only`` flag to include only the latest snapshots:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-tape backup-job update job2 --latest-only
|
||||||
|
|
||||||
|
Backup jobs can use email to send tape request notifications or
|
||||||
|
report errors. You can set the notification user with:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-tape backup-job update job2 --notify-user root@pam
|
||||||
|
|
||||||
|
.. Note:: The email address is a property of the user (see :ref:`user_mgmt`).
|
||||||
|
|
||||||
|
It is sometimes useful to eject the tape from the drive after a
|
||||||
|
backup. For a standalone drive, the ``eject-media`` option ejects the
|
||||||
|
tape, making sure that the following backup cannot use the tape
|
||||||
|
(unless someone manually loads the tape again). For tape libraries,
|
||||||
|
this option unloads the tape to a free slot, which provides better
|
||||||
|
dust protection than inside a drive:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-tape backup-job update job2 --eject-media
|
||||||
|
|
||||||
|
.. Note:: For failed jobs, the tape remains in the drive.
|
||||||
|
|
||||||
|
For tape libraries, the ``export-media`` option moves all tapes from
|
||||||
|
the media set to an export slot, making sure that the following backup
|
||||||
|
cannot use the tapes. An operator can pick up those tapes and move them
|
||||||
|
to a vault.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-tape backup-job update job2 --export-media
|
||||||
|
|
||||||
|
.. Note:: The ``export-media`` option can be used to force the start
|
||||||
|
of a new media set, because tapes from the current set are no
|
||||||
|
longer online.
|
||||||
|
|
||||||
|
It is also possible to run backup jobs manually:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-tape backup-job run job2
|
||||||
|
|
||||||
|
To remove a job, please use:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-tape backup-job remove job2
|
||||||
|
|
||||||
|
|
||||||
Administration
|
Administration
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
Many sub-command of the ``proxmox-tape`` command line tools take a
|
Many sub-commands of the ``proxmox-tape`` command line tools take a
|
||||||
parameter called ``--drive``, which specifies the tape drive you want
|
parameter called ``--drive``, which specifies the tape drive you want
|
||||||
to work on. For convenience, you can set that in an environment
|
to work on. For convenience, you can set this in an environment
|
||||||
variable::
|
variable:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# export PROXMOX_TAPE_DRIVE=mydrive
|
# export PROXMOX_TAPE_DRIVE=mydrive
|
||||||
|
|
||||||
You can then omit the ``--drive`` parameter from the command. If the
|
You can then omit the ``--drive`` parameter from the command. If the
|
||||||
drive has an associated changer device, you may also omit the changer
|
drive has an associated changer device, you may also omit the changer
|
||||||
parameter from commands that need a changer device, for example::
|
parameter from commands that need a changer device, for example:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape changer status
|
# proxmox-tape changer status
|
||||||
|
|
||||||
Should displays the changer status of the changer device associated with
|
should display the changer status of the changer device associated with
|
||||||
drive ``mydrive``.
|
drive ``mydrive``.
|
||||||
|
|
||||||
|
|
||||||
Label Tapes
|
Label Tapes
|
||||||
~~~~~~~~~~~
|
~~~~~~~~~~~
|
||||||
|
|
||||||
By default, tape cartidges all looks the same, so you need to put a
|
By default, tape cartridges all look the same, so you need to put a
|
||||||
label on them for unique identification. So first, put a sticky paper
|
label on them for unique identification. First, put a sticky paper
|
||||||
label with some human readable text on the cartridge.
|
label with some human readable text on the cartridge.
|
||||||
|
|
||||||
If you use a `Tape Library`_, you should use an 8 character string
|
If you use a `Tape Library`_, you should use an 8 character string
|
||||||
encoded as `Code 39`_, as definded in the `LTO Ultrium Cartridge Label
|
encoded as `Code 39`_, as defined in the `LTO Ultrium Cartridge Label
|
||||||
Specification`_. You can either bye such barcode labels from your
|
Specification`_. You can either buy such barcode labels from your
|
||||||
cartidge vendor, or print them yourself. You can use our `LTO Barcode
|
cartridge vendor, or print them yourself. You can use our `LTO Barcode
|
||||||
Generator`_ App for that.
|
Generator`_ app to print them.
|
||||||
|
|
||||||
Next, you need to write that same label text to the tape, so that the
|
Next, you need to write that same label text to the tape, so that the
|
||||||
software can uniquely identify the tape too.
|
software can uniquely identify the tape too.
|
||||||
|
|
||||||
For a standalone drive, manually insert the new tape cartidge into the
|
For a standalone drive, manually insert the new tape cartridge into the
|
||||||
drive and run::
|
drive and run:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape label --changer-id <label-text> [--pool <pool-name>]
|
# proxmox-tape label --changer-id <label-text> [--pool <pool-name>]
|
||||||
|
|
||||||
You may omit the ``--pool`` argument to allow the tape to be used by any pool.
|
You may omit the ``--pool`` argument to allow the tape to be used by any pool.
|
||||||
|
|
||||||
.. Note:: For safety reasons, this command fails if the tape contain
|
.. Note:: For safety reasons, this command fails if the tape contains
|
||||||
any data. If you want to overwrite it anways, erase the tape first.
|
any data. If you want to overwrite it anyway, erase the tape first.
|
||||||
|
|
||||||
You can verify success by reading back the label::
|
You can verify success by reading back the label:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape read-label
|
# proxmox-tape read-label
|
||||||
┌─────────────────┬──────────────────────────────────────┐
|
┌─────────────────┬──────────────────────────────────────┐
|
||||||
@ -566,7 +697,9 @@ You can verify success by reading back the label::
|
|||||||
|
|
||||||
If you have a tape library, apply the sticky barcode label to the tape
|
If you have a tape library, apply the sticky barcode label to the tape
|
||||||
cartridges first. Then load those empty tapes into the library. You
|
cartridges first. Then load those empty tapes into the library. You
|
||||||
can then label all unlabeled tapes with a single command::
|
can then label all unlabeled tapes with a single command:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape barcode-label [--pool <pool-name>]
|
# proxmox-tape barcode-label [--pool <pool-name>]
|
||||||
|
|
||||||
@ -574,7 +707,9 @@ can then label all unlabeled tapes with a single command::
|
|||||||
Run Tape Backups
|
Run Tape Backups
|
||||||
~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
To manually run a backup job use::
|
To manually run a backup job use:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape backup <store> <pool> [OPTIONS]
|
# proxmox-tape backup <store> <pool> [OPTIONS]
|
||||||
|
|
||||||
@ -583,11 +718,11 @@ The following options are available:
|
|||||||
--eject-media Eject media upon job completion.
|
--eject-media Eject media upon job completion.
|
||||||
|
|
||||||
It is normally good practice to eject the tape after use. This unmounts the
|
It is normally good practice to eject the tape after use. This unmounts the
|
||||||
tape from the drive and prevents the tape from getting dirty with dust.
|
tape from the drive and prevents the tape from getting dusty.
|
||||||
|
|
||||||
--export-media-set Export media set upon job completion.
|
--export-media-set Export media set upon job completion.
|
||||||
|
|
||||||
After a sucessful backup job, this moves all tapes from the used
|
After a successful backup job, this moves all tapes from the used
|
||||||
media set into import-export slots. The operator can then pick up
|
media set into import-export slots. The operator can then pick up
|
||||||
those tapes and move them to a media vault.
|
those tapes and move them to a media vault.
|
||||||
|
|
||||||
@ -602,7 +737,9 @@ catalogs, you need to restore them first. Please note that you need
|
|||||||
the catalog to find your data, but restoring a complete media-set does
|
the catalog to find your data, but restoring a complete media-set does
|
||||||
not need media catalogs.
|
not need media catalogs.
|
||||||
|
|
||||||
The following command shows the media content (from catalog)::
|
The following command lists the media content (from catalog):
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape media content
|
# proxmox-tape media content
|
||||||
┌────────────┬──────┬──────────────────────────┬────────┬────────────────────────────────┬──────────────────────────────────────┐
|
┌────────────┬──────┬──────────────────────────┬────────┬────────────────────────────────┬──────────────────────────────────────┐
|
||||||
@ -615,7 +752,9 @@ The following command shows the media content (from catalog)::
|
|||||||
|
|
||||||
|
|
||||||
A restore job reads the data from the media set and moves it back to
|
A restore job reads the data from the media set and moves it back to
|
||||||
data disk (datastore)::
|
data disk (datastore):
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
// proxmox-tape restore <media-set-uuid> <datastore>
|
// proxmox-tape restore <media-set-uuid> <datastore>
|
||||||
|
|
||||||
@ -633,14 +772,18 @@ Restore Catalog
|
|||||||
Encryption Key Management
|
Encryption Key Management
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Creating a new encryption key::
|
Creating a new encryption key:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape key create --hint "tape pw 2020"
|
# proxmox-tape key create --hint "tape pw 2020"
|
||||||
Tape Encryption Key Password: **********
|
Tape Encryption Key Password: **********
|
||||||
Verify Password: **********
|
Verify Password: **********
|
||||||
"14:f8:79:b9:f5:13:e5:dc:bf:b6:f9:88:48:51:81:dc:79:bf:a0:22:68:47:d1:73:35:2d:b6:20:e1:7f:f5:0f"
|
"14:f8:79:b9:f5:13:e5:dc:bf:b6:f9:88:48:51:81:dc:79:bf:a0:22:68:47:d1:73:35:2d:b6:20:e1:7f:f5:0f"
|
||||||
|
|
||||||
List existing encryption keys::
|
List existing encryption keys:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape key list
|
# proxmox-tape key list
|
||||||
┌───────────────────────────────────────────────────┬───────────────┐
|
┌───────────────────────────────────────────────────┬───────────────┐
|
||||||
@ -649,7 +792,9 @@ List existing encryption keys::
|
|||||||
│ 14:f8:79:b9:f5:13:e5:dc: ... :b6:20:e1:7f:f5:0f │ tape pw 2020 │
|
│ 14:f8:79:b9:f5:13:e5:dc: ... :b6:20:e1:7f:f5:0f │ tape pw 2020 │
|
||||||
└───────────────────────────────────────────────────┴───────────────┘
|
└───────────────────────────────────────────────────┴───────────────┘
|
||||||
|
|
||||||
To show encryption key details::
|
To show encryption key details:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape key show 14:f8:79:b9:f5:13:e5:dc:...:b6:20:e1:7f:f5:0f
|
# proxmox-tape key show 14:f8:79:b9:f5:13:e5:dc:...:b6:20:e1:7f:f5:0f
|
||||||
┌─────────────┬───────────────────────────────────────────────┐
|
┌─────────────┬───────────────────────────────────────────────┐
|
||||||
@ -668,37 +813,43 @@ To show encryption key details::
|
|||||||
|
|
||||||
The ``paperkey`` subcommand can be used to create a QR encoded
|
The ``paperkey`` subcommand can be used to create a QR encoded
|
||||||
version of a tape encryption key. The following command sends the output of the
|
version of a tape encryption key. The following command sends the output of the
|
||||||
``paperkey`` command to a text file, for easy printing::
|
``paperkey`` command to a text file, for easy printing:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
proxmox-tape key paperkey <fingerprint> --output-format text > qrkey.txt
|
proxmox-tape key paperkey <fingerprint> --output-format text > qrkey.txt
|
||||||
|
|
||||||
|
|
||||||
.. _restore_encryption_key:
|
.. _tape_restore_encryption_key:
|
||||||
|
|
||||||
Restoring Encryption Keys
|
Restoring Encryption Keys
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
You can restore the encryption key from the tape, using the password
|
You can restore the encryption key from the tape, using the password
|
||||||
used to generate the key. First, load the tape you want to restore
|
used to generate the key. First, load the tape you want to restore
|
||||||
into the drive. Then run::
|
into the drive. Then run:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape key restore
|
# proxmox-tape key restore
|
||||||
Tape Encryption Key Password: ***********
|
Tape Encryption Key Password: ***********
|
||||||
|
|
||||||
If the password is correct, the key will get imported to the
|
If the password is correct, the key will get imported to the
|
||||||
database. Further restore jobs automatically use any availbale key.
|
database. Further restore jobs automatically use any available key.
|
||||||
|
|
||||||
|
|
||||||
Tape Cleaning
|
Tape Cleaning
|
||||||
~~~~~~~~~~~~~
|
~~~~~~~~~~~~~
|
||||||
|
|
||||||
LTO tape drives requires regular cleaning. This is done by loading a
|
LTO tape drives require regular cleaning. This is done by loading a
|
||||||
cleaning cartridge into the drive, which is a manual task for
|
cleaning cartridge into the drive, which is a manual task for
|
||||||
standalone drives.
|
standalone drives.
|
||||||
|
|
||||||
For tape libraries, cleaning cartridges are identified using special
|
For tape libraries, cleaning cartridges are identified using special
|
||||||
labels starting with letters "CLN". For example, our tape library has a
|
labels starting with letters "CLN". For example, our tape library has a
|
||||||
cleaning cartridge inside slot 3::
|
cleaning cartridge inside slot 3:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape changer status sl3
|
# proxmox-tape changer status sl3
|
||||||
┌───────────────┬──────────┬────────────┬─────────────┐
|
┌───────────────┬──────────┬────────────┬─────────────┐
|
||||||
@ -715,7 +866,9 @@ cleaning cartridge inside slot 3::
|
|||||||
│ ... │ ... │ │ │
|
│ ... │ ... │ │ │
|
||||||
└───────────────┴──────────┴────────────┴─────────────┘
|
└───────────────┴──────────┴────────────┴─────────────┘
|
||||||
|
|
||||||
To initiate a cleaning operation simply run::
|
To initiate a cleaning operation simply run:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape clean
|
# proxmox-tape clean
|
||||||
|
|
||||||
@ -730,3 +883,78 @@ This command does the following:
|
|||||||
- run drive cleaning operation
|
- run drive cleaning operation
|
||||||
|
|
||||||
- unload the cleaning tape (to slot 3)
|
- unload the cleaning tape (to slot 3)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
Configuration Files
|
||||||
|
-------------------
|
||||||
|
|
||||||
|
``media-pool.cfg``
|
||||||
|
~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
File Format
|
||||||
|
^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/media-pool/format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/media-pool/config.rst
|
||||||
|
|
||||||
|
|
||||||
|
``tape.cfg``
|
||||||
|
~~~~~~~~~~~~
|
||||||
|
|
||||||
|
File Format
|
||||||
|
^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/tape/format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/tape/config.rst
|
||||||
|
|
||||||
|
|
||||||
|
``tape-job.cfg``
|
||||||
|
~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
File Format
|
||||||
|
^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/tape-job/format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/tape-job/config.rst
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
Command Syntax
|
||||||
|
--------------
|
||||||
|
|
||||||
|
``proxmox-tape``
|
||||||
|
----------------
|
||||||
|
|
||||||
|
.. include:: proxmox-tape/synopsis.rst
|
||||||
|
|
||||||
|
|
||||||
|
``pmt``
|
||||||
|
-------
|
||||||
|
|
||||||
|
.. include:: pmt/options.rst
|
||||||
|
|
||||||
|
....
|
||||||
|
|
||||||
|
.. include:: pmt/synopsis.rst
|
||||||
|
|
||||||
|
|
||||||
|
``pmtx``
|
||||||
|
--------
|
||||||
|
|
||||||
|
.. include:: pmtx/synopsis.rst
|
||||||
|
166
docs/technical-overview.rst
Normal file
166
docs/technical-overview.rst
Normal file
@ -0,0 +1,166 @@
|
|||||||
|
.. _tech_design_overview:
|
||||||
|
|
||||||
|
Technical Overview
|
||||||
|
==================
|
||||||
|
|
||||||
|
Datastores
|
||||||
|
----------
|
||||||
|
|
||||||
|
A Datastore is the logical place where :ref:`Backup Snapshots
|
||||||
|
<term_backup_snapshot>` and their chunks are stored. Snapshots consist of a
|
||||||
|
manifest, blobs, dynamic- and fixed-indexes (see :ref:`terms`), and are
|
||||||
|
stored in the following directory structure:
|
||||||
|
|
||||||
|
<datastore-root>/<type>/<id>/<time>/
|
||||||
|
|
||||||
|
The deduplication of datastores is based on reusing chunks, which are
|
||||||
|
referenced by the indexes in a backup snapshot. This means that multiple
|
||||||
|
indexes can reference the same chunks, reducing the amount of space needed to
|
||||||
|
contain the data (even across backup snapshots).
|
||||||
|
|
||||||
|
Chunks
|
||||||
|
------
|
||||||
|
|
||||||
|
A chunk is some (possibly encrypted) data with a CRC-32 checksum at the end and
|
||||||
|
a type marker at the beginning. It is identified by the SHA-256 checksum of its
|
||||||
|
content.
|
||||||
|
|
||||||
|
To generate such chunks, backup data is split either into fixed-size or
|
||||||
|
dynamically sized chunks. The same content will be hashed to the same checksum.
|
||||||
|
|
||||||
|
The chunks of a datastore are found in
|
||||||
|
|
||||||
|
<datastore-root>/.chunks/
|
||||||
|
|
||||||
|
This chunk directory is further subdivided by the first four hexadecimal digits of the chunk's
|
||||||
|
checksum, so the chunk with the checksum
|
||||||
|
|
||||||
|
a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b
|
||||||
|
|
||||||
|
lives in
|
||||||
|
|
||||||
|
<datastore-root>/.chunks/a342/
|
||||||
|
|
||||||
|
This is done to reduce the number of files per directory, as having many files
|
||||||
|
per directory can be bad for file system performance.
|
||||||
|
|
||||||
|
These chunk directories ('0000'-'ffff') will be preallocated when a datastore
|
||||||
|
is created.
|
||||||
|
|
||||||
|
Fixed-sized Chunks
|
||||||
|
^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
For block based backups (like VMs), fixed-sized chunks are used. The content
|
||||||
|
(disk image), is split into chunks of the same length (typically 4 MiB).
|
||||||
|
|
||||||
|
This works very well for VM images, since the file system on the guest most
|
||||||
|
often tries to allocate files in contiguous pieces, so new files get new
|
||||||
|
blocks, and changing existing files changes only their own blocks.
|
||||||
|
|
||||||
|
As an optimization, VMs in `Proxmox VE`_ can make use of 'dirty bitmaps', which
|
||||||
|
can track the changed blocks of an image. Since these bitmaps are also a
|
||||||
|
representation of the image split into chunks, there is a direct relation
|
||||||
|
between dirty blocks of the image and chunks which need to get uploaded, so
|
||||||
|
only modified chunks of the disk have to be uploaded for a backup.
|
||||||
|
|
||||||
|
Since the image is always split into chunks of the same size, unchanged blocks
|
||||||
|
will result in identical checksums for those chunks, so such chunks do not need
|
||||||
|
to be backed up again. This way storage snapshots are not needed to find the
|
||||||
|
changed blocks.
|
||||||
|
|
||||||
|
For consistency, `Proxmox VE`_ uses a QEMU internal snapshot mechanism, that
|
||||||
|
does not rely on storage snapshots either.
|
||||||
|
|
||||||
|
Dynamically sized Chunks
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
If one does not want to backup block-based systems but rather file-based
|
||||||
|
systems, using fixed-sized chunks is not a good idea, since every time a file
|
||||||
|
would change in size, the remaining data gets shifted around and this would
|
||||||
|
result in many chunks changing, reducing the amount of deduplication.
|
||||||
|
|
||||||
|
To improve this, `Proxmox Backup`_ Server uses dynamically sized chunks
|
||||||
|
instead. Instead of splitting an image into fixed sizes, it first generates a
|
||||||
|
consistent file archive (:ref:`pxar <pxar-format>`) and uses a rolling hash
|
||||||
|
over this on-the-fly generated archive to calculate chunk boundaries.
|
||||||
|
|
||||||
|
We use a variant of Buzhash which is a cyclic polynomial algorithm. It works
|
||||||
|
by continuously calculating a checksum while iterating over the data, and on
|
||||||
|
certain conditions it triggers a hash boundary.
|
||||||
|
|
||||||
|
Assuming that most files of the system that is to be backed up have not
|
||||||
|
changed, eventually the algorithm triggers the boundary on the same data as a
|
||||||
|
previous backup, resulting in chunks that can be reused.
|
||||||
|
|
||||||
|
Encrypted Chunks
|
||||||
|
^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Encrypted chunks are a special case. Both fixed- and dynamically sized chunks
|
||||||
|
can be encrypted, and they are handled in a slightly different manner than
|
||||||
|
normal chunks.
|
||||||
|
|
||||||
|
The hashes of encrypted chunks are calculated not with the actual (encrypted)
|
||||||
|
chunk content, but with the plaintext content concatenated with the encryption
|
||||||
|
key. This way, two chunks of the same data encrypted with different keys
|
||||||
|
generate two different checksums and no collisions occur for multiple
|
||||||
|
encryption keys.
|
||||||
|
|
||||||
|
This is done to speed up the client part of the backup, since it only needs to
|
||||||
|
encrypt chunks that are actually getting uploaded. Chunks that exist already in
|
||||||
|
the previous backup do not need to be encrypted and uploaded.
|
||||||
|
|
||||||
|
Caveats and Limitations
|
||||||
|
-----------------------
|
||||||
|
|
||||||
|
Notes on hash collisions
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Every hashing algorithm has a chance to produce collisions, meaning two (or
|
||||||
|
more) inputs generate the same checksum. For SHA-256, this chance is
|
||||||
|
negligible. To calculate such a collision, one can use the ideas of the
|
||||||
|
'birthday problem' from probability theory. For big numbers, this is actually
|
||||||
|
infeasible to calculate with regular computers, but there is a good
|
||||||
|
approximation:
|
||||||
|
|
||||||
|
.. math::
|
||||||
|
|
||||||
|
p(n, d) = 1 - e^{-n^2/(2d)}
|
||||||
|
|
||||||
|
Where `n` is the number of tries, and `d` is the number of possibilities.
|
||||||
|
For a concrete example, let's assume a large datastore of 1 PiB and an average
|
||||||
|
chunk size of 4 MiB. That means :math:`n = 268435456` tries, and :math:`d =
|
||||||
|
2^{256}` possibilities. Inserting those values in the formula from earlier you
|
||||||
|
will see that the probability of a collision in that scenario is:
|
||||||
|
|
||||||
|
.. math::
|
||||||
|
|
||||||
|
3.1115 * 10^{-61}
|
||||||
|
|
||||||
|
For context, in a lottery game of guessing 6 out of 45, the chance to correctly
|
||||||
|
guess all 6 numbers is only :math:`1.2277 * 10^{-7}`, that means the chance of
|
||||||
|
collision is about the same as winning 13 such lotto games *in a row*.
|
||||||
|
|
||||||
|
In conclusion, it is extremely unlikely that such a collision would occur by
|
||||||
|
accident in a normal datastore.
|
||||||
|
|
||||||
|
Additionally, SHA-256 is prone to length extension attacks, but since there is
|
||||||
|
an upper limit for how big the chunks are, this is not a problem, since a
|
||||||
|
potential attacker cannot arbitrarily add content to the data beyond that
|
||||||
|
limit.
|
||||||
|
|
||||||
|
File-based Backup
|
||||||
|
^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Since dynamically sized chunks (for file-based backups) are created on a custom
|
||||||
|
archive format (pxar) and not over the files directly, there is no relation
|
||||||
|
between files and the chunks. This means that the Proxmox Backup client has to
|
||||||
|
read all files again for every backup, otherwise it would not be possible to
|
||||||
|
generate a consistent independent pxar archive where the original chunks can be
|
||||||
|
reused. Note that even then, only new or changed chunks will be uploaded.
|
||||||
|
|
||||||
|
Verification of encrypted chunks
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
For encrypted chunks, only the checksum of the original (plaintext) data is
|
||||||
|
available, making it impossible for the server (without the encryption key) to
|
||||||
|
verify its content against it. Instead only the CRC-32 checksum gets checked.
|
@ -1,3 +1,5 @@
|
|||||||
|
.. _terms:
|
||||||
|
|
||||||
Terminology
|
Terminology
|
||||||
===========
|
===========
|
||||||
|
|
||||||
@ -99,6 +101,7 @@ Backup Group
|
|||||||
The tuple ``<type>/<ID>`` is called a backup group. Such a group
|
The tuple ``<type>/<ID>`` is called a backup group. Such a group
|
||||||
may contain one or more backup snapshots.
|
may contain one or more backup snapshots.
|
||||||
|
|
||||||
|
.. _term_backup_snapshot:
|
||||||
|
|
||||||
Backup Snapshot
|
Backup Snapshot
|
||||||
---------------
|
---------------
|
||||||
|
@ -286,26 +286,26 @@ you can use the ``proxmox-backup-manager user permission`` command:
|
|||||||
- Datastore.Backup (*)
|
- Datastore.Backup (*)
|
||||||
|
|
||||||
.. _user_tfa:
|
.. _user_tfa:
|
||||||
|
|
||||||
Two-factor authentication
|
Two-factor authentication
|
||||||
-------------------------
|
-------------------------
|
||||||
|
|
||||||
Introduction
|
Introduction
|
||||||
~~~~~~~~~~~~
|
~~~~~~~~~~~~
|
||||||
|
|
||||||
Simple authentication requires only secret piece of evidence (one factor) that
|
With simple authentication, only a password (single factor) is required to
|
||||||
a user can successfully claim a identiy (authenticate), for example, that you
|
successfully claim an identity (authenticate), for example, to be able to log in
|
||||||
are allowed to login as `root@pam` on a specific Proxmox Backup Server.
|
as `root@pam` on a specific instance of Proxmox Backup Server. In this case, if
|
||||||
If the password gets stolen, or leaked in another way, anybody can use it to
|
the password gets stolen or leaked, anybody can use it to log in - even if they
|
||||||
login - even if they should not be allowed to do so.
|
should not be allowed to do so.
|
||||||
|
|
||||||
With Two-factor authentication (TFA) a user is asked for an additional factor,
|
With two-factor authentication (TFA), a user is asked for an additional factor
|
||||||
to proof his authenticity. The extra factor is different from a password
|
to verify their authenticity. Rather than relying on something only the user
|
||||||
(something only the user knows), it is something only the user has, for example
|
knows (a password), this extra factor requires something only the user has, for
|
||||||
a piece of hardware (security key) or an secret saved on the users smartphone.
|
example, a piece of hardware (security key) or a secret saved on the user's
|
||||||
|
smartphone. This prevents a remote user from gaining unauthorized access to an
|
||||||
This means that a remote user can never get hold on such a physical object. So,
|
account, as even if they have the password, they will not have access to the
|
||||||
even if that user would know your password they cannot successfully
|
physical object (second factor).
|
||||||
authenticate as you, as your second factor is missing.
|
|
||||||
|
|
||||||
.. image:: images/screenshots/pbs-gui-tfa-login.png
|
.. image:: images/screenshots/pbs-gui-tfa-login.png
|
||||||
:align: right
|
:align: right
|
||||||
@ -314,30 +314,33 @@ authenticate as you, as your second factor is missing.
|
|||||||
Available Second Factors
|
Available Second Factors
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
You can setup more than one second factor to avoid that losing your smartphone
|
You can set up multiple second factors, in order to avoid a situation in which
|
||||||
or security key permanently locks you out from your account.
|
losing your smartphone or security key locks you out of your account
|
||||||
|
permanently.
|
||||||
|
|
||||||
There are three different two-factor authentication methods supported:
|
Proxmox Backup Server supports three different two-factor authentication
|
||||||
|
methods:
|
||||||
|
|
||||||
* TOTP (`Time-based One-Time Password <https://en.wikipedia.org/wiki/Time-based_One-Time_Password>`_).
|
* TOTP (`Time-based One-Time Password <https://en.wikipedia.org/wiki/Time-based_One-Time_Password>`_).
|
||||||
A short code derived from a shared secret and the current time, it switches
|
A short code derived from a shared secret and the current time, it changes
|
||||||
every 30 seconds.
|
every 30 seconds.
|
||||||
|
|
||||||
* WebAuthn (`Web Authentication <https://en.wikipedia.org/wiki/WebAuthn>`_).
|
* WebAuthn (`Web Authentication <https://en.wikipedia.org/wiki/WebAuthn>`_).
|
||||||
A general standard for authentication. It is implemented by various security
|
A general standard for authentication. It is implemented by various security
|
||||||
devices like hardware keys or trusted platform modules (TPM) from a computer
|
devices, like hardware keys or trusted platform modules (TPM) from a computer
|
||||||
or smart phone.
|
or smart phone.
|
||||||
|
|
||||||
* Single use Recovery Keys. A list of keys which should either be printed out
|
* Single use Recovery Keys. A list of keys which should either be printed out
|
||||||
and locked in a secure fault or saved digitally in a electronic vault.
|
and locked in a secure place or saved digitally in an electronic vault.
|
||||||
Each key can be used only once, they are perfect for ensuring you are not
|
Each key can be used only once. These are perfect for ensuring that you are
|
||||||
locked out even if all of your other second factors are lost or corrupt.
|
not locked out, even if all of your other second factors are lost or corrupt.
|
||||||
|
|
||||||
|
|
||||||
Setup
|
Setup
|
||||||
~~~~~
|
~~~~~
|
||||||
|
|
||||||
.. _user_tfa_setup_totp:
|
.. _user_tfa_setup_totp:
|
||||||
|
|
||||||
TOTP
|
TOTP
|
||||||
^^^^
|
^^^^
|
||||||
|
|
||||||
@ -345,15 +348,16 @@ TOTP
|
|||||||
:align: right
|
:align: right
|
||||||
:alt: Add a new user
|
:alt: Add a new user
|
||||||
|
|
||||||
There is not server setup required, simply install a TOTP app on your
|
There is no server setup required. Simply install a TOTP app on your
|
||||||
smartphone (for example, `FreeOTP <https://freeotp.github.io/>`_) and use the
|
smartphone (for example, `FreeOTP <https://freeotp.github.io/>`_) and use the
|
||||||
Proxmox Backup Server web-interface to add a TOTP factor.
|
Proxmox Backup Server web-interface to add a TOTP factor.
|
||||||
|
|
||||||
.. _user_tfa_setup_webauthn:
|
.. _user_tfa_setup_webauthn:
|
||||||
|
|
||||||
WebAuthn
|
WebAuthn
|
||||||
^^^^^^^^
|
^^^^^^^^
|
||||||
|
|
||||||
For WebAuthn to work you need to have two things:
|
For WebAuthn to work, you need to have two things:
|
||||||
|
|
||||||
* a trusted HTTPS certificate (for example, by using `Let's Encrypt
|
* a trusted HTTPS certificate (for example, by using `Let's Encrypt
|
||||||
<https://pbs.proxmox.com/wiki/index.php/HTTPS_Certificate_Configuration>`_)
|
<https://pbs.proxmox.com/wiki/index.php/HTTPS_Certificate_Configuration>`_)
|
||||||
@ -361,10 +365,11 @@ For WebAuthn to work you need to have two things:
|
|||||||
* setup the WebAuthn configuration (see *Configuration -> Authentication* in the
|
* setup the WebAuthn configuration (see *Configuration -> Authentication* in the
|
||||||
Proxmox Backup Server web-interface). This can be auto-filled in most setups.
|
Proxmox Backup Server web-interface). This can be auto-filled in most setups.
|
||||||
|
|
||||||
Once you fullfilled both of those requirements, you can add a WebAuthn
|
Once you have fulfilled both of these requirements, you can add a WebAuthn
|
||||||
configuration in the *Access Control* panel.
|
configuration in the *Access Control* panel.
|
||||||
|
|
||||||
.. _user_tfa_setup_recovery_keys:
|
.. _user_tfa_setup_recovery_keys:
|
||||||
|
|
||||||
Recovery Keys
|
Recovery Keys
|
||||||
^^^^^^^^^^^^^
|
^^^^^^^^^^^^^
|
||||||
|
|
||||||
@ -372,7 +377,7 @@ Recovery Keys
|
|||||||
:align: right
|
:align: right
|
||||||
:alt: Add a new user
|
:alt: Add a new user
|
||||||
|
|
||||||
Recovery key codes do not need any preparation, you can simply create a set of
|
Recovery key codes do not need any preparation; you can simply create a set of
|
||||||
recovery keys in the *Access Control* panel.
|
recovery keys in the *Access Control* panel.
|
||||||
|
|
||||||
.. note:: There can only be one set of single-use recovery keys per user at any
|
.. note:: There can only be one set of single-use recovery keys per user at any
|
||||||
@ -381,7 +386,7 @@ recovery keys in the *Access Control* panel.
|
|||||||
TFA and Automated Access
|
TFA and Automated Access
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Two-factor authentication is only implemented for the web-interface, you should
|
Two-factor authentication is only implemented for the web-interface. You should
|
||||||
use :ref:`API Tokens <user_tokens>` for all other use cases, especially
|
use :ref:`API Tokens <user_tokens>` for all other use cases, especially
|
||||||
non-interactive ones (for example, adding a Proxmox Backup server to Proxmox VE
|
non-interactive ones (for example, adding a Proxmox Backup Server to Proxmox VE
|
||||||
as a storage).
|
as a storage).
|
||||||
|
@ -15,19 +15,19 @@ fn extract_acl_node_data(
|
|||||||
path: &str,
|
path: &str,
|
||||||
list: &mut Vec<AclListItem>,
|
list: &mut Vec<AclListItem>,
|
||||||
exact: bool,
|
exact: bool,
|
||||||
token_user: &Option<Authid>,
|
auth_id_filter: &Option<Authid>,
|
||||||
) {
|
) {
|
||||||
// tokens can't have tokens, so we can early return
|
// tokens can't have tokens, so we can early return
|
||||||
if let Some(token_user) = token_user {
|
if let Some(auth_id_filter) = auth_id_filter {
|
||||||
if token_user.is_token() {
|
if auth_id_filter.is_token() {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for (user, roles) in &node.users {
|
for (user, roles) in &node.users {
|
||||||
if let Some(token_user) = token_user {
|
if let Some(auth_id_filter) = auth_id_filter {
|
||||||
if !user.is_token()
|
if !user.is_token()
|
||||||
|| user.user() != token_user.user() {
|
|| user.user() != auth_id_filter.user() {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -43,7 +43,7 @@ fn extract_acl_node_data(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
for (group, roles) in &node.groups {
|
for (group, roles) in &node.groups {
|
||||||
if token_user.is_some() {
|
if auth_id_filter.is_some() {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -62,7 +62,7 @@ fn extract_acl_node_data(
|
|||||||
}
|
}
|
||||||
for (comp, child) in &node.children {
|
for (comp, child) in &node.children {
|
||||||
let new_path = format!("{}/{}", path, comp);
|
let new_path = format!("{}/{}", path, comp);
|
||||||
extract_acl_node_data(child, &new_path, list, exact, token_user);
|
extract_acl_node_data(child, &new_path, list, exact, auth_id_filter);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -181,7 +181,7 @@ fn get_tfa_entry(userid: Userid, id: String) -> Result<TypedTfaInfo, Error> {
|
|||||||
|
|
||||||
if let Some(user_data) = crate::config::tfa::read()?.users.remove(&userid) {
|
if let Some(user_data) = crate::config::tfa::read()?.users.remove(&userid) {
|
||||||
match {
|
match {
|
||||||
// scope to prevent the temprary iter from borrowing across the whole match
|
// scope to prevent the temporary iter from borrowing across the whole match
|
||||||
let entry = tfa_id_iter(&user_data).find(|(_ty, _index, entry_id)| id == *entry_id);
|
let entry = tfa_id_iter(&user_data).find(|(_ty, _index, entry_id)| id == *entry_id);
|
||||||
entry.map(|(ty, index, _)| (ty, index))
|
entry.map(|(ty, index, _)| (ty, index))
|
||||||
} {
|
} {
|
||||||
@ -240,7 +240,7 @@ fn get_tfa_entry(userid: Userid, id: String) -> Result<TypedTfaInfo, Error> {
|
|||||||
]),
|
]),
|
||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
/// Get a single TFA entry.
|
/// Delete a single TFA entry.
|
||||||
fn delete_tfa(
|
fn delete_tfa(
|
||||||
userid: Userid,
|
userid: Userid,
|
||||||
id: String,
|
id: String,
|
||||||
@ -259,7 +259,7 @@ fn delete_tfa(
|
|||||||
.ok_or_else(|| http_err!(NOT_FOUND, "no such entry: {}/{}", userid, id))?;
|
.ok_or_else(|| http_err!(NOT_FOUND, "no such entry: {}/{}", userid, id))?;
|
||||||
|
|
||||||
match {
|
match {
|
||||||
// scope to prevent the temprary iter from borrowing across the whole match
|
// scope to prevent the temporary iter from borrowing across the whole match
|
||||||
let entry = tfa_id_iter(&user_data).find(|(_, _, entry_id)| id == *entry_id);
|
let entry = tfa_id_iter(&user_data).find(|(_, _, entry_id)| id == *entry_id);
|
||||||
entry.map(|(ty, index, _)| (ty, index))
|
entry.map(|(ty, index, _)| (ty, index))
|
||||||
} {
|
} {
|
||||||
|
@ -3,8 +3,6 @@
|
|||||||
use std::collections::HashSet;
|
use std::collections::HashSet;
|
||||||
use std::ffi::OsStr;
|
use std::ffi::OsStr;
|
||||||
use std::os::unix::ffi::OsStrExt;
|
use std::os::unix::ffi::OsStrExt;
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
use std::pin::Pin;
|
|
||||||
|
|
||||||
use anyhow::{bail, format_err, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
use futures::*;
|
use futures::*;
|
||||||
@ -22,19 +20,20 @@ use proxmox::api::schema::*;
|
|||||||
use proxmox::tools::fs::{replace_file, CreateOptions};
|
use proxmox::tools::fs::{replace_file, CreateOptions};
|
||||||
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
|
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
|
||||||
|
|
||||||
use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
|
use pxar::accessor::aio::Accessor;
|
||||||
use pxar::EntryKind;
|
use pxar::EntryKind;
|
||||||
|
|
||||||
use crate::api2::types::*;
|
use crate::api2::types::*;
|
||||||
use crate::api2::node::rrd::create_value_from_rrd;
|
use crate::api2::node::rrd::create_value_from_rrd;
|
||||||
|
use crate::api2::helpers;
|
||||||
use crate::backup::*;
|
use crate::backup::*;
|
||||||
use crate::config::datastore;
|
use crate::config::datastore;
|
||||||
use crate::config::cached_user_info::CachedUserInfo;
|
use crate::config::cached_user_info::CachedUserInfo;
|
||||||
|
use crate::pxar::create_zip;
|
||||||
|
|
||||||
use crate::server::{jobstate::Job, WorkerTask};
|
use crate::server::{jobstate::Job, WorkerTask};
|
||||||
use crate::tools::{
|
use crate::tools::{
|
||||||
self,
|
self,
|
||||||
zip::{ZipEncoder, ZipEntry},
|
|
||||||
AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
|
AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -1294,7 +1293,7 @@ pub fn catalog(
|
|||||||
backup_time: i64,
|
backup_time: i64,
|
||||||
filepath: String,
|
filepath: String,
|
||||||
rpcenv: &mut dyn RpcEnvironment,
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<Value, Error> {
|
) -> Result<Vec<ArchiveEntry>, Error> {
|
||||||
let datastore = DataStore::lookup_datastore(&store)?;
|
let datastore = DataStore::lookup_datastore(&store)?;
|
||||||
|
|
||||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
@ -1326,112 +1325,14 @@ pub fn catalog(
|
|||||||
let reader = BufferedDynamicReader::new(index, chunk_reader);
|
let reader = BufferedDynamicReader::new(index, chunk_reader);
|
||||||
|
|
||||||
let mut catalog_reader = CatalogReader::new(reader);
|
let mut catalog_reader = CatalogReader::new(reader);
|
||||||
let mut current = catalog_reader.root()?;
|
|
||||||
let mut components = vec![];
|
|
||||||
|
|
||||||
|
let path = if filepath != "root" && filepath != "/" {
|
||||||
if filepath != "root" {
|
base64::decode(filepath)?
|
||||||
components = base64::decode(filepath)?;
|
|
||||||
if !components.is_empty() && components[0] == b'/' {
|
|
||||||
components.remove(0);
|
|
||||||
}
|
|
||||||
for component in components.split(|c| *c == b'/') {
|
|
||||||
if let Some(entry) = catalog_reader.lookup(¤t, component)? {
|
|
||||||
current = entry;
|
|
||||||
} else {
|
} else {
|
||||||
bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
|
vec![b'/']
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut res = Vec::new();
|
|
||||||
|
|
||||||
for direntry in catalog_reader.read_dir(¤t)? {
|
|
||||||
let mut components = components.clone();
|
|
||||||
components.push(b'/');
|
|
||||||
components.extend(&direntry.name);
|
|
||||||
let path = base64::encode(components);
|
|
||||||
let text = String::from_utf8_lossy(&direntry.name);
|
|
||||||
let mut entry = json!({
|
|
||||||
"filepath": path,
|
|
||||||
"text": text,
|
|
||||||
"type": CatalogEntryType::from(&direntry.attr).to_string(),
|
|
||||||
"leaf": true,
|
|
||||||
});
|
|
||||||
match direntry.attr {
|
|
||||||
DirEntryAttribute::Directory { start: _ } => {
|
|
||||||
entry["leaf"] = false.into();
|
|
||||||
},
|
|
||||||
DirEntryAttribute::File { size, mtime } => {
|
|
||||||
entry["size"] = size.into();
|
|
||||||
entry["mtime"] = mtime.into();
|
|
||||||
},
|
|
||||||
_ => {},
|
|
||||||
}
|
|
||||||
res.push(entry);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(res.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn recurse_files<'a, T, W>(
|
|
||||||
zip: &'a mut ZipEncoder<W>,
|
|
||||||
decoder: &'a mut Accessor<T>,
|
|
||||||
prefix: &'a Path,
|
|
||||||
file: FileEntry<T>,
|
|
||||||
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
|
|
||||||
where
|
|
||||||
T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
|
|
||||||
W: tokio::io::AsyncWrite + Unpin + Send + 'static,
|
|
||||||
{
|
|
||||||
Box::pin(async move {
|
|
||||||
let metadata = file.entry().metadata();
|
|
||||||
let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();
|
|
||||||
|
|
||||||
match file.kind() {
|
|
||||||
EntryKind::File { .. } => {
|
|
||||||
let entry = ZipEntry::new(
|
|
||||||
path,
|
|
||||||
metadata.stat.mtime.secs,
|
|
||||||
metadata.stat.mode as u16,
|
|
||||||
true,
|
|
||||||
);
|
|
||||||
zip.add_entry(entry, Some(file.contents().await?))
|
|
||||||
.await
|
|
||||||
.map_err(|err| format_err!("could not send file entry: {}", err))?;
|
|
||||||
}
|
|
||||||
EntryKind::Hardlink(_) => {
|
|
||||||
let realfile = decoder.follow_hardlink(&file).await?;
|
|
||||||
let entry = ZipEntry::new(
|
|
||||||
path,
|
|
||||||
metadata.stat.mtime.secs,
|
|
||||||
metadata.stat.mode as u16,
|
|
||||||
true,
|
|
||||||
);
|
|
||||||
zip.add_entry(entry, Some(realfile.contents().await?))
|
|
||||||
.await
|
|
||||||
.map_err(|err| format_err!("could not send file entry: {}", err))?;
|
|
||||||
}
|
|
||||||
EntryKind::Directory => {
|
|
||||||
let dir = file.enter_directory().await?;
|
|
||||||
let mut readdir = dir.read_dir();
|
|
||||||
let entry = ZipEntry::new(
|
|
||||||
path,
|
|
||||||
metadata.stat.mtime.secs,
|
|
||||||
metadata.stat.mode as u16,
|
|
||||||
false,
|
|
||||||
);
|
|
||||||
zip.add_entry::<FileContents<T>>(entry, None).await?;
|
|
||||||
while let Some(entry) = readdir.next().await {
|
|
||||||
let entry = entry?.decode_entry().await?;
|
|
||||||
recurse_files(zip, decoder, prefix, entry).await?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ => {} // ignore all else
|
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(())
|
helpers::list_dir_content(&mut catalog_reader, &path)
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[sortable]
|
#[sortable]
|
||||||
@ -1509,9 +1410,10 @@ pub fn pxar_file_download(
|
|||||||
|
|
||||||
let decoder = Accessor::new(reader, archive_size).await?;
|
let decoder = Accessor::new(reader, archive_size).await?;
|
||||||
let root = decoder.open_root().await?;
|
let root = decoder.open_root().await?;
|
||||||
|
let path = OsStr::from_bytes(file_path).to_os_string();
|
||||||
let file = root
|
let file = root
|
||||||
.lookup(OsStr::from_bytes(file_path)).await?
|
.lookup(&path).await?
|
||||||
.ok_or_else(|| format_err!("error opening '{:?}'", file_path))?;
|
.ok_or_else(|| format_err!("error opening '{:?}'", path))?;
|
||||||
|
|
||||||
let body = match file.kind() {
|
let body = match file.kind() {
|
||||||
EntryKind::File { .. } => Body::wrap_stream(
|
EntryKind::File { .. } => Body::wrap_stream(
|
||||||
@ -1525,37 +1427,19 @@ pub fn pxar_file_download(
|
|||||||
.map_err(move |err| {
|
.map_err(move |err| {
|
||||||
eprintln!(
|
eprintln!(
|
||||||
"error during streaming of hardlink '{:?}' - {}",
|
"error during streaming of hardlink '{:?}' - {}",
|
||||||
filepath, err
|
path, err
|
||||||
);
|
);
|
||||||
err
|
err
|
||||||
}),
|
}),
|
||||||
),
|
),
|
||||||
EntryKind::Directory => {
|
EntryKind::Directory => {
|
||||||
let (sender, receiver) = tokio::sync::mpsc::channel(100);
|
let (sender, receiver) = tokio::sync::mpsc::channel(100);
|
||||||
let mut prefix = PathBuf::new();
|
|
||||||
let mut components = file.entry().path().components();
|
|
||||||
components.next_back(); // discar last
|
|
||||||
for comp in components {
|
|
||||||
prefix.push(comp);
|
|
||||||
}
|
|
||||||
|
|
||||||
let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
|
let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
|
||||||
|
crate::server::spawn_internal_task(
|
||||||
crate::server::spawn_internal_task(async move {
|
create_zip(channelwriter, decoder, path.clone(), false)
|
||||||
let mut zipencoder = ZipEncoder::new(channelwriter);
|
);
|
||||||
let mut decoder = decoder;
|
|
||||||
recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
|
|
||||||
.await
|
|
||||||
.map_err(|err| eprintln!("error during creating of zip: {}", err))?;
|
|
||||||
|
|
||||||
zipencoder
|
|
||||||
.finish()
|
|
||||||
.await
|
|
||||||
.map_err(|err| eprintln!("error during finishing of zip: {}", err))
|
|
||||||
});
|
|
||||||
|
|
||||||
Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
|
Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
|
||||||
eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
|
eprintln!("error during streaming of zip '{:?}' - {}", path, err);
|
||||||
err
|
err
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
//! Datastore Syncronization Job Management
|
//! Datastore Synchronization Job Management
|
||||||
|
|
||||||
use anyhow::{bail, format_err, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
@ -7,16 +7,35 @@ use proxmox::api::{api, ApiMethod, Permission, Router, RpcEnvironment};
|
|||||||
use proxmox::api::router::SubdirMap;
|
use proxmox::api::router::SubdirMap;
|
||||||
use proxmox::{list_subdirs_api_method, sortable};
|
use proxmox::{list_subdirs_api_method, sortable};
|
||||||
|
|
||||||
use crate::api2::types::*;
|
use crate::{
|
||||||
use crate::api2::pull::do_sync_job;
|
api2::{
|
||||||
use crate::api2::config::sync::{check_sync_job_modify_access, check_sync_job_read_access};
|
types::{
|
||||||
|
DATASTORE_SCHEMA,
|
||||||
use crate::config::cached_user_info::CachedUserInfo;
|
JOB_ID_SCHEMA,
|
||||||
use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
|
Authid,
|
||||||
use crate::server::UPID;
|
},
|
||||||
use crate::server::jobstate::{Job, JobState};
|
pull::do_sync_job,
|
||||||
use crate::tools::systemd::time::{
|
config::sync::{
|
||||||
parse_calendar_event, compute_next_event};
|
check_sync_job_modify_access,
|
||||||
|
check_sync_job_read_access,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
config::{
|
||||||
|
cached_user_info::CachedUserInfo,
|
||||||
|
sync::{
|
||||||
|
self,
|
||||||
|
SyncJobStatus,
|
||||||
|
SyncJobConfig,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
server::{
|
||||||
|
jobstate::{
|
||||||
|
Job,
|
||||||
|
JobState,
|
||||||
|
compute_schedule_status,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
input: {
|
input: {
|
||||||
@ -30,7 +49,7 @@ use crate::tools::systemd::time::{
|
|||||||
returns: {
|
returns: {
|
||||||
description: "List configured jobs and their status.",
|
description: "List configured jobs and their status.",
|
||||||
type: Array,
|
type: Array,
|
||||||
items: { type: sync::SyncJobStatus },
|
items: { type: SyncJobStatus },
|
||||||
},
|
},
|
||||||
access: {
|
access: {
|
||||||
description: "Limited to sync jobs where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
|
description: "Limited to sync jobs where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
|
||||||
@ -49,48 +68,29 @@ pub fn list_sync_jobs(
|
|||||||
|
|
||||||
let (config, digest) = sync::config()?;
|
let (config, digest) = sync::config()?;
|
||||||
|
|
||||||
let mut list: Vec<SyncJobStatus> = config
|
let job_config_iter = config
|
||||||
.convert_to_typed_array("sync")?
|
.convert_to_typed_array("sync")?
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.filter(|job: &SyncJobStatus| {
|
.filter(|job: &SyncJobConfig| {
|
||||||
if let Some(store) = &store {
|
if let Some(store) = &store {
|
||||||
&job.store == store
|
&job.store == store
|
||||||
} else {
|
} else {
|
||||||
true
|
true
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
.filter(|job: &SyncJobStatus| {
|
.filter(|job: &SyncJobConfig| {
|
||||||
let as_config: SyncJobConfig = job.into();
|
check_sync_job_read_access(&user_info, &auth_id, &job)
|
||||||
check_sync_job_read_access(&user_info, &auth_id, &as_config)
|
});
|
||||||
}).collect();
|
|
||||||
|
|
||||||
for job in &mut list {
|
let mut list = Vec::new();
|
||||||
|
|
||||||
|
for job in job_config_iter {
|
||||||
let last_state = JobState::load("syncjob", &job.id)
|
let last_state = JobState::load("syncjob", &job.id)
|
||||||
.map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
|
.map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
|
||||||
let (upid, endtime, state, starttime) = match last_state {
|
|
||||||
JobState::Created { time } => (None, None, None, time),
|
|
||||||
JobState::Started { upid } => {
|
|
||||||
let parsed_upid: UPID = upid.parse()?;
|
|
||||||
(Some(upid), None, None, parsed_upid.starttime)
|
|
||||||
},
|
|
||||||
JobState::Finished { upid, state } => {
|
|
||||||
let parsed_upid: UPID = upid.parse()?;
|
|
||||||
(Some(upid), Some(state.endtime()), Some(state.to_string()), parsed_upid.starttime)
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
job.last_run_upid = upid;
|
let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;
|
||||||
job.last_run_state = state;
|
|
||||||
job.last_run_endtime = endtime;
|
|
||||||
|
|
||||||
let last = job.last_run_endtime.unwrap_or(starttime);
|
list.push(SyncJobStatus { config: job, status });
|
||||||
|
|
||||||
job.next_run = (|| -> Option<i64> {
|
|
||||||
let schedule = job.schedule.as_ref()?;
|
|
||||||
let event = parse_calendar_event(&schedule).ok()?;
|
|
||||||
// ignore errors
|
|
||||||
compute_next_event(&event, last, false).unwrap_or(None)
|
|
||||||
})();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
@ -1,24 +1,40 @@
|
|||||||
//! Datastore Verify Job Management
|
//! Datastore Verify Job Management
|
||||||
|
|
||||||
use anyhow::{format_err, Error};
|
use anyhow::{format_err, Error};
|
||||||
|
use serde_json::Value;
|
||||||
|
|
||||||
use proxmox::api::router::SubdirMap;
|
use proxmox::api::router::SubdirMap;
|
||||||
use proxmox::{list_subdirs_api_method, sortable};
|
use proxmox::{list_subdirs_api_method, sortable};
|
||||||
use proxmox::api::{api, ApiMethod, Permission, Router, RpcEnvironment};
|
use proxmox::api::{api, ApiMethod, Permission, Router, RpcEnvironment};
|
||||||
|
|
||||||
use crate::api2::types::*;
|
use crate::{
|
||||||
use crate::server::do_verification_job;
|
api2::types::{
|
||||||
use crate::server::jobstate::{Job, JobState};
|
DATASTORE_SCHEMA,
|
||||||
use crate::config::acl::{
|
JOB_ID_SCHEMA,
|
||||||
|
Authid,
|
||||||
|
},
|
||||||
|
server::{
|
||||||
|
do_verification_job,
|
||||||
|
jobstate::{
|
||||||
|
Job,
|
||||||
|
JobState,
|
||||||
|
compute_schedule_status,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
config::{
|
||||||
|
acl::{
|
||||||
PRIV_DATASTORE_AUDIT,
|
PRIV_DATASTORE_AUDIT,
|
||||||
PRIV_DATASTORE_VERIFY,
|
PRIV_DATASTORE_VERIFY,
|
||||||
|
},
|
||||||
|
cached_user_info::CachedUserInfo,
|
||||||
|
verify::{
|
||||||
|
self,
|
||||||
|
VerificationJobConfig,
|
||||||
|
VerificationJobStatus,
|
||||||
|
},
|
||||||
|
},
|
||||||
};
|
};
|
||||||
use crate::config::cached_user_info::CachedUserInfo;
|
|
||||||
use crate::config::verify;
|
|
||||||
use crate::config::verify::{VerificationJobConfig, VerificationJobStatus};
|
|
||||||
use serde_json::Value;
|
|
||||||
use crate::tools::systemd::time::{parse_calendar_event, compute_next_event};
|
|
||||||
use crate::server::UPID;
|
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
input: {
|
input: {
|
||||||
@ -52,10 +68,10 @@ pub fn list_verification_jobs(
|
|||||||
|
|
||||||
let (config, digest) = verify::config()?;
|
let (config, digest) = verify::config()?;
|
||||||
|
|
||||||
let mut list: Vec<VerificationJobStatus> = config
|
let job_config_iter = config
|
||||||
.convert_to_typed_array("verification")?
|
.convert_to_typed_array("verification")?
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.filter(|job: &VerificationJobStatus| {
|
.filter(|job: &VerificationJobConfig| {
|
||||||
let privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
|
let privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
|
||||||
if privs & required_privs == 0 {
|
if privs & required_privs == 0 {
|
||||||
return false;
|
return false;
|
||||||
@ -66,36 +82,17 @@ pub fn list_verification_jobs(
|
|||||||
} else {
|
} else {
|
||||||
true
|
true
|
||||||
}
|
}
|
||||||
}).collect();
|
});
|
||||||
|
|
||||||
for job in &mut list {
|
let mut list = Vec::new();
|
||||||
|
|
||||||
|
for job in job_config_iter {
|
||||||
let last_state = JobState::load("verificationjob", &job.id)
|
let last_state = JobState::load("verificationjob", &job.id)
|
||||||
.map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
|
.map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
|
||||||
|
|
||||||
let (upid, endtime, state, starttime) = match last_state {
|
let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;
|
||||||
JobState::Created { time } => (None, None, None, time),
|
|
||||||
JobState::Started { upid } => {
|
|
||||||
let parsed_upid: UPID = upid.parse()?;
|
|
||||||
(Some(upid), None, None, parsed_upid.starttime)
|
|
||||||
},
|
|
||||||
JobState::Finished { upid, state } => {
|
|
||||||
let parsed_upid: UPID = upid.parse()?;
|
|
||||||
(Some(upid), Some(state.endtime()), Some(state.to_string()), parsed_upid.starttime)
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
job.last_run_upid = upid;
|
list.push(VerificationJobStatus { config: job, status });
|
||||||
job.last_run_state = state;
|
|
||||||
job.last_run_endtime = endtime;
|
|
||||||
|
|
||||||
let last = job.last_run_endtime.unwrap_or(starttime);
|
|
||||||
|
|
||||||
job.next_run = (|| -> Option<i64> {
|
|
||||||
let schedule = job.schedule.as_ref()?;
|
|
||||||
let event = parse_calendar_event(&schedule).ok()?;
|
|
||||||
// ignore errors
|
|
||||||
compute_next_event(&event, last, false).unwrap_or(None)
|
|
||||||
})();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
@ -268,7 +268,7 @@ async move {
|
|||||||
}.boxed()
|
}.boxed()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub const BACKUP_API_SUBDIRS: SubdirMap = &[
|
const BACKUP_API_SUBDIRS: SubdirMap = &[
|
||||||
(
|
(
|
||||||
"blob", &Router::new()
|
"blob", &Router::new()
|
||||||
.upload(&API_METHOD_UPLOAD_BLOB)
|
.upload(&API_METHOD_UPLOAD_BLOB)
|
||||||
|
@ -12,6 +12,7 @@ pub mod drive;
|
|||||||
pub mod changer;
|
pub mod changer;
|
||||||
pub mod media_pool;
|
pub mod media_pool;
|
||||||
pub mod tape_encryption_keys;
|
pub mod tape_encryption_keys;
|
||||||
|
pub mod tape_backup_job;
|
||||||
|
|
||||||
const SUBDIRS: SubdirMap = &[
|
const SUBDIRS: SubdirMap = &[
|
||||||
("access", &access::ROUTER),
|
("access", &access::ROUTER),
|
||||||
@ -21,6 +22,7 @@ const SUBDIRS: SubdirMap = &[
|
|||||||
("media-pool", &media_pool::ROUTER),
|
("media-pool", &media_pool::ROUTER),
|
||||||
("remote", &remote::ROUTER),
|
("remote", &remote::ROUTER),
|
||||||
("sync", &sync::ROUTER),
|
("sync", &sync::ROUTER),
|
||||||
|
("tape-backup-job", &tape_backup_job::ROUTER),
|
||||||
("tape-encryption-keys", &tape_encryption_keys::ROUTER),
|
("tape-encryption-keys", &tape_encryption_keys::ROUTER),
|
||||||
("verify", &verify::ROUTER),
|
("verify", &verify::ROUTER),
|
||||||
];
|
];
|
||||||
|
@ -5,6 +5,7 @@ use anyhow::Error;
|
|||||||
|
|
||||||
use crate::api2::types::PROXMOX_CONFIG_DIGEST_SCHEMA;
|
use crate::api2::types::PROXMOX_CONFIG_DIGEST_SCHEMA;
|
||||||
use proxmox::api::{api, Permission, Router, RpcEnvironment, SubdirMap};
|
use proxmox::api::{api, Permission, Router, RpcEnvironment, SubdirMap};
|
||||||
|
use proxmox::api::schema::Updatable;
|
||||||
use proxmox::list_subdirs_api_method;
|
use proxmox::list_subdirs_api_method;
|
||||||
|
|
||||||
use crate::config::tfa::{self, WebauthnConfig, WebauthnConfigUpdater};
|
use crate::config::tfa::{self, WebauthnConfig, WebauthnConfigUpdater};
|
||||||
@ -73,9 +74,9 @@ pub fn update_webauthn_config(
|
|||||||
let digest = proxmox::tools::hex_to_digest(digest)?;
|
let digest = proxmox::tools::hex_to_digest(digest)?;
|
||||||
crate::tools::detect_modified_configuration_file(&digest, &wa.digest()?)?;
|
crate::tools::detect_modified_configuration_file(&digest, &wa.digest()?)?;
|
||||||
}
|
}
|
||||||
webauthn.apply_to(wa);
|
wa.update_from::<&str>(webauthn, &[])?;
|
||||||
} else {
|
} else {
|
||||||
tfa.webauthn = Some(webauthn.build()?);
|
tfa.webauthn = Some(WebauthnConfig::try_build_from(webauthn)?);
|
||||||
}
|
}
|
||||||
|
|
||||||
tfa::write(&tfa)?;
|
tfa::write(&tfa)?;
|
||||||
|
@ -6,15 +6,24 @@ use proxmox::api::{
|
|||||||
api,
|
api,
|
||||||
Router,
|
Router,
|
||||||
RpcEnvironment,
|
RpcEnvironment,
|
||||||
|
Permission,
|
||||||
schema::parse_property_string,
|
schema::parse_property_string,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
config,
|
config::{
|
||||||
|
self,
|
||||||
|
cached_user_info::CachedUserInfo,
|
||||||
|
acl::{
|
||||||
|
PRIV_TAPE_AUDIT,
|
||||||
|
PRIV_TAPE_MODIFY,
|
||||||
|
},
|
||||||
|
},
|
||||||
api2::types::{
|
api2::types::{
|
||||||
|
Authid,
|
||||||
PROXMOX_CONFIG_DIGEST_SCHEMA,
|
PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||||
CHANGER_NAME_SCHEMA,
|
CHANGER_NAME_SCHEMA,
|
||||||
LINUX_DRIVE_PATH_SCHEMA,
|
SCSI_CHANGER_PATH_SCHEMA,
|
||||||
SLOT_ARRAY_SCHEMA,
|
SLOT_ARRAY_SCHEMA,
|
||||||
EXPORT_SLOT_LIST_SCHEMA,
|
EXPORT_SLOT_LIST_SCHEMA,
|
||||||
ScsiTapeChanger,
|
ScsiTapeChanger,
|
||||||
@ -34,7 +43,7 @@ use crate::{
|
|||||||
schema: CHANGER_NAME_SCHEMA,
|
schema: CHANGER_NAME_SCHEMA,
|
||||||
},
|
},
|
||||||
path: {
|
path: {
|
||||||
schema: LINUX_DRIVE_PATH_SCHEMA,
|
schema: SCSI_CHANGER_PATH_SCHEMA,
|
||||||
},
|
},
|
||||||
"export-slots": {
|
"export-slots": {
|
||||||
schema: EXPORT_SLOT_LIST_SCHEMA,
|
schema: EXPORT_SLOT_LIST_SCHEMA,
|
||||||
@ -42,6 +51,9 @@ use crate::{
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "device"], PRIV_TAPE_MODIFY, false),
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Create a new changer device
|
/// Create a new changer device
|
||||||
pub fn create_changer(
|
pub fn create_changer(
|
||||||
@ -94,7 +106,9 @@ pub fn create_changer(
|
|||||||
returns: {
|
returns: {
|
||||||
type: ScsiTapeChanger,
|
type: ScsiTapeChanger,
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_AUDIT, false),
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Get tape changer configuration
|
/// Get tape changer configuration
|
||||||
pub fn get_config(
|
pub fn get_config(
|
||||||
@ -123,17 +137,31 @@ pub fn get_config(
|
|||||||
type: ScsiTapeChanger,
|
type: ScsiTapeChanger,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
description: "List configured tape changer filtered by Tape.Audit privileges",
|
||||||
|
permission: &Permission::Anybody,
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// List changers
|
/// List changers
|
||||||
pub fn list_changers(
|
pub fn list_changers(
|
||||||
_param: Value,
|
_param: Value,
|
||||||
mut rpcenv: &mut dyn RpcEnvironment,
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<Vec<ScsiTapeChanger>, Error> {
|
) -> Result<Vec<ScsiTapeChanger>, Error> {
|
||||||
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
|
||||||
let (config, digest) = config::drive::config()?;
|
let (config, digest) = config::drive::config()?;
|
||||||
|
|
||||||
let list: Vec<ScsiTapeChanger> = config.convert_to_typed_array("changer")?;
|
let list: Vec<ScsiTapeChanger> = config.convert_to_typed_array("changer")?;
|
||||||
|
|
||||||
|
let list = list
|
||||||
|
.into_iter()
|
||||||
|
.filter(|changer| {
|
||||||
|
let privs = user_info.lookup_privs(&auth_id, &["tape", "device", &changer.name]);
|
||||||
|
privs & PRIV_TAPE_AUDIT != 0
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
|
||||||
Ok(list)
|
Ok(list)
|
||||||
@ -156,7 +184,7 @@ pub enum DeletableProperty {
|
|||||||
schema: CHANGER_NAME_SCHEMA,
|
schema: CHANGER_NAME_SCHEMA,
|
||||||
},
|
},
|
||||||
path: {
|
path: {
|
||||||
schema: LINUX_DRIVE_PATH_SCHEMA,
|
schema: SCSI_CHANGER_PATH_SCHEMA,
|
||||||
optional: true,
|
optional: true,
|
||||||
},
|
},
|
||||||
"export-slots": {
|
"export-slots": {
|
||||||
@ -177,6 +205,9 @@ pub enum DeletableProperty {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_MODIFY, false),
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Update a tape changer configuration
|
/// Update a tape changer configuration
|
||||||
pub fn update_changer(
|
pub fn update_changer(
|
||||||
@ -251,6 +282,9 @@ pub fn update_changer(
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_MODIFY, false),
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Delete a tape changer configuration
|
/// Delete a tape changer configuration
|
||||||
pub fn delete_changer(name: String, _param: Value) -> Result<(), Error> {
|
pub fn delete_changer(name: String, _param: Value) -> Result<(), Error> {
|
||||||
|
@ -2,11 +2,19 @@ use anyhow::{bail, Error};
|
|||||||
use ::serde::{Deserialize, Serialize};
|
use ::serde::{Deserialize, Serialize};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use proxmox::api::{api, Router, RpcEnvironment};
|
use proxmox::api::{api, Router, RpcEnvironment, Permission};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
config,
|
config::{
|
||||||
|
self,
|
||||||
|
cached_user_info::CachedUserInfo,
|
||||||
|
acl::{
|
||||||
|
PRIV_TAPE_AUDIT,
|
||||||
|
PRIV_TAPE_MODIFY,
|
||||||
|
},
|
||||||
|
},
|
||||||
api2::types::{
|
api2::types::{
|
||||||
|
Authid,
|
||||||
PROXMOX_CONFIG_DIGEST_SCHEMA,
|
PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||||
DRIVE_NAME_SCHEMA,
|
DRIVE_NAME_SCHEMA,
|
||||||
CHANGER_NAME_SCHEMA,
|
CHANGER_NAME_SCHEMA,
|
||||||
@ -41,6 +49,9 @@ use crate::{
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "device"], PRIV_TAPE_MODIFY, false),
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Create a new drive
|
/// Create a new drive
|
||||||
pub fn create_drive(param: Value) -> Result<(), Error> {
|
pub fn create_drive(param: Value) -> Result<(), Error> {
|
||||||
@ -84,6 +95,9 @@ pub fn create_drive(param: Value) -> Result<(), Error> {
|
|||||||
returns: {
|
returns: {
|
||||||
type: LinuxTapeDrive,
|
type: LinuxTapeDrive,
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_AUDIT, false),
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Get drive configuration
|
/// Get drive configuration
|
||||||
pub fn get_config(
|
pub fn get_config(
|
||||||
@ -112,17 +126,31 @@ pub fn get_config(
|
|||||||
type: LinuxTapeDrive,
|
type: LinuxTapeDrive,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
description: "List configured tape drives filtered by Tape.Audit privileges",
|
||||||
|
permission: &Permission::Anybody,
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// List drives
|
/// List drives
|
||||||
pub fn list_drives(
|
pub fn list_drives(
|
||||||
_param: Value,
|
_param: Value,
|
||||||
mut rpcenv: &mut dyn RpcEnvironment,
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<Vec<LinuxTapeDrive>, Error> {
|
) -> Result<Vec<LinuxTapeDrive>, Error> {
|
||||||
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
|
||||||
let (config, digest) = config::drive::config()?;
|
let (config, digest) = config::drive::config()?;
|
||||||
|
|
||||||
let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
|
let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
|
||||||
|
|
||||||
|
let drive_list = drive_list
|
||||||
|
.into_iter()
|
||||||
|
.filter(|drive| {
|
||||||
|
let privs = user_info.lookup_privs(&auth_id, &["tape", "device", &drive.name]);
|
||||||
|
privs & PRIV_TAPE_AUDIT != 0
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
|
||||||
Ok(drive_list)
|
Ok(drive_list)
|
||||||
@ -173,6 +201,9 @@ pub enum DeletableProperty {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_MODIFY, false),
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Update a drive configuration
|
/// Update a drive configuration
|
||||||
pub fn update_drive(
|
pub fn update_drive(
|
||||||
@ -246,6 +277,9 @@ pub fn update_drive(
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_MODIFY, false),
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Delete a drive configuration
|
/// Delete a drive configuration
|
||||||
pub fn delete_drive(name: String, _param: Value) -> Result<(), Error> {
|
pub fn delete_drive(name: String, _param: Value) -> Result<(), Error> {
|
||||||
|
@ -6,75 +6,61 @@ use proxmox::{
|
|||||||
api,
|
api,
|
||||||
Router,
|
Router,
|
||||||
RpcEnvironment,
|
RpcEnvironment,
|
||||||
|
Permission,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api2::types::{
|
api2::types::{
|
||||||
|
Authid,
|
||||||
MEDIA_POOL_NAME_SCHEMA,
|
MEDIA_POOL_NAME_SCHEMA,
|
||||||
MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
|
MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
|
||||||
MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
|
MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
|
||||||
MEDIA_RETENTION_POLICY_SCHEMA,
|
MEDIA_RETENTION_POLICY_SCHEMA,
|
||||||
TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
||||||
|
SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
MediaPoolConfig,
|
MediaPoolConfig,
|
||||||
},
|
},
|
||||||
config,
|
config::{
|
||||||
|
self,
|
||||||
|
cached_user_info::CachedUserInfo,
|
||||||
|
acl::{
|
||||||
|
PRIV_TAPE_AUDIT,
|
||||||
|
PRIV_TAPE_MODIFY,
|
||||||
|
},
|
||||||
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
protected: true,
|
protected: true,
|
||||||
input: {
|
input: {
|
||||||
properties: {
|
properties: {
|
||||||
name: {
|
config: {
|
||||||
schema: MEDIA_POOL_NAME_SCHEMA,
|
type: MediaPoolConfig,
|
||||||
},
|
flatten: true,
|
||||||
allocation: {
|
|
||||||
schema: MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
retention: {
|
|
||||||
schema: MEDIA_RETENTION_POLICY_SCHEMA,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
template: {
|
|
||||||
schema: MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
encrypt: {
|
|
||||||
schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
|
||||||
optional: true,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "pool"], PRIV_TAPE_MODIFY, false),
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Create a new media pool
|
/// Create a new media pool
|
||||||
pub fn create_pool(
|
pub fn create_pool(
|
||||||
name: String,
|
config: MediaPoolConfig,
|
||||||
allocation: Option<String>,
|
|
||||||
retention: Option<String>,
|
|
||||||
template: Option<String>,
|
|
||||||
encrypt: Option<String>,
|
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
let _lock = config::media_pool::lock()?;
|
let _lock = config::media_pool::lock()?;
|
||||||
|
|
||||||
let (mut config, _digest) = config::media_pool::config()?;
|
let (mut section_config, _digest) = config::media_pool::config()?;
|
||||||
|
|
||||||
if config.sections.get(&name).is_some() {
|
if section_config.sections.get(&config.name).is_some() {
|
||||||
bail!("Media pool '{}' already exists", name);
|
bail!("Media pool '{}' already exists", config.name);
|
||||||
}
|
}
|
||||||
|
|
||||||
let item = MediaPoolConfig {
|
section_config.set_data(&config.name, "pool", &config)?;
|
||||||
name: name.clone(),
|
|
||||||
allocation,
|
|
||||||
retention,
|
|
||||||
template,
|
|
||||||
encrypt,
|
|
||||||
};
|
|
||||||
|
|
||||||
config.set_data(&name, "pool", &item)?;
|
config::media_pool::save_config(§ion_config)?;
|
||||||
|
|
||||||
config::media_pool::save_config(&config)?;
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@ -87,15 +73,29 @@ pub fn create_pool(
|
|||||||
type: MediaPoolConfig,
|
type: MediaPoolConfig,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
description: "List configured media pools filtered by Tape.Audit privileges",
|
||||||
|
permission: &Permission::Anybody,
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// List media pools
|
/// List media pools
|
||||||
pub fn list_pools(
|
pub fn list_pools(
|
||||||
mut rpcenv: &mut dyn RpcEnvironment,
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<Vec<MediaPoolConfig>, Error> {
|
) -> Result<Vec<MediaPoolConfig>, Error> {
|
||||||
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
|
||||||
let (config, digest) = config::media_pool::config()?;
|
let (config, digest) = config::media_pool::config()?;
|
||||||
|
|
||||||
let list = config.convert_to_typed_array("pool")?;
|
let list = config.convert_to_typed_array::<MediaPoolConfig>("pool")?;
|
||||||
|
|
||||||
|
let list = list
|
||||||
|
.into_iter()
|
||||||
|
.filter(|pool| {
|
||||||
|
let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool.name]);
|
||||||
|
privs & PRIV_TAPE_AUDIT != 0
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
|
||||||
@ -113,6 +113,9 @@ pub fn list_pools(
|
|||||||
returns: {
|
returns: {
|
||||||
type: MediaPoolConfig,
|
type: MediaPoolConfig,
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "pool", "{name}"], PRIV_TAPE_AUDIT, false),
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Get media pool configuration
|
/// Get media pool configuration
|
||||||
pub fn get_config(name: String) -> Result<MediaPoolConfig, Error> {
|
pub fn get_config(name: String) -> Result<MediaPoolConfig, Error> {
|
||||||
@ -137,6 +140,8 @@ pub enum DeletableProperty {
|
|||||||
template,
|
template,
|
||||||
/// Delete encryption fingerprint
|
/// Delete encryption fingerprint
|
||||||
encrypt,
|
encrypt,
|
||||||
|
/// Delete comment
|
||||||
|
comment,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
@ -162,6 +167,10 @@ pub enum DeletableProperty {
|
|||||||
schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
||||||
optional: true,
|
optional: true,
|
||||||
},
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
delete: {
|
delete: {
|
||||||
description: "List of properties to delete.",
|
description: "List of properties to delete.",
|
||||||
type: Array,
|
type: Array,
|
||||||
@ -172,6 +181,9 @@ pub enum DeletableProperty {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "pool", "{name}"], PRIV_TAPE_MODIFY, false),
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Update media pool settings
|
/// Update media pool settings
|
||||||
pub fn update_pool(
|
pub fn update_pool(
|
||||||
@ -180,6 +192,7 @@ pub fn update_pool(
|
|||||||
retention: Option<String>,
|
retention: Option<String>,
|
||||||
template: Option<String>,
|
template: Option<String>,
|
||||||
encrypt: Option<String>,
|
encrypt: Option<String>,
|
||||||
|
comment: Option<String>,
|
||||||
delete: Option<Vec<DeletableProperty>>,
|
delete: Option<Vec<DeletableProperty>>,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
@ -196,6 +209,7 @@ pub fn update_pool(
|
|||||||
DeletableProperty::retention => { data.retention = None; },
|
DeletableProperty::retention => { data.retention = None; },
|
||||||
DeletableProperty::template => { data.template = None; },
|
DeletableProperty::template => { data.template = None; },
|
||||||
DeletableProperty::encrypt => { data.encrypt = None; },
|
DeletableProperty::encrypt => { data.encrypt = None; },
|
||||||
|
DeletableProperty::comment => { data.comment = None; },
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -205,6 +219,15 @@ pub fn update_pool(
|
|||||||
if template.is_some() { data.template = template; }
|
if template.is_some() { data.template = template; }
|
||||||
if encrypt.is_some() { data.encrypt = encrypt; }
|
if encrypt.is_some() { data.encrypt = encrypt; }
|
||||||
|
|
||||||
|
if let Some(comment) = comment {
|
||||||
|
let comment = comment.trim();
|
||||||
|
if comment.is_empty() {
|
||||||
|
data.comment = None;
|
||||||
|
} else {
|
||||||
|
data.comment = Some(comment.to_string());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
config.set_data(&name, "pool", &data)?;
|
config.set_data(&name, "pool", &data)?;
|
||||||
|
|
||||||
config::media_pool::save_config(&config)?;
|
config::media_pool::save_config(&config)?;
|
||||||
@ -221,6 +244,9 @@ pub fn update_pool(
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "pool", "{name}"], PRIV_TAPE_MODIFY, false),
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Delete a media pool configuration
|
/// Delete a media pool configuration
|
||||||
pub fn delete_pool(name: String) -> Result<(), Error> {
|
pub fn delete_pool(name: String) -> Result<(), Error> {
|
||||||
|
341
src/api2/config/tape_backup_job.rs
Normal file
341
src/api2/config/tape_backup_job.rs
Normal file
@ -0,0 +1,341 @@
|
|||||||
|
use anyhow::{bail, Error};
|
||||||
|
use serde_json::Value;
|
||||||
|
use ::serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::api::{api, Router, RpcEnvironment, Permission};
|
||||||
|
use proxmox::tools::fs::open_file_locked;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
api2::types::{
|
||||||
|
Authid,
|
||||||
|
Userid,
|
||||||
|
JOB_ID_SCHEMA,
|
||||||
|
DATASTORE_SCHEMA,
|
||||||
|
DRIVE_NAME_SCHEMA,
|
||||||
|
PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||||
|
SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
MEDIA_POOL_NAME_SCHEMA,
|
||||||
|
SYNC_SCHEDULE_SCHEMA,
|
||||||
|
},
|
||||||
|
config::{
|
||||||
|
self,
|
||||||
|
cached_user_info::CachedUserInfo,
|
||||||
|
acl::{
|
||||||
|
PRIV_TAPE_AUDIT,
|
||||||
|
PRIV_TAPE_MODIFY,
|
||||||
|
},
|
||||||
|
tape_job::{
|
||||||
|
TAPE_JOB_CFG_LOCKFILE,
|
||||||
|
TapeBackupJobConfig,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {},
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
description: "List configured jobs.",
|
||||||
|
type: Array,
|
||||||
|
items: { type: TapeBackupJobConfig },
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
description: "List configured tape jobs filtered by Tape.Audit privileges",
|
||||||
|
permission: &Permission::Anybody,
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// List all tape backup jobs
|
||||||
|
pub fn list_tape_backup_jobs(
|
||||||
|
_param: Value,
|
||||||
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<Vec<TapeBackupJobConfig>, Error> {
|
||||||
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
|
||||||
|
let (config, digest) = config::tape_job::config()?;
|
||||||
|
|
||||||
|
let list = config.convert_to_typed_array::<TapeBackupJobConfig>("backup")?;
|
||||||
|
|
||||||
|
let list = list
|
||||||
|
.into_iter()
|
||||||
|
.filter(|job| {
|
||||||
|
let privs = user_info.lookup_privs(&auth_id, &["tape", "job", &job.id]);
|
||||||
|
privs & PRIV_TAPE_AUDIT != 0
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
|
||||||
|
Ok(list)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
job: {
|
||||||
|
type: TapeBackupJobConfig,
|
||||||
|
flatten: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "job"], PRIV_TAPE_MODIFY, false),
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Create a new tape backup job.
|
||||||
|
pub fn create_tape_backup_job(
|
||||||
|
job: TapeBackupJobConfig,
|
||||||
|
_rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let _lock = open_file_locked(TAPE_JOB_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||||
|
|
||||||
|
let (mut config, _digest) = config::tape_job::config()?;
|
||||||
|
|
||||||
|
if config.sections.get(&job.id).is_some() {
|
||||||
|
bail!("job '{}' already exists.", job.id);
|
||||||
|
}
|
||||||
|
|
||||||
|
config.set_data(&job.id, "backup", &job)?;
|
||||||
|
|
||||||
|
config::tape_job::save_config(&config)?;
|
||||||
|
|
||||||
|
crate::server::jobstate::create_state_file("tape-backup-job", &job.id)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
id: {
|
||||||
|
schema: JOB_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
returns: { type: TapeBackupJobConfig },
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "job", "{id}"], PRIV_TAPE_AUDIT, false),
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Read a tape backup job configuration.
|
||||||
|
pub fn read_tape_backup_job(
|
||||||
|
id: String,
|
||||||
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<TapeBackupJobConfig, Error> {
|
||||||
|
|
||||||
|
let (config, digest) = config::tape_job::config()?;
|
||||||
|
|
||||||
|
let job = config.lookup("backup", &id)?;
|
||||||
|
|
||||||
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
|
||||||
|
Ok(job)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Deletable property name
|
||||||
|
pub enum DeletableProperty {
|
||||||
|
/// Delete the comment property.
|
||||||
|
Comment,
|
||||||
|
/// Delete the job schedule.
|
||||||
|
Schedule,
|
||||||
|
/// Delete the eject-media property
|
||||||
|
EjectMedia,
|
||||||
|
/// Delete the export-media-set property
|
||||||
|
ExportMediaSet,
|
||||||
|
/// Delete the 'latest-only' property
|
||||||
|
LatestOnly,
|
||||||
|
/// Delete the 'notify-user' property
|
||||||
|
NotifyUser,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
id: {
|
||||||
|
schema: JOB_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
store: {
|
||||||
|
schema: DATASTORE_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
pool: {
|
||||||
|
schema: MEDIA_POOL_NAME_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
drive: {
|
||||||
|
schema: DRIVE_NAME_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"eject-media": {
|
||||||
|
description: "Eject media upon job completion.",
|
||||||
|
type: bool,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"export-media-set": {
|
||||||
|
description: "Export media set upon job completion.",
|
||||||
|
type: bool,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"latest-only": {
|
||||||
|
description: "Backup latest snapshots only.",
|
||||||
|
type: bool,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"notify-user": {
|
||||||
|
optional: true,
|
||||||
|
type: Userid,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
schedule: {
|
||||||
|
optional: true,
|
||||||
|
schema: SYNC_SCHEDULE_SCHEMA,
|
||||||
|
},
|
||||||
|
delete: {
|
||||||
|
description: "List of properties to delete.",
|
||||||
|
type: Array,
|
||||||
|
optional: true,
|
||||||
|
items: {
|
||||||
|
type: DeletableProperty,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
digest: {
|
||||||
|
optional: true,
|
||||||
|
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "job", "{id}"], PRIV_TAPE_MODIFY, false),
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Update the tape backup job
|
||||||
|
pub fn update_tape_backup_job(
|
||||||
|
id: String,
|
||||||
|
store: Option<String>,
|
||||||
|
pool: Option<String>,
|
||||||
|
drive: Option<String>,
|
||||||
|
eject_media: Option<bool>,
|
||||||
|
export_media_set: Option<bool>,
|
||||||
|
latest_only: Option<bool>,
|
||||||
|
notify_user: Option<Userid>,
|
||||||
|
comment: Option<String>,
|
||||||
|
schedule: Option<String>,
|
||||||
|
delete: Option<Vec<DeletableProperty>>,
|
||||||
|
digest: Option<String>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let _lock = open_file_locked(TAPE_JOB_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||||
|
|
||||||
|
let (mut config, expected_digest) = config::tape_job::config()?;
|
||||||
|
|
||||||
|
let mut data: TapeBackupJobConfig = config.lookup("backup", &id)?;
|
||||||
|
|
||||||
|
if let Some(ref digest) = digest {
|
||||||
|
let digest = proxmox::tools::hex_to_digest(digest)?;
|
||||||
|
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(delete) = delete {
|
||||||
|
for delete_prop in delete {
|
||||||
|
match delete_prop {
|
||||||
|
DeletableProperty::EjectMedia => { data.setup.eject_media = None; },
|
||||||
|
DeletableProperty::ExportMediaSet => { data.setup.export_media_set = None; },
|
||||||
|
DeletableProperty::LatestOnly => { data.setup.latest_only = None; },
|
||||||
|
DeletableProperty::NotifyUser => { data.setup.notify_user = None; },
|
||||||
|
DeletableProperty::Schedule => { data.schedule = None; },
|
||||||
|
DeletableProperty::Comment => { data.comment = None; },
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(store) = store { data.setup.store = store; }
|
||||||
|
if let Some(pool) = pool { data.setup.pool = pool; }
|
||||||
|
if let Some(drive) = drive { data.setup.drive = drive; }
|
||||||
|
|
||||||
|
if eject_media.is_some() { data.setup.eject_media = eject_media; };
|
||||||
|
if export_media_set.is_some() { data.setup.export_media_set = export_media_set; }
|
||||||
|
if latest_only.is_some() { data.setup.latest_only = latest_only; }
|
||||||
|
if notify_user.is_some() { data.setup.notify_user = notify_user; }
|
||||||
|
|
||||||
|
if schedule.is_some() { data.schedule = schedule; }
|
||||||
|
|
||||||
|
if let Some(comment) = comment {
|
||||||
|
let comment = comment.trim();
|
||||||
|
if comment.is_empty() {
|
||||||
|
data.comment = None;
|
||||||
|
} else {
|
||||||
|
data.comment = Some(comment.to_string());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
config.set_data(&id, "backup", &data)?;
|
||||||
|
|
||||||
|
config::tape_job::save_config(&config)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
id: {
|
||||||
|
schema: JOB_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
digest: {
|
||||||
|
optional: true,
|
||||||
|
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "job", "{id}"], PRIV_TAPE_MODIFY, false),
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Remove a tape backup job configuration
|
||||||
|
pub fn delete_tape_backup_job(
|
||||||
|
id: String,
|
||||||
|
digest: Option<String>,
|
||||||
|
_rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let _lock = open_file_locked(TAPE_JOB_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||||
|
|
||||||
|
let (mut config, expected_digest) = config::tape_job::config()?;
|
||||||
|
|
||||||
|
if let Some(ref digest) = digest {
|
||||||
|
let digest = proxmox::tools::hex_to_digest(digest)?;
|
||||||
|
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
match config.lookup::<TapeBackupJobConfig>("backup", &id) {
|
||||||
|
Ok(_job) => {
|
||||||
|
config.sections.remove(&id);
|
||||||
|
},
|
||||||
|
Err(_) => { bail!("job '{}' does not exist.", id) },
|
||||||
|
};
|
||||||
|
|
||||||
|
config::tape_job::save_config(&config)?;
|
||||||
|
|
||||||
|
crate::server::jobstate::remove_state_file("tape-backup-job", &id)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
const ITEM_ROUTER: Router = Router::new()
|
||||||
|
.get(&API_METHOD_READ_TAPE_BACKUP_JOB)
|
||||||
|
.put(&API_METHOD_UPDATE_TAPE_BACKUP_JOB)
|
||||||
|
.delete(&API_METHOD_DELETE_TAPE_BACKUP_JOB);
|
||||||
|
|
||||||
|
pub const ROUTER: Router = Router::new()
|
||||||
|
.get(&API_METHOD_LIST_TAPE_BACKUP_JOBS)
|
||||||
|
.post(&API_METHOD_CREATE_TAPE_BACKUP_JOB)
|
||||||
|
.match_all("id", &ITEM_ROUTER);
|
@ -7,12 +7,17 @@ use proxmox::{
|
|||||||
ApiMethod,
|
ApiMethod,
|
||||||
Router,
|
Router,
|
||||||
RpcEnvironment,
|
RpcEnvironment,
|
||||||
|
Permission,
|
||||||
},
|
},
|
||||||
tools::fs::open_file_locked,
|
tools::fs::open_file_locked,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
config::{
|
config::{
|
||||||
|
acl::{
|
||||||
|
PRIV_TAPE_AUDIT,
|
||||||
|
PRIV_TAPE_MODIFY,
|
||||||
|
},
|
||||||
tape_encryption_keys::{
|
tape_encryption_keys::{
|
||||||
TAPE_KEYS_LOCKFILE,
|
TAPE_KEYS_LOCKFILE,
|
||||||
load_keys,
|
load_keys,
|
||||||
@ -44,6 +49,9 @@ use crate::{
|
|||||||
type: Array,
|
type: Array,
|
||||||
items: { type: KeyInfo },
|
items: { type: KeyInfo },
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "pool"], PRIV_TAPE_AUDIT, false),
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// List existing keys
|
/// List existing keys
|
||||||
pub fn list_keys(
|
pub fn list_keys(
|
||||||
@ -93,6 +101,9 @@ pub fn list_keys(
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "pool"], PRIV_TAPE_MODIFY, false),
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Change the encryption key's password (and password hint).
|
/// Change the encryption key's password (and password hint).
|
||||||
pub fn change_passphrase(
|
pub fn change_passphrase(
|
||||||
@ -108,7 +119,7 @@ pub fn change_passphrase(
|
|||||||
let kdf = kdf.unwrap_or_default();
|
let kdf = kdf.unwrap_or_default();
|
||||||
|
|
||||||
if let Kdf::None = kdf {
|
if let Kdf::None = kdf {
|
||||||
bail!("Please specify a key derivation funktion (none is not allowed here).");
|
bail!("Please specify a key derivation function (none is not allowed here).");
|
||||||
}
|
}
|
||||||
|
|
||||||
let _lock = open_file_locked(
|
let _lock = open_file_locked(
|
||||||
@ -161,6 +172,9 @@ pub fn change_passphrase(
|
|||||||
returns: {
|
returns: {
|
||||||
schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "pool"], PRIV_TAPE_MODIFY, false),
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Create a new encryption key
|
/// Create a new encryption key
|
||||||
pub fn create_key(
|
pub fn create_key(
|
||||||
@ -173,7 +187,7 @@ pub fn create_key(
|
|||||||
let kdf = kdf.unwrap_or_default();
|
let kdf = kdf.unwrap_or_default();
|
||||||
|
|
||||||
if let Kdf::None = kdf {
|
if let Kdf::None = kdf {
|
||||||
bail!("Please specify a key derivation funktion (none is not allowed here).");
|
bail!("Please specify a key derivation function (none is not allowed here).");
|
||||||
}
|
}
|
||||||
|
|
||||||
let (key, mut key_config) = KeyConfig::new(password.as_bytes(), kdf)?;
|
let (key, mut key_config) = KeyConfig::new(password.as_bytes(), kdf)?;
|
||||||
@ -198,6 +212,9 @@ pub fn create_key(
|
|||||||
returns: {
|
returns: {
|
||||||
type: KeyInfo,
|
type: KeyInfo,
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "pool"], PRIV_TAPE_AUDIT, false),
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Get key config (public key part)
|
/// Get key config (public key part)
|
||||||
pub fn read_key(
|
pub fn read_key(
|
||||||
@ -232,6 +249,9 @@ pub fn read_key(
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "pool"], PRIV_TAPE_MODIFY, false),
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Remove a encryption key from the database
|
/// Remove a encryption key from the database
|
||||||
///
|
///
|
||||||
|
@ -1,3 +1,4 @@
|
|||||||
|
use std::io::{Read, Seek};
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
|
||||||
use anyhow::Error;
|
use anyhow::Error;
|
||||||
@ -6,6 +7,9 @@ use hyper::{Body, Response, StatusCode, header};
|
|||||||
|
|
||||||
use proxmox::http_bail;
|
use proxmox::http_bail;
|
||||||
|
|
||||||
|
use crate::api2::types::ArchiveEntry;
|
||||||
|
use crate::backup::{CatalogReader, DirEntryAttribute};
|
||||||
|
|
||||||
pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, Error> {
|
pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, Error> {
|
||||||
let file = match tokio::fs::File::open(path.clone()).await {
|
let file = match tokio::fs::File::open(path.clone()).await {
|
||||||
Ok(file) => file,
|
Ok(file) => file,
|
||||||
@ -27,3 +31,30 @@ pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, E
|
|||||||
.body(body)
|
.body(body)
|
||||||
.unwrap())
|
.unwrap())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Returns the list of content of the given path
|
||||||
|
pub fn list_dir_content<R: Read + Seek>(
|
||||||
|
reader: &mut CatalogReader<R>,
|
||||||
|
path: &[u8],
|
||||||
|
) -> Result<Vec<ArchiveEntry>, Error> {
|
||||||
|
let dir = reader.lookup_recursive(path)?;
|
||||||
|
let mut res = vec![];
|
||||||
|
let mut path = path.to_vec();
|
||||||
|
if !path.is_empty() && path[0] == b'/' {
|
||||||
|
path.remove(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
for direntry in reader.read_dir(&dir)? {
|
||||||
|
let mut components = path.clone();
|
||||||
|
components.push(b'/');
|
||||||
|
components.extend(&direntry.name);
|
||||||
|
let mut entry = ArchiveEntry::new(&components, &direntry.attr);
|
||||||
|
if let DirEntryAttribute::File { size, mtime } = direntry.attr {
|
||||||
|
entry.size = size.into();
|
||||||
|
entry.mtime = mtime.into();
|
||||||
|
}
|
||||||
|
res.push(entry);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(res)
|
||||||
|
}
|
||||||
|
@ -85,7 +85,7 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
|
|||||||
},
|
},
|
||||||
notify: {
|
notify: {
|
||||||
type: bool,
|
type: bool,
|
||||||
description: r#"Send notification mail about new package updates availanle to the
|
description: r#"Send notification mail about new package updates available to the
|
||||||
email address configured for 'root@pam')."#,
|
email address configured for 'root@pam')."#,
|
||||||
default: false,
|
default: false,
|
||||||
optional: true,
|
optional: true,
|
||||||
|
@ -32,9 +32,6 @@ use crate::api2::types::{NODE_SCHEMA, SUBSCRIPTION_KEY_SCHEMA, Authid};
|
|||||||
pub fn check_subscription(
|
pub fn check_subscription(
|
||||||
force: bool,
|
force: bool,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
// FIXME: drop once proxmox-api-macro is bumped to >> 5.0.0-1
|
|
||||||
let _remove_me = API_METHOD_CHECK_SUBSCRIPTION_PARAM_DEFAULT_FORCE;
|
|
||||||
|
|
||||||
let info = match subscription::read_subscription() {
|
let info = match subscription::read_subscription() {
|
||||||
Err(err) => bail!("could not read subscription status: {}", err),
|
Err(err) => bail!("could not read subscription status: {}", err),
|
||||||
Ok(Some(info)) => info,
|
Ok(Some(info)) => info,
|
||||||
|
@ -7,19 +7,61 @@ use hyper::http::request::Parts;
|
|||||||
use hyper::{Body, Response, Request, StatusCode};
|
use hyper::{Body, Response, Request, StatusCode};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use proxmox::{sortable, identity};
|
use proxmox::{
|
||||||
use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, Permission};
|
http_err,
|
||||||
use proxmox::api::schema::*;
|
sortable,
|
||||||
use proxmox::http_err;
|
identity,
|
||||||
|
list_subdirs_api_method,
|
||||||
|
api::{
|
||||||
|
ApiResponseFuture,
|
||||||
|
ApiHandler,
|
||||||
|
ApiMethod,
|
||||||
|
Router,
|
||||||
|
RpcEnvironment,
|
||||||
|
Permission,
|
||||||
|
router::SubdirMap,
|
||||||
|
schema::{
|
||||||
|
ObjectSchema,
|
||||||
|
BooleanSchema,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
use crate::api2::types::*;
|
use crate::{
|
||||||
use crate::backup::*;
|
api2::{
|
||||||
use crate::server::{WorkerTask, H2Service};
|
helpers,
|
||||||
use crate::tools;
|
types::{
|
||||||
use crate::config::acl::{PRIV_DATASTORE_READ, PRIV_DATASTORE_BACKUP};
|
DATASTORE_SCHEMA,
|
||||||
use crate::config::cached_user_info::CachedUserInfo;
|
BACKUP_TYPE_SCHEMA,
|
||||||
use crate::api2::helpers;
|
BACKUP_TIME_SCHEMA,
|
||||||
use crate::tools::fs::lock_dir_noblock_shared;
|
BACKUP_ID_SCHEMA,
|
||||||
|
CHUNK_DIGEST_SCHEMA,
|
||||||
|
Authid,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
backup::{
|
||||||
|
DataStore,
|
||||||
|
ArchiveType,
|
||||||
|
BackupDir,
|
||||||
|
IndexFile,
|
||||||
|
archive_type,
|
||||||
|
},
|
||||||
|
server::{
|
||||||
|
WorkerTask,
|
||||||
|
H2Service,
|
||||||
|
},
|
||||||
|
tools::{
|
||||||
|
self,
|
||||||
|
fs::lock_dir_noblock_shared,
|
||||||
|
},
|
||||||
|
config::{
|
||||||
|
acl::{
|
||||||
|
PRIV_DATASTORE_READ,
|
||||||
|
PRIV_DATASTORE_BACKUP,
|
||||||
|
},
|
||||||
|
cached_user_info::CachedUserInfo,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
mod environment;
|
mod environment;
|
||||||
use environment::*;
|
use environment::*;
|
||||||
@ -171,8 +213,7 @@ fn upgrade_to_backup_reader_protocol(
|
|||||||
}.boxed()
|
}.boxed()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub const READER_API_ROUTER: Router = Router::new()
|
const READER_API_SUBDIRS: SubdirMap = &[
|
||||||
.subdirs(&[
|
|
||||||
(
|
(
|
||||||
"chunk", &Router::new()
|
"chunk", &Router::new()
|
||||||
.download(&API_METHOD_DOWNLOAD_CHUNK)
|
.download(&API_METHOD_DOWNLOAD_CHUNK)
|
||||||
@ -185,7 +226,11 @@ pub const READER_API_ROUTER: Router = Router::new()
|
|||||||
"speedtest", &Router::new()
|
"speedtest", &Router::new()
|
||||||
.download(&API_METHOD_SPEEDTEST)
|
.download(&API_METHOD_SPEEDTEST)
|
||||||
),
|
),
|
||||||
]);
|
];
|
||||||
|
|
||||||
|
pub const READER_API_ROUTER: Router = Router::new()
|
||||||
|
.get(&list_subdirs_api_method!(READER_API_SUBDIRS))
|
||||||
|
.subdirs(READER_API_SUBDIRS);
|
||||||
|
|
||||||
#[sortable]
|
#[sortable]
|
||||||
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
|
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
|
||||||
|
@ -160,12 +160,11 @@ pub fn datastore_status(
|
|||||||
|
|
||||||
// we skip the calculation for datastores with not enough data
|
// we skip the calculation for datastores with not enough data
|
||||||
if usage_list.len() >= 7 {
|
if usage_list.len() >= 7 {
|
||||||
|
entry["estimated-full-date"] = Value::from(0);
|
||||||
if let Some((a,b)) = linear_regression(&time_list, &usage_list) {
|
if let Some((a,b)) = linear_regression(&time_list, &usage_list) {
|
||||||
if b != 0.0 {
|
if b != 0.0 {
|
||||||
let estimate = (1.0 - a) / b;
|
let estimate = (1.0 - a) / b;
|
||||||
entry["estimated-full-date"] = Value::from(estimate.floor() as u64);
|
entry["estimated-full-date"] = Value::from(estimate.floor() as u64);
|
||||||
} else {
|
|
||||||
entry["estimated-full-date"] = Value::from(0);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,36 +1,58 @@
|
|||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use std::sync::Arc;
|
use std::sync::{Mutex, Arc};
|
||||||
|
|
||||||
use anyhow::{bail, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use proxmox::{
|
use proxmox::{
|
||||||
|
try_block,
|
||||||
api::{
|
api::{
|
||||||
api,
|
api,
|
||||||
RpcEnvironment,
|
RpcEnvironment,
|
||||||
RpcEnvironmentType,
|
RpcEnvironmentType,
|
||||||
Router,
|
Router,
|
||||||
|
Permission,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
task_log,
|
task_log,
|
||||||
|
task_warn,
|
||||||
config::{
|
config::{
|
||||||
self,
|
self,
|
||||||
drive::check_drive_exists,
|
cached_user_info::CachedUserInfo,
|
||||||
|
acl::{
|
||||||
|
PRIV_DATASTORE_READ,
|
||||||
|
PRIV_TAPE_AUDIT,
|
||||||
|
PRIV_TAPE_WRITE,
|
||||||
|
},
|
||||||
|
tape_job::{
|
||||||
|
TapeBackupJobConfig,
|
||||||
|
TapeBackupJobSetup,
|
||||||
|
TapeBackupJobStatus,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
server::{
|
||||||
|
lookup_user_email,
|
||||||
|
TapeBackupJobSummary,
|
||||||
|
jobstate::{
|
||||||
|
Job,
|
||||||
|
JobState,
|
||||||
|
compute_schedule_status,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
backup::{
|
backup::{
|
||||||
DataStore,
|
DataStore,
|
||||||
BackupDir,
|
BackupDir,
|
||||||
BackupInfo,
|
BackupInfo,
|
||||||
|
StoreProgress,
|
||||||
},
|
},
|
||||||
api2::types::{
|
api2::types::{
|
||||||
Authid,
|
Authid,
|
||||||
DATASTORE_SCHEMA,
|
|
||||||
MEDIA_POOL_NAME_SCHEMA,
|
|
||||||
DRIVE_NAME_SCHEMA,
|
|
||||||
UPID_SCHEMA,
|
UPID_SCHEMA,
|
||||||
|
JOB_ID_SCHEMA,
|
||||||
MediaPoolConfig,
|
MediaPoolConfig,
|
||||||
|
Userid,
|
||||||
},
|
},
|
||||||
server::WorkerTask,
|
server::WorkerTask,
|
||||||
task::TaskState,
|
task::TaskState,
|
||||||
@ -40,144 +62,479 @@ use crate::{
|
|||||||
PoolWriter,
|
PoolWriter,
|
||||||
MediaPool,
|
MediaPool,
|
||||||
SnapshotReader,
|
SnapshotReader,
|
||||||
drive::media_changer,
|
drive::{
|
||||||
|
media_changer,
|
||||||
|
lock_tape_device,
|
||||||
|
set_tape_device_state,
|
||||||
|
},
|
||||||
changer::update_changer_online_status,
|
changer::update_changer_online_status,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
const TAPE_BACKUP_JOB_ROUTER: Router = Router::new()
|
||||||
|
.post(&API_METHOD_RUN_TAPE_BACKUP_JOB);
|
||||||
|
|
||||||
|
pub const ROUTER: Router = Router::new()
|
||||||
|
.get(&API_METHOD_LIST_TAPE_BACKUP_JOBS)
|
||||||
|
.post(&API_METHOD_BACKUP)
|
||||||
|
.match_all("id", &TAPE_BACKUP_JOB_ROUTER);
|
||||||
|
|
||||||
|
fn check_backup_permission(
|
||||||
|
auth_id: &Authid,
|
||||||
|
store: &str,
|
||||||
|
pool: &str,
|
||||||
|
drive: &str,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
|
||||||
|
let privs = user_info.lookup_privs(auth_id, &["datastore", store]);
|
||||||
|
if (privs & PRIV_DATASTORE_READ) == 0 {
|
||||||
|
bail!("no permissions on /datastore/{}", store);
|
||||||
|
}
|
||||||
|
|
||||||
|
let privs = user_info.lookup_privs(auth_id, &["tape", "drive", drive]);
|
||||||
|
if (privs & PRIV_TAPE_WRITE) == 0 {
|
||||||
|
bail!("no permissions on /tape/drive/{}", drive);
|
||||||
|
}
|
||||||
|
|
||||||
|
let privs = user_info.lookup_privs(auth_id, &["tape", "pool", pool]);
|
||||||
|
if (privs & PRIV_TAPE_WRITE) == 0 {
|
||||||
|
bail!("no permissions on /tape/pool/{}", pool);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
returns: {
|
||||||
|
description: "List configured thape backup jobs and their status",
|
||||||
|
type: Array,
|
||||||
|
items: { type: TapeBackupJobStatus },
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
description: "List configured tape jobs filtered by Tape.Audit privileges",
|
||||||
|
permission: &Permission::Anybody,
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// List all tape backup jobs
|
||||||
|
pub fn list_tape_backup_jobs(
|
||||||
|
_param: Value,
|
||||||
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<Vec<TapeBackupJobStatus>, Error> {
|
||||||
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
|
||||||
|
let (config, digest) = config::tape_job::config()?;
|
||||||
|
|
||||||
|
let job_list_iter = config
|
||||||
|
.convert_to_typed_array("backup")?
|
||||||
|
.into_iter()
|
||||||
|
.filter(|_job: &TapeBackupJobConfig| {
|
||||||
|
// fixme: check access permission
|
||||||
|
true
|
||||||
|
});
|
||||||
|
|
||||||
|
let mut list = Vec::new();
|
||||||
|
|
||||||
|
for job in job_list_iter {
|
||||||
|
let privs = user_info.lookup_privs(&auth_id, &["tape", "job", &job.id]);
|
||||||
|
if (privs & PRIV_TAPE_AUDIT) == 0 {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
let last_state = JobState::load("tape-backup-job", &job.id)
|
||||||
|
.map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
|
||||||
|
|
||||||
|
let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;
|
||||||
|
|
||||||
|
list.push(TapeBackupJobStatus { config: job, status });
|
||||||
|
}
|
||||||
|
|
||||||
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
|
||||||
|
Ok(list)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn do_tape_backup_job(
|
||||||
|
mut job: Job,
|
||||||
|
setup: TapeBackupJobSetup,
|
||||||
|
auth_id: &Authid,
|
||||||
|
schedule: Option<String>,
|
||||||
|
) -> Result<String, Error> {
|
||||||
|
|
||||||
|
let job_id = format!("{}:{}:{}:{}",
|
||||||
|
setup.store,
|
||||||
|
setup.pool,
|
||||||
|
setup.drive,
|
||||||
|
job.jobname());
|
||||||
|
|
||||||
|
let worker_type = job.jobtype().to_string();
|
||||||
|
|
||||||
|
let datastore = DataStore::lookup_datastore(&setup.store)?;
|
||||||
|
|
||||||
|
let (config, _digest) = config::media_pool::config()?;
|
||||||
|
let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;
|
||||||
|
|
||||||
|
let (drive_config, _digest) = config::drive::config()?;
|
||||||
|
|
||||||
|
// for scheduled jobs we acquire the lock later in the worker
|
||||||
|
let drive_lock = if schedule.is_some() {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
Some(lock_tape_device(&drive_config, &setup.drive)?)
|
||||||
|
};
|
||||||
|
|
||||||
|
let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
|
||||||
|
let email = lookup_user_email(notify_user);
|
||||||
|
|
||||||
|
let upid_str = WorkerTask::new_thread(
|
||||||
|
&worker_type,
|
||||||
|
Some(job_id.clone()),
|
||||||
|
auth_id.clone(),
|
||||||
|
false,
|
||||||
|
move |worker| {
|
||||||
|
job.start(&worker.upid().to_string())?;
|
||||||
|
let mut drive_lock = drive_lock;
|
||||||
|
|
||||||
|
let (job_result, summary) = match try_block!({
|
||||||
|
if schedule.is_some() {
|
||||||
|
// for scheduled tape backup jobs, we wait indefinitely for the lock
|
||||||
|
task_log!(worker, "waiting for drive lock...");
|
||||||
|
loop {
|
||||||
|
if let Ok(lock) = lock_tape_device(&drive_config, &setup.drive) {
|
||||||
|
drive_lock = Some(lock);
|
||||||
|
break;
|
||||||
|
} // ignore errors
|
||||||
|
|
||||||
|
worker.check_abort()?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
|
||||||
|
|
||||||
|
task_log!(worker,"Starting tape backup job '{}'", job_id);
|
||||||
|
if let Some(event_str) = schedule {
|
||||||
|
task_log!(worker,"task triggered by schedule '{}'", event_str);
|
||||||
|
}
|
||||||
|
|
||||||
|
backup_worker(
|
||||||
|
&worker,
|
||||||
|
datastore,
|
||||||
|
&pool_config,
|
||||||
|
&setup,
|
||||||
|
email.clone(),
|
||||||
|
)
|
||||||
|
}) {
|
||||||
|
Ok(summary) => (Ok(()), summary),
|
||||||
|
Err(err) => (Err(err), Default::default()),
|
||||||
|
};
|
||||||
|
|
||||||
|
let status = worker.create_state(&job_result);
|
||||||
|
|
||||||
|
if let Some(email) = email {
|
||||||
|
if let Err(err) = crate::server::send_tape_backup_status(
|
||||||
|
&email,
|
||||||
|
Some(job.jobname()),
|
||||||
|
&setup,
|
||||||
|
&job_result,
|
||||||
|
summary,
|
||||||
|
) {
|
||||||
|
eprintln!("send tape backup notification failed: {}", err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Err(err) = job.finish(status) {
|
||||||
|
eprintln!(
|
||||||
|
"could not finish job state for {}: {}",
|
||||||
|
job.jobtype().to_string(),
|
||||||
|
err
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Err(err) = set_tape_device_state(&setup.drive, "") {
|
||||||
|
eprintln!(
|
||||||
|
"could not unset drive state for {}: {}",
|
||||||
|
setup.drive,
|
||||||
|
err
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
job_result
|
||||||
|
}
|
||||||
|
)?;
|
||||||
|
|
||||||
|
Ok(upid_str)
|
||||||
|
}
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
input: {
|
input: {
|
||||||
properties: {
|
properties: {
|
||||||
store: {
|
id: {
|
||||||
schema: DATASTORE_SCHEMA,
|
schema: JOB_ID_SCHEMA,
|
||||||
},
|
},
|
||||||
pool: {
|
|
||||||
schema: MEDIA_POOL_NAME_SCHEMA,
|
|
||||||
},
|
},
|
||||||
drive: {
|
|
||||||
schema: DRIVE_NAME_SCHEMA,
|
|
||||||
},
|
},
|
||||||
"eject-media": {
|
access: {
|
||||||
description: "Eject media upon job completion.",
|
// Note: parameters are from job config, so we need to test inside function body
|
||||||
type: bool,
|
description: "The user needs Tape.Write privilege on /tape/pool/{pool} \
|
||||||
optional: true,
|
and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
|
||||||
|
permission: &Permission::Anybody,
|
||||||
},
|
},
|
||||||
"export-media-set": {
|
)]
|
||||||
description: "Export media set upon job completion.",
|
/// Runs a tape backup job manually.
|
||||||
type: bool,
|
pub fn run_tape_backup_job(
|
||||||
optional: true,
|
id: String,
|
||||||
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<String, Error> {
|
||||||
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
|
||||||
|
let (config, _digest) = config::tape_job::config()?;
|
||||||
|
let backup_job: TapeBackupJobConfig = config.lookup("backup", &id)?;
|
||||||
|
|
||||||
|
check_backup_permission(
|
||||||
|
&auth_id,
|
||||||
|
&backup_job.setup.store,
|
||||||
|
&backup_job.setup.pool,
|
||||||
|
&backup_job.setup.drive,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
let job = Job::new("tape-backup-job", &id)?;
|
||||||
|
|
||||||
|
let upid_str = do_tape_backup_job(job, backup_job.setup, &auth_id, None)?;
|
||||||
|
|
||||||
|
Ok(upid_str)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
setup: {
|
||||||
|
type: TapeBackupJobSetup,
|
||||||
|
flatten: true,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
returns: {
|
returns: {
|
||||||
schema: UPID_SCHEMA,
|
schema: UPID_SCHEMA,
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
// Note: parameters are no uri parameter, so we need to test inside function body
|
||||||
|
description: "The user needs Tape.Write privilege on /tape/pool/{pool} \
|
||||||
|
and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
|
||||||
|
permission: &Permission::Anybody,
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Backup datastore to tape media pool
|
/// Backup datastore to tape media pool
|
||||||
pub fn backup(
|
pub fn backup(
|
||||||
store: String,
|
setup: TapeBackupJobSetup,
|
||||||
pool: String,
|
|
||||||
drive: String,
|
|
||||||
eject_media: Option<bool>,
|
|
||||||
export_media_set: Option<bool>,
|
|
||||||
rpcenv: &mut dyn RpcEnvironment,
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<Value, Error> {
|
) -> Result<Value, Error> {
|
||||||
|
|
||||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
|
||||||
let datastore = DataStore::lookup_datastore(&store)?;
|
check_backup_permission(
|
||||||
|
&auth_id,
|
||||||
|
&setup.store,
|
||||||
|
&setup.pool,
|
||||||
|
&setup.drive,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
let datastore = DataStore::lookup_datastore(&setup.store)?;
|
||||||
|
|
||||||
let (config, _digest) = config::media_pool::config()?;
|
let (config, _digest) = config::media_pool::config()?;
|
||||||
let pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;
|
let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;
|
||||||
|
|
||||||
let (drive_config, _digest) = config::drive::config()?;
|
let (drive_config, _digest) = config::drive::config()?;
|
||||||
// early check before starting worker
|
|
||||||
check_drive_exists(&drive_config, &drive)?;
|
// early check/lock before starting worker
|
||||||
|
let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;
|
||||||
|
|
||||||
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
|
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
|
||||||
|
|
||||||
let eject_media = eject_media.unwrap_or(false);
|
let job_id = format!("{}:{}:{}", setup.store, setup.pool, setup.drive);
|
||||||
let export_media_set = export_media_set.unwrap_or(false);
|
|
||||||
|
let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
|
||||||
|
let email = lookup_user_email(notify_user);
|
||||||
|
|
||||||
let upid_str = WorkerTask::new_thread(
|
let upid_str = WorkerTask::new_thread(
|
||||||
"tape-backup",
|
"tape-backup",
|
||||||
Some(store),
|
Some(job_id),
|
||||||
auth_id,
|
auth_id,
|
||||||
to_stdout,
|
to_stdout,
|
||||||
move |worker| {
|
move |worker| {
|
||||||
backup_worker(&worker, datastore, &drive, &pool_config, eject_media, export_media_set)?;
|
let _drive_lock = drive_lock; // keep lock guard
|
||||||
Ok(())
|
set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
|
||||||
|
|
||||||
|
let (job_result, summary) = match backup_worker(
|
||||||
|
&worker,
|
||||||
|
datastore,
|
||||||
|
&pool_config,
|
||||||
|
&setup,
|
||||||
|
email.clone(),
|
||||||
|
) {
|
||||||
|
Ok(summary) => (Ok(()), summary),
|
||||||
|
Err(err) => (Err(err), Default::default()),
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(email) = email {
|
||||||
|
if let Err(err) = crate::server::send_tape_backup_status(
|
||||||
|
&email,
|
||||||
|
None,
|
||||||
|
&setup,
|
||||||
|
&job_result,
|
||||||
|
summary,
|
||||||
|
) {
|
||||||
|
eprintln!("send tape backup notification failed: {}", err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ignore errors
|
||||||
|
let _ = set_tape_device_state(&setup.drive, "");
|
||||||
|
job_result
|
||||||
}
|
}
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
Ok(upid_str.into())
|
Ok(upid_str.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub const ROUTER: Router = Router::new()
|
|
||||||
.post(&API_METHOD_BACKUP);
|
|
||||||
|
|
||||||
|
|
||||||
fn backup_worker(
|
fn backup_worker(
|
||||||
worker: &WorkerTask,
|
worker: &WorkerTask,
|
||||||
datastore: Arc<DataStore>,
|
datastore: Arc<DataStore>,
|
||||||
drive: &str,
|
|
||||||
pool_config: &MediaPoolConfig,
|
pool_config: &MediaPoolConfig,
|
||||||
eject_media: bool,
|
setup: &TapeBackupJobSetup,
|
||||||
export_media_set: bool,
|
email: Option<String>,
|
||||||
) -> Result<(), Error> {
|
) -> Result<TapeBackupJobSummary, Error> {
|
||||||
|
|
||||||
let status_path = Path::new(TAPE_STATUS_DIR);
|
let status_path = Path::new(TAPE_STATUS_DIR);
|
||||||
|
let start = std::time::Instant::now();
|
||||||
let _lock = MediaPool::lock(status_path, &pool_config.name)?;
|
let mut summary: TapeBackupJobSummary = Default::default();
|
||||||
|
|
||||||
task_log!(worker, "update media online status");
|
task_log!(worker, "update media online status");
|
||||||
let has_changer = update_media_online_status(drive)?;
|
let changer_name = update_media_online_status(&setup.drive)?;
|
||||||
|
|
||||||
let use_offline_media = !has_changer;
|
let pool = MediaPool::with_config(status_path, &pool_config, changer_name, false)?;
|
||||||
|
|
||||||
let pool = MediaPool::with_config(status_path, &pool_config, use_offline_media)?;
|
let mut pool_writer = PoolWriter::new(pool, &setup.drive, worker, email)?;
|
||||||
|
|
||||||
let mut pool_writer = PoolWriter::new(pool, drive)?;
|
|
||||||
|
|
||||||
let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;
|
let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;
|
||||||
|
|
||||||
group_list.sort_unstable();
|
group_list.sort_unstable();
|
||||||
|
|
||||||
for group in group_list {
|
let group_count = group_list.len();
|
||||||
|
task_log!(worker, "found {} groups", group_count);
|
||||||
|
|
||||||
|
let mut progress = StoreProgress::new(group_count as u64);
|
||||||
|
|
||||||
|
let latest_only = setup.latest_only.unwrap_or(false);
|
||||||
|
|
||||||
|
if latest_only {
|
||||||
|
task_log!(worker, "latest-only: true (only considering latest snapshots)");
|
||||||
|
}
|
||||||
|
|
||||||
|
let datastore_name = datastore.name();
|
||||||
|
|
||||||
|
let mut errors = false;
|
||||||
|
|
||||||
|
let mut need_catalog = false; // avoid writing catalog for empty jobs
|
||||||
|
|
||||||
|
for (group_number, group) in group_list.into_iter().enumerate() {
|
||||||
|
progress.done_groups = group_number as u64;
|
||||||
|
progress.done_snapshots = 0;
|
||||||
|
progress.group_snapshots = 0;
|
||||||
|
|
||||||
let mut snapshot_list = group.list_backups(&datastore.base_path())?;
|
let mut snapshot_list = group.list_backups(&datastore.base_path())?;
|
||||||
|
|
||||||
BackupInfo::sort_list(&mut snapshot_list, true); // oldest first
|
BackupInfo::sort_list(&mut snapshot_list, true); // oldest first
|
||||||
|
|
||||||
for info in snapshot_list {
|
if latest_only {
|
||||||
if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
|
progress.group_snapshots = 1;
|
||||||
|
if let Some(info) = snapshot_list.pop() {
|
||||||
|
if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
|
||||||
|
task_log!(worker, "skip snapshot {}", info.backup_dir);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
task_log!(worker, "backup snapshot {}", info.backup_dir);
|
|
||||||
backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)?;
|
need_catalog = true;
|
||||||
|
|
||||||
|
let snapshot_name = info.backup_dir.to_string();
|
||||||
|
if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
|
||||||
|
errors = true;
|
||||||
|
} else {
|
||||||
|
summary.snapshot_list.push(snapshot_name);
|
||||||
|
}
|
||||||
|
progress.done_snapshots = 1;
|
||||||
|
task_log!(
|
||||||
|
worker,
|
||||||
|
"percentage done: {}",
|
||||||
|
progress
|
||||||
|
);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
progress.group_snapshots = snapshot_list.len() as u64;
|
||||||
|
for (snapshot_number, info) in snapshot_list.into_iter().enumerate() {
|
||||||
|
if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
|
||||||
|
task_log!(worker, "skip snapshot {}", info.backup_dir);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
need_catalog = true;
|
||||||
|
|
||||||
|
let snapshot_name = info.backup_dir.to_string();
|
||||||
|
if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
|
||||||
|
errors = true;
|
||||||
|
} else {
|
||||||
|
summary.snapshot_list.push(snapshot_name);
|
||||||
|
}
|
||||||
|
progress.done_snapshots = snapshot_number as u64 + 1;
|
||||||
|
task_log!(
|
||||||
|
worker,
|
||||||
|
"percentage done: {}",
|
||||||
|
progress
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pool_writer.commit()?;
|
pool_writer.commit()?;
|
||||||
|
|
||||||
if export_media_set {
|
if need_catalog {
|
||||||
|
task_log!(worker, "append media catalog");
|
||||||
|
|
||||||
|
let uuid = pool_writer.load_writable_media(worker)?;
|
||||||
|
let done = pool_writer.append_catalog_archive(worker)?;
|
||||||
|
if !done {
|
||||||
|
task_log!(worker, "catalog does not fit on tape, writing to next volume");
|
||||||
|
pool_writer.set_media_status_full(&uuid)?;
|
||||||
|
pool_writer.load_writable_media(worker)?;
|
||||||
|
let done = pool_writer.append_catalog_archive(worker)?;
|
||||||
|
if !done {
|
||||||
|
bail!("write_catalog_archive failed on second media");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if setup.export_media_set.unwrap_or(false) {
|
||||||
pool_writer.export_media_set(worker)?;
|
pool_writer.export_media_set(worker)?;
|
||||||
} else if eject_media {
|
} else if setup.eject_media.unwrap_or(false) {
|
||||||
pool_writer.eject_media(worker)?;
|
pool_writer.eject_media(worker)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
if errors {
|
||||||
|
bail!("Tape backup finished with some errors. Please check the task log.");
|
||||||
|
}
|
||||||
|
|
||||||
|
summary.duration = start.elapsed();
|
||||||
|
|
||||||
|
Ok(summary)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Try to update the the media online status
|
// Try to update the the media online status
|
||||||
fn update_media_online_status(drive: &str) -> Result<bool, Error> {
|
fn update_media_online_status(drive: &str) -> Result<Option<String>, Error> {
|
||||||
|
|
||||||
let (config, _digest) = config::drive::config()?;
|
let (config, _digest) = config::drive::config()?;
|
||||||
|
|
||||||
let mut has_changer = false;
|
|
||||||
|
|
||||||
if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {
|
if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {
|
||||||
|
|
||||||
has_changer = true;
|
|
||||||
|
|
||||||
let label_text_list = changer.online_media_label_texts()?;
|
let label_text_list = changer.online_media_label_texts()?;
|
||||||
|
|
||||||
let status_path = Path::new(TAPE_STATUS_DIR);
|
let status_path = Path::new(TAPE_STATUS_DIR);
|
||||||
@ -189,9 +546,11 @@ fn update_media_online_status(drive: &str) -> Result<bool, Error> {
|
|||||||
&changer_name,
|
&changer_name,
|
||||||
&label_text_list,
|
&label_text_list,
|
||||||
)?;
|
)?;
|
||||||
}
|
|
||||||
|
|
||||||
Ok(has_changer)
|
Ok(Some(changer_name))
|
||||||
|
} else {
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn backup_snapshot(
|
pub fn backup_snapshot(
|
||||||
@ -199,39 +558,61 @@ pub fn backup_snapshot(
|
|||||||
pool_writer: &mut PoolWriter,
|
pool_writer: &mut PoolWriter,
|
||||||
datastore: Arc<DataStore>,
|
datastore: Arc<DataStore>,
|
||||||
snapshot: BackupDir,
|
snapshot: BackupDir,
|
||||||
) -> Result<(), Error> {
|
) -> Result<bool, Error> {
|
||||||
|
|
||||||
task_log!(worker, "start backup {}:{}", datastore.name(), snapshot);
|
task_log!(worker, "backup snapshot {}", snapshot);
|
||||||
|
|
||||||
let snapshot_reader = SnapshotReader::new(datastore.clone(), snapshot.clone())?;
|
let snapshot_reader = match SnapshotReader::new(datastore.clone(), snapshot.clone()) {
|
||||||
|
Ok(reader) => reader,
|
||||||
|
Err(err) => {
|
||||||
|
// ignore missing snapshots and continue
|
||||||
|
task_warn!(worker, "failed opening snapshot '{}': {}", snapshot, err);
|
||||||
|
return Ok(false);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
let mut chunk_iter = snapshot_reader.chunk_iterator()?.peekable();
|
let snapshot_reader = Arc::new(Mutex::new(snapshot_reader));
|
||||||
|
|
||||||
|
let (reader_thread, chunk_iter) = pool_writer.spawn_chunk_reader_thread(
|
||||||
|
datastore.clone(),
|
||||||
|
snapshot_reader.clone(),
|
||||||
|
)?;
|
||||||
|
|
||||||
|
let mut chunk_iter = chunk_iter.peekable();
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
worker.check_abort()?;
|
worker.check_abort()?;
|
||||||
|
|
||||||
// test is we have remaining chunks
|
// test is we have remaining chunks
|
||||||
if chunk_iter.peek().is_none() {
|
match chunk_iter.peek() {
|
||||||
break;
|
None => break,
|
||||||
|
Some(Ok(_)) => { /* Ok */ },
|
||||||
|
Some(Err(err)) => bail!("{}", err),
|
||||||
}
|
}
|
||||||
|
|
||||||
let uuid = pool_writer.load_writable_media(worker)?;
|
let uuid = pool_writer.load_writable_media(worker)?;
|
||||||
|
|
||||||
worker.check_abort()?;
|
worker.check_abort()?;
|
||||||
|
|
||||||
let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &datastore, &mut chunk_iter)?;
|
let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &mut chunk_iter, datastore.name())?;
|
||||||
|
|
||||||
if leom {
|
if leom {
|
||||||
pool_writer.set_media_status_full(&uuid)?;
|
pool_writer.set_media_status_full(&uuid)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if let Err(_) = reader_thread.join() {
|
||||||
|
bail!("chunk reader thread failed");
|
||||||
|
}
|
||||||
|
|
||||||
worker.check_abort()?;
|
worker.check_abort()?;
|
||||||
|
|
||||||
let uuid = pool_writer.load_writable_media(worker)?;
|
let uuid = pool_writer.load_writable_media(worker)?;
|
||||||
|
|
||||||
worker.check_abort()?;
|
worker.check_abort()?;
|
||||||
|
|
||||||
|
let snapshot_reader = snapshot_reader.lock().unwrap();
|
||||||
|
|
||||||
let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;
|
let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;
|
||||||
|
|
||||||
if !done {
|
if !done {
|
||||||
@ -250,5 +631,5 @@ pub fn backup_snapshot(
|
|||||||
|
|
||||||
task_log!(worker, "end backup {}:{}", datastore.name(), snapshot);
|
task_log!(worker, "end backup {}:{}", datastore.name(), snapshot);
|
||||||
|
|
||||||
Ok(())
|
Ok(true)
|
||||||
}
|
}
|
||||||
|
@ -1,16 +1,26 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
|
|
||||||
use anyhow::Error;
|
use anyhow::Error;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use proxmox::api::{api, Router, SubdirMap};
|
use proxmox::api::{api, Router, SubdirMap, RpcEnvironment, Permission};
|
||||||
use proxmox::list_subdirs_api_method;
|
use proxmox::list_subdirs_api_method;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
config,
|
config::{
|
||||||
|
self,
|
||||||
|
cached_user_info::CachedUserInfo,
|
||||||
|
acl::{
|
||||||
|
PRIV_TAPE_AUDIT,
|
||||||
|
PRIV_TAPE_READ,
|
||||||
|
},
|
||||||
|
},
|
||||||
api2::types::{
|
api2::types::{
|
||||||
|
Authid,
|
||||||
CHANGER_NAME_SCHEMA,
|
CHANGER_NAME_SCHEMA,
|
||||||
ChangerListEntry,
|
ChangerListEntry,
|
||||||
|
LinuxTapeDrive,
|
||||||
MtxEntryKind,
|
MtxEntryKind,
|
||||||
MtxStatusEntry,
|
MtxStatusEntry,
|
||||||
ScsiTapeChanger,
|
ScsiTapeChanger,
|
||||||
@ -25,6 +35,7 @@ use crate::{
|
|||||||
ScsiMediaChange,
|
ScsiMediaChange,
|
||||||
mtx_status_to_online_set,
|
mtx_status_to_online_set,
|
||||||
},
|
},
|
||||||
|
drive::get_tape_device_state,
|
||||||
lookup_device_identification,
|
lookup_device_identification,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
@ -36,6 +47,11 @@ use crate::{
|
|||||||
name: {
|
name: {
|
||||||
schema: CHANGER_NAME_SCHEMA,
|
schema: CHANGER_NAME_SCHEMA,
|
||||||
},
|
},
|
||||||
|
cache: {
|
||||||
|
description: "Use cached value.",
|
||||||
|
optional: true,
|
||||||
|
default: true,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
returns: {
|
returns: {
|
||||||
@ -45,16 +61,22 @@ use crate::{
|
|||||||
type: MtxStatusEntry,
|
type: MtxStatusEntry,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_AUDIT, false),
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Get tape changer status
|
/// Get tape changer status
|
||||||
pub async fn get_status(name: String) -> Result<Vec<MtxStatusEntry>, Error> {
|
pub async fn get_status(
|
||||||
|
name: String,
|
||||||
|
cache: bool,
|
||||||
|
) -> Result<Vec<MtxStatusEntry>, Error> {
|
||||||
|
|
||||||
let (config, _digest) = config::drive::config()?;
|
let (config, _digest) = config::drive::config()?;
|
||||||
|
|
||||||
let mut changer_config: ScsiTapeChanger = config.lookup("changer", &name)?;
|
let mut changer_config: ScsiTapeChanger = config.lookup("changer", &name)?;
|
||||||
|
|
||||||
let status = tokio::task::spawn_blocking(move || {
|
let status = tokio::task::spawn_blocking(move || {
|
||||||
changer_config.status()
|
changer_config.status(cache)
|
||||||
}).await??;
|
}).await??;
|
||||||
|
|
||||||
let state_path = Path::new(TAPE_STATUS_DIR);
|
let state_path = Path::new(TAPE_STATUS_DIR);
|
||||||
@ -66,9 +88,26 @@ pub async fn get_status(name: String) -> Result<Vec<MtxStatusEntry>, Error> {
|
|||||||
|
|
||||||
inventory.update_online_status(&map)?;
|
inventory.update_online_status(&map)?;
|
||||||
|
|
||||||
|
let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
|
||||||
|
let mut drive_map: HashMap<u64, String> = HashMap::new();
|
||||||
|
|
||||||
|
for drive in drive_list {
|
||||||
|
if let Some(changer) = drive.changer {
|
||||||
|
if changer != name {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
let num = drive.changer_drivenum.unwrap_or(0);
|
||||||
|
drive_map.insert(num, drive.name.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
let mut list = Vec::new();
|
let mut list = Vec::new();
|
||||||
|
|
||||||
for (id, drive_status) in status.drives.iter().enumerate() {
|
for (id, drive_status) in status.drives.iter().enumerate() {
|
||||||
|
let mut state = None;
|
||||||
|
if let Some(drive) = drive_map.get(&(id as u64)) {
|
||||||
|
state = get_tape_device_state(&config, &drive)?;
|
||||||
|
}
|
||||||
let entry = MtxStatusEntry {
|
let entry = MtxStatusEntry {
|
||||||
entry_kind: MtxEntryKind::Drive,
|
entry_kind: MtxEntryKind::Drive,
|
||||||
entry_id: id as u64,
|
entry_id: id as u64,
|
||||||
@ -78,6 +117,7 @@ pub async fn get_status(name: String) -> Result<Vec<MtxStatusEntry>, Error> {
|
|||||||
ElementStatus::VolumeTag(tag) => Some(tag.to_string()),
|
ElementStatus::VolumeTag(tag) => Some(tag.to_string()),
|
||||||
},
|
},
|
||||||
loaded_slot: drive_status.loaded_slot,
|
loaded_slot: drive_status.loaded_slot,
|
||||||
|
state,
|
||||||
};
|
};
|
||||||
list.push(entry);
|
list.push(entry);
|
||||||
}
|
}
|
||||||
@ -96,6 +136,7 @@ pub async fn get_status(name: String) -> Result<Vec<MtxStatusEntry>, Error> {
|
|||||||
ElementStatus::VolumeTag(tag) => Some(tag.to_string()),
|
ElementStatus::VolumeTag(tag) => Some(tag.to_string()),
|
||||||
},
|
},
|
||||||
loaded_slot: None,
|
loaded_slot: None,
|
||||||
|
state: None,
|
||||||
};
|
};
|
||||||
list.push(entry);
|
list.push(entry);
|
||||||
}
|
}
|
||||||
@ -119,6 +160,9 @@ pub async fn get_status(name: String) -> Result<Vec<MtxStatusEntry>, Error> {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_READ, false),
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Transfers media from one slot to another
|
/// Transfers media from one slot to another
|
||||||
pub async fn transfer(
|
pub async fn transfer(
|
||||||
@ -132,7 +176,8 @@ pub async fn transfer(
|
|||||||
let mut changer_config: ScsiTapeChanger = config.lookup("changer", &name)?;
|
let mut changer_config: ScsiTapeChanger = config.lookup("changer", &name)?;
|
||||||
|
|
||||||
tokio::task::spawn_blocking(move || {
|
tokio::task::spawn_blocking(move || {
|
||||||
changer_config.transfer(from, to)
|
changer_config.transfer(from, to)?;
|
||||||
|
Ok(())
|
||||||
}).await?
|
}).await?
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -147,11 +192,18 @@ pub async fn transfer(
|
|||||||
type: ChangerListEntry,
|
type: ChangerListEntry,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
description: "List configured tape changer filtered by Tape.Audit privileges",
|
||||||
|
permission: &Permission::Anybody,
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// List changers
|
/// List changers
|
||||||
pub fn list_changers(
|
pub fn list_changers(
|
||||||
_param: Value,
|
_param: Value,
|
||||||
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<Vec<ChangerListEntry>, Error> {
|
) -> Result<Vec<ChangerListEntry>, Error> {
|
||||||
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
|
||||||
let (config, _digest) = config::drive::config()?;
|
let (config, _digest) = config::drive::config()?;
|
||||||
|
|
||||||
@ -162,6 +214,11 @@ pub fn list_changers(
|
|||||||
let mut list = Vec::new();
|
let mut list = Vec::new();
|
||||||
|
|
||||||
for changer in changer_list {
|
for changer in changer_list {
|
||||||
|
let privs = user_info.lookup_privs(&auth_id, &["tape", "changer", &changer.name]);
|
||||||
|
if (privs & PRIV_TAPE_AUDIT) == 0 {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
let info = lookup_device_identification(&linux_changers, &changer.path);
|
let info = lookup_device_identification(&linux_changers, &changer.path);
|
||||||
let entry = ChangerListEntry { config: changer, info };
|
let entry = ChangerListEntry { config: changer, info };
|
||||||
list.push(entry);
|
list.push(entry);
|
||||||
|
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user