Compare commits
359 Commits
SHA1
----
355a41a763
5bd4825432
8f7e5b028a
2a29d9a1ee
e056966bc7
ef0ea4ba05
2892624783
2c10410b0d
d1d74c4367
8b7f3b8f1d
3f6c2efb8d
227f36497a
5ef4c7bcd3
70d00e0149
dcf155dac9
3c5b523631
6396bace3d
713a128adf
affc224aca
6f82d32977
2a06e08618
1057b1f5a5
af76234112
1825c1a9b7
9a8bf2cac9
cc5ef79bec
3725d95c65
4fb068019e
6446a078a0
1d7fcbece8
8703a68a31
9bcdade85f
b0156179b9
d0a0bad9d6
a4003d9078
3f4a62de2f
bf23f63aa5
fd641b99c3
225affc9ca
9ce2481a69
d95c74c6e7
218ee3269f
5ca5f8daf3
98cdee781a
9cf4504909
5f846a3fc1
c9793d47f9
be8adca115
9152a0077f
0b90c67fb4
b4975d3102
ee33795b72
90e16be3ae
cf90a369e2
6b303323be
1576c7a0d9
cd5d6103ea
207f763d1a
1bed3aedc8
ab77d660cc
b74a1daae9
bec357e2cb
78593b5b5c
7d6f03a7fe
f46573f8c3
b83e136fb6
5c4203b20c
7f9eef1d47
a8a0132766
831c43c91b
b452e2df74
7f37cacfac
3bb7e62e88
3b060167f6
8a76e71129
396fd747a6
16bd08b297
ccdf327ac8
8cd63df0dc
b90cb34fd6
d6c1e12c06
d33d1c880b
985e84e369
cc2c5c7762
40bf636b47
347cde827b
ac4a1fb35c
6f3714b9aa
d810014eeb
e0f6892625
9d5b426a6d
8bf5769382
2970cd3d6d
d41114c5a8
6c92449702
db04d10d14
5a4233f07b
3c715edd07
bbe05d7fe9
2af8b8ef91
d4bfdfe749
1d14c31658
9bd81bb384
d64226efee
2440eaa2df
e8bf4f31f2
6682461d88
41f1132e0e
d938c9337a
9896a75caf
7eefd0c3d7
2e268e311c
3e182fd828
7b60850334
1552d9699c
7507b19cd2
16f9ea6708
d984a9acf0
955f4aefcd
858bbfbbd1
c1570b373f
d336363771
e57aa36d3e
b488f850aa
ec07a280ba
5006632550
7eb9f48485
31cba7098d
f4571b0b50
3832911d50
28c86760da
c4604ca468
464c409aa3
08ec39be0c
25350f3370
0023cfa385
ed24142767
917230e4f8
05228f17f5
e8653b96be
1cf191c597
3d3e31b7f8
8730cfcc3e
5830e5620d
46d53e3e90
3554fe6480
0dadf66dc7
a941bbd0c9
21e3ed3449
81678129fb
52d8db7925
875d375d7a
cba167b874
e68c0e68bd
ff2bc2d21f
4961404c7c
3fbf2311e4
41685061f7
35a7ab5778
e1beaae4a2
965bd58693
00fdaaf12b
60473d234a
4f688e09a4
24e84128e4
e63457b6b2
a83cedc2ac
076afa6197
423e3cbd18
0263396187
043018cfbe
2037d9af03
7f07991035
18ce01caff
5bc8e80a99
6252df4c18
451856d21d
aa30663ca5
8616a4afe5
bc2358319b
0bf4b81370
c9dd5a2452
cf95f616c5
1adbc7c13c
9d28974c27
3dbc35b5c1
fee0fe5422
86d9f4e733
3f16f1b006
cbd9899389
cd44fb8d84
aca4c2b5a9
85eedfb78b
f26276bc4e
6d62e69f9a
4188fd59a0
5b9f575648
0d890ec414
926d05ef0b
8be48ddfc7
41e66bfaf6
47a7241410
54c77b3d62
a1c5575308
a44c934b5d
546d2653ee
33c06b3388
1917ea3ce1
70842b9ef2
e6122a657e
9e860ac01a
7690a8e7bd
1860208560
1689296d46
7aa4851b77
6ef8e2902f
aa16b7b284
9bbd83b1f2
65535670f9
9d42fe4d3b
918a367258
970a70b41e
4094fe5a31
dea8e2cb54
0514a4308f
d0647e5a02
bbe06f97be
f1a83e9759
38a81c6b46
6afb60abf5
a42212fc1e
2e21948156
5279ee745f
227501c063
89d25b1931
b57c0dbe30
8b910bb6bc
dfde34e612
2530811e22
85205bc253
3cdd1a3424
002865405c
8a73ef897a
be61c56c21
dbaef7d04d
2048073355
a585e1f696
415737b2b8
54f7007cc5
b0338178d7
159100b944
41a8db3576
fe291ab794
adb65b9889
8513626b9f
7ca0ba4515
42200c405a
be327dbccd
c724dc3892
70dc2ff3ab
81f5d03e8d
e50c6b94c1
28eaff20bd
8d1a1b2976
92eaec53db
b3c2c57897
f458e97fda
80bf9ae99b
bebd4a7ca4
9468e94412
6b66c8507f
167e5406c3
c111c9a931
bb71e3a023
7b1bf4c098
32b88d928a
f8e1932337
7c9fb570cc
56d22c66c0
85cdc4f371
96bcfb9b1f
4a874665eb
6f6b69946e
5b7f44555e
2ca396c015
d8dae16035
8f02db04f9
9f35e44681
6279b8f5a5
3084232cb5
67cc79ec52
b9a09a9501
4a0d3a3e3f
2322a980d0
c19f5b85a3
7f9d8438ab
51c80c5a52
6477ebcf6f
bc02c2789c
c0b3d09236
3ddbab6193
befd95a90a
ab6cd4229b
9213744ecb
41c0333814
afcf8b3ed6
69ebbec40b
b22a9c14a4
54067d8225
d64c4eeab0
15d2c7786e
73a1da5ed6
fbf8779388
3231c35fb8
ced7838de4
2f26b8668a
9432838914
1a89a7794e
c0a87c12fb
c6a7ea0a2f
5bb057e5a2
2924b37d6d
42c0f784e2
05f17d1ec4
777690a121
a98e228766
4c9174ce26
1d70e3812c
e2225aa882
99dd709f3e
f197c286d5
b121711baa
085655b21b
4c209d6b10
8dc45e291a
ec1ae7e631
25aa55b5f5
b5c6088130
a65eb0ec29
42eef1451c
11ecf058e4
5f1f7ef564
2e4e698633
02dce8cad0
8aa4842fa8
efc09f63cc
3253d8a2e4
1531185dd0
baf9c3704e
cdf39e62b3
b81e37f6ab
ddebbb52fd
983e929e25
f47e035721
a80d72f999
8de9a9917f
fa016c1697
7d2c156eb1
04cec92e8d
.gitignore (vendored, 10 changes)

@@ -1,6 +1,16 @@
local.mak
/target
**/*.rs.bk
*~
*.backup
*.backup[0-9]
*.backup[0-9][0-9]
*.old
*.old[0-9]
*.old[0-9][0-9]
*.5
*.7
__pycache__/
/etc/proxmox-backup.service
/etc/proxmox-backup-proxy.service
build/

Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "proxmox-backup"
-version = "1.0.7"
version = "1.0.10"
authors = [
    "Dietmar Maurer <dietmar@proxmox.com>",
    "Dominik Csapak <d.csapak@proxmox.com>",

@@ -48,11 +48,11 @@ percent-encoding = "2.1"
pin-utils = "0.1.0"
pin-project = "1.0"
pathpatterns = "0.1.2"
-proxmox = { version = "0.10.1", features = [ "sortable-macro", "api-macro", "websocket" ] }
proxmox = { version = "0.11.0", features = [ "sortable-macro", "api-macro", "websocket" ] }
#proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
proxmox-fuse = "0.1.1"
-pxar = { version = "0.8.0", features = [ "tokio-io" ] }
pxar = { version = "0.9.0", features = [ "tokio-io" ] }
#pxar = { path = "../pxar", features = [ "tokio-io" ] }
regex = "1.2"
rustyline = "7"

Makefile (4 changes)

@@ -10,7 +10,9 @@ SUBDIRS := etc www docs
USR_BIN := \
	proxmox-backup-client \
	pxar \
-	pmtx
	proxmox-tape \
	pmtx \
	pmt

# Binaries usable by admins
USR_SBIN := \

debian/changelog (vendored, 64 changes)

@@ -1,3 +1,67 @@
rust-proxmox-backup (1.0.10-1) unstable; urgency=medium

  * tape: improve MediaPool allocation by sorting tapes by creation time and
    label text

  * api: tape backup: continue on vanishing snapshots, as a prune during long
    running tape backup jobs is OK

  * tape: fix scsi volume_statistics and cartridge_memory for quantum drives

  * typo fixes all over the place

  * d/postinst: restart, not reload, when updating from a too old version

 -- Proxmox Support Team <support@proxmox.com>  Thu, 11 Mar 2021 08:24:31 +0100

rust-proxmox-backup (1.0.9-1) unstable; urgency=medium

  * client: track key source, print when used

  * fix #3026: pxar: metadata: apply flags _after_ updating mtime

  * docs: add acl.cfg, datastore.cfg, remote.cfg, sync.cfg, user.cfg and
    verification.cfg manual pages

  * docs: add API viewer

  * proxmox-backup-manager: add verify-job command group with various sub
    commands

  * add experimental opt-in tape backup support

  * lto-barcode: fix page offset calibration

  * lto-barcode: fix avery 3420 paper format properties

  * asyncify pxar create archive

  * client: raise HTTP_TIMEOUT for simple requests to 120s

  * docs: depend on mathjax library package from debian instead of CDN

  * fix #3321: docs: client: fix interactive restore command explanation

  * ui: use shorter datetime format for encryption key creation time

  * docs: TFA: improve language

  * config/TFA: webauthn: disallow registering the same token more than once,
    as that can lead to buggy behavior in some token/browser combinations.

 -- Proxmox Support Team <support@proxmox.com>  Mon, 08 Mar 2021 15:54:47 +0100

rust-proxmox-backup (1.0.8-1) unstable; urgency=medium

  * HTTPS connector: use hostname instead of URL again to avoid certificate
    verification issues.

  * ui: task summary: add verification jobs to count

  * docs: explain some technical details about datastores/chunks

 -- Proxmox Support Team <support@proxmox.com>  Thu, 04 Feb 2021 12:39:49 +0100

rust-proxmox-backup (1.0.7-1) unstable; urgency=medium

  * fix #3197: skip fingerprint check when restoring key

debian/control (vendored, 13 changes)

@@ -36,13 +36,13 @@ Build-Depends: debhelper (>= 11),
 librust-percent-encoding-2+default-dev (>= 2.1-~~),
 librust-pin-project-1+default-dev,
 librust-pin-utils-0.1+default-dev,
- librust-proxmox-0.10+api-macro-dev (>= 0.10.1-~~),
- librust-proxmox-0.10+default-dev (>= 0.10.1-~~),
- librust-proxmox-0.10+sortable-macro-dev (>= 0.10.1-~~),
- librust-proxmox-0.10+websocket-dev (>= 0.10.1-~~),
 librust-proxmox-0.11+api-macro-dev,
 librust-proxmox-0.11+default-dev,
 librust-proxmox-0.11+sortable-macro-dev,
 librust-proxmox-0.11+websocket-dev,
 librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
- librust-pxar-0.8+default-dev,
- librust-pxar-0.8+tokio-io-dev,
 librust-pxar-0.9+default-dev,
 librust-pxar-0.9+tokio-io-dev,
 librust-regex-1+default-dev (>= 1.2-~~),
 librust-rustyline-7+default-dev,
 librust-serde-1+default-dev,

@@ -141,6 +141,7 @@ Package: proxmox-backup-docs
Build-Profiles: <!nodoc>
Section: doc
Depends: libjs-extjs,
         libjs-mathjax,
         ${misc:Depends},
Architecture: all
Description: Proxmox Backup Documentation

debian/control.in (vendored, 1 change)

@@ -38,6 +38,7 @@ Package: proxmox-backup-docs
Build-Profiles: <!nodoc>
Section: doc
Depends: libjs-extjs,
         libjs-mathjax,
         ${misc:Depends},
Architecture: all
Description: Proxmox Backup Documentation

debian/copyright (vendored, 2 changes)

@@ -1,4 +1,4 @@
-Copyright (C) 2019 Proxmox Server Solutions GmbH
Copyright (C) 2019 - 2021 Proxmox Server Solutions GmbH

This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>

debian/pmt.bc (vendored, new file, 3 lines)

@@ -0,0 +1,3 @@
# pmt bash completion

complete -C 'pmt bashcomplete' pmt

debian/postinst (vendored, 27 changes)

@@ -6,13 +6,21 @@ set -e

case "$1" in
    configure)
-       # need to have user backup in the tapoe group
        # need to have user backup in the tape group
        usermod -a -G tape backup

        # modeled after dh_systemd_start output
        systemctl --system daemon-reload >/dev/null || true
        if [ -n "$2" ]; then
-           _dh_action=try-reload-or-restart
            if dpkg --compare-versions "$2" 'lt' '1.0.7-1'; then
                # there was an issue with reloading and systemd being confused in older daemon versions
                # so restart instead of reload if upgrading from there, see commit 0ec79339f7aebf9
                # FIXME: remove with PBS 2.1
                echo "Upgrading from older proxmox-backup-server: restart (not reload) daemons"
                _dh_action=try-restart
            else
                _dh_action=try-reload-or-restart
            fi
        else
            _dh_action=start
        fi

@@ -40,11 +48,16 @@ case "$1" in
                    /etc/proxmox-backup/remote.cfg || true
                fi
            fi
        fi
-       # FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
-       if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
-           echo "Fixing up termproxy user id in task log..."
-           flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active || true
        # FIXME: remove with 2.0
        if [ -d "/var/lib/proxmox-backup/tape" ] &&
            [ "$(stat --printf '%a' '/var/lib/proxmox-backup/tape')" != "750" ]; then
            chmod 0750 /var/lib/proxmox-backup/tape || true
        fi
        # FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
        if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
            echo "Fixing up termproxy user id in task log..."
            flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active || true
        fi
    fi
    ;;

debian/proxmox-backup-docs.links (vendored, 2 changes)

@@ -1,3 +1,5 @@
/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/html/proxmox-backup.pdf
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/prune-simulator/extjs
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/lto-barcode/extjs
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/api-viewer/extjs
/usr/share/javascript/mathjax /usr/share/doc/proxmox-backup/html/_static/mathjax

debian/proxmox-backup-server.bash-completion (vendored, 2 changes)

@@ -1,2 +1,4 @@
debian/proxmox-backup-manager.bc proxmox-backup-manager
debian/proxmox-tape.bc proxmox-tape
debian/pmtx.bc pmtx
debian/pmt.bc pmt

debian/proxmox-backup-server.install (vendored, 15 changes)

@@ -11,12 +11,27 @@ usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-daily-update
usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd
usr/sbin/proxmox-backup-manager
usr/bin/pmtx
usr/bin/pmt
usr/bin/proxmox-tape
usr/share/javascript/proxmox-backup/index.hbs
usr/share/javascript/proxmox-backup/css/ext6-pbs.css
usr/share/javascript/proxmox-backup/images
usr/share/javascript/proxmox-backup/js/proxmox-backup-gui.js
usr/share/man/man1/proxmox-backup-manager.1
usr/share/man/man1/proxmox-backup-proxy.1
usr/share/man/man1/proxmox-tape.1
usr/share/man/man1/pmtx.1
usr/share/man/man1/pmt.1
usr/share/man/man5/acl.cfg.5
usr/share/man/man5/datastore.cfg.5
usr/share/man/man5/user.cfg.5
usr/share/man/man5/remote.cfg.5
usr/share/man/man5/sync.cfg.5
usr/share/man/man5/verification.cfg.5
usr/share/man/man5/media-pool.cfg.5
usr/share/man/man5/tape.cfg.5
usr/share/man/man5/tape-job.cfg.5
usr/share/zsh/vendor-completions/_proxmox-backup-manager
usr/share/zsh/vendor-completions/_proxmox-tape
usr/share/zsh/vendor-completions/_pmtx
usr/share/zsh/vendor-completions/_pmt

debian/proxmox-tape.bc (vendored, new file, 3 lines)

@@ -0,0 +1,3 @@
# proxmox-tape bash completion

complete -C 'proxmox-tape bashcomplete' proxmox-tape

defines.mk

@@ -5,6 +5,7 @@ LIBDIR = $(PREFIX)/lib
LIBEXECDIR = $(LIBDIR)
DATAROOTDIR = $(PREFIX)/share
MAN1DIR = $(PREFIX)/share/man/man1
MAN5DIR = $(PREFIX)/share/man/man5
DOCDIR = $(PREFIX)/share/doc/proxmox-backup
JSDIR = $(DATAROOTDIR)/javascript/proxmox-backup
SYSCONFDIR = /etc

docs/Makefile (126 changes)

@@ -1,21 +1,43 @@
include ../defines.mk

GENERATED_SYNOPSIS := \
	proxmox-tape/synopsis.rst \
	proxmox-backup-client/synopsis.rst \
	proxmox-backup-client/catalog-shell-synopsis.rst \
	proxmox-backup-manager/synopsis.rst \
	pxar/synopsis.rst \
	pmtx/synopsis.rst \
-	backup-protocol-api.rst \
-	reader-protocol-api.rst
	pmt/synopsis.rst \
	config/media-pool/config.rst \
	config/tape/config.rst \
	config/tape-job/config.rst \
	config/user/config.rst \
	config/remote/config.rst \
	config/sync/config.rst \
	config/verification/config.rst \
	config/acl/roles.rst \
	config/datastore/config.rst

-MANUAL_PAGES := \
MAN1_PAGES := \
	pxar.1 \
	pmtx.1 \
	pmt.1 \
	proxmox-tape.1 \
	proxmox-backup-proxy.1 \
	proxmox-backup-client.1 \
	proxmox-backup-manager.1

MAN5_PAGES := \
	media-pool.cfg.5 \
	tape.cfg.5 \
	tape-job.cfg.5 \
	acl.cfg.5 \
	user.cfg.5 \
	remote.cfg.5 \
	sync.cfg.5 \
	verification.cfg.5 \
	datastore.cfg.5

PRUNE_SIMULATOR_FILES := \
	prune-simulator/index.html \
	prune-simulator/documentation.html \

@@ -35,6 +57,10 @@ LTO_BARCODE_FILES := \
	lto-barcode/label-setup.js \
	lto-barcode/lto-barcode.js

API_VIEWER_SOURCES= \
	api-viewer/index.html \
	api-viewer/apidoc.js

# Sphinx documentation setup
SPHINXOPTS =
SPHINXBUILD = sphinx-build

@@ -51,15 +77,7 @@ endif
# Sphinx internal variables.
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(SPHINXOPTS) .

-all: ${MANUAL_PAGES}
-
-# Extract backup protocol docs
-backup-protocol-api.rst: ${COMPILEDIR}/dump-backup-api
-	${COMPILEDIR}/dump-backup-api >$@
-
-# Extract reader protocol docs
-reader-protocol-api.rst: ${COMPILEDIR}/dump-reader-api
-	${COMPILEDIR}/dump-backup-api >$@
all: ${MAN1_PAGES} ${MAN5_PAGES}

# Build manual pages using rst2man

@@ -77,6 +95,72 @@ pmtx.1: pmtx/man1.rst pmtx/description.rst pmtx/synopsis.rst
	rst2man $< >$@

pmt/synopsis.rst: ${COMPILEDIR}/pmt
	${COMPILEDIR}/pmt printdoc > pmt/synopsis.rst

pmt.1: pmt/man1.rst pmt/description.rst pmt/options.rst pmt/synopsis.rst
	rst2man $< >$@

config/datastore/config.rst: ${COMPILEDIR}/docgen
	${COMPILEDIR}/docgen datastore.cfg >$@

datastore.cfg.5: config/datastore/man5.rst config/datastore/config.rst config/datastore/format.rst
	rst2man $< >$@

config/user/config.rst: ${COMPILEDIR}/docgen
	${COMPILEDIR}/docgen user.cfg >$@

user.cfg.5: config/user/man5.rst config/user/config.rst config/user/format.rst
	rst2man $< >$@

config/remote/config.rst: ${COMPILEDIR}/docgen
	${COMPILEDIR}/docgen remote.cfg >$@

remote.cfg.5: config/remote/man5.rst config/remote/config.rst config/remote/format.rst
	rst2man $< >$@

config/sync/config.rst: ${COMPILEDIR}/docgen
	${COMPILEDIR}/docgen sync.cfg >$@

sync.cfg.5: config/sync/man5.rst config/sync/config.rst config/sync/format.rst
	rst2man $< >$@

config/verification/config.rst: ${COMPILEDIR}/docgen
	${COMPILEDIR}/docgen verification.cfg >$@

verification.cfg.5: config/verification/man5.rst config/verification/config.rst config/verification/format.rst
	rst2man $< >$@

config/acl/roles.rst: ${COMPILEDIR}/docgen
	${COMPILEDIR}/docgen "config::acl::Role" >$@

acl.cfg.5: config/acl/man5.rst config/acl/roles.rst config/acl/format.rst
	rst2man $< >$@

config/media-pool/config.rst: ${COMPILEDIR}/docgen
	${COMPILEDIR}/docgen media-pool.cfg >$@

media-pool.cfg.5: config/media-pool/man5.rst config/media-pool/config.rst config/media-pool/format.rst
	rst2man $< >$@

config/tape/config.rst: ${COMPILEDIR}/docgen
	${COMPILEDIR}/docgen tape.cfg >$@

tape.cfg.5: config/tape/man5.rst config/tape/config.rst config/tape/format.rst
	rst2man $< >$@

config/tape-job/config.rst: ${COMPILEDIR}/docgen
	${COMPILEDIR}/docgen tape-job.cfg >$@

tape-job.cfg.5: config/tape-job/man5.rst config/tape-job/config.rst config/tape-job/format.rst
	rst2man $< >$@

proxmox-tape/synopsis.rst: ${COMPILEDIR}/proxmox-tape
	${COMPILEDIR}/proxmox-tape printdoc > proxmox-tape/synopsis.rst

proxmox-tape.1: proxmox-tape/man1.rst proxmox-tape/description.rst proxmox-tape/synopsis.rst
	rst2man $< >$@

proxmox-backup-client/synopsis.rst: ${COMPILEDIR}/proxmox-backup-client
	${COMPILEDIR}/proxmox-backup-client printdoc > proxmox-backup-client/synopsis.rst

@@ -101,14 +185,22 @@ onlinehelpinfo:
	$(SPHINXBUILD) -b proxmox-scanrefs $(ALLSPHINXOPTS) $(BUILDDIR)/scanrefs
	@echo "Build finished. OnlineHelpInfo.js is in $(BUILDDIR)/scanrefs."

api-viewer/apidata.js: ${COMPILEDIR}/docgen
	${COMPILEDIR}/docgen apidata.js >$@

api-viewer/apidoc.js: api-viewer/apidata.js api-viewer/PBSAPI.js
	cat api-viewer/apidata.js api-viewer/PBSAPI.js >$@

.PHONY: html
-html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES}
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES} ${API_VIEWER_SOURCES}
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	install -m 0644 custom.js custom.css images/proxmox-logo.svg $(BUILDDIR)/html/_static/
	install -dm 0755 $(BUILDDIR)/html/prune-simulator
	install -m 0644 ${PRUNE_SIMULATOR_FILES} $(BUILDDIR)/html/prune-simulator
	install -dm 0755 $(BUILDDIR)/html/lto-barcode
	install -m 0644 ${LTO_BARCODE_FILES} $(BUILDDIR)/html/lto-barcode
	install -dm 0755 $(BUILDDIR)/html/api-viewer
	install -m 0644 ${API_VIEWER_SOURCES} $(BUILDDIR)/html/api-viewer
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

@@ -127,12 +219,14 @@ epub3: ${GENERATED_SYNOPSIS}
	@echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3."

clean:
-	rm -r -f *~ *.1 ${BUILDDIR} ${GENERATED_SYNOPSIS}
	rm -r -f *~ *.1 ${BUILDDIR} ${GENERATED_SYNOPSIS} api-viewer/apidata.js


-install_manual_pages: ${MANUAL_PAGES}
install_manual_pages: ${MAN1_PAGES} ${MAN5_PAGES}
	install -dm755 $(DESTDIR)$(MAN1DIR)
-	for i in ${MANUAL_PAGES}; do install -m755 $$i $(DESTDIR)$(MAN1DIR)/ ; done
	for i in ${MAN1_PAGES}; do install -m755 $$i $(DESTDIR)$(MAN1DIR)/ ; done
	install -dm755 $(DESTDIR)$(MAN5DIR)
	for i in ${MAN5_PAGES}; do install -m755 $$i $(DESTDIR)$(MAN5DIR)/ ; done

install_html: html
	install -dm755 $(DESTDIR)$(DOCDIR)

@@ -90,7 +90,18 @@ class ReflabelMapper(Builder):
            if hasattr(node, 'expect_referenced_by_id') and len(node['ids']) > 1: # explicit labels
                filename = self.env.doc2path(docname)
                filename_html = re.sub('.rst', '.html', filename)
-               labelid = node['ids'][1] # [0] is predefined by sphinx, we need [1] for explicit ones

                # node['ids'][0] contains a normalized version of the
                # headline. If the ref and headline are the same
                # (normalized) sphinx will set the node['ids'][1] to a
                # generic id in the format `idX` where X is numeric. If the
                # ref and headline are not the same, the ref name will be
                # stored in node['ids'][1]
                if re.match('^id[0-9]*$', node['ids'][1]):
                    labelid = node['ids'][0]
                else:
                    labelid = node['ids'][1]

                title = cast(nodes.title, node[0])
                logger.info('traversing section {}'.format(title.astext()))
                ref_name = getattr(title, 'rawsource', title.astext())

docs/api-viewer/PBSAPI.js (new file, 511 lines)

@@ -0,0 +1,511 @@
// avoid errors when running without development tools
if (!Ext.isDefined(Ext.global.console)) {
    var console = {
        dir: function() {},
        log: function() {}
    };
}

Ext.onReady(function() {

    Ext.define('pve-param-schema', {
        extend: 'Ext.data.Model',
        fields: [
            'name', 'type', 'typetext', 'description', 'verbose_description',
            'enum', 'minimum', 'maximum', 'minLength', 'maxLength',
            'pattern', 'title', 'requires', 'format', 'default',
            'disallow', 'extends', 'links',
            {
                name: 'optional',
                type: 'boolean'
            }
        ]
    });

    var store = Ext.define('pve-updated-treestore', {
        extend: 'Ext.data.TreeStore',
        model: Ext.define('pve-api-doc', {
            extend: 'Ext.data.Model',
            fields: [
                'path', 'info', 'text',
            ]
        }),
        proxy: {
            type: 'memory',
            data: pbsapi
        },
        sorters: [{
            property: 'leaf',
            direction: 'ASC'
        }, {
            property: 'text',
            direction: 'ASC'
        }],
        filterer: 'bottomup',
        doFilter: function(node) {
            this.filterNodes(node, this.getFilters().getFilterFn(), true);
        },

        filterNodes: function(node, filterFn, parentVisible) {
            var me = this,
                bottomUpFiltering = me.filterer === 'bottomup',
                match = filterFn(node) && parentVisible || (node.isRoot() && !me.getRootVisible()),
                childNodes = node.childNodes,
                len = childNodes && childNodes.length, i, matchingChildren;

            if (len) {
                for (i = 0; i < len; ++i) {
                    matchingChildren = me.filterNodes(childNodes[i], filterFn, match || bottomUpFiltering) || matchingChildren;
                }
                if (bottomUpFiltering) {
                    match = matchingChildren || match;
                }
            }

            node.set("visible", match, me._silentOptions);
            return match;
        },

    }).create();

    var render_description = function(value, metaData, record) {
        var pdef = record.data;

        value = pdef.verbose_description || value;

        // TODO: try to render asciidoc correctly

        metaData.style = 'white-space:pre-wrap;'

        return Ext.htmlEncode(value);
    };

    var render_type = function(value, metaData, record) {
        var pdef = record.data;

        return pdef['enum'] ? 'enum' : (pdef.type || 'string');
    };

    var render_format = function(value, metaData, record) {
        var pdef = record.data;

        metaData.style = 'white-space:normal;'

        if (pdef.typetext)
            return Ext.htmlEncode(pdef.typetext);

        if (pdef['enum'])
            return pdef['enum'].join(' | ');

        if (pdef.format)
            return pdef.format;

        if (pdef.pattern)
            return Ext.htmlEncode(pdef.pattern);

        return '';
    };

    var real_path = function(path) {
        return path.replace(/^.*\/_upgrade_(\/)?/, "/");
    };

    var permission_text = function(permission) {
        let permhtml = "";

        if (permission.user) {
            if (!permission.description) {
                if (permission.user === 'world') {
                    permhtml += "Accessible without any authentication.";
                } else if (permission.user === 'all') {
                    permhtml += "Accessible by all authenticated users.";
                } else {
                    permhtml += 'Only accessible by user "' +
                        permission.user + '"';
                }
            }
        } else if (permission.check) {
            permhtml += "<pre>Check: " +
                Ext.htmlEncode(Ext.JSON.encode(permission.check)) + "</pre>";
        } else if (permission.userParam) {
            permhtml += `<div>Check if user matches parameter '${permission.userParam}'`;
        } else if (permission.or) {
            permhtml += "<div>Or<div style='padding-left: 10px;'>";
            Ext.Array.each(permission.or, function(sub_permission) {
                permhtml += permission_text(sub_permission);
            })
            permhtml += "</div></div>";
        } else if (permission.and) {
            permhtml += "<div>And<div style='padding-left: 10px;'>";
            Ext.Array.each(permission.and, function(sub_permission) {
                permhtml += permission_text(sub_permission);
            })
            permhtml += "</div></div>";
        } else {
            //console.log(permission);
            permhtml += "Unknown syntax!";
        }

        return permhtml;
    };

    var render_docu = function(data) {
        var md = data.info;

        // console.dir(data);

        var items = [];

        var clicmdhash = {
            GET: 'get',
            POST: 'create',
            PUT: 'set',
            DELETE: 'delete'
        };

        Ext.Array.each(['GET', 'POST', 'PUT', 'DELETE'], function(method) {
            var info = md[method];
            if (info) {

                var usage = "";

                usage += "<table><tr><td>HTTP: </td><td>"
                    + method + " " + real_path("/api2/json" + data.path) + "</td></tr>";

                var sections = [
                    {
                        title: 'Description',
                        html: Ext.htmlEncode(info.description),
                        bodyPadding: 10
                    },
                    {
                        title: 'Usage',
                        html: usage,
                        bodyPadding: 10
                    }
                ];

                if (info.parameters && info.parameters.properties) {

                    var pstore = Ext.create('Ext.data.Store', {
                        model: 'pve-param-schema',
                        proxy: {
                            type: 'memory'
                        },
                        groupField: 'optional',
                        sorters: [
                            {
                                property: 'name',
                                direction: 'ASC'
                            }
                        ]
                    });

                    Ext.Object.each(info.parameters.properties, function(name, pdef) {
                        pdef.name = name;
                        pstore.add(pdef);
                    });

                    pstore.sort();

                    var groupingFeature = Ext.create('Ext.grid.feature.Grouping', {
                        enableGroupingMenu: false,
                        groupHeaderTpl: '<tpl if="groupValue">Optional</tpl><tpl if="!groupValue">Required</tpl>'
                    });

                    sections.push({
                        xtype: 'gridpanel',
                        title: 'Parameters',
                        features: [groupingFeature],
                        store: pstore,
                        viewConfig: {
                            trackOver: false,
                            stripeRows: true
                        },
                        columns: [
                            {
                                header: 'Name',
                                dataIndex: 'name',
                                flex: 1
                            },
                            {
                                header: 'Type',
                                dataIndex: 'type',
                                renderer: render_type,
                                flex: 1
                            },
                            {
                                header: 'Default',
                                dataIndex: 'default',
                                flex: 1
                            },
                            {
                                header: 'Format',
                                dataIndex: 'type',
                                renderer: render_format,
                                flex: 2
                            },
                            {
                                header: 'Description',
                                dataIndex: 'description',
                                renderer: render_description,
                                flex: 6
                            }
                        ]
                    });

                }

                if (info.returns) {

                    var retinf = info.returns;
                    var rtype = retinf.type;
                    if (!rtype && retinf.items)
                        rtype = 'array';
                    if (!rtype)
                        rtype = 'object';

                    var rpstore = Ext.create('Ext.data.Store', {
                        model: 'pve-param-schema',
                        proxy: {
                            type: 'memory'
                        },
                        groupField: 'optional',
                        sorters: [
                            {
                                property: 'name',
                                direction: 'ASC'
                            }
                        ]
                    });

                    var properties;
                    if (rtype === 'array' && retinf.items.properties) {
                        properties = retinf.items.properties;
                    }

                    if (rtype === 'object' && retinf.properties) {
                        properties = retinf.properties;
                    }

                    Ext.Object.each(properties, function(name, pdef) {
                        pdef.name = name;
                        rpstore.add(pdef);
                    });

                    rpstore.sort();

                    var groupingFeature = Ext.create('Ext.grid.feature.Grouping', {
                        enableGroupingMenu: false,
                        groupHeaderTpl: '<tpl if="groupValue">Optional</tpl><tpl if="!groupValue">Obligatory</tpl>'
                    });
                    var returnhtml;
                    if (retinf.items) {
                        returnhtml = '<pre>items: ' + Ext.htmlEncode(JSON.stringify(retinf.items, null, 4)) + '</pre>';
                    }

                    if (retinf.properties) {
                        returnhtml = returnhtml || '';
                        returnhtml += '<pre>properties:' + Ext.htmlEncode(JSON.stringify(retinf.properties, null, 4)) + '</pre>';
                    }

                    var rawSection = Ext.create('Ext.panel.Panel', {
                        bodyPadding: '0px 10px 10px 10px',
                        html: returnhtml,
                        hidden: true
                    });

                    sections.push({
                        xtype: 'gridpanel',
                        title: 'Returns: ' + rtype,
                        features: [groupingFeature],
                        store: rpstore,
                        viewConfig: {
                            trackOver: false,
                            stripeRows: true
                        },
                        columns: [
                            {
                                header: 'Name',
                                dataIndex: 'name',
                                flex: 1
                            },
                            {
                                header: 'Type',
                                dataIndex: 'type',
                                renderer: render_type,
                                flex: 1
                            },
                            {
                                header: 'Default',
                                dataIndex: 'default',
                                flex: 1
                            },
                            {
                                header: 'Format',
                                dataIndex: 'type',
                                renderer: render_format,
                                flex: 2
                            },
                            {
                                header: 'Description',
                                dataIndex: 'description',
                                renderer: render_description,
                                flex: 6
                            }
                        ],
                        bbar: [
                            {
                                xtype: 'button',
                                text: 'Show RAW',
                                handler: function(btn) {
                                    rawSection.setVisible(!rawSection.isVisible());
                                    btn.setText(rawSection.isVisible() ? 'Hide RAW' : 'Show RAW');
                                }}
                        ]
                    });

                    sections.push(rawSection);

                }

                if (!data.path.match(/\/_upgrade_/)) {
                    var permhtml = '';

                    if (!info.permissions) {
                        permhtml = "Root only.";
                    } else {
                        if (info.permissions.description) {
                            permhtml += "<div style='white-space:pre-wrap;padding-bottom:10px;'>" +
                                Ext.htmlEncode(info.permissions.description) + "</div>";
                        }
                        permhtml += permission_text(info.permissions);
                    }

                    // we do not have this information for PBS api
                    //if (!info.allowtoken) {
                    //    permhtml += "<br />This API endpoint is not available for API tokens."
                    //}

                    sections.push({
                        title: 'Required permissions',
                        bodyPadding: 10,
                        html: permhtml
                    });
                }

                items.push({
                    title: method,
                    autoScroll: true,
                    defaults: {
                        border: false
                    },
                    items: sections
                });
            }
        });

        var ct = Ext.getCmp('docview');
        ct.setTitle("Path: " + real_path(data.path));
        ct.removeAll(true);
        ct.add(items);
        ct.setActiveTab(0);
    };

    Ext.define('Ext.form.SearchField', {
        extend: 'Ext.form.field.Text',
        alias: 'widget.searchfield',

        emptyText: 'Search...',

        flex: 1,

        inputType: 'search',
        listeners: {
            'change': function(){

                var value = this.getValue();
                if (!Ext.isEmpty(value)) {
                    store.filter({
                        property: 'path',
                        value: value,
                        anyMatch: true
                    });
                } else {
                    store.clearFilter();
                }
            }
        }
    });

    var tree = Ext.create('Ext.tree.Panel', {
        title: 'Resource Tree',
        tbar: [
            {
                xtype: 'searchfield',
            }
        ],
        tools: [
            {
                type: 'expand',
                tooltip: 'Expand all',
                tooltipType: 'title',
                callback: (tree) => tree.expandAll(),
            },
            {
                type: 'collapse',
                tooltip: 'Collapse all',
                tooltipType: 'title',
                callback: (tree) => tree.collapseAll(),
            },
        ],
        store: store,
        width: 200,
        region: 'west',
        split: true,
        margins: '5 0 5 5',
        rootVisible: false,
        listeners: {
            selectionchange: function(v, selections) {
                if (!selections[0])
                    return;
                var rec = selections[0];
                render_docu(rec.data);
                location.hash = '#' + rec.data.path;
            }
        }
    });

    Ext.create('Ext.container.Viewport', {
        layout: 'border',
        renderTo: Ext.getBody(),
        items: [
            tree,
            {
                xtype: 'tabpanel',
                title: 'Documentation',
                id: 'docview',
                region: 'center',
                margins: '5 5 5 0',
                layout: 'fit',
                items: []
            }
        ]
    });

    var deepLink = function() {
        var path = window.location.hash.substring(1).replace(/\/\s*$/, '')
        var endpoint = store.findNode('path', path);

        if (endpoint) {
            tree.getSelectionModel().select(endpoint);
            tree.expandPath(endpoint.getPath());
            render_docu(endpoint.data);
        }
    }
    window.onhashchange = deepLink;

    deepLink();

});

docs/api-viewer/index.html (new file, 13 lines)

@@ -0,0 +1,13 @@
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no">
<title>Proxmox Backup Server API Documentation</title>

<link rel="stylesheet" type="text/css" href="extjs/theme-crisp/resources/theme-crisp-all.css">
<script type="text/javascript" src="extjs/ext-all.js"></script>
<script type="text/javascript" src="apidoc.js"></script>
</head>
<body></body>
</html>

@@ -60,33 +60,10 @@ Environment Variables
Output Format
-------------

-Most commands support the ``--output-format`` parameter. It accepts
-the following values:
-
-:``text``: Text format (default). Structured data is rendered as a table.
-
-:``json``: JSON (single line).
-
-:``json-pretty``: JSON (multiple lines, nicely formatted).
.. include:: output-format.rst

-Please use the following environment variables to modify output behavior:
-
-``PROXMOX_OUTPUT_FORMAT``
-  Defines the default output format.
-
-``PROXMOX_OUTPUT_NO_BORDER``
-  If set (to any value), do not render table borders.
-
-``PROXMOX_OUTPUT_NO_HEADER``
-  If set (to any value), do not render table headers.
-
-.. note:: The ``text`` format is designed to be human readable, and
-  not meant to be parsed by automation tools. Please use the ``json``
-  format if you need to process the output.

-.. _creating-backups:
.. _client_creating_backups:

Creating Backups
----------------

@@ -246,7 +223,7 @@ Restoring this backup will result in:
. .. file2


-.. _encryption:
.. _client_encryption:

Encryption
----------

@@ -483,16 +460,15 @@ subdirectory and add the corresponding pattern to the list for subsequent restor
all files in the archive matching the patterns to ``/target/path`` on the local
host. This will scan the whole archive.

-With ``restore /target/path`` you can restore the sub-archive given by the current
-working directory to the local target path ``/target/path`` on your host.
-By additionally passing a glob pattern with ``--pattern <glob>``, the restore is
-further limited to files matching the pattern.
-For example:
The ``restore`` command can be used to restore all the files contained within
the backup archive. This is most helpful when paired with the ``--pattern
<glob>`` option, as it allows you to restore all files matching a specific
pattern. For example, if you wanted to restore configuration files
located in ``/etc``, you could do the following:

.. code-block:: console

-  pxar:/ > cd /etc/
-  pxar:/etc/ > restore /target/ --pattern **/*.conf
  pxar:/ > restore target/ --pattern etc/**/*.conf
  ...

The above will scan through all the directories below ``/etc`` and restore all

@@ -657,10 +633,10 @@ shows the list of existing snapshots and what actions prune would take.

.. note:: Neither the ``prune`` command nor the ``forget`` command free space
  in the chunk-store. The chunk-store still contains the data blocks. To free
-  space you need to perform :ref:`garbage-collection`.
  space you need to perform :ref:`client_garbage-collection`.


-.. _garbage-collection:
.. _client_garbage-collection:

Garbage Collection
------------------

@@ -721,32 +697,34 @@ benchmark using the ``benchmark`` subcommand of ``proxmox-backup-client``:

.. code-block:: console

  # proxmox-backup-client benchmark
-  Uploaded 656 chunks in 5 seconds.
-  Time per request: 7659 microseconds.
-  TLS speed: 547.60 MB/s
-  SHA256 speed: 585.76 MB/s
-  Compression speed: 1923.96 MB/s
-  Decompress speed: 7885.24 MB/s
-  AES256/GCM speed: 3974.03 MB/s
  Uploaded 1517 chunks in 5 seconds.
  Time per request: 3309 microseconds.
  TLS speed: 1267.41 MB/s
  SHA256 speed: 2066.73 MB/s
  Compression speed: 775.11 MB/s
  Decompress speed: 1233.35 MB/s
  AES256/GCM speed: 3688.27 MB/s
  Verify speed: 783.43 MB/s
  ┌───────────────────────────────────┬─────────────────────┐
  │ Name                              │ Value               │
  ╞═══════════════════════════════════╪═════════════════════╡
- │ TLS (maximal backup upload speed) │ 547.60 MB/s (93%)   │
  │ TLS (maximal backup upload speed) │ 1267.41 MB/s (103%) │
  ├───────────────────────────────────┼─────────────────────┤
- │ SHA256 checksum computation speed │ 585.76 MB/s (28%)   │
  │ SHA256 checksum computation speed │ 2066.73 MB/s (102%) │
  ├───────────────────────────────────┼─────────────────────┤
- │ ZStd level 1 compression speed    │ 1923.96 MB/s (89%)  │
  │ ZStd level 1 compression speed    │ 775.11 MB/s (103%)  │
  ├───────────────────────────────────┼─────────────────────┤
- │ ZStd level 1 decompression speed  │ 7885.24 MB/s (98%)  │
  │ ZStd level 1 decompression speed  │ 1233.35 MB/s (103%) │
  ├───────────────────────────────────┼─────────────────────┤
- │ AES256 GCM encryption speed       │ 3974.03 MB/s (104%) │
  │ Chunk verification speed          │ 783.43 MB/s (103%)  │
  ├───────────────────────────────────┼─────────────────────┤
  │ AES256 GCM encryption speed       │ 3688.27 MB/s (101%) │
  └───────────────────────────────────┴─────────────────────┘


.. note:: The percentages given in the output table correspond to a
  comparison against a Ryzen 7 2700X. The TLS test connects to the
  local host, so there is no network involved.

You can also pass the ``--output-format`` parameter to output stats in ``json``,
rather than the default table format.
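
For example, a machine-readable run could look like this (the invocation
is illustrative; only the flag itself is documented above):

.. code-block:: console

  # proxmox-backup-client benchmark --output-format json-pretty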

@@ -1,19 +1,140 @@
Backup Protocol
===============

-.. todo:: add introduction to HTTP2 based backup protocols
Proxmox Backup Server uses a REST-based API. While the management
interface uses normal HTTP, the actual backup and restore interfaces
use HTTP/2 for improved performance. Both HTTP and HTTP/2 are well
known standards, so the following section assumes that you are
familiar with how to use them.


Backup Protocol API
-------------------

-.. todo:: describe backup writer protocol
To start a new backup, the API call ``GET /api2/json/backup`` needs to
be upgraded to a HTTP/2 connection using
``proxmox-backup-protocol-v1`` as the protocol name::

-.. include:: backup-protocol-api.rst
  GET /api2/json/backup HTTP/1.1
  UPGRADE: proxmox-backup-protocol-v1

The server replies with the HTTP 101 Switching Protocols status code,
and you can then issue REST commands on that updated HTTP/2 connection.

The backup protocol allows you to upload three different kinds of files:

- Chunks and blobs (binary data)

- Fixed indexes (lists of chunks with a fixed size)

- Dynamic indexes (lists of chunks with variable sizes)

The following section gives a short introduction on how to upload such
files. Please use the `API Viewer <api-viewer/index.html>`_ for
details about the available REST commands.


-Reader Protocol API
--------------------
Upload Blobs
~~~~~~~~~~~~

-.. todo:: describe backup reader protocol
Uploading blobs is done using ``POST /blob``. The HTTP body contains the
data encoded as a :ref:`Data Blob <data-blob-format>`.

-.. include:: reader-protocol-api.rst
The file name needs to end with ``.blob``, and is automatically added
to the backup manifest.


Upload Chunks
~~~~~~~~~~~~~

Chunks belong to an index, so you first need to open an index (see
below). After that, you can upload chunks using ``POST /fixed_chunk``
and ``POST /dynamic_chunk``. The HTTP body contains the chunk data
encoded as a :ref:`Data Blob <data-blob-format>`.


Upload Fixed Indexes
~~~~~~~~~~~~~~~~~~~~

Fixed indexes are used to store VM image data. The VM image is split
into equally sized chunks, which are uploaded individually. The index
file simply contains a list of chunk digests.

You create a fixed index with ``POST /fixed_index``. Then upload
chunks with ``POST /fixed_chunk``, and append them to the index with
``PUT /fixed_index``. When finished, you need to close the index using
``POST /fixed_close``. A condensed sketch of the full sequence follows
the `Finish Backup` section below.

The file name needs to end with ``.fidx``, and is automatically added
to the backup manifest.


Upload Dynamic Indexes
~~~~~~~~~~~~~~~~~~~~~~

Dynamic indexes are used to store file archive data. The archive data
is split into dynamically sized chunks, which are uploaded
individually. The index file simply contains a list of chunk digests
and offsets.

You create a dynamically sized index with ``POST /dynamic_index``. Then
upload chunks with ``POST /dynamic_chunk``, and append them to the index with
``PUT /dynamic_index``. When finished, you need to close the index using
``POST /dynamic_close``.

The file name needs to end with ``.didx``, and is automatically added
to the backup manifest.

Finish Backup
~~~~~~~~~~~~~

Once you have uploaded all data, you need to call ``POST
/finish``. This commits all data and ends the backup protocol.
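
Putting it together, a complete fixed-index session could look roughly
like the following sequence (a sketch only: the archive name is
illustrative, request parameters and bodies are omitted, and only the
endpoints named above are used)::

  GET /api2/json/backup HTTP/1.1
  UPGRADE: proxmox-backup-protocol-v1

  POST /fixed_index    (create disk.img.fidx, an illustrative name)
  POST /fixed_chunk    (repeat for every fixed-size chunk)
  PUT  /fixed_index    (append the digests of the uploaded chunks)
  POST /fixed_close    (close the index)
  POST /finish         (commit the backup and end the protocol)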


Restore/Reader Protocol API
---------------------------

To start a new reader, the API call ``GET /api2/json/reader`` needs to
be upgraded to a HTTP/2 connection using
``proxmox-backup-reader-protocol-v1`` as the protocol name::

  GET /api2/json/reader HTTP/1.1
  UPGRADE: proxmox-backup-reader-protocol-v1

The server replies with the HTTP 101 Switching Protocols status code,
and you can then issue REST commands on that updated HTTP/2 connection.

The reader protocol allows you to download three different kinds of files:

- Chunks and blobs (binary data)

- Fixed indexes (lists of chunks with a fixed size)

- Dynamic indexes (lists of chunks with variable sizes)

The following section gives a short introduction on how to download such
files. Please use the `API Viewer <api-viewer/index.html>`_ for details about
the available REST commands.


Download Blobs
~~~~~~~~~~~~~~

Downloading blobs is done using ``GET /download``. The HTTP body contains the
data encoded as a :ref:`Data Blob <data-blob-format>`.


Download Chunks
~~~~~~~~~~~~~~~

Downloading chunks is done using ``GET /chunk``. The HTTP body contains the
data encoded as a :ref:`Data Blob <data-blob-format>`.


Download Index Files
~~~~~~~~~~~~~~~~~~~~

Downloading index files is done using ``GET /download``. The HTTP body
contains the data encoded as a :ref:`Fixed Index <fixed-index-format>`
or :ref:`Dynamic Index <dynamic-index-format>`.
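
Mirroring the backup flow, a restore session could look like this (again
only a sketch; request parameters are omitted)::

  GET /api2/json/reader HTTP/1.1
  UPGRADE: proxmox-backup-reader-protocol-v1

  GET /download    (fetch an index file or a blob)
  GET /chunk       (fetch the individual chunks listed in the index)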

@@ -1,5 +1,4 @@

-.. _calendar-events:
.. _calendar-event-scheduling:

Calendar Events
===============

docs/conf.py (18 changes)

@@ -74,7 +74,7 @@ rst_epilog = epilog_file.read()

# General information about the project.
project = 'Proxmox Backup'
-copyright = '2019-2020, Proxmox Server Solutions GmbH'
copyright = '2019-2021, Proxmox Server Solutions GmbH'
author = 'Proxmox Support Team'

# The version info for the project you're documenting, acts as replacement for

@@ -107,10 +107,8 @@ today_fmt = '%A, %d %B %Y'
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = [
    '_build', 'Thumbs.db', '.DS_Store',
-    'proxmox-backup-client/man1.rst',
-    'proxmox-backup-manager/man1.rst',
-    'proxmox-backup-proxy/man1.rst',
-    'pxar/man1.rst',
    '*/man1.rst',
    'config/*/man5.rst',
    'epilog.rst',
    'pbs-copyright.rst',
    'local-zfs.rst'

@@ -171,6 +169,7 @@ html_theme_options = {
    'extra_nav_links': {
        'Proxmox Homepage': 'https://proxmox.com',
        'PDF': 'proxmox-backup.pdf',
        'API Viewer' : 'api-viewer/index.html',
        'Prune Simulator' : 'prune-simulator/index.html',
        'LTO Barcode Generator' : 'lto-barcode/index.html',
    },

@@ -246,10 +245,8 @@ html_js_files = [
#
# html_last_updated_fmt = None

-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#
-# html_use_smartypants = True
# We need to disable smartquotes, else Option Lists do not display long options
smartquotes = False

# Additional templates that should be rendered to pages, maps page names to
# template names.

@@ -467,3 +464,6 @@ epub_exclude_files = ['search.html']
# If false, no index is generated.
#
# epub_use_index = True

# use local mathjax package, symlink comes from debian/proxmox-backup-docs.links
mathjax_path = "mathjax/MathJax.js?config=TeX-AMS-MML_HTMLorMML"

docs/config/acl/format.rst (new file, 22 lines)

@@ -0,0 +1,22 @@
This file contains the access control list for the Proxmox Backup
Server API.

Each line starts with ``acl:``, followed by 4 additional values
separated by colons.

:propagate: Propagate permissions down the hierarchy

:path: The object path

:User/Token: List of users and tokens

:Role: List of assigned roles

Here is an example list::

  acl:1:/:root@pam!test:Admin
  acl:1:/datastore/store1:user1@pbs:DatastoreAdmin


You can use the ``proxmox-backup-manager acl`` command to manipulate
this file.
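
For instance, an entry like the ones above could be created from the
command line (a sketch; the exact flags are assumptions based on this
file format, not documented on this page)::

  # proxmox-backup-manager acl update /datastore/store1 DatastoreAdmin --auth-id user1@pbs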

docs/config/acl/man5.rst (new file, 35 lines)

@@ -0,0 +1,35 @@
==========================
acl.cfg
==========================

.. include:: ../../epilog.rst

-------------------------------------------------------------
Access Control Configuration
-------------------------------------------------------------

:Author: |AUTHOR|
:Version: Version |VERSION|
:Manual section: 5

Description
===========

The file /etc/proxmox-backup/acl.cfg is a configuration file for Proxmox
Backup Server. It contains the access control configuration for the API.

File Format
===========

.. include:: format.rst


Roles
=====

The following roles exist:

.. include:: roles.rst


.. include:: ../../pbs-copyright.rst

docs/config/datastore/format.rst (new file, 18 lines)

@@ -0,0 +1,18 @@
The file contains a list of datastore configuration sections. Each
section starts with a header ``datastore: <name>``, followed by the
datastore configuration options.

::

  datastore: <name1>
      path <path1>
      <option1> <value1>
      ...

  datastore: <name2>
      path <path2>
      ...


You can use the ``proxmox-backup-manager datastore`` command to manipulate
this file.
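
For example, a new datastore section could be created like this (name
and path are illustrative)::

  # proxmox-backup-manager datastore create store1 /mnt/datastore/store1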

docs/config/datastore/man5.rst (new file, 33 lines)

@@ -0,0 +1,33 @@
==========================
datastore.cfg
==========================

.. include:: ../../epilog.rst

-------------------------------------------------------------
Datastore Configuration
-------------------------------------------------------------

:Author: |AUTHOR|
:Version: Version |VERSION|
:Manual section: 5

Description
===========

The file /etc/proxmox-backup/datastore.cfg is a configuration file for Proxmox
Backup Server. It contains the datastore configuration.

File Format
===========

.. include:: format.rst


Options
=======

.. include:: config.rst


.. include:: ../../pbs-copyright.rst

docs/config/media-pool/format.rst (new file, 13 lines)

@@ -0,0 +1,13 @@
Each entry starts with a header ``pool: <name>``, followed by the
media pool configuration options.

::

  pool: company1
      allocation always
      retention overwrite

  pool: ...


You can use the ``proxmox-tape pool`` command to manipulate this file.
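
For example, the pool shown above could be created like this (the option
flags mirror the configuration keys and are otherwise assumptions)::

  # proxmox-tape pool create company1 --allocation always --retention overwrite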
|
35
docs/config/media-pool/man5.rst
Normal file
35
docs/config/media-pool/man5.rst
Normal file
@ -0,0 +1,35 @@
|
||||
==========================
|
||||
media-pool.cfg
|
||||
==========================
|
||||
|
||||
.. include:: ../../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Media Pool Configuration
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 5
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
The file /etc/proxmox-backup/media-pool.cfg is a configuration file
|
||||
for Proxmox Backup Server. It contains the media pool configuration
|
||||
for tape backups.
|
||||
|
||||
|
||||
File Format
|
||||
===========
|
||||
|
||||
.. include:: format.rst
|
||||
|
||||
|
||||
Options
|
||||
=======
|
||||
|
||||
.. include:: config.rst
|
||||
|
||||
|
||||
.. include:: ../../pbs-copyright.rst
|
17
docs/config/remote/format.rst
Normal file
17
docs/config/remote/format.rst
Normal file
@ -0,0 +1,17 @@
|
||||
This file contains information used to access remote servers.
|
||||
|
||||
Each entry starts with a header ``remote: <name>``, followed by the
|
||||
remote configuration options.
|
||||
|
||||
::
|
||||
|
||||
remote: server1
|
||||
host server1.local
|
||||
auth-id sync@pbs
|
||||
...
|
||||
|
||||
remote: ...
|
||||
|
||||
|
||||
You can use the ``proxmox-backup-manager remote`` command to manipulate
|
||||
this file.
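
For example (the host, auth-id and password values are placeholders):

.. code-block:: console

   # proxmox-backup-manager remote create server1 --host server1.local \
     --auth-id 'sync@pbs' --password 'SECRET'
   # proxmox-backup-manager remote list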
|
35
docs/config/remote/man5.rst
Normal file
35
docs/config/remote/man5.rst
Normal file
@ -0,0 +1,35 @@
|
||||
==========================
|
||||
remote.cfg
|
||||
==========================
|
||||
|
||||
.. include:: ../../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Remote Server Configuration
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 5
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
The file /etc/proxmox-backup/remote.cfg is a configuration file for
|
||||
Proxmox Backup Server. It contains information about remote servers,
|
||||
usable for synchronization jobs.
|
||||
|
||||
|
||||
File Format
|
||||
===========
|
||||
|
||||
.. include:: format.rst
|
||||
|
||||
|
||||
Options
|
||||
=======
|
||||
|
||||
.. include:: config.rst
|
||||
|
||||
|
||||
.. include:: ../../pbs-copyright.rst
|
15
docs/config/sync/format.rst
Normal file
15
docs/config/sync/format.rst
Normal file
@ -0,0 +1,15 @@
|
||||
Each entry starts with a header ``sync: <name>``, followed by the
|
||||
job configuration options.
|
||||
|
||||
::
|
||||
|
||||
sync: job1
|
||||
store store1
|
||||
remote-store store1
|
||||
remote lina
|
||||
|
||||
sync: ...
|
||||
|
||||
|
||||
You can use the ``proxmox-backup-manager sync-job`` command to manipulate
|
||||
this file.
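
For example, a job matching the configuration above could be created like this (the schedule is a placeholder):

.. code-block:: console

   # proxmox-backup-manager sync-job create job1 --remote lina \
     --remote-store store1 --store store1 --schedule 'Wed 02:30'
   # proxmox-backup-manager sync-job list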
|
35
docs/config/sync/man5.rst
Normal file
35
docs/config/sync/man5.rst
Normal file
@ -0,0 +1,35 @@
|
||||
==========================
|
||||
sync.cfg
|
||||
==========================
|
||||
|
||||
.. include:: ../../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Synchronization Job Configuration
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 5
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
The file /etc/proxmox-backup/sync.cfg is a configuration file for
|
||||
Proxmox Backup Server. It contains the synchronization job
|
||||
configuration.
|
||||
|
||||
|
||||
File Format
|
||||
===========
|
||||
|
||||
.. include:: format.rst
|
||||
|
||||
|
||||
Options
|
||||
=======
|
||||
|
||||
.. include:: config.rst
|
||||
|
||||
|
||||
.. include:: ../../pbs-copyright.rst
|
16
docs/config/tape-job/format.rst
Normal file
16
docs/config/tape-job/format.rst
Normal file
@ -0,0 +1,16 @@
|
||||
Each entry starts with a header ``backup: <name>``, followed by the
|
||||
job configuration options.
|
||||
|
||||
::
|
||||
|
||||
backup: job1
|
||||
drive hh8
|
||||
pool p4
|
||||
store store3
|
||||
schedule daily
|
||||
|
||||
backup: ...
|
||||
|
||||
|
||||
You can use the ``proxmox-tape backup-job`` command to manipulate
|
||||
this file.
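
For example, a job matching the configuration above could be created like this:

.. code-block:: console

   # proxmox-tape backup-job create job1 --store store3 \
     --pool p4 --drive hh8 --schedule daily
   # proxmox-tape backup-job list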
|
34
docs/config/tape-job/man5.rst
Normal file
34
docs/config/tape-job/man5.rst
Normal file
@ -0,0 +1,34 @@
|
||||
==========================
|
||||
tape-job.cfg
|
||||
==========================
|
||||
|
||||
.. include:: ../../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Tape Job Configuration
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 5
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
The file ``/etc/proxmox-backup/tape-job.cfg`` is a configuration file for
|
||||
Proxmox Backup Server. It contains the tape job configuration.
|
||||
|
||||
|
||||
File Format
|
||||
===========
|
||||
|
||||
.. include:: format.rst
|
||||
|
||||
|
||||
Options
|
||||
=======
|
||||
|
||||
.. include:: config.rst
|
||||
|
||||
|
||||
.. include:: ../../pbs-copyright.rst
|
22
docs/config/tape/format.rst
Normal file
22
docs/config/tape/format.rst
Normal file
@ -0,0 +1,22 @@
|
||||
Each drive configuration section starts with a header ``linux: <name>``,
|
||||
followed by the drive configuration options.
|
||||
|
||||
Tape changer configuration sections start with a header ``changer: <name>``,
|
||||
followed by the changer configuration options.
|
||||
|
||||
::
|
||||
|
||||
linux: hh8
|
||||
changer sl3
|
||||
path /dev/tape/by-id/scsi-10WT065325-nst
|
||||
|
||||
changer: sl3
|
||||
export-slots 14,15,16
|
||||
path /dev/tape/by-id/scsi-CJ0JBE0059
|
||||
|
||||
|
||||
You can use the ``proxmox-tape drive`` and ``proxmox-tape changer``
|
||||
commands to manipulate this file.
|
||||
|
||||
.. NOTE:: The ``virtual:`` drive type is experimental and only used
|
||||
for debugging.
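
For example, to inspect the currently configured drives and changers:

.. code-block:: console

   # proxmox-tape drive list
   # proxmox-tape changer list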
|
33
docs/config/tape/man5.rst
Normal file
33
docs/config/tape/man5.rst
Normal file
@ -0,0 +1,33 @@
|
||||
==========================
|
||||
tape.cfg
|
||||
==========================
|
||||
|
||||
.. include:: ../../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Tape Drive and Changer Configuration
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 5
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
The file /etc/proxmox-backup/tape.cfg is a configuration file for Proxmox
|
||||
Backup Server. It contains the tape drive and changer configuration.
|
||||
|
||||
File Format
|
||||
===========
|
||||
|
||||
.. include:: format.rst
|
||||
|
||||
|
||||
Options
|
||||
=======
|
||||
|
||||
.. include:: config.rst
|
||||
|
||||
|
||||
.. include:: ../../pbs-copyright.rst
|
28
docs/config/user/format.rst
Normal file
28
docs/config/user/format.rst
Normal file
@ -0,0 +1,28 @@
|
||||
This file contains the list of API users and API tokens.
|
||||
|
||||
Each user configuration section starts with a header ``user: <name>``,
|
||||
followed by the user configuration options.
|
||||
|
||||
API token configuration starts with a header ``token:
|
||||
<userid!token_name>``, followed by the token configuration. The data
|
||||
used to authenticate tokens is stored in a separate file
|
||||
(``token.shadow``).
|
||||
|
||||
|
||||
::
|
||||
|
||||
user: root@pam
|
||||
comment Superuser
|
||||
email test@example.local
|
||||
...
|
||||
|
||||
token: root@pam!token1
|
||||
comment API test token
|
||||
enable true
|
||||
expire 0
|
||||
|
||||
user: ...
|
||||
|
||||
|
||||
You can use the ``proxmox-backup-manager user`` command to manipulate
|
||||
this file.
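
For example, to create a user and an API token for it (the user ID, e-mail address and token name are placeholders):

.. code-block:: console

   # proxmox-backup-manager user create john@pbs --email john@example.com
   # proxmox-backup-manager user generate-token john@pbs token1
   # proxmox-backup-manager user list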
|
33
docs/config/user/man5.rst
Normal file
33
docs/config/user/man5.rst
Normal file
@ -0,0 +1,33 @@
|
||||
==========================
|
||||
user.cfg
|
||||
==========================
|
||||
|
||||
.. include:: ../../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
User Configuration
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 5
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
The file /etc/proxmox-backup/user.cfg is a configuration file for Proxmox
|
||||
Backup Server. It contains the user configuration.
|
||||
|
||||
File Format
|
||||
===========
|
||||
|
||||
.. include:: format.rst
|
||||
|
||||
|
||||
Options
|
||||
=======
|
||||
|
||||
.. include:: config.rst
|
||||
|
||||
|
||||
.. include:: ../../pbs-copyright.rst
|
16
docs/config/verification/format.rst
Normal file
16
docs/config/verification/format.rst
Normal file
@ -0,0 +1,16 @@
|
||||
Each entry starts with a header ``verification: <name>``, followed by the
|
||||
job configuration options.
|
||||
|
||||
::
|
||||
|
||||
verification: verify-store2
|
||||
ignore-verified true
|
||||
outdated-after 7
|
||||
schedule daily
|
||||
store store2
|
||||
|
||||
verification: ...
|
||||
|
||||
|
||||
You can use the ``proxmox-backup-manager verify-job`` command to manipulate
|
||||
this file.
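
For example, the job shown above could be created like this (a sketch; the option names mirror the configuration keys):

.. code-block:: console

   # proxmox-backup-manager verify-job create verify-store2 --store store2 \
     --schedule daily --ignore-verified true --outdated-after 7
   # proxmox-backup-manager verify-job list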
|
35
docs/config/verification/man5.rst
Normal file
35
docs/config/verification/man5.rst
Normal file
@ -0,0 +1,35 @@
|
||||
==========================
|
||||
verification.cfg
|
||||
==========================
|
||||
|
||||
.. include:: ../../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Verification Job Configuration
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 5
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
The file /etc/proxmox-backup/verification.cfg is a configuration file for
|
||||
Proxmox Backup Server. It contains the verification job
|
||||
configuration.
|
||||
|
||||
|
||||
File Format
|
||||
===========
|
||||
|
||||
.. include:: format.rst
|
||||
|
||||
|
||||
Options
|
||||
=======
|
||||
|
||||
.. include:: config.rst
|
||||
|
||||
|
||||
.. include:: ../../pbs-copyright.rst
|
97
docs/configuration-files.rst
Normal file
97
docs/configuration-files.rst
Normal file
@ -0,0 +1,97 @@
|
||||
Configuration Files
|
||||
===================
|
||||
|
||||
All Proxmox Backup Server configuration files reside inside the directory
|
||||
``/etc/proxmox-backup/``.
|
||||
|
||||
|
||||
``acl.cfg``
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
File Format
|
||||
^^^^^^^^^^^
|
||||
|
||||
.. include:: config/acl/format.rst
|
||||
|
||||
|
||||
Roles
|
||||
^^^^^
|
||||
|
||||
The following roles exist:
|
||||
|
||||
.. include:: config/acl/roles.rst
|
||||
|
||||
|
||||
``datastore.cfg``
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
File Format
|
||||
^^^^^^^^^^^
|
||||
|
||||
.. include:: config/datastore/format.rst
|
||||
|
||||
|
||||
Options
|
||||
^^^^^^^
|
||||
|
||||
.. include:: config/datastore/config.rst
|
||||
|
||||
|
||||
``user.cfg``
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
File Format
|
||||
^^^^^^^^^^^
|
||||
|
||||
.. include:: config/user/format.rst
|
||||
|
||||
|
||||
Options
|
||||
^^^^^^^
|
||||
|
||||
.. include:: config/user/config.rst
|
||||
|
||||
|
||||
``remote.cfg``
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
File Format
|
||||
^^^^^^^^^^^
|
||||
|
||||
.. include:: config/remote/format.rst
|
||||
|
||||
|
||||
Options
|
||||
^^^^^^^
|
||||
|
||||
.. include:: config/remote/config.rst
|
||||
|
||||
|
||||
``sync.cfg``
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
File Format
|
||||
^^^^^^^^^^^
|
||||
|
||||
.. include:: config/sync/format.rst
|
||||
|
||||
|
||||
Options
|
||||
^^^^^^^
|
||||
|
||||
.. include:: config/sync/config.rst
|
||||
|
||||
|
||||
``verification.cfg``
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
File Format
|
||||
^^^^^^^^^^^
|
||||
|
||||
.. include:: config/verification/format.rst
|
||||
|
||||
|
||||
Options
|
||||
^^^^^^^
|
||||
|
||||
.. include:: config/verification/config.rst
|
@ -14,6 +14,10 @@ pre {
|
||||
padding: 5px 10px;
|
||||
}
|
||||
|
||||
div.topic {
|
||||
background-color: #FAFAFA;
|
||||
}
|
||||
|
||||
li a.current {
|
||||
font-weight: bold;
|
||||
border-bottom: 1px solid #000;
|
||||
@ -25,6 +29,23 @@ ul li.toctree-l1 > a {
|
||||
color: #000;
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul {
|
||||
color: #444;
|
||||
}
|
||||
div.sphinxsidebar ul ul {
|
||||
list-style: circle;
|
||||
}
|
||||
div.sphinxsidebar ul ul ul {
|
||||
list-style: square;
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul a code {
|
||||
font-weight: normal;
|
||||
}
|
||||
div.sphinxsidebar ul ul a {
|
||||
border-bottom: 1px dotted #CCC;
|
||||
}
|
||||
|
||||
div.sphinxsidebar form.search {
|
||||
margin-bottom: 5px;
|
||||
}
|
||||
|
@ -6,7 +6,113 @@ File Formats
|
||||
Proxmox File Archive Format (``.pxar``)
|
||||
---------------------------------------
|
||||
|
||||
|
||||
.. graphviz:: pxar-format-overview.dot
|
||||
|
||||
|
||||
|
||||
.. _data-blob-format:
|
||||
|
||||
Data Blob Format (``.blob``)
|
||||
----------------------------
|
||||
|
||||
The data blob format is used to store small binary data. The magic number decides the exact format:
|
||||
|
||||
.. list-table::
|
||||
:widths: auto
|
||||
|
||||
* - ``[66, 171, 56, 7, 190, 131, 112, 161]``
|
||||
- unencrypted
|
||||
- uncompressed
|
||||
* - ``[49, 185, 88, 66, 111, 182, 163, 127]``
|
||||
- unencrypted
|
||||
- compressed
|
||||
* - ``[123, 103, 133, 190, 34, 45, 76, 240]``
|
||||
- encrypted
|
||||
- uncompressed
|
||||
* - ``[230, 89, 27, 191, 11, 191, 216, 11]``
|
||||
- encrypted
|
||||
- compressed
|
||||
|
||||
Compression algorithm is ``zstd``. Encryption cipher is ``AES_256_GCM``.
|
||||
|
||||
Unencrypted blobs use the following format:
|
||||
|
||||
.. list-table::
|
||||
:widths: auto
|
||||
|
||||
* - ``MAGIC: [u8; 8]``
|
||||
* - ``CRC32: [u8; 4]``
|
||||
* - ``Data: (max 16MiB)``
|
||||
|
||||
Encrypted blobs additionally contain a 16 byte IV, followed by a 16
|
||||
byte Authenticated Encryption (AE) tag, followed by the encrypted
|
||||
data:
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - ``MAGIC: [u8; 8]``
|
||||
* - ``CRC32: [u8; 4]``
|
||||
* - ``IV: [u8; 16]``
|
||||
* - ``TAG: [u8; 16]``
|
||||
* - ``Data: (max 16MiB)``
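
As an illustration, here is a minimal sketch in Rust (the implementation language of Proxmox Backup Server) of how a reader could classify a blob by its magic number. The helper function is hypothetical and not part of the actual code base; only the byte values are taken from the table above:

.. code-block:: rust

   /// Hypothetical helper: classify a blob by its leading magic number.
   /// The byte values are the ones listed in the table above.
   fn classify_blob(data: &[u8]) -> Option<&'static str> {
       const PLAIN: [u8; 8] = [66, 171, 56, 7, 190, 131, 112, 161];
       const COMPRESSED: [u8; 8] = [49, 185, 88, 66, 111, 182, 163, 127];
       const ENCRYPTED: [u8; 8] = [123, 103, 133, 190, 34, 45, 76, 240];
       const ENCRYPTED_COMPRESSED: [u8; 8] = [230, 89, 27, 191, 11, 191, 216, 11];

       // The magic number occupies the first 8 bytes of every blob.
       let magic: [u8; 8] = data.get(..8)?.try_into().ok()?;
       match magic {
           PLAIN => Some("unencrypted, uncompressed"),
           COMPRESSED => Some("unencrypted, compressed"),
           ENCRYPTED => Some("encrypted, uncompressed"),
           ENCRYPTED_COMPRESSED => Some("encrypted, compressed"),
           _ => None,
       }
   }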
|
||||
|
||||
|
||||
.. _fixed-index-format:
|
||||
|
||||
Fixed Index Format (``.fidx``)
|
||||
-------------------------------
|
||||
|
||||
All numbers are stored as little-endian.
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - ``MAGIC: [u8; 8]``
|
||||
- ``[47, 127, 65, 237, 145, 253, 15, 205]``
|
||||
* - ``uuid: [u8; 16]``,
|
||||
- Unique ID
|
||||
* - ``ctime: i64``,
|
||||
- Creation Time (epoch)
|
||||
* - ``index_csum: [u8; 32]``,
|
||||
- Sha256 over the index (without header) ``SHA256(digest1||digest2||...)``
|
||||
* - ``size: u64``,
|
||||
- Image size
|
||||
* - ``chunk_size: u64``,
|
||||
- Chunk size
|
||||
* - ``reserved: [u8; 4016]``,
|
||||
- overall header size is one page (4096 bytes)
|
||||
* - ``digest1: [u8; 32]``
|
||||
- first chunk digest
|
||||
* - ``digest2: [u8; 32]``
|
||||
- next chunk
|
||||
* - ...
|
||||
- next chunk ...
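
As a worked example: a 4 GiB fixed image with a 4 MiB chunk size yields 1024 digests, so the resulting ``.fidx`` file is 4096 + 1024 * 32 = 36864 bytes long.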
|
||||
|
||||
|
||||
.. _dynamic-index-format:
|
||||
|
||||
Dynamic Index Format (``.didx``)
|
||||
--------------------------------
|
||||
|
||||
All numbers are stored as little-endian.
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - ``MAGIC: [u8; 8]``
|
||||
- ``[28, 145, 78, 165, 25, 186, 179, 205]``
|
||||
* - ``uuid: [u8; 16]``,
|
||||
- Unique ID
|
||||
* - ``ctime: i64``,
|
||||
- Creation Time (epoch)
|
||||
* - ``index_csum: [u8; 32]``,
|
||||
- Sha256 over the index (without header) ``SHA256(offset1||digest1||offset2||digest2||...)``
|
||||
* - ``reserved: [u8; 4032]``,
|
||||
- Overall header size is one page (4096 bytes)
|
||||
* - ``offset1: u64``
|
||||
- End of first chunk
|
||||
* - ``digest1: [u8; 32]``
|
||||
- first chunk digest
|
||||
* - ``offset2: u64``
|
||||
- End of second chunk
|
||||
* - ``digest2: [u8; 32]``
|
||||
- second chunk digest
|
||||
* - ...
|
||||
- next chunk offset/digest
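
Note that each index entry is 40 bytes (an 8 byte offset plus a 32 byte digest), so the number of chunks referenced by a ``.didx`` file is simply (file size - 4096) / 40.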
|
||||
|
@ -129,7 +129,7 @@ top panel to view:
|
||||
* **Content**: Information on the datastore's backup groups and their respective
|
||||
contents
|
||||
* **Prune & GC**: Schedule :ref:`pruning <backup-pruning>` and :ref:`garbage
|
||||
collection <garbage-collection>` operations, and run garbage collection
|
||||
collection <client_garbage-collection>` operations, and run garbage collection
|
||||
manually
|
||||
* **Sync Jobs**: Create, manage and run :ref:`syncjobs` from remote servers
|
||||
* **Verify Jobs**: Create, manage and run :ref:`maintenance_verification` jobs on the
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
Welcome to the Proxmox Backup documentation!
|
||||
============================================
|
||||
| Copyright (C) 2019-2020 Proxmox Server Solutions GmbH
|
||||
| Copyright (C) 2019-2021 Proxmox Server Solutions GmbH
|
||||
| Version |version| -- |today|
|
||||
|
||||
Permission is granted to copy, distribute and/or modify this document under the
|
||||
@ -33,6 +33,7 @@ in the section entitled "GNU Free Documentation License".
|
||||
pve-integration.rst
|
||||
pxar-tool.rst
|
||||
sysadmin.rst
|
||||
technical-overview.rst
|
||||
faq.rst
|
||||
|
||||
.. raw:: latex
|
||||
@ -44,6 +45,7 @@ in the section entitled "GNU Free Documentation License".
|
||||
:caption: Appendix
|
||||
|
||||
command-syntax.rst
|
||||
configuration-files.rst
|
||||
file-formats.rst
|
||||
backup-protocol.rst
|
||||
calendarevents.rst
|
||||
|
@ -15,7 +15,7 @@ encryption (AE_). Using :term:`Rust` as the implementation language guarantees h
|
||||
performance, low resource usage, and a safe, high-quality codebase.
|
||||
|
||||
Proxmox Backup uses state of the art cryptography for both client-server
|
||||
communication and backup content :ref:`encryption <encryption>`. All
|
||||
communication and backup content :ref:`encryption <client_encryption>`. All
|
||||
client-server communication uses `TLS
|
||||
<https://en.wikipedia.org/wiki/Transport_Layer_Security>`_, and backup data can
|
||||
be encrypted on the client-side before sending, making it safer to back up data
|
||||
@ -161,7 +161,7 @@ of the issue and will send a notification once it has been solved.
|
||||
License
|
||||
-------
|
||||
|
||||
Copyright (C) 2019-2020 Proxmox Server Solutions GmbH
|
||||
Copyright (C) 2019-2021 Proxmox Server Solutions GmbH
|
||||
|
||||
This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>
|
||||
|
||||
|
@ -4,7 +4,7 @@
|
||||
// IBM LTO Ultrium Cartridge Label Specification
|
||||
// http://www-01.ibm.com/support/docview.wss?uid=ssg1S7000429
|
||||
|
||||
let code39_codes = {
|
||||
const code39_codes = {
|
||||
"1": ['B', 's', 'b', 'S', 'b', 's', 'b', 's', 'B'],
|
||||
"A": ['B', 's', 'b', 's', 'b', 'S', 'b', 's', 'B'],
|
||||
"K": ['B', 's', 'b', 's', 'b', 's', 'b', 'S', 'B'],
|
||||
@ -53,10 +53,10 @@ let code39_codes = {
|
||||
"0": ['b', 's', 'b', 'S', 'B', 's', 'B', 's', 'b'],
|
||||
"J": ['b', 's', 'b', 's', 'B', 'S', 'B', 's', 'b'],
|
||||
"T": ['b', 's', 'b', 's', 'B', 's', 'B', 'S', 'b'],
|
||||
"*": ['b', 'S', 'b', 's', 'B', 's', 'B', 's', 'b']
|
||||
"*": ['b', 'S', 'b', 's', 'B', 's', 'B', 's', 'b'],
|
||||
};
|
||||
|
||||
let colors = [
|
||||
const colors = [
|
||||
'#BB282E',
|
||||
'#FAE54A',
|
||||
'#9AC653',
|
||||
@ -66,25 +66,22 @@ let colors = [
|
||||
'#E27B99',
|
||||
'#67A945',
|
||||
'#F6B855',
|
||||
'#705A81'
|
||||
'#705A81',
|
||||
];
|
||||
|
||||
let lto_label_width = 70;
|
||||
let lto_label_height = 17;
|
||||
const lto_label_width = 70;
|
||||
const lto_label_height = 16.9;
|
||||
|
||||
function foreach_label(page_layout, callback) {
|
||||
|
||||
let count = 0;
|
||||
let row = 0;
|
||||
let height = page_layout.margin_top;
|
||||
|
||||
while ((height + page_layout.label_height) <= page_layout.page_height) {
|
||||
|
||||
let column = 0;
|
||||
let width = page_layout.margin_left;
|
||||
|
||||
while ((width + page_layout.label_width) <= page_layout.page_width) {
|
||||
|
||||
callback(column, row, count, width, height);
|
||||
count += 1;
|
||||
|
||||
@ -97,11 +94,9 @@ function foreach_label(page_layout, callback) {
|
||||
height += page_layout.label_height;
|
||||
height += page_layout.row_spacing;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
function compute_max_labels(page_layout) {
|
||||
|
||||
let max_labels = 0;
|
||||
foreach_label(page_layout, function() { max_labels += 1; });
|
||||
return max_labels;
|
||||
@ -110,10 +105,10 @@ function compute_max_labels(page_layout) {
|
||||
function svg_label(mode, label, label_type, pagex, pagey, label_borders) {
|
||||
let svg = "";
|
||||
|
||||
if (label.length != 6) {
|
||||
if (label.length !== 6) {
|
||||
throw "wrong label length";
|
||||
}
|
||||
if (label_type.length != 2) {
|
||||
if (label_type.length !== 2) {
|
||||
throw "wrong label_type length";
|
||||
}
|
||||
|
||||
@ -126,20 +121,22 @@ function svg_label(mode, label, label_type, pagex, pagey, label_borders) {
|
||||
let xpos = pagex + code_width;
|
||||
let height = 12;
|
||||
|
||||
let label_rect = `x='${pagex}' y='${pagey}' width='${lto_label_width}' height='${lto_label_height}'`;
|
||||
|
||||
if (mode === 'placeholder') {
|
||||
if (label_borders) {
|
||||
svg += `<rect class='unprintable' x='${pagex}' y='${pagey}' width='${lto_label_width}' height='${lto_label_height}' fill='none' style='stroke:black;stroke-width:0.1;'/>`;
|
||||
svg += `<rect class='unprintable' ${label_rect} fill='none' style='stroke:black;stroke-width:0.1;'/>`;
|
||||
}
|
||||
return svg;
|
||||
}
|
||||
if (label_borders) {
|
||||
svg += `<rect x='${pagex}' y='${pagey}' width='${lto_label_width}' height='${lto_label_height}' fill='none' style='stroke:black;stroke-width:0.1;'/>`;
|
||||
svg += `<rect ${label_rect} fill='none' style='stroke:black;stroke-width:0.1;'/>`;
|
||||
}
|
||||
|
||||
if (mode === "color" || mode == "frame") {
|
||||
if (mode === "color" || mode === "frame") {
|
||||
let w = lto_label_width/8;
|
||||
let h = lto_label_height - height;
|
||||
for (var i = 0; i < 7; i++) {
|
||||
for (let i = 0; i < 7; i++) {
|
||||
let textx = w/2 + pagex + i*w;
|
||||
let texty = pagey;
|
||||
|
||||
@ -168,7 +165,7 @@ function svg_label(mode, label, label_type, pagex, pagey, label_borders) {
|
||||
|
||||
let raw_label = `*${label}${label_type}*`;
|
||||
|
||||
for (var i = 0; i < raw_label.length; i++) {
|
||||
for (let i = 0; i < raw_label.length; i++) {
|
||||
let letter = raw_label.charAt(i);
|
||||
|
||||
let code = code39_codes[letter];
|
||||
@ -186,7 +183,6 @@ function svg_label(mode, label, label_type, pagex, pagey, label_borders) {
|
||||
}
|
||||
|
||||
for (let c of code) {
|
||||
|
||||
if (c === 's') {
|
||||
xpos += small;
|
||||
continue;
|
||||
@ -216,7 +212,7 @@ function html_page_header() {
|
||||
/* no page margins */
|
||||
html += "@page{margin-left: 0px;margin-right: 0px;margin-top: 0px;margin-bottom: 0px;}";
|
||||
/* to hide things on printed page */
|
||||
html += "@media print { .unprintable { visibility: hidden; } }";
|
||||
html += "@media print { .unprintable { visibility: hidden; } }";
|
||||
|
||||
html += "</style>";
|
||||
|
||||
@ -241,7 +237,6 @@ function printBarcodePage() {
|
||||
}
|
||||
|
||||
function generate_barcode_page(target_id, page_layout, label_list, calibration) {
|
||||
|
||||
let svg = svg_page_header(page_layout.page_width, page_layout.page_height);
|
||||
|
||||
let c = calibration;
|
||||
@ -255,7 +250,6 @@ function generate_barcode_page(target_id, page_layout, label_list, calibration)
|
||||
svg += '>';
|
||||
|
||||
foreach_label(page_layout, function(column, row, count, xpos, ypos) {
|
||||
|
||||
if (count >= label_list.length) { return; }
|
||||
|
||||
let item = label_list[count];
|
||||
@ -297,12 +291,11 @@ function setupPrintFrame(frame, page_width, page_height) {
|
||||
}
|
||||
|
||||
function generate_calibration_page(target_id, page_layout, calibration) {
|
||||
|
||||
let frame = document.getElementById(target_id);
|
||||
|
||||
setupPrintFrame(frame, page_layout.page_width, page_layout.page_height);
|
||||
|
||||
let svg = svg_page_header( page_layout.page_width, page_layout.page_height);
|
||||
let svg = svg_page_header(page_layout.page_width, page_layout.page_height);
|
||||
|
||||
svg += "<defs>";
|
||||
svg += "<marker id='endarrow' markerWidth='10' markerHeight='7' ";
|
||||
|
@ -4,7 +4,7 @@ Ext.define('LabelList', {
|
||||
|
||||
plugins: {
|
||||
ptype: 'cellediting',
|
||||
clicksToEdit: 1
|
||||
clicksToEdit: 1,
|
||||
},
|
||||
|
||||
selModel: 'cellmodel',
|
||||
@ -44,7 +44,7 @@ Ext.define('LabelList', {
|
||||
xtype: 'prefixfield',
|
||||
allowBlank: false,
|
||||
},
|
||||
renderer: function (value, metaData, record) {
|
||||
renderer: function(value, metaData, record) {
|
||||
console.log(record);
|
||||
if (record.data.mode === 'placeholder') {
|
||||
return "-";
|
||||
@ -60,7 +60,7 @@ Ext.define('LabelList', {
|
||||
xtype: 'ltoTapeType',
|
||||
allowBlank: false,
|
||||
},
|
||||
renderer: function (value, metaData, record) {
|
||||
renderer: function(value, metaData, record) {
|
||||
console.log(record);
|
||||
if (record.data.mode === 'placeholder') {
|
||||
return "-";
|
||||
@ -133,7 +133,7 @@ Ext.define('LabelList', {
|
||||
handler: function(grid, rowIndex) {
|
||||
grid.getStore().removeAt(rowIndex);
|
||||
},
|
||||
}
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
|
@ -4,7 +4,6 @@ if (Ext.isFirefox) {
|
||||
}
|
||||
|
||||
function draw_labels(target_id, label_list, page_layout, calibration) {
|
||||
|
||||
let max_labels = compute_max_labels(page_layout);
|
||||
|
||||
let count_fixed = 0;
|
||||
@ -44,20 +43,16 @@ function draw_labels(target_id, label_list, page_layout, calibration) {
|
||||
count = fill_size;
|
||||
}
|
||||
rest -= count;
|
||||
} else if (item.end <= item.start) {
|
||||
count = 1;
|
||||
} else {
|
||||
if (item.end <= item.start) {
|
||||
count = 1;
|
||||
} else {
|
||||
count = (item.end - item.start) + 1;
|
||||
}
|
||||
count = (item.end - item.start) + 1;
|
||||
}
|
||||
|
||||
for (j = 0; j < count; j++) {
|
||||
|
||||
let id = item.start + j;
|
||||
|
||||
if (item.prefix.length == 6) {
|
||||
|
||||
list.push({
|
||||
label: item.prefix,
|
||||
tape_type: item.tape_type,
|
||||
@ -66,9 +61,7 @@ function draw_labels(target_id, label_list, page_layout, calibration) {
|
||||
});
|
||||
rest += count - j - 1;
|
||||
break;
|
||||
|
||||
} else {
|
||||
|
||||
let pad_len = 6-item.prefix.length;
|
||||
let label = item.prefix + id.toString().padStart(pad_len, 0);
|
||||
|
||||
@ -115,10 +108,10 @@ Ext.define('MainView', {
|
||||
label_list.push(record.data);
|
||||
});
|
||||
|
||||
let page_layout_view = view.down("pageLayoutPanel");
|
||||
let page_layout_view = view.down("pageLayoutPanel");
|
||||
let page_layout = page_layout_view.getValues();
|
||||
|
||||
let calibration_view = view.down("pageCalibration");
|
||||
let calibration_view = view.down("pageCalibration");
|
||||
let page_calibration = calibration_view.getValues();
|
||||
|
||||
draw_labels("print_frame", label_list, page_layout, page_calibration);
|
||||
@ -127,10 +120,10 @@ Ext.define('MainView', {
|
||||
update_calibration_preview: function() {
|
||||
let me = this;
|
||||
let view = me.getView();
|
||||
let page_layout_view = view.down("pageLayoutPanel");
|
||||
let page_layout_view = view.down("pageLayoutPanel");
|
||||
let page_layout = page_layout_view.getValues();
|
||||
|
||||
let calibration_view = view.down("pageCalibration");
|
||||
let calibration_view = view.down("pageCalibration");
|
||||
let page_calibration = calibration_view.getValues();
|
||||
console.log(page_calibration);
|
||||
generate_calibration_page('print_frame', page_layout, page_calibration);
|
||||
@ -195,19 +188,18 @@ Ext.define('MainView', {
|
||||
border: false,
|
||||
flex: 1,
|
||||
scrollable: true,
|
||||
tools:[{
|
||||
tools: [{
|
||||
type: 'print',
|
||||
tooltip: 'Open Print Dialog',
|
||||
handler: function(event, toolEl, panelHeader) {
|
||||
printBarcodePage();
|
||||
}
|
||||
},
|
||||
}],
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
Ext.onReady(function() {
|
||||
|
||||
Ext.create('MainView', {
|
||||
renderTo: Ext.getBody(),
|
||||
});
|
||||
|
@ -31,8 +31,8 @@ Ext.define('PageCalibration', {
|
||||
scalex = 100/values.d_x;
|
||||
scaley = 100/values.d_y;
|
||||
|
||||
let offsetx = ((50*scalex) - values.s_x)/scalex;
|
||||
let offsety = ((50*scaley) - values.s_y)/scaley;
|
||||
let offsetx = ((50 - values.s_x) - (50*scalex - 50))/scalex;
|
||||
let offsety = ((50 - values.s_y) - (50*scaley - 50))/scaley;
|
||||
|
||||
return {
|
||||
scalex: scalex,
|
||||
@ -139,4 +139,4 @@ Ext.define('PageCalibration', {
|
||||
],
|
||||
},
|
||||
],
|
||||
})
|
||||
});
|
||||
|
@ -106,7 +106,7 @@ Ext.define('PageLayoutPanel', {
|
||||
xtype: 'numberfield',
|
||||
name: 'label_height',
|
||||
fieldLabel: 'Label height',
|
||||
minValue: 17,
|
||||
minValue: 15,
|
||||
allowBlank: false,
|
||||
value: 17,
|
||||
},
|
||||
|
@ -1,4 +1,4 @@
|
||||
let paper_sizes = {
|
||||
const paper_sizes = {
|
||||
a4: {
|
||||
comment: 'A4 (plain)',
|
||||
page_width: 210,
|
||||
@ -15,13 +15,13 @@ let paper_sizes = {
|
||||
page_width: 210,
|
||||
page_height: 297,
|
||||
label_width: 70,
|
||||
label_height: 17,
|
||||
label_height: 16.9,
|
||||
margin_left: 0,
|
||||
margin_top: 4,
|
||||
margin_top: 5,
|
||||
column_spacing: 0,
|
||||
row_spacing: 0,
|
||||
},
|
||||
}
|
||||
};
|
||||
|
||||
function paper_size_combo_data() {
|
||||
let data = [];
|
||||
|
@ -118,11 +118,11 @@ high, but you cannot recreate backup snapshots from the past.
|
||||
Garbage Collection
|
||||
------------------
|
||||
|
||||
You can monitor and run :ref:`garbage collection <garbage-collection>` on the
|
||||
You can monitor and run :ref:`garbage collection <client_garbage-collection>` on the
|
||||
Proxmox Backup Server using the ``garbage-collection`` subcommand of
|
||||
``proxmox-backup-manager``. You can use the ``start`` subcommand to manually
|
||||
start garbage collection on an entire datastore and the ``status`` subcommand to
|
||||
see attributes relating to the :ref:`garbage collection <garbage-collection>`.
|
||||
see attributes relating to the :ref:`garbage collection <client_garbage-collection>`.
|
||||
|
||||
This functionality can also be accessed in the GUI, by navigating to **Prune &
|
||||
GC** from the top panel. From here, you can edit the schedule at which garbage
|
||||
@ -142,7 +142,7 @@ Verification
|
||||
Proxmox Backup offers various verification options to ensure that backup data is
|
||||
intact. Verification is generally carried out through the creation of verify
|
||||
jobs. These are scheduled tasks that run verification at a given interval (see
|
||||
:ref:`calendar-events`). With these, you can set whether already verified
|
||||
:ref:`calendar-event-scheduling`). With these, you can set whether already verified
|
||||
snapshots are ignored, as well as set a time period, after which verified jobs
|
||||
are checked again. The interface for creating verify jobs can be found under the
|
||||
**Verify Jobs** tab of the datastore.
|
||||
|
@ -65,7 +65,7 @@ the ``proxmox-backup-manager sync-job`` command. The configuration information
|
||||
for sync jobs is stored at ``/etc/proxmox-backup/sync.cfg``. To create a new
|
||||
sync job, click the add button in the GUI, or use the ``create`` subcommand.
|
||||
After creating a sync job, you can either start it manually from the GUI or
|
||||
provide it with a schedule (see :ref:`calendar-events`) to run regularly.
|
||||
provide it with a schedule (see :ref:`calendar-event-scheduling`) to run regularly.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
|
24
docs/output-format.rst
Normal file
24
docs/output-format.rst
Normal file
@ -0,0 +1,24 @@
|
||||
Most commands that produce output support the ``--output-format``
|
||||
parameter. It accepts the following values:
|
||||
|
||||
:``text``: Text format (default). Structured data is rendered as a table.
|
||||
|
||||
:``json``: JSON (single line).
|
||||
|
||||
:``json-pretty``: JSON (multiple lines, nicely formatted).
|
||||
|
||||
|
||||
Also, the following environment variables can modify output behavior:
|
||||
|
||||
``PROXMOX_OUTPUT_FORMAT``
|
||||
Defines the default output format.
|
||||
|
||||
``PROXMOX_OUTPUT_NO_BORDER``
|
||||
If set (to any value), do not render table borders.
|
||||
|
||||
``PROXMOX_OUTPUT_NO_HEADER``
|
||||
If set (to any value), do not render table headers.
|
||||
|
||||
.. note:: The ``text`` format is designed to be human readable, and
|
||||
not meant to be parsed by automation tools. Please use the ``json``
|
||||
format if you need to process the output.
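
For example, the following two invocations should produce the same output (the subcommand is just an example):

.. code-block:: console

   # proxmox-backup-manager datastore list --output-format json-pretty
   # PROXMOX_OUTPUT_FORMAT=json-pretty proxmox-backup-manager datastore list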
|
@ -1,7 +1,7 @@
|
||||
Copyright and Disclaimer
|
||||
========================
|
||||
|
||||
Copyright (C) 2007-2019 Proxmox Server Solutions GmbH
|
||||
Copyright (C) 2007-2021 Proxmox Server Solutions GmbH
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as
|
||||
|
2
docs/pmt/description.rst
Normal file
2
docs/pmt/description.rst
Normal file
@ -0,0 +1,2 @@
|
||||
The ``pmt`` command controls Linux tape devices.
|
||||
|
42
docs/pmt/man1.rst
Normal file
42
docs/pmt/man1.rst
Normal file
@ -0,0 +1,42 @@
|
||||
==========================
|
||||
pmt
|
||||
==========================
|
||||
|
||||
.. include:: ../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Control Linux Tape Devices
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 1
|
||||
|
||||
|
||||
Synopsis
|
||||
========
|
||||
|
||||
.. include:: synopsis.rst
|
||||
|
||||
|
||||
Common Options
|
||||
==============
|
||||
|
||||
.. include:: options.rst
|
||||
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
.. include:: description.rst
|
||||
|
||||
|
||||
ENVIRONMENT
|
||||
===========
|
||||
|
||||
:TAPE: If set, replaces the ``--device`` option.
|
||||
|
||||
:PROXMOX_TAPE_DRIVE: If set, replaces the ``--drive`` option.
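
For example, the following two commands should behave identically (the device path is a placeholder):

.. code-block:: console

   # pmt --device /dev/tape/by-id/scsi-12345-nst status
   # TAPE=/dev/tape/by-id/scsi-12345-nst pmt status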
|
||||
|
||||
|
||||
.. include:: ../pbs-copyright.rst
|
51
docs/pmt/options.rst
Normal file
51
docs/pmt/options.rst
Normal file
@ -0,0 +1,51 @@
|
||||
All commands support the following parameters to specify the tape device:
|
||||
|
||||
--device <path> Path to the Linux tape device
|
||||
|
||||
--drive <name> Use drive from Proxmox Backup Server configuration.
|
||||
|
||||
|
||||
Commands which generate output support the ``--output-format``
|
||||
parameter. It accepts the following values:
|
||||
|
||||
:``text``: Text format (default). Human readable.
|
||||
|
||||
:``json``: JSON (single line).
|
||||
|
||||
:``json-pretty``: JSON (multiple lines, nicely formatted).
|
||||
|
||||
|
||||
Device driver options can be specified as integer numbers (see
|
||||
``/usr/include/linux/mtio.h``), or using symbolic names:
|
||||
|
||||
:``buffer-writes``: Enable buffered writes
|
||||
|
||||
:``async-writes``: Enable async writes
|
||||
|
||||
:``read-ahead``: Use read-ahead for fixed block size
|
||||
|
||||
:``debugging``: Enable debugging if compiled into the driver
|
||||
|
||||
:``two-fm``: Write two file marks when closing the file
|
||||
|
||||
:``fast-mteom``: Space directly to eod (and lose file number)
|
||||
|
||||
:``auto-lock``: Automatically lock/unlock drive door
|
||||
|
||||
:``def-writes``: Defaults are meant only for writes
|
||||
|
||||
:``can-bsr``: Indicates that the drive can space backwards
|
||||
|
||||
:``no-blklims``: Drive does not support read block limits
|
||||
|
||||
:``can-partitions``: Drive can handle partitioned tapes
|
||||
|
||||
:``scsi2logical``: Seek and tell use SCSI-2 logical block addresses
|
||||
|
||||
:``sysv``: Enable the System V semantics
|
||||
|
||||
:``nowait``: Do not wait for rewind, etc. to complete
|
||||
|
||||
:``sili``: Enables setting the SILI bit in SCSI commands when reading
|
||||
in variable block mode to enhance performance when reading blocks
|
||||
shorter than the byte count
|
@ -1,6 +1,3 @@
|
||||
Description
|
||||
^^^^^^^^^^^
|
||||
|
||||
The ``pmtx`` command controls SCSI media changer devices (tape
|
||||
autoloaders).
|
||||
|
||||
|
@ -18,11 +18,40 @@ Synopsis
|
||||
|
||||
.. include:: synopsis.rst
|
||||
|
||||
|
||||
Common Options
|
||||
==============
|
||||
|
||||
All commands support the following parameters to specify the changer device:
|
||||
|
||||
--device <path> Path to Linux generic SCSI device (e.g. '/dev/sg4')
|
||||
|
||||
--changer <name> Use changer from Proxmox Backup Server configuration.
|
||||
|
||||
|
||||
Commands generating output support the ``--output-format``
|
||||
parameter. It accepts the following values:
|
||||
|
||||
:``text``: Text format (default). Human readable.
|
||||
|
||||
:``json``: JSON (single line).
|
||||
|
||||
:``json-pretty``: JSON (multiple lines, nicely formatted).
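
For example, to query a changer via an explicit device path, or via a configured changer name (both values are placeholders):

.. code-block:: console

   # pmtx --device /dev/sg4 status
   # pmtx --changer sl3 status --output-format json-pretty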
|
||||
|
||||
|
||||
Description
|
||||
============
|
||||
|
||||
.. include:: description.rst
|
||||
|
||||
|
||||
.. include:: ../pbs-copyright.rst
|
||||
ENVIRONMENT
|
||||
===========
|
||||
|
||||
:CHANGER: If set, replaces the ``--device`` option
|
||||
|
||||
:PROXMOX_TAPE_DRIVE: If set, use the Proxmox Backup Server
|
||||
configuration to find the associated changer device.
|
||||
|
||||
|
||||
.. include:: ../pbs-copyright.rst
|
||||
|
@ -1,4 +1,4 @@
|
||||
This is just a test.
|
||||
|
||||
.. NOTE:: No further info.
|
||||
This tool implements a backup server client, i.e. it can connect to a
|
||||
backup server to issue management commands and to create or restore
|
||||
backups.
|
||||
|
||||
|
@ -31,6 +31,12 @@ Those command are available when you start an intercative restore shell:
|
||||
.. include:: catalog-shell-synopsis.rst
|
||||
|
||||
|
||||
Common Options
|
||||
==============
|
||||
|
||||
.. include:: ../output-format.rst
|
||||
|
||||
|
||||
Description
|
||||
============
|
||||
|
||||
|
@ -1,4 +1,2 @@
|
||||
This is just a test.
|
||||
|
||||
.. NOTE:: No further info.
|
||||
|
||||
This tool exposes the whole backup server management API on the
|
||||
command line.
|
||||
|
@ -1,4 +1,5 @@
|
||||
This is just a test.
|
||||
|
||||
.. NOTE:: No further info.
|
||||
This daemon exposes the whole Proxmox Backup Server API on TCP port
|
||||
8007 using HTTPS. It runs as user ``backup`` and has very limited
|
||||
permissions. Operations requiring more permissions are forwarded to
|
||||
the local ``proxmox-backup`` service.
|
||||
|
||||
|
7
docs/proxmox-backup/description.rst
Normal file
7
docs/proxmox-backup/description.rst
Normal file
@ -0,0 +1,7 @@
|
||||
This daemon exposes the Proxmox Backup Server management API on
|
||||
``127.0.0.1:82``. It runs as ``root`` and has permission to do all
|
||||
privileged operations.
|
||||
|
||||
NOTE: The daemon listens to a local address only, so you cannot access
|
||||
it from outside. The ``proxmox-backup-proxy`` daemon exposes the API
|
||||
to the outside world.
|
41
docs/proxmox-backup/man1.rst
Normal file
41
docs/proxmox-backup/man1.rst
Normal file
@ -0,0 +1,41 @@
|
||||
==========================
|
||||
proxmox-backup
|
||||
==========================
|
||||
|
||||
.. include:: ../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Proxmox Backup Local API Server
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 1
|
||||
|
||||
|
||||
Synopsis
|
||||
==========
|
||||
|
||||
This daemon is normally started and managed as a ``systemd`` service::
|
||||
|
||||
systemctl start proxmox-backup
|
||||
|
||||
systemctl stop proxmox-backup
|
||||
|
||||
systemctl status proxmox-backup
|
||||
|
||||
For debugging, you can start the daemon in the foreground using::
|
||||
|
||||
proxmox-backup-api
|
||||
|
||||
.. NOTE:: You need to stop the service before starting the daemon in
|
||||
the foreground.
|
||||
|
||||
|
||||
Description
|
||||
============
|
||||
|
||||
.. include:: description.rst
|
||||
|
||||
|
||||
.. include:: ../pbs-copyright.rst
|
1
docs/proxmox-tape/description.rst
Normal file
1
docs/proxmox-tape/description.rst
Normal file
@ -0,0 +1 @@
|
||||
This tool can configure and manage tape backups.
|
28
docs/proxmox-tape/man1.rst
Normal file
28
docs/proxmox-tape/man1.rst
Normal file
@ -0,0 +1,28 @@
|
||||
==========================
|
||||
proxmox-tape
|
||||
==========================
|
||||
|
||||
.. include:: ../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Proxmox Tape Backup Command Line Tool
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 1
|
||||
|
||||
|
||||
Synopsis
|
||||
========
|
||||
|
||||
.. include:: synopsis.rst
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
.. include:: description.rst
|
||||
|
||||
|
||||
.. include:: ../pbs-copyright.rst
|
||||
|
@ -1,6 +1,3 @@
|
||||
Description
|
||||
^^^^^^^^^^^
|
||||
|
||||
``pxar`` is a command line utility to create and manipulate archives in the
|
||||
:ref:`pxar-format`.
|
||||
It is inspired by `casync file archive format
|
||||
@ -80,7 +77,7 @@ These files must contain one pattern per line, again later patterns win over
|
||||
previous ones.
|
||||
The patterns control file exclusions of files present within the given directory
|
||||
or further below it in the tree.
|
||||
The behavior is the same as described in :ref:`creating-backups`.
|
||||
The behavior is the same as described in :ref:`client_creating_backups`.
|
||||
|
||||
Extracting an Archive
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
@ -4,6 +4,9 @@ pxar
|
||||
|
||||
.. include:: ../epilog.rst
|
||||
|
||||
.. Avoid errors with sphinx ref role
|
||||
.. role:: ref(emphasis)
|
||||
|
||||
-------------------------------------------------------------
|
||||
Proxmox File Archive Command Line Tool
|
||||
-------------------------------------------------------------
|
||||
@ -25,4 +28,3 @@ Description
|
||||
|
||||
|
||||
.. include:: ../pbs-copyright.rst
|
||||
|
||||
|
@ -6,3 +6,9 @@ Service Daemons
|
||||
|
||||
.. include:: proxmox-backup-proxy/description.rst
|
||||
|
||||
|
||||
``proxmox-backup``
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. include:: proxmox-backup/description.rst
|
||||
|
||||
|
@ -119,8 +119,8 @@ directory on the filesystem. Each datastore also has associated retention
|
||||
settings of how many backup snapshots for each interval of ``hourly``,
|
||||
``daily``, ``weekly``, ``monthly``, ``yearly`` as well as a time-independent
|
||||
number of backups to keep in that store. :ref:`backup-pruning` and
|
||||
:ref:`garbage collection <garbage-collection>` can also be configured to run
|
||||
periodically based on a configured schedule (see :ref:`calendar-events`) per datastore.
|
||||
:ref:`garbage collection <client_garbage-collection>` can also be configured to run
|
||||
periodically based on a configured schedule (see :ref:`calendar-event-scheduling`) per datastore.
|
||||
|
||||
|
||||
.. _storage_datastore_create:
|
||||
|
@ -25,4 +25,7 @@ either explain things which are different on `Proxmox Backup`_, or
|
||||
tasks which are commonly used on `Proxmox Backup`_. For other topics,
|
||||
please refer to the standard Debian documentation.
|
||||
|
||||
|
||||
.. include:: local-zfs.rst
|
||||
|
||||
.. include:: services.rst
|
||||
|
@ -1,39 +1,49 @@
|
||||
.. _tape_backup:
|
||||
|
||||
Tape Backup
|
||||
===========
|
||||
|
||||
.. CAUTION:: Tape Backup is a technical preview feature, not meant for
|
||||
production use. To enable it in the GUI, you need to issue the
|
||||
following command (as root user on the console):
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# touch /etc/proxmox-backup/tape.cfg
|
||||
|
||||
Proxmox tape backup provides an easy way to store datastore content
|
||||
onto magnetic tapes. This increases data safety because you get:
|
||||
|
||||
- an additional copy of the data
|
||||
- to a different media type (tape)
|
||||
- to an additional location (you can move tapes offsite)
|
||||
- an additional copy of the data,
|
||||
- on a different media type (tape),
|
||||
- to an additional location (you can move tapes off-site)
|
||||
|
||||
In most restore jobs, only data from the last backup job is restored.
|
||||
Restore requests further decline the older the data
|
||||
Restore requests further decline, the older the data
|
||||
gets. Considering this, tape backup may also help to reduce disk
|
||||
usage, because you can safely remove data from disk once archived on
|
||||
tape. This is especially true if you need to keep data for several
|
||||
usage, because you can safely remove data from disk, once it's archived on
|
||||
tape. This is especially true if you need to retain data for several
|
||||
years.
|
||||
|
||||
Tape backups do not provide random access to the stored data. Instead,
|
||||
you need to restore the data to disk before you can access it
|
||||
again. Also, if you store your tapes offsite (using some kind of tape
|
||||
vaulting service), you need to bring them onsite before you can do any
|
||||
restore. So please consider that restores from tapes can take much
|
||||
longer than restores from disk.
|
||||
you need to restore the data to disk, before you can access it
|
||||
again. Also, if you store your tapes off-site (using some kind of tape
|
||||
vaulting service), you need to bring them back on-site, before you can do any
|
||||
restores. So please consider that restoring from tape can take much
|
||||
longer than restoring from disk.
|
||||
|
||||
|
||||
Tape Technology Primer
|
||||
----------------------
|
||||
|
||||
.. _Linear Tape Open: https://en.wikipedia.org/wiki/Linear_Tape-Open
|
||||
.. _Linear Tape-Open: https://en.wikipedia.org/wiki/Linear_Tape-Open
|
||||
|
||||
As of 2021, the only broadly available tape technology standard is
|
||||
`Linear Tape Open`_, and different vendors offers LTO Ultrium tape
|
||||
drives, autoloaders and LTO tape cartridges.
|
||||
As of 2021, the only widely available tape technology standard is
|
||||
`Linear Tape-Open`_ (LTO). Different vendors offer LTO Ultrium tape
|
||||
drives, auto-loaders, and LTO tape cartridges.
|
||||
|
||||
There are a few vendors offering proprietary drives with
|
||||
slight advantages in performance and capacity, but they have
|
||||
There are a few vendors that offer proprietary drives with
|
||||
slight advantages in performance and capacity. Nevertheless, they have
|
||||
significant disadvantages:
|
||||
|
||||
- proprietary (single vendor)
|
||||
@ -43,56 +53,55 @@ So we currently do not test such drives.
|
||||
|
||||
In general, LTO tapes offer the following advantages:
|
||||
|
||||
- Durable (30 years)
|
||||
- Durability (30 year lifespan)
|
||||
- High Capacity (12 TB)
|
||||
- Relatively low cost per TB
|
||||
- Cold Media
|
||||
- Movable (storable inside vault)
|
||||
- Multiple vendors (for both media and drives)
|
||||
- Build in AES-CGM Encryption engine
|
||||
- Build in AES-GCM Encryption engine
|
||||
|
||||
Please note that `Proxmox Backup Server` already stores compressed
|
||||
data, so we do not need/use the tape compression feature.
|
||||
Note that `Proxmox Backup Server` already stores compressed data, so using the
|
||||
tape compression feature has no advantage.
|
||||
|
||||
|
||||
Supported Hardware
|
||||
------------------
|
||||
|
||||
Proxmox Backup Server supports `Linear Tape Open`_ genertion 4 (LTO4)
|
||||
or later. In general, all SCSI2 tape drives supported by the Linux
|
||||
kernel should work, but feature like hardware encryptions needs LTO4
|
||||
Proxmox Backup Server supports `Linear Tape-Open`_ generation 4 (LTO-4)
|
||||
or later. In general, all SCSI-2 tape drives supported by the Linux
|
||||
kernel should work, but features like hardware encryption need LTO-4
|
||||
or later.
|
||||
|
||||
Tape changer support is done using the Linux 'mtx' command line
|
||||
tool. So any changer device supported by that tool should work.
|
||||
Tape changing is carried out using the Linux 'mtx' command line
|
||||
tool, so any changer device supported by this tool should work.
|
||||
|
||||
|
||||
Drive Performance
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
Current LTO-8 tapes provide read/write speeds up to 360MB/s. This means,
|
||||
Current LTO-8 tapes provide read/write speeds of up to 360 MB/s. This means,
|
||||
that it still takes a minimum of 9 hours to completely write or
|
||||
read a single tape (even at maximum speed).
|
||||
|
||||
The only way to speed up that data rate is to use more than one
|
||||
drive. That way you can run several backup jobs in parallel, or run
|
||||
drive. That way, you can run several backup jobs in parallel, or run
|
||||
restore jobs while the other dives are used for backups.
|
||||
|
||||
Also consider that you need to read data first from your datastore
|
||||
(disk). But a single spinning disk is unable to deliver data at this
|
||||
Also consider that you first need to read data from your datastore
|
||||
(disk). However, a single spinning disk is unable to deliver data at this
|
||||
rate. We measured a maximum rate of about 60MB/s to 100MB/s in practice,
|
||||
so it takes 33 hours to read 12TB to fill up an LTO-8 tape. If you want
|
||||
to run your tape at full speed, please make sure that the source
|
||||
so it takes 33 hours to read the 12TB needed to fill up an LTO-8 tape. If you want
|
||||
to write to your tape at full speed, please make sure that the source
|
||||
datastore is able to deliver that performance (e.g, by using SSDs).
|
||||
|
||||
|
||||
Terminology
|
||||
-----------
|
||||
|
||||
:Tape Labels: are used to uniquely indentify a tape. You normally use
|
||||
some sticky paper labels and apply them on the front of the
|
||||
cartridge. We additionally store the label text magnetically on the
|
||||
tape (first file on tape).
|
||||
:Tape Labels: are used to uniquely identify a tape. You would normally apply a
|
||||
sticky paper label to the front of the cartridge. We additionally store the
|
||||
label text magnetically on the tape (first file on tape).
|
||||
|
||||
.. _Code 39: https://en.wikipedia.org/wiki/Code_39
|
||||
|
||||
@ -102,14 +111,14 @@ Terminology
|
||||
|
||||
:Barcodes: are a special form of tape labels, which are electronically
|
||||
readable. Most LTO tape robots use an 8 character string encoded as
|
||||
`Code 39`_, as definded in the `LTO Ultrium Cartridge Label
|
||||
`Code 39`_, as defined in the `LTO Ultrium Cartridge Label
|
||||
Specification`_.
|
||||
|
||||
You can either buy such barcode labels from your cartridge vendor,
|
||||
or print them yourself. You can use our `LTO Barcode Generator`_ App
|
||||
for that.
|
||||
or print them yourself. You can use our `LTO Barcode Generator`_
|
||||
app, if you would like to print them yourself.
|
||||
|
||||
.. Note:: Physical labels and the associated adhesive shall have an
|
||||
.. Note:: Physical labels and the associated adhesive should have an
|
||||
environmental performance to match or exceed the environmental
|
||||
specifications of the cartridge to which it is applied.
|
||||
|
||||
@ -122,8 +131,8 @@ Terminology
|
||||
:Media Set: A group of continuously written tapes (all from the same
|
||||
media pool).
|
||||
|
||||
:Tape drive: The decive used to read and write data to the tape. There
|
||||
are standalone drives, but drives often ship within tape libraries.
|
||||
:Tape drive: The device used to read and write data to the tape. There
|
||||
are standalone drives, but drives are usually shipped within tape libraries.
|
||||
|
||||
:Tape changer: A device which can change the tapes inside a tape drive
|
||||
(tape robot). They are usually part of a tape library.
|
||||
@ -132,10 +141,10 @@ Terminology
|
||||
|
||||
:`Tape library`_: A storage device that contains one or more tape drives,
|
||||
a number of slots to hold tape cartridges, a barcode reader to
|
||||
identify tape cartridges and an automated method for loading tapes
|
||||
identify tape cartridges, and an automated method for loading tapes
|
||||
(a robot).
|
||||
|
||||
People als call this 'autoloader', 'tape robot' or 'tape jukebox'.
|
||||
This is also commonly known as an 'autoloader', 'tape robot' or 'tape jukebox'.
|
||||
|
||||
:Inventory: The inventory stores the list of known tapes (with
|
||||
additional status information).
|
||||
@ -143,14 +152,14 @@ Terminology
|
||||
:Catalog: A media catalog stores information about the media content.
|
||||
|
||||
|
||||
Tape Quickstart
|
||||
Tape Quick Start
|
||||
---------------
|
||||
|
||||
1. Configure your tape hardware (drives and changers)
|
||||
|
||||
2. Configure one or more media pools
|
||||
|
||||
3. Label your tape cartridges.
|
||||
3. Label your tape cartridges
|
||||
|
||||
4. Start your first tape backup job ...
|
||||
|
||||
@ -159,9 +168,10 @@ Configuration
|
||||
-------------
|
||||
|
||||
Please note that you can configure anything using the graphical user
|
||||
interface or the command line interface. Both methods results in the
|
||||
interface or the command line interface. Both methods result in the
|
||||
same configuration.
|
||||
|
||||
.. _tape_changer_config:
|
||||
|
||||
Tape changers
|
||||
~~~~~~~~~~~~~
|
||||
@ -169,8 +179,10 @@ Tape changers
|
||||
Tape changers (robots) are part of a `Tape Library`_. You can skip
|
||||
this step if you are using a standalone drive.
|
||||
|
||||
Linux is able to auto detect those devices, and you can get a list
|
||||
of available devices using::
|
||||
Linux is able to auto detect these devices, and you can get a list
|
||||
of available devices using:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape changer scan
|
||||
┌─────────────────────────────┬─────────┬──────────────┬────────┐
|
||||
@ -179,18 +191,22 @@ of available devices using::
|
||||
│ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
|
||||
└─────────────────────────────┴─────────┴──────────────┴────────┘
|
||||
|
||||
In order to use that device with Proxmox, you need to create a
|
||||
configuration entry::
|
||||
In order to use a device with Proxmox Backup Server, you need to create a
|
||||
configuration entry:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape changer create sl3 --path /dev/tape/by-id/scsi-CC2C52
|
||||
|
||||
Where ``sl3`` is an arbitrary name you can choose.
|
||||
|
||||
.. Note:: Please use stable device path names from inside
|
||||
.. Note:: Please use the persistent device path names from inside
|
||||
``/dev/tape/by-id/``. Names like ``/dev/sg0`` may point to a
|
||||
different device after reboot, and that is not what you want.
|
||||
|
||||
You can show the final configuration with::
|
||||
You can display the final configuration with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape changer config sl3
|
||||
┌──────┬─────────────────────────────┐
|
||||
@ -201,7 +217,9 @@ You can show the final configuration with::
|
||||
│ path │ /dev/tape/by-id/scsi-CC2C52 │
|
||||
└──────┴─────────────────────────────┘
|
||||
|
||||
Or simply list all configured changer devices::
|
||||
Or simply list all configured changer devices:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape changer list
|
||||
┌──────┬─────────────────────────────┬─────────┬──────────────┬────────────┐
|
||||
@ -213,7 +231,9 @@ Or simply list all configured changer devices::
|
||||
The Vendor, Model and Serial number are auto detected, but only shown
|
||||
if the device is online.
|
||||
|
||||
To test your setup, please query the status of the changer device with::
|
||||
To test your setup, please query the status of the changer device with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape changer status sl3
|
||||
┌───────────────┬──────────┬────────────┬─────────────┐
|
||||
@ -231,15 +251,15 @@ To test your setup, please query the status of the changer device with::
└───────────────┴──────────┴────────────┴─────────────┘

Tape libraries usually provide some special import/export slots (also
called "mail slots"). Tapes inside those slots are acessible from
called "mail slots"). Tapes inside those slots are accessible from
outside, making it easy to add/remove tapes to/from the library. Those
tapes are considered to be "offline", so backup jobs will not use
them. Those special slots are auto-detected and marked as
them. Those special slots are auto-detected and marked as an
``import-export`` slot in the status command.

It's worth noting that some of the smaller tape libraries don't have
such slots. While they have something called "Mail Slot", that slot
is just a way to grab the tape from the gripper. But they are unable
such slots. While they have something called a "Mail Slot", that slot
is just a way to grab the tape from the gripper. They are unable
to hold media while the robot does other things. They also do not
expose that "Mail Slot" over the SCSI interface, so you won't see them in
the status output.
@ -247,12 +267,16 @@ the status output.
As a workaround, you can mark some of the normal slots as export
slots. The software treats those slots like real ``import-export``
slots, and the media inside those slots is considered to be 'offline'
(not available for backup)::
(not available for backup):

.. code-block:: console

# proxmox-tape changer update sl3 --export-slots 15,16

After that, you can see those artificial ``import-export`` slots in
the status output::
the status output:

.. code-block:: console

# proxmox-tape changer status sl3
┌───────────────┬──────────┬────────────┬─────────────┐
@ -273,12 +297,15 @@ the status output::
│ slot │ 14 │ │ │
└───────────────┴──────────┴────────────┴─────────────┘

.. _tape_drive_config:

Tape drives
~~~~~~~~~~~

Linux is able to auto detect tape drives, and you can get a list
of available tape drives using::
of available tape drives using:

.. code-block:: console

# proxmox-tape drive scan
┌────────────────────────────────┬────────┬─────────────┬────────┐
@ -288,24 +315,30 @@ of available tape drives using::
└────────────────────────────────┴────────┴─────────────┴────────┘

In order to use that drive with Proxmox, you need to create a
configuration entry::
configuration entry:

.. code-block:: console

# proxmox-tape drive create mydrive --path /dev/tape/by-id/scsi-12345-nst

.. Note:: Please use stable device path names from inside
.. Note:: Please use the persistent device path names from inside
``/dev/tape/by-id/``. Names like ``/dev/nst0`` may point to a
different device after reboot, and that is not what you want.

If you have a tape library, you also need to set the associated
changer device::
changer device:

.. code-block:: console

# proxmox-tape drive update mydrive --changer sl3 --changer-drivenum 0

The ``--changer-drivenum`` is only necessary if the tape library
includes more than one drive (The changer status command lists all
drivenums).
includes more than one drive (the changer status command lists all
drive numbers).

You can show the final configuration with::
You can display the final configuration with:

.. code-block:: console

# proxmox-tape drive config mydrive
┌─────────┬────────────────────────────────┐
@ -319,9 +352,11 @@ You can show the final configuration with::
└─────────┴────────────────────────────────┘

.. NOTE:: The ``changer-drivenum`` value 0 is not stored in the
configuration, because that is the default.
configuration, because it is the default.

To list all configured drives use::
To list all configured drives, use:

.. code-block:: console

# proxmox-tape drive list
┌──────────┬────────────────────────────────┬─────────┬────────┬─────────────┬────────┐
@ -333,7 +368,9 @@ To list all configured drives use::
The Vendor, Model and Serial number are auto detected, but only shown
if the device is online.

For testing, you can simply query the drive status with::
For testing, you can simply query the drive status with:

.. code-block:: console

# proxmox-tape status --drive mydrive
┌───────────┬────────────────────────┐
@ -345,9 +382,11 @@ For testing, you can simply query the drive status with::
└───────────┴────────────────────────┘

.. NOTE:: Blocksize should always be 0 (variable block size
mode). This is the default anyways.
mode). This is the default anyway.


.. _tape_media_pool_config:

Media Pools
~~~~~~~~~~~

@ -359,11 +398,11 @@ one media pool, so a job only uses tapes from that pool.
A media set is a group of continuously written tapes, used to split
the larger pool into smaller, restorable units. One or more backup
jobs write to a media set, producing an ordered group of
tapes. Media sets are identified by an unique ID. That ID and the
sequence number is stored on each tape of that set (tape label).
tapes. Media sets are identified by a unique ID. That ID and the
sequence number are stored on each tape of that set (tape label).

Media sets are the basic unit for restore tasks, i.e. you need all
tapes in the set to restore the media set content. Data is fully
Media sets are the basic unit for restore tasks. This means that you need
every tape in the set to restore the media set contents. Data is fully
deduplicated inside a media set.


@ -374,35 +413,35 @@ one media pool, so a job only uses tapes from that pool.

- Try to use the current media set.

This setting produce one large media set. While this is very
This setting produces one large media set. While this is very
space efficient (deduplication, no unused space), it can lead to
long restore times, because restore jobs needs to read all tapes in the
long restore times, because restore jobs need to read all tapes in the
set.

.. NOTE:: Data is fully deduplicated inside a media set. That
.. NOTE:: Data is fully deduplicated inside a media set. This
also means that data is randomly distributed over the tapes in
the set. So even if you restore a single VM, this may have to
read data from all tapes inside the media set.
the set. Thus, even if you restore a single VM, data may have to be
read from all tapes inside the media set.

Larger media sets are also more error prone, because a single
damaged media makes the restore fail.
Larger media sets are also more error-prone, because a single
damaged tape makes the restore fail.

Usage scenario: Mostly used with tape libraries, and you manually
Usage scenario: Mostly used with tape libraries. You manually
trigger new set creation by running a backup job with the
``--export`` option.

.. NOTE:: Retention period starts with the existence of a newer
media set.
media set.

- Always create a new media set.

With this setting each backup job creates a new media set. This
is less space efficient, because the last media from the last set
With this setting, each backup job creates a new media set. This
is less space efficient, because the media from the last set
may not be fully written, leaving the remaining space unused.

The advantage is that this produces media sets of minimal
size. Small set are easier to handle, you can move sets to an
off-site vault, and restore is much faster.
size. Small sets are easier to handle, can be moved more conveniently
to an off-site vault, and can be restored much faster.

.. NOTE:: Retention period starts with the creation time of the
media set.
@ -417,7 +456,7 @@ one media pool, so a job only uses tapes from that pool.
For example, the value ``weekly`` (or ``Mon *-*-* 00:00:00``)
will create a new set each week.

This balances between space efficency and media count.
This balances between space efficiency and media count.

.. NOTE:: Retention period starts when the calendar event
triggers.
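
To illustrate this policy: the pool examples further below create a pool
named ``daily``; switching it to weekly media-set allocation would then
look like this (a sketch using the documented ``--allocation`` option and
the documented ``weekly`` value):

.. code-block:: console

  # proxmox-tape pool update daily --allocation weekly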
@ -426,13 +465,13 @@ one media pool, so a job only uses tapes from that pool.

- Required tape is offline (and you use a tape library).

- Current set contains damaged of retired tapes.
- Current set contains damaged or retired tapes.

- Media pool encryption changed
- Media pool encryption has changed

- Database consistency errors, e.g. if the inventory does not
contain required media info, or contain conflicting infos
(outdated data).
- Database consistency errors, for example, if the inventory does not
contain the required media information, or it contains conflicting
information (outdated data).

.. topic:: Retention Policy

@ -449,40 +488,48 @@ one media pool, so a job only uses tapes from that pool.

.. topic:: Hardware Encryption

LTO4 (or later) tape drives support hardware encryption. If you
LTO-4 (or later) tape drives support hardware encryption. If you
configure the media pool to use encryption, all data written to the
tapes is encrypted using the configured key.

That way, unauthorized users cannot read data from the media,
e.g. if you loose a media while shipping to an offsite location.
This way, unauthorized users cannot read data from the media,
for example, if you lose a tape while shipping to an offsite location.

.. Note:: If the backup client also encrypts data, data on tape
.. Note:: If the backup client also encrypts data, data on the tape
will be double encrypted.

The password protected key is stored on each media, so it is
possbible to `restore the key <restore_encryption_key_>`_ using the password. Please make sure
you remember the password in case you need to restore the key.
The password protected key is stored on each medium, so that it is
possible to `restore the key <tape_restore_encryption_key_>`_ using
the password. Please make sure to remember the password, in case
you need to restore the key.


.. NOTE:: FIXME: Add note about global content namespace. (We do not store
the source datastore, so it is impossible to distinguish
store1:/vm/100 from store2:/vm/100. Please use different media
pools if the source is from a different name space)
.. NOTE:: We use a global content namespace, meaning we do not store the
source datastore name. Because of this, it is impossible to distinguish
store1:/vm/100 from store2:/vm/100. Please use different media pools
if the sources are from different namespaces with conflicting names
(for example, if the sources are from different Proxmox VE clusters).


The following command creates a new media pool::
The following command creates a new media pool:

.. code-block:: console

// proxmox-tape pool create <name> --drive <string> [OPTIONS]

# proxmox-tape pool create daily --drive mydrive


Additional option can be set later using the update command::
Additional options can be set later, using the update command:

.. code-block:: console

# proxmox-tape pool update daily --allocation daily --retention 7days


To list all configured pools use::
To list all configured pools, use:

.. code-block:: console

# proxmox-tape pool list
┌───────┬──────────┬────────────┬───────────┬──────────┐
@ -491,58 +538,142 @@ To list all configured pools use::
│ daily │ mydrive │ daily │ 7days │ │
└───────┴──────────┴────────────┴───────────┴──────────┘

.. _tape_backup_job_config:

Tape Jobs
~~~~~~~~~
Tape Backup Jobs
~~~~~~~~~~~~~~~~

To automate tape backup, you can configure tape backup jobs which
write datastore content to a media pool, based on a specific time schedule.
The required settings are:

- ``store``: The datastore you want to back up

- ``pool``: The media pool - only tape cartridges from that pool are
used.

- ``drive``: The tape drive.

- ``schedule``: Job schedule (see :ref:`calendar-event-scheduling`)

For example, to configure a tape backup job for datastore ``vmstore1``
use:

.. code-block:: console

# proxmox-tape backup-job create job2 --store vmstore1 \
--pool yourpool --drive yourdrive --schedule daily

The backup includes all snapshots from a backup group by default. You can
set the ``latest-only`` flag to include only the latest snapshots:

.. code-block:: console

# proxmox-tape backup-job update job2 --latest-only

Backup jobs can use email to send tape request notifications or
report errors. You can set the notification user with:

.. code-block:: console

# proxmox-tape backup-job update job2 --notify-user root@pam

.. Note:: The email address is a property of the user (see :ref:`user_mgmt`).

It is sometimes useful to eject the tape from the drive after a
backup. For a standalone drive, the ``eject-media`` option ejects the
tape, making sure that the following backup cannot use the tape
(unless someone manually loads the tape again). For tape libraries,
this option unloads the tape to a free slot, which provides better
dust protection than inside a drive:

.. code-block:: console

# proxmox-tape backup-job update job2 --eject-media

.. Note:: For failed jobs, the tape remains in the drive.

For tape libraries, the ``export-media`` option moves all tapes from
the media set to an export slot, making sure that the following backup
cannot use the tapes. An operator can pick up those tapes and move them
to a vault.

.. code-block:: console

# proxmox-tape backup-job update job2 --export-media

.. Note:: The ``export-media`` option can be used to force the start
of a new media set, because tapes from the current set are no
longer online.

It is also possible to run backup jobs manually:

.. code-block:: console

# proxmox-tape backup-job run job2

To remove a job, please use:

.. code-block:: console

# proxmox-tape backup-job remove job2


Administration
--------------

Many sub-command of the ``proxmox-tape`` command line tools take a
Many sub-commands of the ``proxmox-tape`` command line tool take a
parameter called ``--drive``, which specifies the tape drive you want
to work on. For convenience, you can set that in an environment
variable::
to work on. For convenience, you can set this in an environment
variable:

.. code-block:: console

# export PROXMOX_TAPE_DRIVE=mydrive

You can then omit the ``--drive`` parameter from the command. If the
drive has an associated changer device, you may also omit the changer
parameter from commands that needs a changer device, for example::
parameter from commands that need a changer device, for example:

.. code-block:: console

# proxmox-tape changer status

Should displays the changer status of the changer device associated with
This should display the changer status of the changer device associated with
drive ``mydrive``.


Label Tapes
~~~~~~~~~~~

By default, tape cartidges all looks the same, so you need to put a
label on them for unique identification. So first, put a sticky paper
By default, tape cartridges all look the same, so you need to put a
label on them for unique identification. First, put a sticky paper
label with some human readable text on the cartridge.

If you use a `Tape Library`_, you should use an 8 character string
encoded as `Code 39`_, as definded in the `LTO Ultrium Cartridge Label
Specification`_. You can either bye such barcode labels from your
cartidge vendor, or print them yourself. You can use our `LTO Barcode
Generator`_ App for that.
encoded as `Code 39`_, as defined in the `LTO Ultrium Cartridge Label
Specification`_. You can either buy such barcode labels from your
cartridge vendor, or print them yourself. You can use our `LTO Barcode
Generator`_ app to print them.

Next, you need to write that same label text to the tape, so that the
software can uniquely identify the tape too.

For a standalone drive, manually insert the new tape cartidge into the
drive and run::
For a standalone drive, manually insert the new tape cartridge into the
drive and run:

.. code-block:: console

# proxmox-tape label --changer-id <label-text> [--pool <pool-name>]

You may omit the ``--pool`` argument to allow the tape to be used by any pool.

.. Note:: For safety reasons, this command fails if the tape contain
any data. If you want to overwrite it anways, erase the tape first.
.. Note:: For safety reasons, this command fails if the tape contains
any data. If you want to overwrite it anyway, erase the tape first.

You can verify success by reading back the label::
You can verify success by reading back the label:

.. code-block:: console

# proxmox-tape read-label
┌─────────────────┬──────────────────────────────────────┐
@ -566,7 +697,9 @@ You can verify success by reading back the label::

If you have a tape library, apply the sticky barcode label to the tape
cartridges first. Then load those empty tapes into the library. You
can then label all unlabeled tapes with a single command::
can then label all unlabeled tapes with a single command:

.. code-block:: console

# proxmox-tape barcode-label [--pool <pool-name>]

@ -574,7 +707,9 @@
Run Tape Backups
~~~~~~~~~~~~~~~~

To manually run a backup job use::
To manually run a backup job, use:

.. code-block:: console

# proxmox-tape backup <store> <pool> [OPTIONS]

@ -583,11 +718,11 @@ The following options are available:
--eject-media Eject media upon job completion.

It is normally good practice to eject the tape after use. This unmounts the
tape from the drive and prevents the tape from getting dirty with dust.
tape from the drive and prevents the tape from getting dusty.

--export-media-set Export media set upon job completion.

After a sucessful backup job, this moves all tapes from the used
After a successful backup job, this moves all tapes from the used
media set into import-export slots. The operator can then pick up
those tapes and move them to a media vault.

@ -602,7 +737,9 @@ catalogs, you need to restore them first. Please note that you need
the catalog to find your data, but restoring a complete media-set does
not need media catalogs.

The following command shows the media content (from catalog)::
The following command lists the media content (from catalog):

.. code-block:: console

# proxmox-tape media content
┌────────────┬──────┬──────────────────────────┬────────┬────────────────────────────────┬──────────────────────────────────────┐
@ -615,7 +752,9 @@ The following command shows the media content (from catalog)::


A restore job reads the data from the media set and moves it back to
data disk (datastore)::
the data disk (datastore):

.. code-block:: console

// proxmox-tape restore <media-set-uuid> <datastore>

@ -633,14 +772,18 @@ Restore Catalog
Encryption Key Management
~~~~~~~~~~~~~~~~~~~~~~~~~

Creating a new encryption key::
Creating a new encryption key:

.. code-block:: console

# proxmox-tape key create --hint "tape pw 2020"
Tape Encryption Key Password: **********
Verify Password: **********
"14:f8:79:b9:f5:13:e5:dc:bf:b6:f9:88:48:51:81:dc:79:bf:a0:22:68:47:d1:73:35:2d:b6:20:e1:7f:f5:0f"

List existing encryption keys::
List existing encryption keys:

.. code-block:: console

# proxmox-tape key list
┌───────────────────────────────────────────────────┬───────────────┐
@ -649,7 +792,9 @@ List existing encryption keys::
│ 14:f8:79:b9:f5:13:e5:dc: ... :b6:20:e1:7f:f5:0f │ tape pw 2020 │
└───────────────────────────────────────────────────┴───────────────┘

To show encryption key details::
To show encryption key details:

.. code-block:: console

# proxmox-tape key show 14:f8:79:b9:f5:13:e5:dc:...:b6:20:e1:7f:f5:0f
┌─────────────┬───────────────────────────────────────────────┐
@ -668,37 +813,43 @@ To show encryption key details::

The ``paperkey`` subcommand can be used to create a QR encoded
version of a tape encryption key. The following command sends the output of the
``paperkey`` command to a text file, for easy printing::
``paperkey`` command to a text file, for easy printing:

.. code-block:: console

proxmox-tape key paperkey <fingerprint> --output-format text > qrkey.txt


.. _restore_encryption_key:
.. _tape_restore_encryption_key:

Restoring Encryption Keys
^^^^^^^^^^^^^^^^^^^^^^^^^

You can restore the encryption key from the tape, using the password
used to generate the key. First, load the tape you want to restore
into the drive. Then run::
into the drive. Then run:

.. code-block:: console

# proxmox-tape key restore
Tepe Encryption Key Password: ***********

If the password is correct, the key will get imported to the
database. Further restore jobs automatically use any availbale key.
database. Further restore jobs automatically use any available key.


Tape Cleaning
~~~~~~~~~~~~~

LTO tape drives requires regular cleaning. This is done by loading a
LTO tape drives require regular cleaning. This is done by loading a
cleaning cartridge into the drive, which is a manual task for
standalone drives.

For tape libraries, cleaning cartridges are identified using special
labels starting with letters "CLN". For example, our tape library has a
cleaning cartridge inside slot 3::
cleaning cartridge inside slot 3:

.. code-block:: console

# proxmox-tape changer status sl3
┌───────────────┬──────────┬────────────┬─────────────┐
@ -715,7 +866,9 @@ cleaning cartridge inside slot 3::
│ ... │ ... │ │ │
└───────────────┴──────────┴────────────┴─────────────┘

To initiate a cleaning operation simply run::
To initiate a cleaning operation simply run:

.. code-block:: console

# proxmox-tape clean

@ -723,10 +876,85 @@ This command does the following:

- find the cleaning tape (in slot 3)

- unload the current media from the drive (back to slot1)
- unload the current media from the drive (back to slot 1)

- load the cleaning tape into the drive

- run drive cleaning operation

- unload the cleaning tape (to slot 3)



Configuration Files
-------------------

``media-pool.cfg``
~~~~~~~~~~~~~~~~~~

File Format
^^^^^^^^^^^

.. include:: config/media-pool/format.rst


Options
^^^^^^^

.. include:: config/media-pool/config.rst


``tape.cfg``
~~~~~~~~~~~~

File Format
^^^^^^^^^^^

.. include:: config/tape/format.rst


Options
^^^^^^^

.. include:: config/tape/config.rst


``tape-job.cfg``
~~~~~~~~~~~~~~~~

File Format
^^^^^^^^^^^

.. include:: config/tape-job/format.rst


Options
^^^^^^^

.. include:: config/tape-job/config.rst



Command Syntax
--------------

``proxmox-tape``
----------------

.. include:: proxmox-tape/synopsis.rst


``pmt``
-------

.. include:: pmt/options.rst

....

.. include:: pmt/synopsis.rst


``pmtx``
--------

.. include:: pmtx/synopsis.rst

docs/technical-overview.rst (new file, 166 lines)
@ -0,0 +1,166 @@
.. _tech_design_overview:

Technical Overview
==================

Datastores
----------

A Datastore is the logical place where :ref:`Backup Snapshots
<term_backup_snapshot>` and their chunks are stored. Snapshots consist of a
manifest, blobs, dynamic- and fixed-indexes (see :ref:`terms`), and are
stored in the following directory structure:

 <datastore-root>/<type>/<id>/<time>/

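For example, a snapshot of VM 100 taken at the start of 2021 would live
under a path like this (illustrative values, with ``<time>`` in RFC 3339
format):

 <datastore-root>/vm/100/2021-01-01T00:00:00Z/
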
The deduplication of datastores is based on reusing chunks, which are
referenced by the indexes in a backup snapshot. This means that multiple
indexes can reference the same chunks, reducing the amount of space needed to
contain the data (even across backup snapshots).

Chunks
------

A chunk is some (possibly encrypted) data with a CRC-32 checksum at the end and
a type marker at the beginning. It is identified by the SHA-256 checksum of its
content.

To generate such chunks, backup data is split either into fixed-size or
dynamically sized chunks. The same content will be hashed to the same checksum.

The chunks of a datastore are found in

 <datastore-root>/.chunks/

This chunk directory is further subdivided by the first four hexadecimal
digits of the chunk's checksum, so the chunk with the checksum

 a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b

lives in

 <datastore-root>/.chunks/a342/

This is done to reduce the number of files per directory, as having many files
per directory can be bad for file system performance.

These chunk directories ('0000'-'ffff') will be preallocated when a datastore
is created.

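A minimal sketch of this identification and placement scheme, assuming the
``sha2`` and ``hex`` crates and assuming the chunk file itself is named after
the full digest (this is not code from this repository):

.. code-block:: rust

  use sha2::{Digest, Sha256};

  /// Identify a chunk by the SHA-256 of its content and derive the
  /// '0000'-'ffff' subdirectory it is stored under.
  fn chunk_path(datastore_root: &str, content: &[u8]) -> String {
      let digest = hex::encode(Sha256::digest(content));
      // the first four hex digits of the digest select the subdirectory
      format!("{}/.chunks/{}/{}", datastore_root, &digest[..4], digest)
  }
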
Fixed-sized Chunks
^^^^^^^^^^^^^^^^^^

For block based backups (like VMs), fixed-sized chunks are used. The content
(disk image) is split into chunks of the same length (typically 4 MiB).

This works very well for VM images, since the file system on the guest most
often tries to allocate files in contiguous pieces, so new files get new
blocks, and changing existing files changes only their own blocks.

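Because the split is purely positional, every byte offset of the image maps
to exactly one chunk. A minimal sketch of that mapping (illustrative; the
4 MiB chunk size is the typical value mentioned above):

.. code-block:: rust

  // Illustrative: fixed-size chunking maps image offsets to chunk indexes.
  const CHUNK_SIZE: u64 = 4 * 1024 * 1024; // 4 MiB

  /// Index of the chunk that a write at `offset` dirties.
  fn chunk_index(offset: u64) -> u64 {
      offset / CHUNK_SIZE
  }

  /// Byte range of the image covered by chunk `index`.
  fn chunk_range(index: u64) -> std::ops::Range<u64> {
      index * CHUNK_SIZE..(index + 1) * CHUNK_SIZE
  }

This one-to-one mapping between blocks and chunks is what the dirty bitmap
optimization described next relies on.
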
As an optimization, VMs in `Proxmox VE`_ can make use of 'dirty bitmaps', which
can track the changed blocks of an image. Since these bitmaps are also a
representation of the image split into chunks, there is a direct relation
between dirty blocks of the image and chunks which need to get uploaded, so
only modified chunks of the disk have to be uploaded for a backup.

Since the image is always split into chunks of the same size, unchanged blocks
will result in identical checksums for those chunks, so such chunks do not need
to be backed up again. This way storage snapshots are not needed to find the
changed blocks.

For consistency, `Proxmox VE`_ uses a QEMU internal snapshot mechanism that
does not rely on storage snapshots either.

Dynamically sized Chunks
^^^^^^^^^^^^^^^^^^^^^^^^

If one does not want to back up block-based systems but rather file-based
systems, using fixed-sized chunks is not a good idea, since every time a file
would change in size, the remaining data gets shifted around and this would
result in many chunks changing, reducing the amount of deduplication.

To improve this, `Proxmox Backup`_ Server uses dynamically sized chunks
instead. Instead of splitting an image into fixed sizes, it first generates a
consistent file archive (:ref:`pxar <pxar-format>`) and uses a rolling hash
over this on-the-fly generated archive to calculate chunk boundaries.

We use a variant of Buzhash which is a cyclic polynomial algorithm. It works
by continuously calculating a checksum while iterating over the data, and on
certain conditions it triggers a hash boundary.

Assuming that most files of the system that is to be backed up have not
changed, eventually the algorithm triggers the boundary on the same data as a
previous backup, resulting in chunks that can be reused.

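The boundary rule can be sketched as follows. This only illustrates the
general content-defined chunking idea; the window size, the mixing function,
and the boundary mask are placeholder values, not the actual Buzhash
parameters used by Proxmox Backup Server:

.. code-block:: rust

  // Illustrative content-defined chunking with a toy rolling hash.
  const WINDOW: usize = 48; // assumed rolling window size
  const BOUNDARY_MASK: u32 = (1 << 21) - 1; // toy value, ~2 MiB average

  fn find_boundaries(data: &[u8]) -> Vec<usize> {
      let mut boundaries = Vec::new();
      let mut hash: u32 = 0;
      for (i, &byte) in data.iter().enumerate() {
          // rotate the state and mix in the byte entering the window
          hash = hash.rotate_left(1) ^ byte as u32;
          if i >= WINDOW {
              // remove the byte that just left the window again
              hash ^= (data[i - WINDOW] as u32).rotate_left((WINDOW % 32) as u32);
              // a boundary triggers whenever the low bits are all zero
              if hash & BOUNDARY_MASK == 0 {
                  boundaries.push(i + 1);
              }
          }
      }
      boundaries
  }

Since the decision depends only on the bytes inside the window, identical
runs of data produce identical boundaries again, which is exactly what makes
the resulting chunks reusable across backups.
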
Encrypted Chunks
^^^^^^^^^^^^^^^^

Encrypted chunks are a special case. Both fixed- and dynamically sized chunks
can be encrypted, and they are handled in a slightly different manner than
normal chunks.

The hashes of encrypted chunks are calculated not with the actual (encrypted)
chunk content, but with the plaintext content concatenated with the encryption
key. This way, two chunks of the same data encrypted with different keys
generate two different checksums and no collisions occur for multiple
encryption keys.

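A sketch of that identifier derivation, assuming the ``sha2`` crate; the
exact byte layout of the concatenation is an assumption here, only the
principle matters:

.. code-block:: rust

  use sha2::{Digest, Sha256};

  /// Identifier of an encrypted chunk: derived from the plaintext and
  /// the encryption key, never from the ciphertext.
  fn encrypted_chunk_id(plaintext: &[u8], enc_key: &[u8]) -> [u8; 32] {
      let mut hasher = Sha256::new();
      hasher.update(plaintext);
      hasher.update(enc_key); // different key => different id for same data
      hasher.finalize().into()
  }
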
This is done to speed up the client part of the backup, since it only needs to
encrypt chunks that are actually getting uploaded. Chunks that exist already in
the previous backup do not need to be encrypted and uploaded.

Caveats and Limitations
-----------------------

Notes on hash collisions
^^^^^^^^^^^^^^^^^^^^^^^^

Every hashing algorithm has a chance to produce collisions, meaning two (or
more) inputs generate the same checksum. For SHA-256, this chance is
negligible. To estimate the chance of such a collision, one can use the ideas
of the 'birthday problem' from probability theory. For big numbers, this is
actually infeasible to calculate with regular computers, but there is a good
approximation:

.. math::

 p(n, d) = 1 - e^{-n^2/(2d)}

Where `n` is the number of tries, and `d` is the number of possibilities.
For a concrete example, let's assume a large datastore of 1 PiB and an average
chunk size of 4 MiB. That means :math:`n = 268435456` tries, and :math:`d =
2^{256}` possibilities. Inserting those values into the formula from earlier,
you will see that the probability of a collision in that scenario is:

.. math::

 3.1115 * 10^{-61}

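This number can be checked by hand: 1 PiB of 4 MiB chunks means
:math:`n = 2^{28}`, and since :math:`1 - e^{-x} \approx x` for small
:math:`x`,

.. math::

 p \approx \frac{n^2}{2d} = \frac{2^{56}}{2 \cdot 2^{256}} = 2^{-201} \approx 3.1 \cdot 10^{-61}
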
For context, in a lottery game of guessing 6 out of 45, the chance to correctly
guess all 6 numbers is only :math:`1.2277 * 10^{-7}`, so the chance of such a
collision is roughly the same as winning 9 such lotto games *in a row*.

In conclusion, it is extremely unlikely that such a collision would occur by
accident in a normal datastore.

Additionally, SHA-256 is prone to length extension attacks, but since there is
an upper limit for how big the chunks are, this is not a problem, since a
potential attacker cannot arbitrarily add content to the data beyond that
limit.

File-based Backup
^^^^^^^^^^^^^^^^^

Since dynamically sized chunks (for file-based backups) are created on a custom
archive format (pxar) and not over the files directly, there is no relation
between files and the chunks. This means that the Proxmox Backup client has to
read all files again for every backup, otherwise it would not be possible to
generate a consistent, independent pxar archive where the original chunks can be
reused. Note that, even so, only new or changed chunks will be uploaded.

Verification of encrypted chunks
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

For encrypted chunks, only the checksum of the original (plaintext) data is
available, making it impossible for the server (without the encryption key) to
verify its content against it. Instead, only the CRC-32 checksum gets checked.
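
A sketch of the check the server can still perform; that the stored CRC-32
covers the encrypted payload as laid out on disk is an assumption here
(``crc32fast`` crate assumed):

.. code-block:: rust

  /// Server-side verification of an encrypted chunk: recompute the
  /// CRC-32 over the stored payload and compare it with the stored
  /// checksum. The content digest itself cannot be checked without
  /// the encryption key.
  fn verify_encrypted_chunk(payload: &[u8], stored_crc: u32) -> bool {
      let mut hasher = crc32fast::Hasher::new();
      hasher.update(payload);
      hasher.finalize() == stored_crc
  }
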
@ -1,3 +1,5 @@
.. _terms:

Terminology
===========

@ -99,6 +101,7 @@ Backup Group
The tuple ``<type>/<ID>`` is called a backup group. Such a group
may contain one or more backup snapshots.

.. _term_backup_snapshot:

Backup Snapshot
---------------
@ -286,26 +286,26 @@ you can use the ``proxmox-backup-manager user permission`` command:
- Datastore.Backup (*)

.. _user_tfa:

Two-factor authentication
-------------------------

Introduction
~~~~~~~~~~~~

Simple authentication requires only secret piece of evidence (one factor) that
a user can successfully claim a identiy (authenticate), for example, that you
are allowed to login as `root@pam` on a specific Proxmox Backup Server.
If the password gets stolen, or leaked in another way, anybody can use it to
login - even if they should not be allowed to do so.
With simple authentication, only a password (single factor) is required to
successfully claim an identity (authenticate), for example, to be able to log in
as `root@pam` on a specific instance of Proxmox Backup Server. In this case, if
the password gets stolen or leaked, anybody can use it to log in - even if they
should not be allowed to do so.

With Two-factor authentication (TFA) a user is asked for an additional factor,
to proof his authenticity. The extra factor is different from a password
(something only the user knows), it is something only the user has, for example
a piece of hardware (security key) or an secret saved on the users smartphone.

This means that a remote user can never get hold on such a physical object. So,
even if that user would know your password they cannot successfully
authenticate as you, as your second factor is missing.
With two-factor authentication (TFA), a user is asked for an additional factor
to verify their authenticity. Rather than relying on something only the user
knows (a password), this extra factor requires something only the user has, for
example, a piece of hardware (security key) or a secret saved on the user's
smartphone. This prevents a remote user from gaining unauthorized access to an
account, as even if they have the password, they will not have access to the
physical object (second factor).

.. image:: images/screenshots/pbs-gui-tfa-login.png
:align: right
@ -314,30 +314,33 @@ authenticate as you, as your second factor is missing.
Available Second Factors
~~~~~~~~~~~~~~~~~~~~~~~~

You can setup more than one second factor to avoid that losing your smartphone
or security key permanently locks you out from your account.
You can set up multiple second factors, in order to avoid a situation in which
losing your smartphone or security key locks you out of your account
permanently.

There are three different two-factor authentication methods supported:
Proxmox Backup Server supports three different two-factor authentication
methods:

* TOTP (`Time-based One-Time Password <https://en.wikipedia.org/wiki/Time-based_One-Time_Password>`_).
A short code derived from a shared secret and the current time, it switches
A short code derived from a shared secret and the current time; it changes
every 30 seconds (see the sketch after this list).

* WebAuthn (`Web Authentication <https://en.wikipedia.org/wiki/WebAuthn>`_).
A general standard for authentication. It is implemented by various security
devices like hardware keys or trusted platform modules (TPM) from a computer
devices, like hardware keys or trusted platform modules (TPM) from a computer
or smart phone.

* Single use Recovery Keys. A list of keys which should either be printed out
and locked in a secure fault or saved digitally in a electronic vault.
Each key can be used only once, they are perfect for ensuring you are not
locked out even if all of your other second factors are lost or corrupt.
and locked in a secure place or saved digitally in an electronic vault.
Each key can be used only once. These are perfect for ensuring that you are
not locked out, even if all of your other second factors are lost or corrupt.
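
As a sketch of what the TOTP factor computes - RFC 6238 with its usual
defaults of a 30 second time step and six digits; the ``hmac`` and ``sha1``
crates are assumed, and this is not code from this repository:

.. code-block:: rust

  use hmac::{Hmac, Mac};
  use sha1::Sha1;

  /// Compute a six-digit TOTP code for the given unix time.
  fn totp(secret: &[u8], unix_time: u64) -> u32 {
      let counter = unix_time / 30; // the code changes every 30 seconds
      let mut mac = Hmac::<Sha1>::new_from_slice(secret)
          .expect("HMAC accepts any key length");
      mac.update(&counter.to_be_bytes());
      let digest = mac.finalize().into_bytes();
      // dynamic truncation, as defined in RFC 4226
      let offset = (digest[19] & 0x0f) as usize;
      let bin = u32::from_be_bytes([
          digest[offset] & 0x7f,
          digest[offset + 1],
          digest[offset + 2],
          digest[offset + 3],
      ]);
      bin % 1_000_000
  }

Server and authenticator app share only the secret; both derive the current
code from the time, so an intercepted code is useless once its 30 second
window has passed.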


Setup
~~~~~

.. _user_tfa_setup_totp:

TOTP
^^^^

@ -345,15 +348,16 @@ TOTP
:align: right
:alt: Add a new user

There is not server setup required, simply install a TOTP app on your
There is no server setup required. Simply install a TOTP app on your
smartphone (for example, `FreeOTP <https://freeotp.github.io/>`_) and use the
Proxmox Backup Server web-interface to add a TOTP factor.

.. _user_tfa_setup_webauthn:

WebAuthn
^^^^^^^^

For WebAuthn to work you need to have two things:
For WebAuthn to work, you need to have two things:

* a trusted HTTPS certificate (for example, by using `Let's Encrypt
<https://pbs.proxmox.com/wiki/index.php/HTTPS_Certificate_Configuration>`_)
@ -361,10 +365,11 @@ For WebAuthn to work you need to have two things:
* setup the WebAuthn configuration (see *Configuration -> Authentication* in the
Proxmox Backup Server web-interface). This can be auto-filled in most setups.

Once you fullfilled both of those requirements, you can add a WebAuthn
Once you have fulfilled both of these requirements, you can add a WebAuthn
configuration in the *Access Control* panel.

.. _user_tfa_setup_recovery_keys:

Recovery Keys
^^^^^^^^^^^^^

@ -372,7 +377,7 @@ Recovery Keys
:align: right
:alt: Add a new user

Recovery key codes do not need any preparation, you can simply create a set of
Recovery key codes do not need any preparation; you can simply create a set of
recovery keys in the *Access Control* panel.

.. note:: There can only be one set of single-use recovery keys per user at any
@ -381,7 +386,7 @@ recovery keys in the *Access Control* panel.
TFA and Automated Access
~~~~~~~~~~~~~~~~~~~~~~~~

Two-factor authentication is only implemented for the web-interface, you should
Two-factor authentication is only implemented for the web-interface. You should
use :ref:`API Tokens <user_tokens>` for all other use cases, especially
non-interactive ones (for example, adding a Proxmox Backup server to Proxmox VE
non-interactive ones (for example, adding a Proxmox Backup Server to Proxmox VE
as a storage).
@ -15,19 +15,19 @@ fn extract_acl_node_data(
path: &str,
list: &mut Vec<AclListItem>,
exact: bool,
token_user: &Option<Authid>,
auth_id_filter: &Option<Authid>,
) {
// tokens can't have tokens, so we can return early
if let Some(token_user) = token_user {
if token_user.is_token() {
if let Some(auth_id_filter) = auth_id_filter {
if auth_id_filter.is_token() {
return;
}
}

for (user, roles) in &node.users {
if let Some(token_user) = token_user {
if let Some(auth_id_filter) = auth_id_filter {
if !user.is_token()
|| user.user() != token_user.user() {
|| user.user() != auth_id_filter.user() {
continue;
}
}
@ -43,7 +43,7 @@ fn extract_acl_node_data(
}
}
for (group, roles) in &node.groups {
if token_user.is_some() {
if auth_id_filter.is_some() {
continue;
}

@ -62,7 +62,7 @@ fn extract_acl_node_data(
}
for (comp, child) in &node.children {
let new_path = format!("{}/{}", path, comp);
extract_acl_node_data(child, &new_path, list, exact, token_user);
extract_acl_node_data(child, &new_path, list, exact, auth_id_filter);
}
}

@ -181,7 +181,7 @@ fn get_tfa_entry(userid: Userid, id: String) -> Result<TypedTfaInfo, Error> {

if let Some(user_data) = crate::config::tfa::read()?.users.remove(&userid) {
match {
// scope to prevent the temprary iter from borrowing across the whole match
// scope to prevent the temporary iter from borrowing across the whole match
let entry = tfa_id_iter(&user_data).find(|(_ty, _index, entry_id)| id == *entry_id);
entry.map(|(ty, index, _)| (ty, index))
} {
@ -240,7 +240,7 @@ fn get_tfa_entry(userid: Userid, id: String) -> Result<TypedTfaInfo, Error> {
]),
},
)]
/// Get a single TFA entry.
/// Delete a single TFA entry.
fn delete_tfa(
userid: Userid,
id: String,
@ -259,7 +259,7 @@ fn delete_tfa(
.ok_or_else(|| http_err!(NOT_FOUND, "no such entry: {}/{}", userid, id))?;

match {
// scope to prevent the temprary iter from borrowing across the whole match
// scope to prevent the temporary iter from borrowing across the whole match
let entry = tfa_id_iter(&user_data).find(|(_, _, entry_id)| id == *entry_id);
entry.map(|(ty, index, _)| (ty, index))
} {
@ -3,8 +3,6 @@
use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
use std::pin::Pin;

use anyhow::{bail, format_err, Error};
use futures::*;
@ -22,19 +20,20 @@ use proxmox::api::schema::*;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};

use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use crate::api2::types::*;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::api2::helpers;
use crate::backup::*;
use crate::config::datastore;
use crate::config::cached_user_info::CachedUserInfo;
use crate::pxar::create_zip;

use crate::server::{jobstate::Job, WorkerTask};
use crate::tools::{
self,
zip::{ZipEncoder, ZipEntry},
AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
};

@ -1294,7 +1293,7 @@ pub fn catalog(
backup_time: i64,
filepath: String,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
) -> Result<Vec<ArchiveEntry>, Error> {
let datastore = DataStore::lookup_datastore(&store)?;

let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@ -1326,112 +1325,14 @@ pub fn catalog(
let reader = BufferedDynamicReader::new(index, chunk_reader);

let mut catalog_reader = CatalogReader::new(reader);
let mut current = catalog_reader.root()?;
let mut components = vec![];

let path = if filepath != "root" && filepath != "/" {
base64::decode(filepath)?
} else {
vec![b'/']
};

if filepath != "root" {
components = base64::decode(filepath)?;
if !components.is_empty() && components[0] == b'/' {
components.remove(0);
}
for component in components.split(|c| *c == b'/') {
if let Some(entry) = catalog_reader.lookup(&current, component)? {
current = entry;
} else {
bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
}
}
}

let mut res = Vec::new();

for direntry in catalog_reader.read_dir(&current)? {
let mut components = components.clone();
components.push(b'/');
components.extend(&direntry.name);
let path = base64::encode(components);
let text = String::from_utf8_lossy(&direntry.name);
let mut entry = json!({
"filepath": path,
"text": text,
"type": CatalogEntryType::from(&direntry.attr).to_string(),
"leaf": true,
});
match direntry.attr {
DirEntryAttribute::Directory { start: _ } => {
entry["leaf"] = false.into();
},
DirEntryAttribute::File { size, mtime } => {
entry["size"] = size.into();
entry["mtime"] = mtime.into();
},
_ => {},
}
res.push(entry);
}

Ok(res.into())
}

fn recurse_files<'a, T, W>(
zip: &'a mut ZipEncoder<W>,
decoder: &'a mut Accessor<T>,
prefix: &'a Path,
file: FileEntry<T>,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
where
T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
Box::pin(async move {
let metadata = file.entry().metadata();
let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();

match file.kind() {
EntryKind::File { .. } => {
let entry = ZipEntry::new(
path,
metadata.stat.mtime.secs,
metadata.stat.mode as u16,
true,
);
zip.add_entry(entry, Some(file.contents().await?))
.await
.map_err(|err| format_err!("could not send file entry: {}", err))?;
}
EntryKind::Hardlink(_) => {
let realfile = decoder.follow_hardlink(&file).await?;
let entry = ZipEntry::new(
path,
metadata.stat.mtime.secs,
metadata.stat.mode as u16,
true,
);
zip.add_entry(entry, Some(realfile.contents().await?))
.await
.map_err(|err| format_err!("could not send file entry: {}", err))?;
}
EntryKind::Directory => {
let dir = file.enter_directory().await?;
let mut readdir = dir.read_dir();
let entry = ZipEntry::new(
path,
metadata.stat.mtime.secs,
metadata.stat.mode as u16,
false,
);
zip.add_entry::<FileContents<T>>(entry, None).await?;
while let Some(entry) = readdir.next().await {
let entry = entry?.decode_entry().await?;
recurse_files(zip, decoder, prefix, entry).await?;
}
}
_ => {} // ignore all else
};

Ok(())
})
helpers::list_dir_content(&mut catalog_reader, &path)
}

#[sortable]
@ -1509,9 +1410,10 @@ pub fn pxar_file_download(

let decoder = Accessor::new(reader, archive_size).await?;
let root = decoder.open_root().await?;
let path = OsStr::from_bytes(file_path).to_os_string();
let file = root
.lookup(OsStr::from_bytes(file_path)).await?
.ok_or_else(|| format_err!("error opening '{:?}'", file_path))?;
.lookup(&path).await?
.ok_or_else(|| format_err!("error opening '{:?}'", path))?;

let body = match file.kind() {
EntryKind::File { .. } => Body::wrap_stream(
@ -1525,37 +1427,19 @@ pub fn pxar_file_download(
.map_err(move |err| {
eprintln!(
"error during streaming of hardlink '{:?}' - {}",
filepath, err
path, err
);
err
}),
),
EntryKind::Directory => {
let (sender, receiver) = tokio::sync::mpsc::channel(100);
let mut prefix = PathBuf::new();
let mut components = file.entry().path().components();
components.next_back(); // discar last
for comp in components {
prefix.push(comp);
}

let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);

crate::server::spawn_internal_task(async move {
let mut zipencoder = ZipEncoder::new(channelwriter);
let mut decoder = decoder;
recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
.await
.map_err(|err| eprintln!("error during creating of zip: {}", err))?;

zipencoder
.finish()
.await
.map_err(|err| eprintln!("error during finishing of zip: {}", err))
});

crate::server::spawn_internal_task(
create_zip(channelwriter, decoder, path.clone(), false)
);
Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
eprintln!("error during streaming of zip '{:?}' - {}", path, err);
err
}))
}
@ -1,4 +1,4 @@
//! Datastore Syncronization Job Management
//! Datastore Synchronization Job Management

use anyhow::{bail, format_err, Error};
use serde_json::Value;
@ -7,16 +7,35 @@ use proxmox::api::{api, ApiMethod, Permission, Router, RpcEnvironment};
use proxmox::api::router::SubdirMap;
use proxmox::{list_subdirs_api_method, sortable};

use crate::api2::types::*;
use crate::api2::pull::do_sync_job;
use crate::api2::config::sync::{check_sync_job_modify_access, check_sync_job_read_access};

use crate::config::cached_user_info::CachedUserInfo;
use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
use crate::server::UPID;
use crate::server::jobstate::{Job, JobState};
use crate::tools::systemd::time::{
parse_calendar_event, compute_next_event};
use crate::{
api2::{
types::{
DATASTORE_SCHEMA,
JOB_ID_SCHEMA,
Authid,
},
pull::do_sync_job,
config::sync::{
check_sync_job_modify_access,
check_sync_job_read_access,
},
},
config::{
cached_user_info::CachedUserInfo,
sync::{
self,
SyncJobStatus,
SyncJobConfig,
},
},
server::{
jobstate::{
Job,
JobState,
compute_schedule_status,
},
},
};

#[api(
input: {
@ -30,7 +49,7 @@ use crate::tools::systemd::time::{
returns: {
description: "List configured jobs and their status.",
type: Array,
items: { type: sync::SyncJobStatus },
items: { type: SyncJobStatus },
},
access: {
description: "Limited to sync jobs where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
@ -49,48 +68,29 @@ pub fn list_sync_jobs(

let (config, digest) = sync::config()?;

let mut list: Vec<SyncJobStatus> = config
let job_config_iter = config
.convert_to_typed_array("sync")?
.into_iter()
.filter(|job: &SyncJobStatus| {
.filter(|job: &SyncJobConfig| {
if let Some(store) = &store {
&job.store == store
} else {
true
}
})
.filter(|job: &SyncJobStatus| {
let as_config: SyncJobConfig = job.into();
check_sync_job_read_access(&user_info, &auth_id, &as_config)
}).collect();
.filter(|job: &SyncJobConfig| {
check_sync_job_read_access(&user_info, &auth_id, &job)
});

for job in &mut list {
let mut list = Vec::new();

for job in job_config_iter {
let last_state = JobState::load("syncjob", &job.id)
.map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
let (upid, endtime, state, starttime) = match last_state {
JobState::Created { time } => (None, None, None, time),
JobState::Started { upid } => {
let parsed_upid: UPID = upid.parse()?;
(Some(upid), None, None, parsed_upid.starttime)
},
JobState::Finished { upid, state } => {
let parsed_upid: UPID = upid.parse()?;
(Some(upid), Some(state.endtime()), Some(state.to_string()), parsed_upid.starttime)
},
};

job.last_run_upid = upid;
job.last_run_state = state;
job.last_run_endtime = endtime;
let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;

let last = job.last_run_endtime.unwrap_or(starttime);

job.next_run = (|| -> Option<i64> {
let schedule = job.schedule.as_ref()?;
let event = parse_calendar_event(&schedule).ok()?;
// ignore errors
compute_next_event(&event, last, false).unwrap_or(None)
})();
list.push(SyncJobStatus { config: job, status });
}

rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
@@ -1,24 +1,40 @@
//! Datastore Verify Job Management

use anyhow::{format_err, Error};
use serde_json::Value;

use proxmox::api::router::SubdirMap;
use proxmox::{list_subdirs_api_method, sortable};
use proxmox::api::{api, ApiMethod, Permission, Router, RpcEnvironment};

use crate::api2::types::*;
use crate::server::do_verification_job;
use crate::server::jobstate::{Job, JobState};
use crate::config::acl::{
    PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_VERIFY,
use crate::{
    api2::types::{
        DATASTORE_SCHEMA,
        JOB_ID_SCHEMA,
        Authid,
    },
    server::{
        do_verification_job,
        jobstate::{
            Job,
            JobState,
            compute_schedule_status,
        },
    },
    config::{
        acl::{
            PRIV_DATASTORE_AUDIT,
            PRIV_DATASTORE_VERIFY,
        },
        cached_user_info::CachedUserInfo,
        verify::{
            self,
            VerificationJobConfig,
            VerificationJobStatus,
        },
    },
};
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::verify;
use crate::config::verify::{VerificationJobConfig, VerificationJobStatus};
use serde_json::Value;
use crate::tools::systemd::time::{parse_calendar_event, compute_next_event};
use crate::server::UPID;


#[api(
    input: {
@@ -52,10 +68,10 @@ pub fn list_verification_jobs(

    let (config, digest) = verify::config()?;

    let mut list: Vec<VerificationJobStatus> = config
    let job_config_iter = config
        .convert_to_typed_array("verification")?
        .into_iter()
        .filter(|job: &VerificationJobStatus| {
        .filter(|job: &VerificationJobConfig| {
            let privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
            if privs & required_privs == 0 {
                return false;
@@ -66,36 +82,17 @@ pub fn list_verification_jobs(
        } else {
            true
        }
    }).collect();
    });

    for job in &mut list {
    let mut list = Vec::new();

    for job in job_config_iter {
        let last_state = JobState::load("verificationjob", &job.id)
            .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;

        let (upid, endtime, state, starttime) = match last_state {
            JobState::Created { time } => (None, None, None, time),
            JobState::Started { upid } => {
                let parsed_upid: UPID = upid.parse()?;
                (Some(upid), None, None, parsed_upid.starttime)
            },
            JobState::Finished { upid, state } => {
                let parsed_upid: UPID = upid.parse()?;
                (Some(upid), Some(state.endtime()), Some(state.to_string()), parsed_upid.starttime)
            },
        };
        let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;

        job.last_run_upid = upid;
        job.last_run_state = state;
        job.last_run_endtime = endtime;

        let last = job.last_run_endtime.unwrap_or(starttime);

        job.next_run = (|| -> Option<i64> {
            let schedule = job.schedule.as_ref()?;
            let event = parse_calendar_event(&schedule).ok()?;
            // ignore errors
            compute_next_event(&event, last, false).unwrap_or(None)
        })();

        list.push(VerificationJobStatus { config: job, status });
    }

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
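Both the sync and verify listings now delegate the per-job bookkeeping to `compute_schedule_status`. The real implementation lives in `server::jobstate` and is not shown in this diff; the following is only a hedged sketch of what such a helper consolidates, with simplified stand-in types and a fixed interval in place of calendar-event parsing:

```rust
use anyhow::Error;

// Simplified stand-ins for the real types; illustration only.
enum JobState {
    Created { time: i64 },
    Started { start: i64 },
    Finished { end: i64, state: String },
}

#[derive(Default)]
struct JobScheduleStatus {
    last_run_endtime: Option<i64>,
    last_run_state: Option<String>,
    next_run: Option<i64>,
}

// Sketch: derive the shared status fields from the persisted job state.
fn compute_schedule_status(
    state: &JobState,
    schedule: Option<&str>,
) -> Result<JobScheduleStatus, Error> {
    let mut status = JobScheduleStatus::default();
    let last = match state {
        JobState::Created { time } => *time,
        JobState::Started { start } => *start,
        JobState::Finished { end, state } => {
            status.last_run_endtime = Some(*end);
            status.last_run_state = Some(state.clone());
            *end
        }
    };
    if schedule.is_some() {
        status.next_run = Some(last + 3600); // stand-in for compute_next_event
    }
    Ok(status)
}
```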
@@ -268,7 +268,7 @@ async move {
    }.boxed()
}

pub const BACKUP_API_SUBDIRS: SubdirMap = &[
const BACKUP_API_SUBDIRS: SubdirMap = &[
    (
        "blob", &Router::new()
            .upload(&API_METHOD_UPLOAD_BLOB)
@@ -12,6 +12,7 @@ pub mod drive;
pub mod changer;
pub mod media_pool;
pub mod tape_encryption_keys;
pub mod tape_backup_job;

const SUBDIRS: SubdirMap = &[
    ("access", &access::ROUTER),
@@ -21,6 +22,7 @@ const SUBDIRS: SubdirMap = &[
    ("media-pool", &media_pool::ROUTER),
    ("remote", &remote::ROUTER),
    ("sync", &sync::ROUTER),
    ("tape-backup-job", &tape_backup_job::ROUTER),
    ("tape-encryption-keys", &tape_encryption_keys::ROUTER),
    ("verify", &verify::ROUTER),
];
@@ -5,6 +5,7 @@ use anyhow::Error;

use crate::api2::types::PROXMOX_CONFIG_DIGEST_SCHEMA;
use proxmox::api::{api, Permission, Router, RpcEnvironment, SubdirMap};
use proxmox::api::schema::Updatable;
use proxmox::list_subdirs_api_method;

use crate::config::tfa::{self, WebauthnConfig, WebauthnConfigUpdater};
@@ -73,9 +74,9 @@ pub fn update_webauthn_config(
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &wa.digest()?)?;
    }
    webauthn.apply_to(wa);
    wa.update_from::<&str>(webauthn, &[])?;
} else {
    tfa.webauthn = Some(webauthn.build()?);
    tfa.webauthn = Some(WebauthnConfig::try_build_from(webauthn)?);
}

tfa::write(&tfa)?;
@@ -6,15 +6,24 @@ use proxmox::api::{
    api,
    Router,
    RpcEnvironment,
    Permission,
    schema::parse_property_string,
};

use crate::{
    config,
    config::{
        self,
        cached_user_info::CachedUserInfo,
        acl::{
            PRIV_TAPE_AUDIT,
            PRIV_TAPE_MODIFY,
        },
    },
    api2::types::{
        Authid,
        PROXMOX_CONFIG_DIGEST_SCHEMA,
        CHANGER_NAME_SCHEMA,
        LINUX_DRIVE_PATH_SCHEMA,
        SCSI_CHANGER_PATH_SCHEMA,
        SLOT_ARRAY_SCHEMA,
        EXPORT_SLOT_LIST_SCHEMA,
        ScsiTapeChanger,
@@ -34,7 +43,7 @@ use crate::{
            schema: CHANGER_NAME_SCHEMA,
        },
        path: {
            schema: LINUX_DRIVE_PATH_SCHEMA,
            schema: SCSI_CHANGER_PATH_SCHEMA,
        },
        "export-slots": {
            schema: EXPORT_SLOT_LIST_SCHEMA,
@@ -42,6 +51,9 @@ use crate::{
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["tape", "device"], PRIV_TAPE_MODIFY, false),
    },
)]
/// Create a new changer device
pub fn create_changer(
@@ -94,7 +106,9 @@ pub fn create_changer(
    returns: {
        type: ScsiTapeChanger,
    },

    access: {
        permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_AUDIT, false),
    },
)]
/// Get tape changer configuration
pub fn get_config(
@@ -123,17 +137,31 @@ pub fn get_config(
            type: ScsiTapeChanger,
        },
    },
    access: {
        description: "List configured tape changers filtered by Tape.Audit privileges",
        permission: &Permission::Anybody,
    },
)]
/// List changers
pub fn list_changers(
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ScsiTapeChanger>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let (config, digest) = config::drive::config()?;

    let list: Vec<ScsiTapeChanger> = config.convert_to_typed_array("changer")?;

    let list = list
        .into_iter()
        .filter(|changer| {
            let privs = user_info.lookup_privs(&auth_id, &["tape", "device", &changer.name]);
            privs & PRIV_TAPE_AUDIT != 0
        })
        .collect();

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
@@ -156,7 +184,7 @@ pub enum DeletableProperty {
            schema: CHANGER_NAME_SCHEMA,
        },
        path: {
            schema: LINUX_DRIVE_PATH_SCHEMA,
            schema: SCSI_CHANGER_PATH_SCHEMA,
            optional: true,
        },
        "export-slots": {
@@ -177,6 +205,9 @@ pub enum DeletableProperty {
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_MODIFY, false),
    },
)]
/// Update a tape changer configuration
pub fn update_changer(
@@ -251,6 +282,9 @@ pub fn update_changer(
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_MODIFY, false),
    },
)]
/// Delete a tape changer configuration
pub fn delete_changer(name: String, _param: Value) -> Result<(), Error> {
@@ -2,11 +2,19 @@ use anyhow::{bail, Error};
use ::serde::{Deserialize, Serialize};
use serde_json::Value;

use proxmox::api::{api, Router, RpcEnvironment};
use proxmox::api::{api, Router, RpcEnvironment, Permission};

use crate::{
    config,
    config::{
        self,
        cached_user_info::CachedUserInfo,
        acl::{
            PRIV_TAPE_AUDIT,
            PRIV_TAPE_MODIFY,
        },
    },
    api2::types::{
        Authid,
        PROXMOX_CONFIG_DIGEST_SCHEMA,
        DRIVE_NAME_SCHEMA,
        CHANGER_NAME_SCHEMA,
@@ -41,6 +49,9 @@ use crate::{
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["tape", "device"], PRIV_TAPE_MODIFY, false),
    },
)]
/// Create a new drive
pub fn create_drive(param: Value) -> Result<(), Error> {
@@ -84,6 +95,9 @@ pub fn create_drive(param: Value) -> Result<(), Error> {
    returns: {
        type: LinuxTapeDrive,
    },
    access: {
        permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_AUDIT, false),
    },
)]
/// Get drive configuration
pub fn get_config(
@@ -112,17 +126,31 @@ pub fn get_config(
            type: LinuxTapeDrive,
        },
    },
    access: {
        description: "List configured tape drives filtered by Tape.Audit privileges",
        permission: &Permission::Anybody,
    },
)]
/// List drives
pub fn list_drives(
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<LinuxTapeDrive>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let (config, digest) = config::drive::config()?;

    let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;

    let drive_list = drive_list
        .into_iter()
        .filter(|drive| {
            let privs = user_info.lookup_privs(&auth_id, &["tape", "device", &drive.name]);
            privs & PRIV_TAPE_AUDIT != 0
        })
        .collect();

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(drive_list)
@@ -173,6 +201,9 @@ pub enum DeletableProperty {
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_MODIFY, false),
    },
)]
/// Update a drive configuration
pub fn update_drive(
@@ -246,6 +277,9 @@ pub fn update_drive(
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_MODIFY, false),
    },
)]
/// Delete a drive configuration
pub fn delete_drive(name: String, _param: Value) -> Result<(), Error> {
@@ -6,75 +6,61 @@ use proxmox::{
        api,
        Router,
        RpcEnvironment,
        Permission,
    },
};

use crate::{
    api2::types::{
        Authid,
        MEDIA_POOL_NAME_SCHEMA,
        MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
        MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
        MEDIA_RETENTION_POLICY_SCHEMA,
        TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
        SINGLE_LINE_COMMENT_SCHEMA,
        MediaPoolConfig,
    },
    config,
    config::{
        self,
        cached_user_info::CachedUserInfo,
        acl::{
            PRIV_TAPE_AUDIT,
            PRIV_TAPE_MODIFY,
        },
    },
};

#[api(
    protected: true,
    input: {
        properties: {
            name: {
                schema: MEDIA_POOL_NAME_SCHEMA,
            },
            allocation: {
                schema: MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
                optional: true,
            },
            retention: {
                schema: MEDIA_RETENTION_POLICY_SCHEMA,
                optional: true,
            },
            template: {
                schema: MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
                optional: true,
            },
            encrypt: {
                schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
                optional: true,
            config: {
                type: MediaPoolConfig,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["tape", "pool"], PRIV_TAPE_MODIFY, false),
    },
)]
/// Create a new media pool
pub fn create_pool(
    name: String,
    allocation: Option<String>,
    retention: Option<String>,
    template: Option<String>,
    encrypt: Option<String>,
    config: MediaPoolConfig,
) -> Result<(), Error> {

    let _lock = config::media_pool::lock()?;

    let (mut config, _digest) = config::media_pool::config()?;
    let (mut section_config, _digest) = config::media_pool::config()?;

    if config.sections.get(&name).is_some() {
        bail!("Media pool '{}' already exists", name);
    if section_config.sections.get(&config.name).is_some() {
        bail!("Media pool '{}' already exists", config.name);
    }

    let item = MediaPoolConfig {
        name: name.clone(),
        allocation,
        retention,
        template,
        encrypt,
    };
    section_config.set_data(&config.name, "pool", &config)?;

    config.set_data(&name, "pool", &item)?;

    config::media_pool::save_config(&config)?;
    config::media_pool::save_config(&section_config)?;

    Ok(())
}
@@ -87,15 +73,29 @@ pub fn create_pool(
            type: MediaPoolConfig,
        },
    },
    access: {
        description: "List configured media pools filtered by Tape.Audit privileges",
        permission: &Permission::Anybody,
    },
)]
/// List media pools
pub fn list_pools(
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<MediaPoolConfig>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let (config, digest) = config::media_pool::config()?;

    let list = config.convert_to_typed_array("pool")?;
    let list = config.convert_to_typed_array::<MediaPoolConfig>("pool")?;

    let list = list
        .into_iter()
        .filter(|pool| {
            let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool.name]);
            privs & PRIV_TAPE_AUDIT != 0
        })
        .collect();

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

@@ -113,6 +113,9 @@ pub fn list_pools(
    returns: {
        type: MediaPoolConfig,
    },
    access: {
        permission: &Permission::Privilege(&["tape", "pool", "{name}"], PRIV_TAPE_AUDIT, false),
    },
)]
/// Get media pool configuration
pub fn get_config(name: String) -> Result<MediaPoolConfig, Error> {
@@ -137,6 +140,8 @@ pub enum DeletableProperty {
    template,
    /// Delete encryption fingerprint
    encrypt,
    /// Delete comment
    comment,
}

#[api(
@@ -162,6 +167,10 @@ pub enum DeletableProperty {
        schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
        optional: true,
    },
    comment: {
        optional: true,
        schema: SINGLE_LINE_COMMENT_SCHEMA,
    },
    delete: {
        description: "List of properties to delete.",
        type: Array,
@@ -172,6 +181,9 @@ pub enum DeletableProperty {
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["tape", "pool", "{name}"], PRIV_TAPE_MODIFY, false),
    },
)]
/// Update media pool settings
pub fn update_pool(
@@ -180,6 +192,7 @@ pub fn update_pool(
    retention: Option<String>,
    template: Option<String>,
    encrypt: Option<String>,
    comment: Option<String>,
    delete: Option<Vec<DeletableProperty>>,
) -> Result<(), Error> {

@@ -196,6 +209,7 @@ pub fn update_pool(
    DeletableProperty::retention => { data.retention = None; },
    DeletableProperty::template => { data.template = None; },
    DeletableProperty::encrypt => { data.encrypt = None; },
    DeletableProperty::comment => { data.comment = None; },
            }
        }
    }
@@ -205,6 +219,15 @@ pub fn update_pool(
    if template.is_some() { data.template = template; }
    if encrypt.is_some() { data.encrypt = encrypt; }

    if let Some(comment) = comment {
        let comment = comment.trim();
        if comment.is_empty() {
            data.comment = None;
        } else {
            data.comment = Some(comment.to_string());
        }
    }

    config.set_data(&name, "pool", &data)?;

    config::media_pool::save_config(&config)?;
@@ -221,6 +244,9 @@ pub fn update_pool(
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["tape", "pool", "{name}"], PRIV_TAPE_MODIFY, false),
    },
)]
/// Delete a media pool configuration
pub fn delete_pool(name: String) -> Result<(), Error> {
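The `create_pool` rewrite above replaces five individual parameters with a single `config: { type: MediaPoolConfig, flatten: true }` property. A hedged illustration of the flattening idea using plain serde (the proxmox `#[api]` macro behaves roughly analogously at the schema level; names here are simplified stand-ins):

```rust
use serde::Deserialize;

#[derive(Deserialize)]
struct MediaPoolConfigSketch {
    name: String,
    allocation: Option<String>,
    retention: Option<String>,
}

#[derive(Deserialize)]
struct CreatePoolParams {
    // The nested config's fields are accepted at the top level of the
    // parameter object instead of under a "config" key.
    #[serde(flatten)]
    config: MediaPoolConfigSketch,
}

fn main() {
    let params: CreatePoolParams =
        serde_json::from_str(r#"{"name":"pool1","allocation":"continue"}"#).unwrap();
    assert_eq!(params.config.name, "pool1");
}
```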
src/api2/config/tape_backup_job.rs (new file, 341 lines)
@@ -0,0 +1,341 @@
use anyhow::{bail, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};

use proxmox::api::{api, Router, RpcEnvironment, Permission};
use proxmox::tools::fs::open_file_locked;

use crate::{
    api2::types::{
        Authid,
        Userid,
        JOB_ID_SCHEMA,
        DATASTORE_SCHEMA,
        DRIVE_NAME_SCHEMA,
        PROXMOX_CONFIG_DIGEST_SCHEMA,
        SINGLE_LINE_COMMENT_SCHEMA,
        MEDIA_POOL_NAME_SCHEMA,
        SYNC_SCHEDULE_SCHEMA,
    },
    config::{
        self,
        cached_user_info::CachedUserInfo,
        acl::{
            PRIV_TAPE_AUDIT,
            PRIV_TAPE_MODIFY,
        },
        tape_job::{
            TAPE_JOB_CFG_LOCKFILE,
            TapeBackupJobConfig,
        }
    },
};

#[api(
    input: {
        properties: {},
    },
    returns: {
        description: "List configured jobs.",
        type: Array,
        items: { type: TapeBackupJobConfig },
    },
    access: {
        description: "List configured tape jobs filtered by Tape.Audit privileges",
        permission: &Permission::Anybody,
    },
)]
/// List all tape backup jobs
pub fn list_tape_backup_jobs(
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TapeBackupJobConfig>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let (config, digest) = config::tape_job::config()?;

    let list = config.convert_to_typed_array::<TapeBackupJobConfig>("backup")?;

    let list = list
        .into_iter()
        .filter(|job| {
            let privs = user_info.lookup_privs(&auth_id, &["tape", "job", &job.id]);
            privs & PRIV_TAPE_AUDIT != 0
        })
        .collect();

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
}

#[api(
    protected: true,
    input: {
        properties: {
            job: {
                type: TapeBackupJobConfig,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["tape", "job"], PRIV_TAPE_MODIFY, false),
    },
)]
/// Create a new tape backup job.
pub fn create_tape_backup_job(
    job: TapeBackupJobConfig,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let _lock = open_file_locked(TAPE_JOB_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

    let (mut config, _digest) = config::tape_job::config()?;

    if config.sections.get(&job.id).is_some() {
        bail!("job '{}' already exists.", job.id);
    }

    config.set_data(&job.id, "backup", &job)?;

    config::tape_job::save_config(&config)?;

    crate::server::jobstate::create_state_file("tape-backup-job", &job.id)?;

    Ok(())
}

#[api(
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
        },
    },
    returns: { type: TapeBackupJobConfig },
    access: {
        permission: &Permission::Privilege(&["tape", "job", "{id}"], PRIV_TAPE_AUDIT, false),
    },
)]
/// Read a tape backup job configuration.
pub fn read_tape_backup_job(
    id: String,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<TapeBackupJobConfig, Error> {

    let (config, digest) = config::tape_job::config()?;

    let job = config.lookup("backup", &id)?;

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(job)
}

#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
/// Deletable property name
pub enum DeletableProperty {
    /// Delete the comment property.
    Comment,
    /// Delete the job schedule.
    Schedule,
    /// Delete the eject-media property
    EjectMedia,
    /// Delete the export-media-set property
    ExportMediaSet,
    /// Delete the 'latest-only' property
    LatestOnly,
    /// Delete the 'notify-user' property
    NotifyUser,
}

#[api(
    protected: true,
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
            store: {
                schema: DATASTORE_SCHEMA,
                optional: true,
            },
            pool: {
                schema: MEDIA_POOL_NAME_SCHEMA,
                optional: true,
            },
            drive: {
                schema: DRIVE_NAME_SCHEMA,
                optional: true,
            },
            "eject-media": {
                description: "Eject media upon job completion.",
                type: bool,
                optional: true,
            },
            "export-media-set": {
                description: "Export media set upon job completion.",
                type: bool,
                optional: true,
            },
            "latest-only": {
                description: "Backup latest snapshots only.",
                type: bool,
                optional: true,
            },
            "notify-user": {
                optional: true,
                type: Userid,
            },
            comment: {
                optional: true,
                schema: SINGLE_LINE_COMMENT_SCHEMA,
            },
            schedule: {
                optional: true,
                schema: SYNC_SCHEDULE_SCHEMA,
            },
            delete: {
                description: "List of properties to delete.",
                type: Array,
                optional: true,
                items: {
                    type: DeletableProperty,
                }
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["tape", "job", "{id}"], PRIV_TAPE_MODIFY, false),
    },
)]
/// Update the tape backup job
pub fn update_tape_backup_job(
    id: String,
    store: Option<String>,
    pool: Option<String>,
    drive: Option<String>,
    eject_media: Option<bool>,
    export_media_set: Option<bool>,
    latest_only: Option<bool>,
    notify_user: Option<Userid>,
    comment: Option<String>,
    schedule: Option<String>,
    delete: Option<Vec<DeletableProperty>>,
    digest: Option<String>,
) -> Result<(), Error> {
    let _lock = open_file_locked(TAPE_JOB_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

    let (mut config, expected_digest) = config::tape_job::config()?;

    let mut data: TapeBackupJobConfig = config.lookup("backup", &id)?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::EjectMedia => { data.setup.eject_media = None; },
                DeletableProperty::ExportMediaSet => { data.setup.export_media_set = None; },
                DeletableProperty::LatestOnly => { data.setup.latest_only = None; },
                DeletableProperty::NotifyUser => { data.setup.notify_user = None; },
                DeletableProperty::Schedule => { data.schedule = None; },
                DeletableProperty::Comment => { data.comment = None; },
            }
        }
    }

    if let Some(store) = store { data.setup.store = store; }
    if let Some(pool) = pool { data.setup.pool = pool; }
    if let Some(drive) = drive { data.setup.drive = drive; }

    if eject_media.is_some() { data.setup.eject_media = eject_media; };
    if export_media_set.is_some() { data.setup.export_media_set = export_media_set; }
    if latest_only.is_some() { data.setup.latest_only = latest_only; }
    if notify_user.is_some() { data.setup.notify_user = notify_user; }

    if schedule.is_some() { data.schedule = schedule; }

    if let Some(comment) = comment {
        let comment = comment.trim();
        if comment.is_empty() {
            data.comment = None;
        } else {
            data.comment = Some(comment.to_string());
        }
    }

    config.set_data(&id, "backup", &data)?;

    config::tape_job::save_config(&config)?;

    Ok(())
}

#[api(
    protected: true,
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["tape", "job", "{id}"], PRIV_TAPE_MODIFY, false),
    },
)]
/// Remove a tape backup job configuration
pub fn delete_tape_backup_job(
    id: String,
    digest: Option<String>,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let _lock = open_file_locked(TAPE_JOB_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

    let (mut config, expected_digest) = config::tape_job::config()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    match config.lookup::<TapeBackupJobConfig>("backup", &id) {
        Ok(_job) => {
            config.sections.remove(&id);
        },
        Err(_) => { bail!("job '{}' does not exist.", id) },
    };

    config::tape_job::save_config(&config)?;

    crate::server::jobstate::remove_state_file("tape-backup-job", &id)?;

    Ok(())
}

const ITEM_ROUTER: Router = Router::new()
    .get(&API_METHOD_READ_TAPE_BACKUP_JOB)
    .put(&API_METHOD_UPDATE_TAPE_BACKUP_JOB)
    .delete(&API_METHOD_DELETE_TAPE_BACKUP_JOB);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_TAPE_BACKUP_JOBS)
    .post(&API_METHOD_CREATE_TAPE_BACKUP_JOB)
    .match_all("id", &ITEM_ROUTER);
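Every mutating handler in the new file follows the same lock, re-read, check, mutate, save sequence around the section config. A minimal sketch of that pattern under stated assumptions (the lock and config helpers below are stand-ins for `open_file_locked` and `config::tape_job::{config, save_config}`, not the real API):

```rust
use std::collections::HashMap;
use std::time::Duration;
use anyhow::{bail, Error};

// Stand-in helpers; the real code locks TAPE_JOB_CFG_LOCKFILE and parses a
// section config file instead of a HashMap.
struct LockGuard;
fn lock_config(_path: &str, _timeout: Duration) -> Result<LockGuard, Error> { Ok(LockGuard) }
fn load_config() -> Result<HashMap<String, String>, Error> { Ok(HashMap::new()) }
fn save_config(_cfg: &HashMap<String, String>) -> Result<(), Error> { Ok(()) }

fn create_entry(id: &str, data: &str) -> Result<(), Error> {
    let _lock = lock_config("/run/example.lck", Duration::new(10, 0))?; // held to end of scope
    let mut config = load_config()?; // re-read under the lock
    if config.contains_key(id) {
        bail!("job '{}' already exists.", id);
    }
    config.insert(id.to_string(), data.to_string()); // apply the change
    save_config(&config) // persist while still holding the lock
}
```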
@@ -7,12 +7,17 @@ use proxmox::{
        ApiMethod,
        Router,
        RpcEnvironment,
        Permission,
    },
    tools::fs::open_file_locked,
};

use crate::{
    config::{
        acl::{
            PRIV_TAPE_AUDIT,
            PRIV_TAPE_MODIFY,
        },
        tape_encryption_keys::{
            TAPE_KEYS_LOCKFILE,
            load_keys,
@@ -44,6 +49,9 @@ use crate::{
        type: Array,
        items: { type: KeyInfo },
    },
    access: {
        permission: &Permission::Privilege(&["tape", "pool"], PRIV_TAPE_AUDIT, false),
    },
)]
/// List existing keys
pub fn list_keys(
@@ -93,6 +101,9 @@ pub fn list_keys(
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["tape", "pool"], PRIV_TAPE_MODIFY, false),
    },
)]
/// Change the encryption key's password (and password hint).
pub fn change_passphrase(
@@ -108,7 +119,7 @@ pub fn change_passphrase(
    let kdf = kdf.unwrap_or_default();

    if let Kdf::None = kdf {
        bail!("Please specify a key derivation funktion (none is not allowed here).");
        bail!("Please specify a key derivation function (none is not allowed here).");
    }

    let _lock = open_file_locked(
@@ -161,6 +172,9 @@ pub fn change_passphrase(
    returns: {
        schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["tape", "pool"], PRIV_TAPE_MODIFY, false),
    },
)]
/// Create a new encryption key
pub fn create_key(
@@ -173,7 +187,7 @@ pub fn create_key(
    let kdf = kdf.unwrap_or_default();

    if let Kdf::None = kdf {
        bail!("Please specify a key derivation funktion (none is not allowed here).");
        bail!("Please specify a key derivation function (none is not allowed here).");
    }

    let (key, mut key_config) = KeyConfig::new(password.as_bytes(), kdf)?;
@@ -198,6 +212,9 @@ pub fn create_key(
    returns: {
        type: KeyInfo,
    },
    access: {
        permission: &Permission::Privilege(&["tape", "pool"], PRIV_TAPE_AUDIT, false),
    },
)]
/// Get key config (public key part)
pub fn read_key(
@@ -232,6 +249,9 @@ pub fn read_key(
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["tape", "pool"], PRIV_TAPE_MODIFY, false),
    },
)]
/// Remove an encryption key from the database
///
@@ -1,3 +1,4 @@
use std::io::{Read, Seek};
use std::path::PathBuf;

use anyhow::Error;
@@ -6,6 +7,9 @@ use hyper::{Body, Response, StatusCode, header};

use proxmox::http_bail;

use crate::api2::types::ArchiveEntry;
use crate::backup::{CatalogReader, DirEntryAttribute};

pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, Error> {
    let file = match tokio::fs::File::open(path.clone()).await {
        Ok(file) => file,
@@ -27,3 +31,30 @@ pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, E
        .body(body)
        .unwrap())
}

/// Returns the list of content of the given path
pub fn list_dir_content<R: Read + Seek>(
    reader: &mut CatalogReader<R>,
    path: &[u8],
) -> Result<Vec<ArchiveEntry>, Error> {
    let dir = reader.lookup_recursive(path)?;
    let mut res = vec![];
    let mut path = path.to_vec();
    if !path.is_empty() && path[0] == b'/' {
        path.remove(0);
    }

    for direntry in reader.read_dir(&dir)? {
        let mut components = path.clone();
        components.push(b'/');
        components.extend(&direntry.name);
        let mut entry = ArchiveEntry::new(&components, &direntry.attr);
        if let DirEntryAttribute::File { size, mtime } = direntry.attr {
            entry.size = size.into();
            entry.mtime = mtime.into();
        }
        res.push(entry);
    }

    Ok(res)
}
@@ -85,7 +85,7 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
    },
    notify: {
        type: bool,
        description: r#"Send notification mail about new package updates availanle to the
        description: r#"Send notification mail about new package updates available to the
            email address configured for 'root@pam')."#,
        default: false,
        optional: true,
@@ -7,19 +7,61 @@ use hyper::http::request::Parts;
use hyper::{Body, Response, Request, StatusCode};
use serde_json::Value;

use proxmox::{sortable, identity};
use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::schema::*;
use proxmox::http_err;
use proxmox::{
    http_err,
    sortable,
    identity,
    list_subdirs_api_method,
    api::{
        ApiResponseFuture,
        ApiHandler,
        ApiMethod,
        Router,
        RpcEnvironment,
        Permission,
        router::SubdirMap,
        schema::{
            ObjectSchema,
            BooleanSchema,
        },
    },
};

use crate::api2::types::*;
use crate::backup::*;
use crate::server::{WorkerTask, H2Service};
use crate::tools;
use crate::config::acl::{PRIV_DATASTORE_READ, PRIV_DATASTORE_BACKUP};
use crate::config::cached_user_info::CachedUserInfo;
use crate::api2::helpers;
use crate::tools::fs::lock_dir_noblock_shared;
use crate::{
    api2::{
        helpers,
        types::{
            DATASTORE_SCHEMA,
            BACKUP_TYPE_SCHEMA,
            BACKUP_TIME_SCHEMA,
            BACKUP_ID_SCHEMA,
            CHUNK_DIGEST_SCHEMA,
            Authid,
        },
    },
    backup::{
        DataStore,
        ArchiveType,
        BackupDir,
        IndexFile,
        archive_type,
    },
    server::{
        WorkerTask,
        H2Service,
    },
    tools::{
        self,
        fs::lock_dir_noblock_shared,
    },
    config::{
        acl::{
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
        },
        cached_user_info::CachedUserInfo,
    },
};

mod environment;
use environment::*;
@@ -171,21 +213,24 @@ fn upgrade_to_backup_reader_protocol(
    }.boxed()
}

const READER_API_SUBDIRS: SubdirMap = &[
    (
        "chunk", &Router::new()
            .download(&API_METHOD_DOWNLOAD_CHUNK)
    ),
    (
        "download", &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "speedtest", &Router::new()
            .download(&API_METHOD_SPEEDTEST)
    ),
];

pub const READER_API_ROUTER: Router = Router::new()
    .subdirs(&[
        (
            "chunk", &Router::new()
                .download(&API_METHOD_DOWNLOAD_CHUNK)
        ),
        (
            "download", &Router::new()
                .download(&API_METHOD_DOWNLOAD_FILE)
        ),
        (
            "speedtest", &Router::new()
                .download(&API_METHOD_SPEEDTEST)
        ),
    ]);
    .get(&list_subdirs_api_method!(READER_API_SUBDIRS))
    .subdirs(READER_API_SUBDIRS);

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
@@ -216,7 +261,7 @@ fn download_file(
    path.push(&file_name);

    env.log(format!("download {:?}", path.clone()));

    let index: Option<Box<dyn IndexFile + Send>> = match archive_type(&file_name)? {
        ArchiveType::FixedIndex => {
            let index = env.datastore.open_fixed_reader(&path)?;
@@ -160,12 +160,11 @@ pub fn datastore_status(

    // we skip the calculation for datastores with not enough data
    if usage_list.len() >= 7 {
        entry["estimated-full-date"] = Value::from(0);
        if let Some((a, b)) = linear_regression(&time_list, &usage_list) {
            if b != 0.0 {
                let estimate = (1.0 - a) / b;
                entry["estimated-full-date"] = Value::from(estimate.floor() as u64);
            } else {
                entry["estimated-full-date"] = Value::from(0);
            }
        }
    }
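The arithmetic behind that hunk: the regression fits usage as a line, usage(t) ≈ a + b·t, and the store is full when usage reaches 1.0, so the estimated full date is t = (1.0 − a) / b, undefined when the slope b is zero. A tiny self-contained check of that reasoning:

```rust
// Solve a + b*t = 1.0 for t; None when usage is flat (b == 0).
fn estimated_full_date(a: f64, b: f64) -> Option<f64> {
    if b == 0.0 {
        return None; // flat usage never fills the store
    }
    Some((1.0 - a) / b)
}

fn main() {
    // Half full at t = 0, filling 0.5 per time unit -> full at t = 1.
    assert_eq!(estimated_full_date(0.5, 0.5), Some(1.0));
    assert_eq!(estimated_full_date(0.5, 0.0), None);
}
```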
@@ -1,7 +1,7 @@
use std::path::Path;
use std::sync::Arc;

use anyhow::{bail, Error};
use anyhow::{bail, format_err, Error};
use serde_json::Value;

use proxmox::{
@@ -10,27 +10,47 @@ use proxmox::{
        RpcEnvironment,
        RpcEnvironmentType,
        Router,
        Permission,
    },
};

use crate::{
    task_log,
    task_warn,
    config::{
        self,
        drive::check_drive_exists,
        cached_user_info::CachedUserInfo,
        acl::{
            PRIV_DATASTORE_READ,
            PRIV_TAPE_AUDIT,
            PRIV_TAPE_WRITE,
        },
        tape_job::{
            TapeBackupJobConfig,
            TapeBackupJobSetup,
            TapeBackupJobStatus,
        },
    },
    server::{
        lookup_user_email,
        jobstate::{
            Job,
            JobState,
            compute_schedule_status,
        },
    },
    backup::{
        DataStore,
        BackupDir,
        BackupInfo,
        StoreProgress,
    },
    api2::types::{
        Authid,
        DATASTORE_SCHEMA,
        MEDIA_POOL_NAME_SCHEMA,
        DRIVE_NAME_SCHEMA,
        UPID_SCHEMA,
        JOB_ID_SCHEMA,
        MediaPoolConfig,
        Userid,
    },
    server::WorkerTask,
    task::TaskState,
@@ -40,90 +60,320 @@ use crate::{
        PoolWriter,
        MediaPool,
        SnapshotReader,
        drive::media_changer,
        drive::{
            media_changer,
            lock_tape_device,
            set_tape_device_state,
        },
        changer::update_changer_online_status,
    },
};

const TAPE_BACKUP_JOB_ROUTER: Router = Router::new()
    .post(&API_METHOD_RUN_TAPE_BACKUP_JOB);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_TAPE_BACKUP_JOBS)
    .post(&API_METHOD_BACKUP)
    .match_all("id", &TAPE_BACKUP_JOB_ROUTER);

fn check_backup_permission(
    auth_id: &Authid,
    store: &str,
    pool: &str,
    drive: &str,
) -> Result<(), Error> {

    let user_info = CachedUserInfo::new()?;

    let privs = user_info.lookup_privs(auth_id, &["datastore", store]);
    if (privs & PRIV_DATASTORE_READ) == 0 {
        bail!("no permissions on /datastore/{}", store);
    }

    let privs = user_info.lookup_privs(auth_id, &["tape", "drive", drive]);
    if (privs & PRIV_TAPE_WRITE) == 0 {
        bail!("no permissions on /tape/drive/{}", drive);
    }

    let privs = user_info.lookup_privs(auth_id, &["tape", "pool", pool]);
    if (privs & PRIV_TAPE_WRITE) == 0 {
        bail!("no permissions on /tape/pool/{}", pool);
    }

    Ok(())
}

#[api(
    input: {
    returns: {
        description: "List configured tape backup jobs and their status",
        type: Array,
        items: { type: TapeBackupJobStatus },
    },
    access: {
        description: "List configured tape jobs filtered by Tape.Audit privileges",
        permission: &Permission::Anybody,
    },
)]
/// List all tape backup jobs
pub fn list_tape_backup_jobs(
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TapeBackupJobStatus>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let (config, digest) = config::tape_job::config()?;

    let job_list_iter = config
        .convert_to_typed_array("backup")?
        .into_iter()
        .filter(|_job: &TapeBackupJobConfig| {
            // fixme: check access permission
            true
        });

    let mut list = Vec::new();

    for job in job_list_iter {
        let privs = user_info.lookup_privs(&auth_id, &["tape", "job", &job.id]);
        if (privs & PRIV_TAPE_AUDIT) == 0 {
            continue;
        }

        let last_state = JobState::load("tape-backup-job", &job.id)
            .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;

        let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;

        list.push(TapeBackupJobStatus { config: job, status });
    }

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
}

pub fn do_tape_backup_job(
    mut job: Job,
    setup: TapeBackupJobSetup,
    auth_id: &Authid,
    schedule: Option<String>,
) -> Result<String, Error> {

    let job_id = format!("{}:{}:{}:{}",
        setup.store,
        setup.pool,
        setup.drive,
        job.jobname());

    let worker_type = job.jobtype().to_string();

    let datastore = DataStore::lookup_datastore(&setup.store)?;

    let (config, _digest) = config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;

    let (drive_config, _digest) = config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;

    let upid_str = WorkerTask::new_thread(
        &worker_type,
        Some(job_id.clone()),
        auth_id.clone(),
        false,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard

            set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
            job.start(&worker.upid().to_string())?;

            task_log!(worker, "Starting tape backup job '{}'", job_id);
            if let Some(event_str) = schedule {
                task_log!(worker, "task triggered by schedule '{}'", event_str);
            }

            let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
            let email = lookup_user_email(notify_user);

            let job_result = backup_worker(
                &worker,
                datastore,
                &pool_config,
                &setup,
                email.clone(),
            );

            let status = worker.create_state(&job_result);

            if let Some(email) = email {
                if let Err(err) = crate::server::send_tape_backup_status(
                    &email,
                    Some(job.jobname()),
                    &setup,
                    &job_result,
                ) {
                    eprintln!("send tape backup notification failed: {}", err);
                }
            }

            if let Err(err) = job.finish(status) {
                eprintln!(
                    "could not finish job state for {}: {}",
                    job.jobtype().to_string(),
                    err
                );
            }

            if let Err(err) = set_tape_device_state(&setup.drive, "") {
                eprintln!(
                    "could not unset drive state for {}: {}",
                    setup.drive,
                    err
                );
            }

            job_result
        }
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            id: {
                schema: JOB_ID_SCHEMA,
            },
            pool: {
                schema: MEDIA_POOL_NAME_SCHEMA,
            },
            drive: {
                schema: DRIVE_NAME_SCHEMA,
            },
            "eject-media": {
                description: "Eject media upon job completion.",
                type: bool,
                optional: true,
            },
            "export-media-set": {
                description: "Export media set upon job completion.",
                type: bool,
                optional: true,
            },
        },
    access: {
        // Note: parameters are from job config, so we need to test inside function body
        description: "The user needs Tape.Write privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Runs a tape backup job manually.
pub fn run_tape_backup_job(
    id: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let (config, _digest) = config::tape_job::config()?;
    let backup_job: TapeBackupJobConfig = config.lookup("backup", &id)?;

    check_backup_permission(
        &auth_id,
        &backup_job.setup.store,
        &backup_job.setup.pool,
        &backup_job.setup.drive,
    )?;

    let job = Job::new("tape-backup-job", &id)?;

    let upid_str = do_tape_backup_job(job, backup_job.setup, &auth_id, None)?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            setup: {
                type: TapeBackupJobSetup,
                flatten: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        // Note: parameters are no uri parameter, so we need to test inside function body
        description: "The user needs Tape.Write privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Backup datastore to tape media pool
pub fn backup(
    store: String,
    pool: String,
    drive: String,
    eject_media: Option<bool>,
    export_media_set: Option<bool>,
    setup: TapeBackupJobSetup,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store)?;
    check_backup_permission(
        &auth_id,
        &setup.store,
        &setup.pool,
        &setup.drive,
    )?;

    let datastore = DataStore::lookup_datastore(&setup.store)?;

    let (config, _digest) = config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;

    let (drive_config, _digest) = config::drive::config()?;
    // early check before starting worker
    check_drive_exists(&drive_config, &drive)?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let eject_media = eject_media.unwrap_or(false);
    let export_media_set = export_media_set.unwrap_or(false);
    let job_id = format!("{}:{}:{}", setup.store, setup.pool, setup.drive);

    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
    let email = lookup_user_email(notify_user);

    let upid_str = WorkerTask::new_thread(
        "tape-backup",
        Some(store),
        Some(job_id),
        auth_id,
        to_stdout,
        move |worker| {
            backup_worker(&worker, datastore, &drive, &pool_config, eject_media, export_media_set)?;
            Ok(())
            let _drive_lock = drive_lock; // keep lock guard
            set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
            let job_result = backup_worker(
                &worker,
                datastore,
                &pool_config,
                &setup,
                email.clone(),
            );

            if let Some(email) = email {
                if let Err(err) = crate::server::send_tape_backup_status(
                    &email,
                    None,
                    &setup,
                    &job_result,
                ) {
                    eprintln!("send tape backup notification failed: {}", err);
                }
            }

            // ignore errors
            let _ = set_tape_device_state(&setup.drive, "");
            job_result
        }
    )?;

    Ok(upid_str.into())
}

pub const ROUTER: Router = Router::new()
    .post(&API_METHOD_BACKUP);


fn backup_worker(
    worker: &WorkerTask,
    datastore: Arc<DataStore>,
    drive: &str,
    pool_config: &MediaPoolConfig,
    eject_media: bool,
    export_media_set: bool,
    setup: &TapeBackupJobSetup,
    email: Option<String>,
) -> Result<(), Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);
@@ -131,54 +381,98 @@ fn backup_worker(
    let _lock = MediaPool::lock(status_path, &pool_config.name)?;

    task_log!(worker, "update media online status");
    let has_changer = update_media_online_status(drive)?;
    let changer_name = update_media_online_status(&setup.drive)?;

    let use_offline_media = !has_changer;
    let pool = MediaPool::with_config(status_path, &pool_config, changer_name)?;

    let pool = MediaPool::with_config(status_path, &pool_config, use_offline_media)?;

    let mut pool_writer = PoolWriter::new(pool, drive)?;
    let mut pool_writer = PoolWriter::new(pool, &setup.drive, worker, email)?;

    let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;

    group_list.sort_unstable();

    for group in group_list {
    let group_count = group_list.len();
    task_log!(worker, "found {} groups", group_count);

    let mut progress = StoreProgress::new(group_count as u64);

    let latest_only = setup.latest_only.unwrap_or(false);

    if latest_only {
        task_log!(worker, "latest-only: true (only considering latest snapshots)");
    }

    let mut errors = false;

    for (group_number, group) in group_list.into_iter().enumerate() {
        progress.done_groups = group_number as u64;
        progress.done_snapshots = 0;
        progress.group_snapshots = 0;

        let mut snapshot_list = group.list_backups(&datastore.base_path())?;

        BackupInfo::sort_list(&mut snapshot_list, true); // oldest first

        for info in snapshot_list {
            if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
                continue;
        if latest_only {
            progress.group_snapshots = 1;
            if let Some(info) = snapshot_list.pop() {
                if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                    continue;
                }
                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                    errors = true;
                }
                progress.done_snapshots = 1;
                task_log!(
                    worker,
                    "percentage done: {}",
                    progress
                );
            }
        } else {
            progress.group_snapshots = snapshot_list.len() as u64;
            for (snapshot_number, info) in snapshot_list.into_iter().enumerate() {
                if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                    continue;
                }
                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                    errors = true;
                }
                progress.done_snapshots = snapshot_number as u64 + 1;
                task_log!(
                    worker,
                    "percentage done: {}",
                    progress
                );
            }
            task_log!(worker, "backup snapshot {}", info.backup_dir);
            backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)?;
        }
    }

    pool_writer.commit()?;

    if export_media_set {
    if setup.export_media_set.unwrap_or(false) {
        pool_writer.export_media_set(worker)?;
    } else if eject_media {
    } else if setup.eject_media.unwrap_or(false) {
        pool_writer.eject_media(worker)?;
    }

    if errors {
        bail!("Tape backup finished with some errors. Please check the task log.");
    }

    Ok(())
}

// Try to update the media online status
fn update_media_online_status(drive: &str) -> Result<bool, Error> {
fn update_media_online_status(drive: &str) -> Result<Option<String>, Error> {

    let (config, _digest) = config::drive::config()?;

    let mut has_changer = false;

    if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {

        has_changer = true;

        let label_text_list = changer.online_media_label_texts()?;
        let label_text_list = changer.online_media_label_texts()?;

        let status_path = Path::new(TAPE_STATUS_DIR);
        let mut inventory = Inventory::load(status_path)?;
@@ -189,9 +483,11 @@ fn update_media_online_status(drive: &str) -> Result<bool, Error> {
            &changer_name,
            &label_text_list,
        )?;
    }

    Ok(has_changer)
        Ok(Some(changer_name))
    } else {
        Ok(None)
    }
}

pub fn backup_snapshot(
@@ -199,11 +495,18 @@ pub fn backup_snapshot(
    pool_writer: &mut PoolWriter,
    datastore: Arc<DataStore>,
    snapshot: BackupDir,
) -> Result<(), Error> {
) -> Result<bool, Error> {

    task_log!(worker, "start backup {}:{}", datastore.name(), snapshot);
    task_log!(worker, "backup snapshot {}", snapshot);

    let snapshot_reader = SnapshotReader::new(datastore.clone(), snapshot.clone())?;
    let snapshot_reader = match SnapshotReader::new(datastore.clone(), snapshot.clone()) {
        Ok(reader) => reader,
        Err(err) => {
            // ignore missing snapshots and continue
            task_warn!(worker, "failed opening snapshot '{}': {}", snapshot, err);
            return Ok(false);
        }
    };

    let mut chunk_iter = snapshot_reader.chunk_iterator()?.peekable();

@@ -250,5 +553,5 @@ pub fn backup_snapshot(

    task_log!(worker, "end backup {}:{}", datastore.name(), snapshot);

    Ok(())
    Ok(true)
}
@@ -1,16 +1,26 @@
use std::collections::HashMap;
use std::path::Path;

use anyhow::Error;
use serde_json::Value;

use proxmox::api::{api, Router, SubdirMap};
use proxmox::api::{api, Router, SubdirMap, RpcEnvironment, Permission};
use proxmox::list_subdirs_api_method;

use crate::{
    config,
    config::{
        self,
        cached_user_info::CachedUserInfo,
        acl::{
            PRIV_TAPE_AUDIT,
            PRIV_TAPE_READ,
        },
    },
    api2::types::{
        Authid,
        CHANGER_NAME_SCHEMA,
        ChangerListEntry,
        LinuxTapeDrive,
        MtxEntryKind,
        MtxStatusEntry,
        ScsiTapeChanger,
@@ -25,6 +35,7 @@ use crate::{
        ScsiMediaChange,
        mtx_status_to_online_set,
    },
    drive::get_tape_device_state,
    lookup_device_identification,
},
};
@@ -36,6 +47,11 @@ use crate::{
            name: {
                schema: CHANGER_NAME_SCHEMA,
            },
            cache: {
                description: "Use cached value.",
                optional: true,
                default: true,
            },
        },
    },
    returns: {
@@ -45,16 +61,22 @@ use crate::{
            type: MtxStatusEntry,
        },
    },
    access: {
        permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_AUDIT, false),
    },
)]
/// Get tape changer status
pub async fn get_status(name: String) -> Result<Vec<MtxStatusEntry>, Error> {
pub async fn get_status(
    name: String,
    cache: bool,
) -> Result<Vec<MtxStatusEntry>, Error> {

    let (config, _digest) = config::drive::config()?;

    let mut changer_config: ScsiTapeChanger = config.lookup("changer", &name)?;

    let status = tokio::task::spawn_blocking(move || {
        changer_config.status()
        changer_config.status(cache)
    }).await??;

    let state_path = Path::new(TAPE_STATUS_DIR);
@@ -66,9 +88,26 @@ pub async fn get_status(name: String) -> Result<Vec<MtxStatusEntry>, Error> {

    inventory.update_online_status(&map)?;

    let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
    let mut drive_map: HashMap<u64, String> = HashMap::new();

    for drive in drive_list {
        if let Some(changer) = drive.changer {
            if changer != name {
                continue;
            }
            let num = drive.changer_drivenum.unwrap_or(0);
            drive_map.insert(num, drive.name.clone());
        }
    }

    let mut list = Vec::new();

    for (id, drive_status) in status.drives.iter().enumerate() {
        let mut state = None;
        if let Some(drive) = drive_map.get(&(id as u64)) {
            state = get_tape_device_state(&config, &drive)?;
        }
        let entry = MtxStatusEntry {
            entry_kind: MtxEntryKind::Drive,
            entry_id: id as u64,
@@ -78,6 +117,7 @@ pub async fn get_status(name: String) -> Result<Vec<MtxStatusEntry>, Error> {
                ElementStatus::VolumeTag(tag) => Some(tag.to_string()),
            },
            loaded_slot: drive_status.loaded_slot,
            state,
        };
        list.push(entry);
    }
@@ -96,6 +136,7 @@ pub async fn get_status(name: String) -> Result<Vec<MtxStatusEntry>, Error> {
                ElementStatus::VolumeTag(tag) => Some(tag.to_string()),
            },
            loaded_slot: None,
            state: None,
        };
        list.push(entry);
    }
@@ -119,6 +160,9 @@ pub async fn get_status(name: String) -> Result<Vec<MtxStatusEntry>, Error> {
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_READ, false),
    },
)]
/// Transfers media from one slot to another
pub async fn transfer(
@@ -132,7 +176,8 @@ pub async fn transfer(
    let mut changer_config: ScsiTapeChanger = config.lookup("changer", &name)?;

    tokio::task::spawn_blocking(move || {
        changer_config.transfer(from, to)
        changer_config.transfer(from, to)?;
        Ok(())
    }).await?
}

@@ -147,11 +192,18 @@ pub async fn transfer(
            type: ChangerListEntry,
        },
    },
    access: {
        description: "List configured tape changers filtered by Tape.Audit privileges",
        permission: &Permission::Anybody,
    },
)]
/// List changers
pub fn list_changers(
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ChangerListEntry>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let (config, _digest) = config::drive::config()?;

@@ -162,6 +214,11 @@ pub fn list_changers(
    let mut list = Vec::new();

    for changer in changer_list {
        let privs = user_info.lookup_privs(&auth_id, &["tape", "changer", &changer.name]);
        if (privs & PRIV_TAPE_AUDIT) == 0 {
            continue;
        }

        let info = lookup_device_identification(&linux_changers, &changer.path);
        let entry = ChangerListEntry { config: changer, info };
        list.push(entry);
(File diff suppressed because it is too large.)
@@ -4,7 +4,7 @@ use anyhow::{bail, format_err, Error};
use serde::{Serialize, Deserialize};

use proxmox::{
-    api::{api, Router, SubdirMap},
+    api::{api, Router, SubdirMap, RpcEnvironment, Permission},
    list_subdirs_api_method,
    tools::Uuid,
};
@@ -12,18 +12,25 @@ use proxmox::{
use crate::{
    config::{
        self,
+        cached_user_info::CachedUserInfo,
+        acl::{
+            PRIV_TAPE_AUDIT,
+        },
    },
    api2::types::{
+        Authid,
        BACKUP_ID_SCHEMA,
        BACKUP_TYPE_SCHEMA,
        MEDIA_POOL_NAME_SCHEMA,
        MEDIA_LABEL_SCHEMA,
        MEDIA_UUID_SCHEMA,
        MEDIA_SET_UUID_SCHEMA,
+        CHANGER_NAME_SCHEMA,
        MediaPoolConfig,
        MediaListEntry,
        MediaStatus,
        MediaContentEntry,
+        VAULT_NAME_SCHEMA,
    },
    backup::{
        BackupDir,
@@ -44,6 +51,16 @@ use crate::{
            schema: MEDIA_POOL_NAME_SCHEMA,
            optional: true,
        },
+        "update-status": {
+            description: "Try to update tape library status (check what tapes are online).",
+            optional: true,
+            default: true,
+        },
+        "update-status-changer": {
+            // only update status for a single changer
+            schema: CHANGER_NAME_SCHEMA,
+            optional: true,
+        },
    },
},
returns: {
@@ -53,19 +70,32 @@ use crate::{
        type: MediaListEntry,
    },
},
+access: {
+    description: "List of registered backup media filtered by Tape.Audit privileges on pool",
+    permission: &Permission::Anybody,
+},
)]
/// List pool media
-pub async fn list_media(pool: Option<String>) -> Result<Vec<MediaListEntry>, Error> {
+pub async fn list_media(
+    pool: Option<String>,
+    update_status: bool,
+    update_status_changer: Option<String>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<MediaListEntry>, Error> {
+    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;

    let (config, _digest) = config::media_pool::config()?;

    let status_path = Path::new(TAPE_STATUS_DIR);

    let catalogs = tokio::task::spawn_blocking(move || {
-        // update online media status
-        if let Err(err) = update_online_status(status_path) {
-            eprintln!("{}", err);
-            eprintln!("update online media status failed - using old state");
+        if update_status {
+            // update online media status
+            if let Err(err) = update_online_status(status_path, update_status_changer.as_deref()) {
+                eprintln!("{}", err);
+                eprintln!("update online media status failed - using old state");
+            }
        }
        // test what catalog files we have
        MediaCatalog::media_with_catalogs(status_path)
@@ -84,13 +114,23 @@ pub async fn list_media(pool: Option<String>) -> Result<Vec<MediaListEntry>, Err
            }
        }

+        let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", pool_name]);
+        if (privs & PRIV_TAPE_AUDIT) == 0 {
+            continue;
+        }
+
        let config: MediaPoolConfig = config.lookup("pool", pool_name)?;

-        let use_offline_media = true; // does not matter here
-        let pool = MediaPool::with_config(status_path, &config, use_offline_media)?;
+        let changer_name = None; // assume standalone drive
+        let mut pool = MediaPool::with_config(status_path, &config, changer_name)?;

        let current_time = proxmox::tools::time::epoch_i64();

+        // Call start_write_session, so that we show the same status a
+        // backup job would see.
+        pool.force_media_availability();
+        pool.start_write_session(current_time)?;
+
        for media in pool.list_media() {
            let expired = pool.media_is_expired(&media, current_time);

@@ -130,38 +170,118 @@ pub async fn list_media(pool: Option<String>) -> Result<Vec<MediaListEntry>, Err
        }
    }

-    if pool.is_none() {
-        let inventory = Inventory::load(status_path)?;
+    let inventory = Inventory::load(status_path)?;
+    let privs = user_info.lookup_privs(&auth_id, &["tape", "pool"]);
+    if (privs & PRIV_TAPE_AUDIT) != 0 {
+        if pool.is_none() {

-        for media_id in inventory.list_unassigned_media() {
+            for media_id in inventory.list_unassigned_media() {

-            let (mut status, location) = inventory.status_and_location(&media_id.label.uuid);
+                let (mut status, location) = inventory.status_and_location(&media_id.label.uuid);

-            if status == MediaStatus::Unknown {
-                status = MediaStatus::Writable;
+                if status == MediaStatus::Unknown {
+                    status = MediaStatus::Writable;
+                }

+                list.push(MediaListEntry {
+                    uuid: media_id.label.uuid.clone(),
+                    ctime: media_id.label.ctime,
+                    label_text: media_id.label.label_text.to_string(),
+                    location,
+                    status,
+                    catalog: true, // empty, so we do not need a catalog
+                    expired: false,
+                    media_set_uuid: None,
+                    media_set_name: None,
+                    media_set_ctime: None,
+                    seq_nr: None,
+                    pool: None,
+                });
+            }

-            list.push(MediaListEntry {
-                uuid: media_id.label.uuid.clone(),
-                ctime: media_id.label.ctime,
-                label_text: media_id.label.label_text.to_string(),
-                location,
-                status,
-                catalog: true, // empty, so we do not need a catalog
-                expired: false,
-                media_set_uuid: None,
-                media_set_name: None,
-                media_set_ctime: None,
-                seq_nr: None,
-                pool: None,
-            });
-        }
        }

+        // add media with missing pool configuration
+        // set status to MediaStatus::Unknown
+        for uuid in inventory.media_list() {
+            let media_id = inventory.lookup_media(uuid).unwrap();
+            let media_set_label = match media_id.media_set_label {
+                Some(ref set) => set,
+                None => continue,
+            };
+
+            if config.sections.get(&media_set_label.pool).is_some() {
+                continue;
+            }
+
+            let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &media_set_label.pool]);
+            if (privs & PRIV_TAPE_AUDIT) == 0 {
+                continue;
+            }
+
+            let (_status, location) = inventory.status_and_location(uuid);
+
+            let media_set_name = inventory.generate_media_set_name(&media_set_label.uuid, None)?;
+
+            list.push(MediaListEntry {
+                uuid: media_id.label.uuid.clone(),
+                label_text: media_id.label.label_text.clone(),
+                ctime: media_id.label.ctime,
+                pool: Some(media_set_label.pool.clone()),
+                location,
+                status: MediaStatus::Unknown,
+                catalog: catalogs.contains(uuid),
+                expired: false,
+                media_set_ctime: Some(media_set_label.ctime),
+                media_set_uuid: Some(media_set_label.uuid.clone()),
+                media_set_name: Some(media_set_name),
+                seq_nr: Some(media_set_label.seq_nr),
+            });
+        }
+    }

    Ok(list)
}
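Note how `list_media` degrades gracefully: when `update-status` is set, a failed refresh is only logged and the last known state is served. A reduced sketch of that pattern, with an illustrative `refresh_state` stub in place of the real changer query:

fn refresh_state() -> Result<(), String> {
    Err("changer unreachable".into()) // simulate a failed refresh
}

fn list_with_cached_state(update_status: bool) -> Vec<String> {
    if update_status {
        // A refresh failure must not fail the listing; fall back to
        // the last known state and tell the user why.
        if let Err(err) = refresh_state() {
            eprintln!("{}", err);
            eprintln!("update online media status failed - using old state");
        }
    }
    vec!["tape0 (online?)".to_string()]
}

fn main() {
    println!("{:?}", list_with_cached_state(true));
}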
#[api(
    input: {
        properties: {
            "label-text": {
                schema: MEDIA_LABEL_SCHEMA,
            },
            "vault-name": {
                schema: VAULT_NAME_SCHEMA,
                optional: true,
            },
        },
    },
)]
/// Change Tape location to vault (if given), or offline.
pub fn move_tape(
    label_text: String,
    vault_name: Option<String>,
) -> Result<(), Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);
    let mut inventory = Inventory::load(status_path)?;

    let uuid = inventory.find_media_by_label_text(&label_text)
        .ok_or_else(|| format_err!("no such media '{}'", label_text))?
        .label
        .uuid
        .clone();

    if let Some(vault_name) = vault_name {
        inventory.set_media_location_vault(&uuid, &vault_name)?;
    } else {
        inventory.set_media_location_offline(&uuid)?;
    }

    Ok(())
}
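`move_tape` treats the optional vault-name as a two-way switch: present means move to that vault, absent means mark the tape offline. A reduced sketch of that `Option` dispatch; the `Location` enum is illustrative only:

#[derive(Debug)]
enum Location {
    Vault(String),
    Offline,
}

fn move_tape(vault_name: Option<String>) -> Location {
    // Some(name) selects the vault; None falls back to offline.
    match vault_name {
        Some(name) => Location::Vault(name),
        None => Location::Offline,
    }
}

fn main() {
    println!("{:?}", move_tape(Some("fire-safe".into())));
    println!("{:?}", move_tape(None));
}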
#[api(
    input: {
        properties: {
@@ -259,11 +379,18 @@ pub struct MediaContentListFilter {
            type: MediaContentEntry,
        },
    },
+    access: {
+        description: "List content filtered by Tape.Audit privilege on pool",
+        permission: &Permission::Anybody,
+    },
)]
/// List media content
pub fn list_content(
    filter: MediaContentListFilter,
+    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<MediaContentEntry>, Error> {
+    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;

    let (config, _digest) = config::media_pool::config()?;

@@ -283,6 +410,11 @@ pub fn list_content(
            if &set.pool != pool { continue; }
        }

+        let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &set.pool]);
+        if (privs & PRIV_TAPE_AUDIT) == 0 {
+            continue;
+        }
+
        if let Some(ref media_uuid) = filter.media {
            if &media_id.label.uuid != media_uuid { continue; }
        }
@@ -291,10 +423,13 @@ pub fn list_content(
            if &set.uuid != media_set_uuid { continue; }
        }

-        let config: MediaPoolConfig = config.lookup("pool", &set.pool)?;
+        let template = match config.lookup::<MediaPoolConfig>("pool", &set.pool) {
+            Ok(pool_config) => pool_config.template.clone(),
+            _ => None, // simply use default if there is no pool config
+        };

        let media_set_name = inventory
-            .generate_media_set_name(&set.uuid, config.template.clone())
+            .generate_media_set_name(&set.uuid, template)
            .unwrap_or_else(|_| set.uuid.to_string());

        let catalog = MediaCatalog::open(status_path, &media_id.label.uuid, false, false)?;
@@ -326,6 +461,76 @@ pub fn list_content(
    Ok(list)
}
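The template lookup above replaces a hard failure (`?` on a missing pool section) with a soft fallback to the default naming template. A generic sketch of that fallback, with an illustrative config type:

struct PoolConfig {
    template: Option<String>,
}

fn lookup_pool(name: &str) -> Result<PoolConfig, String> {
    Err(format!("no such pool '{}'", name)) // simulate a deleted pool
}

fn main() {
    // A missing pool config is not an error here; just use the default.
    let template = match lookup_pool("old-pool") {
        Ok(cfg) => cfg.template.clone(),
        _ => None, // simply fall back to the default template
    };
    println!("template: {:?}", template);
}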
#[api(
    input: {
        properties: {
            uuid: {
                schema: MEDIA_UUID_SCHEMA,
            },
        },
    },
)]
/// Get current media status
pub fn get_media_status(uuid: Uuid) -> Result<MediaStatus, Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);
    let inventory = Inventory::load(status_path)?;

    let (status, _location) = inventory.status_and_location(&uuid);

    Ok(status)
}

#[api(
    input: {
        properties: {
            uuid: {
                schema: MEDIA_UUID_SCHEMA,
            },
            status: {
                type: MediaStatus,
                optional: true,
            },
        },
    },
)]
/// Update media status (None, 'full', 'damaged' or 'retired')
///
/// It is not allowed to set status to 'writable' or 'unknown' (those
/// are internally managed states).
pub fn update_media_status(uuid: Uuid, status: Option<MediaStatus>) -> Result<(), Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);
    let mut inventory = Inventory::load(status_path)?;

    match status {
        None => inventory.clear_media_status(&uuid)?,
        Some(MediaStatus::Retired) => inventory.set_media_status_retired(&uuid)?,
        Some(MediaStatus::Damaged) => inventory.set_media_status_damaged(&uuid)?,
        Some(MediaStatus::Full) => inventory.set_media_status_full(&uuid)?,
        Some(status) => bail!("setting media status '{:?}' is not allowed", status),
    }

    Ok(())
}
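`update_media_status` whitelists the user-settable states; the catch-all arm rejects 'writable' and 'unknown' because those transitions are derived internally. A compact sketch of that whitelist match; the enum and error type here are illustrative, not the real API types:

#[derive(Debug)]
enum MediaStatus { Unknown, Writable, Full, Damaged, Retired }

fn update_status(status: Option<MediaStatus>) -> Result<&'static str, String> {
    // Only a fixed set of user-settable states is accepted; everything
    // else (Writable, Unknown) is managed internally and rejected.
    match status {
        None => Ok("status cleared"),
        Some(MediaStatus::Retired) => Ok("set retired"),
        Some(MediaStatus::Damaged) => Ok("set damaged"),
        Some(MediaStatus::Full) => Ok("set full"),
        Some(other) => Err(format!("setting media status '{:?}' is not allowed", other)),
    }
}

fn main() {
    println!("{:?}", update_status(Some(MediaStatus::Full)));
    println!("{:?}", update_status(Some(MediaStatus::Writable)));
}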
const MEDIA_SUBDIRS: SubdirMap = &[
    (
        "status",
        &Router::new()
            .get(&API_METHOD_GET_MEDIA_STATUS)
            .post(&API_METHOD_UPDATE_MEDIA_STATUS)
    ),
];

pub const MEDIA_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(MEDIA_SUBDIRS))
    .subdirs(MEDIA_SUBDIRS);

pub const MEDIA_LIST_ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_MEDIA)
    .match_all("uuid", &MEDIA_ROUTER);

const SUBDIRS: SubdirMap = &[
    (
        "content",
@@ -337,10 +542,11 @@ const SUBDIRS: SubdirMap = &[
        &Router::new()
            .get(&API_METHOD_DESTROY_MEDIA)
    ),
+    ( "list", &MEDIA_LIST_ROUTER ),
    (
-        "list",
+        "move",
        &Router::new()
-            .get(&API_METHOD_LIST_MEDIA)
+            .post(&API_METHOD_MOVE_TAPE)
    ),
];
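The router constants above layer a fixed subdirectory map with a match-all segment, so a path like list/{uuid}/status resolves through MEDIA_LIST_ROUTER into MEDIA_ROUTER while capturing the uuid as a parameter. A toy re-implementation of that resolution idea, assuming nothing from the proxmox router API (all types and handler names here are illustrative):

use std::collections::HashMap;

// Toy route table: fixed subdirectory names plus one match-all segment
// that captures a parameter value (here "uuid").
#[derive(Default)]
struct Node {
    handler: Option<&'static str>, // stand-in for a GET handler
    subdirs: HashMap<&'static str, Node>,
    match_all: Option<(&'static str, Box<Node>)>,
}

fn route<'a>(node: &'a Node, path: &[&str], params: &mut Vec<(String, String)>) -> Option<&'a str> {
    match path.split_first() {
        None => node.handler,
        Some((seg, rest)) => {
            if let Some(next) = node.subdirs.get(*seg) {
                return route(next, rest, params);
            }
            // The match-all entry consumes the segment as a parameter value.
            if let Some((name, next)) = &node.match_all {
                params.push((name.to_string(), seg.to_string()));
                return route(next, rest, params);
            }
            None
        }
    }
}

fn main() {
    let mut media = Node::default();
    media.subdirs.insert("status", Node { handler: Some("GET_MEDIA_STATUS"), ..Default::default() });

    let mut list = Node { handler: Some("LIST_MEDIA"), ..Default::default() };
    list.match_all = Some(("uuid", Box::new(media)));

    let mut params = Vec::new();
    let handler = route(&list, &["some-uuid", "status"], &mut params);
    println!("{:?} {:?}", handler, params); // Some("GET_MEDIA_STATUS") [("uuid", "some-uuid")]
}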
@@ -11,6 +11,7 @@ use proxmox::{
    RpcEnvironment,
    RpcEnvironmentType,
    Router,
+    Permission,
    section_config::SectionConfigData,
},
tools::{
@@ -24,17 +25,23 @@ use proxmox::{
};

use crate::{
    task_log,
    task::TaskState,
    tools::compute_file_csum,
    api2::types::{
        DATASTORE_SCHEMA,
        DRIVE_NAME_SCHEMA,
        UPID_SCHEMA,
        Authid,
        MediaPoolConfig,
        Userid,
    },
    config::{
        self,
        drive::check_drive_exists,
        cached_user_info::CachedUserInfo,
        acl::{
            PRIV_DATASTORE_BACKUP,
            PRIV_TAPE_READ,
        },
    },
    backup::{
        archive_type,
@@ -49,13 +56,15 @@ use crate::{
        DynamicIndexReader,
        FixedIndexReader,
    },
-    server::WorkerTask,
+    server::{
+        lookup_user_email,
+        WorkerTask,
+    },
    tape::{
        TAPE_STATUS_DIR,
        TapeRead,
        MediaId,
        MediaCatalog,
-        ChunkArchiveDecoder,
        MediaPool,
        Inventory,
        file_formats::{
@@ -65,18 +74,20 @@ use crate::{
            PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
            PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
            MediaContentHeader,
+            ChunkArchiveDecoder,
        },
        drive::{
            TapeDriver,
            request_and_load_media,
-        }
+            lock_tape_device,
+            set_tape_device_state,
+        },
    },
};

pub const ROUTER: Router = Router::new()
    .post(&API_METHOD_RESTORE);


#[api(
    input: {
        properties: {
@@ -90,23 +101,43 @@ pub const ROUTER: Router = Router::new()
            description: "Media set UUID.",
            type: String,
        },
+        "notify-user": {
+            type: Userid,
+            optional: true,
+        },
    },
},
returns: {
    schema: UPID_SCHEMA,
},
+access: {
+    // Note: parameters are no uri parameter, so we need to test inside function body
+    description: "The user needs Tape.Read privilege on /tape/pool/{pool} \
+        and /tape/drive/{drive}, Datastore.Backup privilege on /datastore/{store}.",
+    permission: &Permission::Anybody,
+},
)]
/// Restore data from media-set
pub fn restore(
    store: String,
    drive: String,
    media_set: String,
+    notify_user: Option<Userid>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

-    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;

+    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
+    if (privs & PRIV_DATASTORE_BACKUP) == 0 {
+        bail!("no permissions on /datastore/{}", store);
+    }
+
+    let privs = user_info.lookup_privs(&auth_id, &["tape", "drive", &drive]);
+    if (privs & PRIV_TAPE_READ) == 0 {
+        bail!("no permissions on /tape/drive/{}", drive);
+    }

    let status_path = Path::new(TAPE_STATUS_DIR);
    let inventory = Inventory::load(status_path)?;
@@ -115,13 +146,17 @@ pub fn restore(

    let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;

-    // check if pool exists
    let (config, _digest) = config::media_pool::config()?;
-    let _pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;
+    let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool]);
+    if (privs & PRIV_TAPE_READ) == 0 {
+        bail!("no permissions on /tape/pool/{}", pool);
+    }
+
+    let datastore = DataStore::lookup_datastore(&store)?;

    let (drive_config, _digest) = config::drive::config()?;
-    // early check before starting worker
-    check_drive_exists(&drive_config, &drive)?;

+    // early check/lock before starting worker
+    let drive_lock = lock_tape_device(&drive_config, &drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

@@ -131,6 +166,9 @@ pub fn restore(
        auth_id.clone(),
        to_stdout,
        move |worker| {
+            let _drive_lock = drive_lock; // keep lock guard
+
+            set_tape_device_state(&drive, &worker.upid().to_string())?;

            let _lock = MediaPool::lock(status_path, &pool)?;

@@ -159,20 +197,21 @@ pub fn restore(
                }
            }

-            worker.log(format!("Restore mediaset '{}'", media_set));
+            task_log!(worker, "Restore mediaset '{}'", media_set);
            if let Some(fingerprint) = encryption_key_fingerprint {
-                worker.log(format!("Encryption key fingerprint: {}", fingerprint));
+                task_log!(worker, "Encryption key fingerprint: {}", fingerprint);
            }
-            worker.log(format!("Pool: {}", pool));
-            worker.log(format!("Datastore: {}", store));
-            worker.log(format!("Drive: {}", drive));
-            worker.log(format!(
+            task_log!(worker, "Pool: {}", pool);
+            task_log!(worker, "Datastore: {}", store);
+            task_log!(worker, "Drive: {}", drive);
+            task_log!(
+                worker,
                "Required media list: {}",
                media_id_list.iter()
                    .map(|media_id| media_id.label.label_text.as_str())
                    .collect::<Vec<&str>>()
                    .join(";")
-            ));
+            );

            for media_id in media_id_list.iter() {
                request_and_restore_media(
@@ -182,10 +221,21 @@ pub fn restore(
                    &drive,
                    &datastore,
                    &auth_id,
+                    &notify_user,
                )?;
            }

-            worker.log(format!("Restore mediaset '{}' done", media_set));
+            task_log!(worker, "Restore mediaset '{}' done", media_set);
+
+            if let Err(err) = set_tape_device_state(&drive, "") {
+                task_log!(
+                    worker,
+                    "could not unset drive state for {}: {}",
+                    drive,
+                    err
+                );
+            }
+
            Ok(())
        }
    )?;
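The recurring change in this file swaps worker.log(format!(...)) for task_log!(worker, ...). A minimal sketch of how such a forwarding macro can look; this is an illustrative stand-in, not the proxmox task_log! implementation:

// Minimal stand-in for a task logger (not the proxmox WorkerTask type).
struct Worker;

impl Worker {
    fn log(&self, msg: String) {
        println!("{}", msg);
    }
}

// A task_log!-style macro: forwards a format string plus arguments to
// worker.log(), so call sites avoid spelling out format!() every time.
macro_rules! task_log {
    ($worker:expr, $($arg:tt)*) => {
        $worker.log(format!($($arg)*))
    };
}

fn main() {
    let worker = Worker;
    task_log!(worker, "Restore mediaset '{}'", "set-1");
    task_log!(worker, "register {} chunks", 42);
}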
@@ -201,6 +251,7 @@ pub fn request_and_restore_media(
    drive_name: &str,
    datastore: &DataStore,
    authid: &Authid,
+    notify_user: &Option<Userid>,
) -> Result<(), Error> {

    let media_set_uuid = match media_id.media_set_label {
@@ -208,7 +259,12 @@ pub fn request_and_restore_media(
        Some(ref set) => &set.uuid,
    };

-    let (mut drive, info) = request_and_load_media(worker, &drive_config, &drive_name, &media_id.label)?;
+    let email = notify_user
+        .as_ref()
+        .and_then(|userid| lookup_user_email(userid))
+        .or_else(|| lookup_user_email(&authid.clone().into()));
+
+    let (mut drive, info) = request_and_load_media(worker, &drive_config, &drive_name, &media_id.label, &email)?;

    match info.media_set_label {
        None => {
@@ -249,7 +305,7 @@ pub fn restore_media(
    let current_file_number = drive.current_file_number()?;
    let reader = match drive.read_next_file()? {
        None => {
-            worker.log(format!("detected EOT after {} files", current_file_number));
+            task_log!(worker, "detected EOT after {} files", current_file_number);
            break;
        }
        Some(reader) => reader,
@@ -287,7 +343,7 @@ fn restore_archive<'a>(
            let snapshot = reader.read_exact_allocated(header.size as usize)?;
            let snapshot = std::str::from_utf8(&snapshot)
                .map_err(|_| format_err!("found snapshot archive with non-utf8 characters in name"))?;
-            worker.log(format!("Found snapshot archive: {} {}", current_file_number, snapshot));
+            task_log!(worker, "Found snapshot archive: {} {}", current_file_number, snapshot);

            let backup_dir: BackupDir = snapshot.parse()?;

@@ -303,16 +359,16 @@ fn restore_archive<'a>(
            path.push(rel_path);

            if is_new {
-                worker.log(format!("restore snapshot {}", backup_dir));
+                task_log!(worker, "restore snapshot {}", backup_dir);

-                match restore_snapshot_archive(reader, &path) {
+                match restore_snapshot_archive(worker, reader, &path) {
                    Err(err) => {
                        std::fs::remove_dir_all(&path)?;
                        bail!("restore snapshot {} failed - {}", backup_dir, err);
                    }
                    Ok(false) => {
                        std::fs::remove_dir_all(&path)?;
-                        worker.log(format!("skip incomplete snapshot {}", backup_dir));
+                        task_log!(worker, "skip incomplete snapshot {}", backup_dir);
                    }
                    Ok(true) => {
                        catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
@@ -331,7 +387,7 @@ fn restore_archive<'a>(
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {

-            worker.log(format!("Found chunk archive: {}", current_file_number));
+            task_log!(worker, "Found chunk archive: {}", current_file_number);
            let datastore = target.as_ref().map(|t| t.0);

            if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
@@ -339,7 +395,7 @@ fn restore_archive<'a>(
                for digest in chunks.iter() {
                    catalog.register_chunk(&digest)?;
                }
-                worker.log(format!("register {} chunks", chunks.len()));
+                task_log!(worker, "register {} chunks", chunks.len());
                catalog.end_chunk_archive()?;
                catalog.commit_if_large()?;
            }
@@ -365,6 +421,9 @@ fn restore_chunk_archive<'a>(

    let result: Result<_, Error> = proxmox::try_block!({
        while let Some((digest, blob)) = decoder.next_chunk()? {
+
+            worker.check_abort()?;

            if let Some(datastore) = datastore {
                let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
                if !chunk_exists {
@@ -374,14 +433,14 @@ fn restore_chunk_archive<'a>(
                    blob.decode(None, Some(&digest))?; // verify digest
                }
                if verbose {
-                    worker.log(format!("Insert chunk: {}", proxmox::tools::digest_to_hex(&digest)));
+                    task_log!(worker, "Insert chunk: {}", proxmox::tools::digest_to_hex(&digest));
                }
                datastore.insert_chunk(&blob, &digest)?;
            } else if verbose {
-                worker.log(format!("Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest)));
+                task_log!(worker, "Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest));
            }
        } else if verbose {
-            worker.log(format!("Found chunk: {}", proxmox::tools::digest_to_hex(&digest)));
+            task_log!(worker, "Found chunk: {}", proxmox::tools::digest_to_hex(&digest));
        }
        chunks.push(digest);
    }
@@ -411,12 +470,13 @@ fn restore_chunk_archive<'a>(
}

fn restore_snapshot_archive<'a>(
+    worker: &WorkerTask,
    reader: Box<dyn 'a + TapeRead>,
    snapshot_path: &Path,
) -> Result<bool, Error> {

    let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
-    match try_restore_snapshot_archive(&mut decoder, snapshot_path) {
+    match try_restore_snapshot_archive(worker, &mut decoder, snapshot_path) {
        Ok(()) => Ok(true),
        Err(err) => {
            let reader = decoder.input();
@@ -438,6 +498,7 @@ fn restore_snapshot_archive<'a>(
}

fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
+    worker: &WorkerTask,
    decoder: &mut pxar::decoder::sync::Decoder<R>,
    snapshot_path: &Path,
) -> Result<(), Error> {
@@ -460,6 +521,8 @@ fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
    let mut manifest = None;

    loop {
+        worker.check_abort()?;
+
        let entry = match decoder.next() {
            None => break,
            Some(entry) => entry?,
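The notification address above is resolved with an Option chain: prefer the explicitly requested notify-user's email, else fall back to the authenticated user's. A reduced sketch of that chain, with an illustrative lookup stub in place of the real user config:

fn lookup_user_email(userid: &str) -> Option<String> {
    // Illustrative stand-in for reading the user configuration.
    match userid {
        "operator@pbs" => Some("operator@example.com".to_string()),
        _ => None,
    }
}

fn notification_target(notify_user: &Option<String>, auth_id: &str) -> Option<String> {
    // Prefer the requested notify-user; if unset or without a mail
    // address, fall back to the authenticated user's address.
    notify_user
        .as_ref()
        .and_then(|userid| lookup_user_email(userid))
        .or_else(|| lookup_user_email(auth_id))
}

fn main() {
    println!("{:?}", notification_target(&None, "operator@pbs"));
    println!("{:?}", notification_target(&Some("root@pam".into()), "operator@pbs"));
}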
(Some files were not shown because too many files have changed in this diff)