Compare commits
321 Commits
Author | SHA1 | Date | |
---|---|---|---|
1825c1a9b7 | |||
9a8bf2cac9 | |||
cc5ef79bec | |||
3725d95c65 | |||
4fb068019e | |||
6446a078a0 | |||
1d7fcbece8 | |||
8703a68a31 | |||
9bcdade85f | |||
b0156179b9 | |||
d0a0bad9d6 | |||
a4003d9078 | |||
3f4a62de2f | |||
bf23f63aa5 | |||
fd641b99c3 | |||
225affc9ca | |||
9ce2481a69 | |||
d95c74c6e7 | |||
218ee3269f | |||
5ca5f8daf3 | |||
98cdee781a | |||
9cf4504909 | |||
5f846a3fc1 | |||
c9793d47f9 | |||
be8adca115 | |||
9152a0077f | |||
0b90c67fb4 | |||
b4975d3102 | |||
ee33795b72 | |||
90e16be3ae | |||
cf90a369e2 | |||
6b303323be | |||
1576c7a0d9 | |||
cd5d6103ea | |||
207f763d1a | |||
1bed3aedc8 | |||
ab77d660cc | |||
b74a1daae9 | |||
bec357e2cb | |||
78593b5b5c | |||
7d6f03a7fe | |||
f46573f8c3 | |||
b83e136fb6 | |||
5c4203b20c | |||
7f9eef1d47 | |||
a8a0132766 | |||
831c43c91b | |||
b452e2df74 | |||
7f37cacfac | |||
3bb7e62e88 | |||
3b060167f6 | |||
8a76e71129 | |||
396fd747a6 | |||
16bd08b297 | |||
ccdf327ac8 | |||
8cd63df0dc | |||
b90cb34fd6 | |||
d6c1e12c06 | |||
d33d1c880b | |||
985e84e369 | |||
cc2c5c7762 | |||
40bf636b47 | |||
347cde827b | |||
ac4a1fb35c | |||
6f3714b9aa | |||
d810014eeb | |||
e0f6892625 | |||
9d5b426a6d | |||
8bf5769382 | |||
2970cd3d6d | |||
d41114c5a8 | |||
6c92449702 | |||
db04d10d14 | |||
5a4233f07b | |||
3c715edd07 | |||
bbe05d7fe9 | |||
2af8b8ef91 | |||
d4bfdfe749 | |||
1d14c31658 | |||
9bd81bb384 | |||
d64226efee | |||
2440eaa2df | |||
e8bf4f31f2 | |||
6682461d88 | |||
41f1132e0e | |||
d938c9337a | |||
9896a75caf | |||
7eefd0c3d7 | |||
2e268e311c | |||
3e182fd828 | |||
7b60850334 | |||
1552d9699c | |||
7507b19cd2 | |||
16f9ea6708 | |||
d984a9acf0 | |||
955f4aefcd | |||
858bbfbbd1 | |||
c1570b373f | |||
d336363771 | |||
e57aa36d3e | |||
b488f850aa | |||
ec07a280ba | |||
5006632550 | |||
7eb9f48485 | |||
31cba7098d | |||
f4571b0b50 | |||
3832911d50 | |||
28c86760da | |||
c4604ca468 | |||
464c409aa3 | |||
08ec39be0c | |||
25350f3370 | |||
0023cfa385 | |||
ed24142767 | |||
917230e4f8 | |||
05228f17f5 | |||
e8653b96be | |||
1cf191c597 | |||
3d3e31b7f8 | |||
8730cfcc3e | |||
5830e5620d | |||
46d53e3e90 | |||
3554fe6480 | |||
0dadf66dc7 | |||
a941bbd0c9 | |||
21e3ed3449 | |||
81678129fb | |||
52d8db7925 | |||
875d375d7a | |||
cba167b874 | |||
e68c0e68bd | |||
ff2bc2d21f | |||
4961404c7c | |||
3fbf2311e4 | |||
41685061f7 | |||
35a7ab5778 | |||
e1beaae4a2 | |||
965bd58693 | |||
00fdaaf12b | |||
60473d234a | |||
4f688e09a4 | |||
24e84128e4 | |||
e63457b6b2 | |||
a83cedc2ac | |||
076afa6197 | |||
423e3cbd18 | |||
0263396187 | |||
043018cfbe | |||
2037d9af03 | |||
7f07991035 | |||
18ce01caff | |||
5bc8e80a99 | |||
6252df4c18 | |||
451856d21d | |||
aa30663ca5 | |||
8616a4afe5 | |||
bc2358319b | |||
0bf4b81370 | |||
c9dd5a2452 | |||
cf95f616c5 | |||
1adbc7c13c | |||
9d28974c27 | |||
3dbc35b5c1 | |||
fee0fe5422 | |||
86d9f4e733 | |||
3f16f1b006 | |||
cbd9899389 | |||
cd44fb8d84 | |||
aca4c2b5a9 | |||
85eedfb78b | |||
f26276bc4e | |||
6d62e69f9a | |||
4188fd59a0 | |||
5b9f575648 | |||
0d890ec414 | |||
926d05ef0b | |||
8be48ddfc7 | |||
41e66bfaf6 | |||
47a7241410 | |||
54c77b3d62 | |||
a1c5575308 | |||
a44c934b5d | |||
546d2653ee | |||
33c06b3388 | |||
1917ea3ce1 | |||
70842b9ef2 | |||
e6122a657e | |||
9e860ac01a | |||
7690a8e7bd | |||
1860208560 | |||
1689296d46 | |||
7aa4851b77 | |||
6ef8e2902f | |||
aa16b7b284 | |||
9bbd83b1f2 | |||
65535670f9 | |||
9d42fe4d3b | |||
918a367258 | |||
970a70b41e | |||
4094fe5a31 | |||
dea8e2cb54 | |||
0514a4308f | |||
d0647e5a02 | |||
bbe06f97be | |||
f1a83e9759 | |||
38a81c6b46 | |||
6afb60abf5 | |||
a42212fc1e | |||
2e21948156 | |||
5279ee745f | |||
227501c063 | |||
89d25b1931 | |||
b57c0dbe30 | |||
8b910bb6bc | |||
dfde34e612 | |||
2530811e22 | |||
85205bc253 | |||
3cdd1a3424 | |||
002865405c | |||
8a73ef897a | |||
be61c56c21 | |||
dbaef7d04d | |||
2048073355 | |||
a585e1f696 | |||
415737b2b8 | |||
54f7007cc5 | |||
b0338178d7 | |||
159100b944 | |||
41a8db3576 | |||
fe291ab794 | |||
adb65b9889 | |||
8513626b9f | |||
7ca0ba4515 | |||
42200c405a | |||
be327dbccd | |||
c724dc3892 | |||
70dc2ff3ab | |||
81f5d03e8d | |||
e50c6b94c1 | |||
28eaff20bd | |||
8d1a1b2976 | |||
92eaec53db | |||
b3c2c57897 | |||
f458e97fda | |||
80bf9ae99b | |||
bebd4a7ca4 | |||
9468e94412 | |||
6b66c8507f | |||
167e5406c3 | |||
c111c9a931 | |||
bb71e3a023 | |||
7b1bf4c098 | |||
32b88d928a | |||
f8e1932337 | |||
7c9fb570cc | |||
56d22c66c0 | |||
85cdc4f371 | |||
96bcfb9b1f | |||
4a874665eb | |||
6f6b69946e | |||
5b7f44555e | |||
2ca396c015 | |||
d8dae16035 | |||
8f02db04f9 | |||
9f35e44681 | |||
6279b8f5a5 | |||
3084232cb5 | |||
67cc79ec52 | |||
b9a09a9501 | |||
4a0d3a3e3f | |||
2322a980d0 | |||
c19f5b85a3 | |||
7f9d8438ab | |||
51c80c5a52 | |||
6477ebcf6f | |||
bc02c2789c | |||
c0b3d09236 | |||
3ddbab6193 | |||
befd95a90a | |||
ab6cd4229b | |||
9213744ecb | |||
41c0333814 | |||
afcf8b3ed6 | |||
69ebbec40b | |||
b22a9c14a4 | |||
54067d8225 | |||
d64c4eeab0 | |||
15d2c7786e | |||
73a1da5ed6 | |||
fbf8779388 | |||
3231c35fb8 | |||
ced7838de4 | |||
2f26b8668a | |||
9432838914 | |||
1a89a7794e | |||
c0a87c12fb | |||
c6a7ea0a2f | |||
5bb057e5a2 | |||
2924b37d6d | |||
42c0f784e2 | |||
05f17d1ec4 | |||
777690a121 | |||
a98e228766 | |||
4c9174ce26 | |||
1d70e3812c | |||
e2225aa882 | |||
99dd709f3e | |||
f197c286d5 | |||
b121711baa | |||
085655b21b | |||
4c209d6b10 | |||
8dc45e291a | |||
ec1ae7e631 | |||
25aa55b5f5 | |||
b5c6088130 | |||
a65eb0ec29 | |||
42eef1451c | |||
11ecf058e4 | |||
5f1f7ef564 | |||
2e4e698633 | |||
02dce8cad0 |
10
.gitignore
vendored
10
.gitignore
vendored
@ -1,6 +1,16 @@
|
||||
local.mak
|
||||
/target
|
||||
**/*.rs.bk
|
||||
*~
|
||||
*.backup
|
||||
*.backup[0-9]
|
||||
*.backup[0-9][0-9]
|
||||
*.old
|
||||
*.old[0-9]
|
||||
*.old[0-9][0-9]
|
||||
*.5
|
||||
*.7
|
||||
__pycache__/
|
||||
/etc/proxmox-backup.service
|
||||
/etc/proxmox-backup-proxy.service
|
||||
build/
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "proxmox-backup"
|
||||
version = "1.0.8"
|
||||
version = "1.0.9"
|
||||
authors = [
|
||||
"Dietmar Maurer <dietmar@proxmox.com>",
|
||||
"Dominik Csapak <d.csapak@proxmox.com>",
|
||||
@ -48,11 +48,11 @@ percent-encoding = "2.1"
|
||||
pin-utils = "0.1.0"
|
||||
pin-project = "1.0"
|
||||
pathpatterns = "0.1.2"
|
||||
proxmox = { version = "0.10.1", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
||||
proxmox = { version = "0.11.0", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
||||
#proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
|
||||
#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
||||
proxmox-fuse = "0.1.1"
|
||||
pxar = { version = "0.8.0", features = [ "tokio-io" ] }
|
||||
pxar = { version = "0.9.0", features = [ "tokio-io" ] }
|
||||
#pxar = { path = "../pxar", features = [ "tokio-io" ] }
|
||||
regex = "1.2"
|
||||
rustyline = "7"
|
||||
|
4
Makefile
4
Makefile
@ -10,7 +10,9 @@ SUBDIRS := etc www docs
|
||||
USR_BIN := \
|
||||
proxmox-backup-client \
|
||||
pxar \
|
||||
pmtx
|
||||
proxmox-tape \
|
||||
pmtx \
|
||||
pmt
|
||||
|
||||
# Binaries usable by admins
|
||||
USR_SBIN := \
|
||||
|
37
debian/changelog
vendored
37
debian/changelog
vendored
@ -1,3 +1,40 @@
|
||||
rust-proxmox-backup (1.0.9-1) unstable; urgency=medium
|
||||
|
||||
* client: track key source, print when used
|
||||
|
||||
* fix #3026: pxar: metadata: apply flags _after_ updating mtime
|
||||
|
||||
* docs: add acl.cfg, datastore.cfg, remote.cfg, sync.cfg, user.cfg and
|
||||
verification.cfg manual pages
|
||||
|
||||
* docs: add API viewer
|
||||
|
||||
* proxmox-backup-manager: add verify-job command group with various sub
|
||||
commands
|
||||
|
||||
* add experimental opt-in tape backup support
|
||||
|
||||
* lto-barcode: fix page offset calibration
|
||||
|
||||
* lto-barcode: fix avery 3420 paper format properties
|
||||
|
||||
* asyncify pxar create archive
|
||||
|
||||
* client: raise HTTP_TIMEOUT for simple requests to 120s
|
||||
|
||||
* docs: depend on mathjax library package from debian instead of CDN
|
||||
|
||||
* fix #3321: docs: client: fix interactive restore command explanation
|
||||
|
||||
* ui: use shorter datetime format for encryption key creation time
|
||||
|
||||
* docs: TFA: improve language
|
||||
|
||||
* config/TFA: webauthn: disallow registering the same token more than once,
|
||||
that can lead to buggy behavior in some token/browser combinations.
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Mon, 08 Mar 2021 15:54:47 +0100
|
||||
|
||||
rust-proxmox-backup (1.0.8-1) unstable; urgency=medium
|
||||
|
||||
* Https Connector: use hostname instead of URL again to avoid certificate
|
||||
|
13
debian/control
vendored
13
debian/control
vendored
@ -36,13 +36,13 @@ Build-Depends: debhelper (>= 11),
|
||||
librust-percent-encoding-2+default-dev (>= 2.1-~~),
|
||||
librust-pin-project-1+default-dev,
|
||||
librust-pin-utils-0.1+default-dev,
|
||||
librust-proxmox-0.10+api-macro-dev (>= 0.10.1-~~),
|
||||
librust-proxmox-0.10+default-dev (>= 0.10.1-~~),
|
||||
librust-proxmox-0.10+sortable-macro-dev (>= 0.10.1-~~),
|
||||
librust-proxmox-0.10+websocket-dev (>= 0.10.1-~~),
|
||||
librust-proxmox-0.11+api-macro-dev,
|
||||
librust-proxmox-0.11+default-dev,
|
||||
librust-proxmox-0.11+sortable-macro-dev,
|
||||
librust-proxmox-0.11+websocket-dev,
|
||||
librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
|
||||
librust-pxar-0.8+default-dev,
|
||||
librust-pxar-0.8+tokio-io-dev,
|
||||
librust-pxar-0.9+default-dev,
|
||||
librust-pxar-0.9+tokio-io-dev,
|
||||
librust-regex-1+default-dev (>= 1.2-~~),
|
||||
librust-rustyline-7+default-dev,
|
||||
librust-serde-1+default-dev,
|
||||
@ -141,6 +141,7 @@ Package: proxmox-backup-docs
|
||||
Build-Profiles: <!nodoc>
|
||||
Section: doc
|
||||
Depends: libjs-extjs,
|
||||
libjs-mathjax,
|
||||
${misc:Depends},
|
||||
Architecture: all
|
||||
Description: Proxmox Backup Documentation
|
||||
|
1
debian/control.in
vendored
1
debian/control.in
vendored
@ -38,6 +38,7 @@ Package: proxmox-backup-docs
|
||||
Build-Profiles: <!nodoc>
|
||||
Section: doc
|
||||
Depends: libjs-extjs,
|
||||
libjs-mathjax,
|
||||
${misc:Depends},
|
||||
Architecture: all
|
||||
Description: Proxmox Backup Documentation
|
||||
|
3
debian/pmt.bc
vendored
Normal file
3
debian/pmt.bc
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
# pmt bash completion
|
||||
|
||||
complete -C 'pmt bashcomplete' pmt
|
2
debian/proxmox-backup-docs.links
vendored
2
debian/proxmox-backup-docs.links
vendored
@ -1,3 +1,5 @@
|
||||
/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/html/proxmox-backup.pdf
|
||||
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/prune-simulator/extjs
|
||||
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/lto-barcode/extjs
|
||||
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/api-viewer/extjs
|
||||
/usr/share/javascript/mathjax /usr/share/doc/proxmox-backup/html/_static/mathjax
|
||||
|
2
debian/proxmox-backup-server.bash-completion
vendored
2
debian/proxmox-backup-server.bash-completion
vendored
@ -1,2 +1,4 @@
|
||||
debian/proxmox-backup-manager.bc proxmox-backup-manager
|
||||
debian/proxmox-tape.bc proxmox-tape
|
||||
debian/pmtx.bc pmtx
|
||||
debian/pmt.bc pmt
|
||||
|
15
debian/proxmox-backup-server.install
vendored
15
debian/proxmox-backup-server.install
vendored
@ -11,12 +11,27 @@ usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-daily-update
|
||||
usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd
|
||||
usr/sbin/proxmox-backup-manager
|
||||
usr/bin/pmtx
|
||||
usr/bin/pmt
|
||||
usr/bin/proxmox-tape
|
||||
usr/share/javascript/proxmox-backup/index.hbs
|
||||
usr/share/javascript/proxmox-backup/css/ext6-pbs.css
|
||||
usr/share/javascript/proxmox-backup/images
|
||||
usr/share/javascript/proxmox-backup/js/proxmox-backup-gui.js
|
||||
usr/share/man/man1/proxmox-backup-manager.1
|
||||
usr/share/man/man1/proxmox-backup-proxy.1
|
||||
usr/share/man/man1/proxmox-tape.1
|
||||
usr/share/man/man1/pmtx.1
|
||||
usr/share/man/man1/pmt.1
|
||||
usr/share/man/man5/acl.cfg.5
|
||||
usr/share/man/man5/datastore.cfg.5
|
||||
usr/share/man/man5/user.cfg.5
|
||||
usr/share/man/man5/remote.cfg.5
|
||||
usr/share/man/man5/sync.cfg.5
|
||||
usr/share/man/man5/verification.cfg.5
|
||||
usr/share/man/man5/media-pool.cfg.5
|
||||
usr/share/man/man5/tape.cfg.5
|
||||
usr/share/man/man5/tape-job.cfg.5
|
||||
usr/share/zsh/vendor-completions/_proxmox-backup-manager
|
||||
usr/share/zsh/vendor-completions/_proxmox-tape
|
||||
usr/share/zsh/vendor-completions/_pmtx
|
||||
usr/share/zsh/vendor-completions/_pmt
|
||||
|
3
debian/proxmox-tape.bc
vendored
Normal file
3
debian/proxmox-tape.bc
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
# proxmox-tape bash completion
|
||||
|
||||
complete -C 'proxmox-tape bashcomplete' proxmox-tape
|
@ -5,6 +5,7 @@ LIBDIR = $(PREFIX)/lib
|
||||
LIBEXECDIR = $(LIBDIR)
|
||||
DATAROOTDIR = $(PREFIX)/share
|
||||
MAN1DIR = $(PREFIX)/share/man/man1
|
||||
MAN5DIR = $(PREFIX)/share/man/man5
|
||||
DOCDIR = $(PREFIX)/share/doc/proxmox-backup
|
||||
JSDIR = $(DATAROOTDIR)/javascript/proxmox-backup
|
||||
SYSCONFDIR = /etc
|
||||
|
126
docs/Makefile
126
docs/Makefile
@ -1,21 +1,43 @@
|
||||
include ../defines.mk
|
||||
|
||||
GENERATED_SYNOPSIS := \
|
||||
proxmox-tape/synopsis.rst \
|
||||
proxmox-backup-client/synopsis.rst \
|
||||
proxmox-backup-client/catalog-shell-synopsis.rst \
|
||||
proxmox-backup-manager/synopsis.rst \
|
||||
pxar/synopsis.rst \
|
||||
pmtx/synopsis.rst \
|
||||
backup-protocol-api.rst \
|
||||
reader-protocol-api.rst
|
||||
pmt/synopsis.rst \
|
||||
config/media-pool/config.rst \
|
||||
config/tape/config.rst \
|
||||
config/tape-job/config.rst \
|
||||
config/user/config.rst \
|
||||
config/remote/config.rst \
|
||||
config/sync/config.rst \
|
||||
config/verification/config.rst \
|
||||
config/acl/roles.rst \
|
||||
config/datastore/config.rst
|
||||
|
||||
MANUAL_PAGES := \
|
||||
MAN1_PAGES := \
|
||||
pxar.1 \
|
||||
pmtx.1 \
|
||||
pmt.1 \
|
||||
proxmox-tape.1 \
|
||||
proxmox-backup-proxy.1 \
|
||||
proxmox-backup-client.1 \
|
||||
proxmox-backup-manager.1
|
||||
|
||||
MAN5_PAGES := \
|
||||
media-pool.cfg.5 \
|
||||
tape.cfg.5 \
|
||||
tape-job.cfg.5 \
|
||||
acl.cfg.5 \
|
||||
user.cfg.5 \
|
||||
remote.cfg.5 \
|
||||
sync.cfg.5 \
|
||||
verification.cfg.5 \
|
||||
datastore.cfg.5
|
||||
|
||||
PRUNE_SIMULATOR_FILES := \
|
||||
prune-simulator/index.html \
|
||||
prune-simulator/documentation.html \
|
||||
@ -35,6 +57,10 @@ LTO_BARCODE_FILES := \
|
||||
lto-barcode/label-setup.js \
|
||||
lto-barcode/lto-barcode.js
|
||||
|
||||
API_VIEWER_SOURCES= \
|
||||
api-viewer/index.html \
|
||||
api-viewer/apidoc.js
|
||||
|
||||
# Sphinx documentation setup
|
||||
SPHINXOPTS =
|
||||
SPHINXBUILD = sphinx-build
|
||||
@ -51,15 +77,7 @@ endif
|
||||
# Sphinx internal variables.
|
||||
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(SPHINXOPTS) .
|
||||
|
||||
all: ${MANUAL_PAGES}
|
||||
|
||||
# Extract backup protocol docs
|
||||
backup-protocol-api.rst: ${COMPILEDIR}/dump-backup-api
|
||||
${COMPILEDIR}/dump-backup-api >$@
|
||||
|
||||
# Extract reader protocol docs
|
||||
reader-protocol-api.rst: ${COMPILEDIR}/dump-reader-api
|
||||
${COMPILEDIR}/dump-backup-api >$@
|
||||
all: ${MAN1_PAGES} ${MAN5_PAGES}
|
||||
|
||||
# Build manual pages using rst2man
|
||||
|
||||
@ -77,6 +95,72 @@ pmtx.1: pmtx/man1.rst pmtx/description.rst pmtx/synopsis.rst
|
||||
rst2man $< >$@
|
||||
|
||||
|
||||
pmt/synopsis.rst: ${COMPILEDIR}/pmt
|
||||
${COMPILEDIR}/pmt printdoc > pmt/synopsis.rst
|
||||
|
||||
pmt.1: pmt/man1.rst pmt/description.rst pmt/options.rst pmt/synopsis.rst
|
||||
rst2man $< >$@
|
||||
|
||||
config/datastore/config.rst: ${COMPILEDIR}/docgen
|
||||
${COMPILEDIR}/docgen datastore.cfg >$@
|
||||
|
||||
datastore.cfg.5: config/datastore/man5.rst config/datastore/config.rst config/datastore/format.rst
|
||||
rst2man $< >$@
|
||||
|
||||
config/user/config.rst: ${COMPILEDIR}/docgen
|
||||
${COMPILEDIR}/docgen user.cfg >$@
|
||||
|
||||
user.cfg.5: config/user/man5.rst config/user/config.rst config/user/format.rst
|
||||
rst2man $< >$@
|
||||
|
||||
config/remote/config.rst: ${COMPILEDIR}/docgen
|
||||
${COMPILEDIR}/docgen remote.cfg >$@
|
||||
|
||||
remote.cfg.5: config/remote/man5.rst config/remote/config.rst config/remote/format.rst
|
||||
rst2man $< >$@
|
||||
|
||||
config/sync/config.rst: ${COMPILEDIR}/docgen
|
||||
${COMPILEDIR}/docgen sync.cfg >$@
|
||||
|
||||
sync.cfg.5: config/sync/man5.rst config/sync/config.rst config/sync/format.rst
|
||||
rst2man $< >$@
|
||||
|
||||
config/verification/config.rst: ${COMPILEDIR}/docgen
|
||||
${COMPILEDIR}/docgen verification.cfg >$@
|
||||
|
||||
verification.cfg.5: config/verification/man5.rst config/verification/config.rst config/verification/format.rst
|
||||
rst2man $< >$@
|
||||
|
||||
config/acl/roles.rst: ${COMPILEDIR}/docgen
|
||||
${COMPILEDIR}/docgen "config::acl::Role" >$@
|
||||
|
||||
acl.cfg.5: config/acl/man5.rst config/acl/roles.rst config/acl/format.rst
|
||||
rst2man $< >$@
|
||||
|
||||
config/media-pool/config.rst: ${COMPILEDIR}/docgen
|
||||
${COMPILEDIR}/docgen media-pool.cfg >$@
|
||||
|
||||
media-pool.cfg.5: config/media-pool/man5.rst config/media-pool/config.rst config/media-pool/format.rst
|
||||
rst2man $< >$@
|
||||
|
||||
config/tape/config.rst: ${COMPILEDIR}/docgen
|
||||
${COMPILEDIR}/docgen tape.cfg >$@
|
||||
|
||||
tape.cfg.5: config/tape/man5.rst config/tape/config.rst config/tape/format.rst
|
||||
rst2man $< >$@
|
||||
|
||||
config/tape-job/config.rst: ${COMPILEDIR}/docgen
|
||||
${COMPILEDIR}/docgen tape-job.cfg >$@
|
||||
|
||||
tape-job.cfg.5: config/tape-job/man5.rst config/tape-job/config.rst config/tape-job/format.rst
|
||||
rst2man $< >$@
|
||||
|
||||
proxmox-tape/synopsis.rst: ${COMPILEDIR}/proxmox-tape
|
||||
${COMPILEDIR}/proxmox-tape printdoc > proxmox-tape/synopsis.rst
|
||||
|
||||
proxmox-tape.1: proxmox-tape/man1.rst proxmox-tape/description.rst proxmox-tape/synopsis.rst
|
||||
rst2man $< >$@
|
||||
|
||||
proxmox-backup-client/synopsis.rst: ${COMPILEDIR}/proxmox-backup-client
|
||||
${COMPILEDIR}/proxmox-backup-client printdoc > proxmox-backup-client/synopsis.rst
|
||||
|
||||
@ -101,14 +185,22 @@ onlinehelpinfo:
|
||||
$(SPHINXBUILD) -b proxmox-scanrefs $(ALLSPHINXOPTS) $(BUILDDIR)/scanrefs
|
||||
@echo "Build finished. OnlineHelpInfo.js is in $(BUILDDIR)/scanrefs."
|
||||
|
||||
api-viewer/apidata.js: ${COMPILEDIR}/docgen
|
||||
${COMPILEDIR}/docgen apidata.js >$@
|
||||
|
||||
api-viewer/apidoc.js: api-viewer/apidata.js api-viewer/PBSAPI.js
|
||||
cat api-viewer/apidata.js api-viewer/PBSAPI.js >$@
|
||||
|
||||
.PHONY: html
|
||||
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES}
|
||||
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES} ${API_VIEWER_SOURCES}
|
||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||
install -m 0644 custom.js custom.css images/proxmox-logo.svg $(BUILDDIR)/html/_static/
|
||||
install -dm 0755 $(BUILDDIR)/html/prune-simulator
|
||||
install -m 0644 ${PRUNE_SIMULATOR_FILES} $(BUILDDIR)/html/prune-simulator
|
||||
install -dm 0755 $(BUILDDIR)/html/lto-barcode
|
||||
install -m 0644 ${LTO_BARCODE_FILES} $(BUILDDIR)/html/lto-barcode
|
||||
install -dm 0755 $(BUILDDIR)/html/api-viewer
|
||||
install -m 0644 ${API_VIEWER_SOURCES} $(BUILDDIR)/html/api-viewer
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
||||
|
||||
@ -127,12 +219,14 @@ epub3: ${GENERATED_SYNOPSIS}
|
||||
@echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3."
|
||||
|
||||
clean:
|
||||
rm -r -f *~ *.1 ${BUILDDIR} ${GENERATED_SYNOPSIS}
|
||||
rm -r -f *~ *.1 ${BUILDDIR} ${GENERATED_SYNOPSIS} api-viewer/apidata.js
|
||||
|
||||
|
||||
install_manual_pages: ${MANUAL_PAGES}
|
||||
install_manual_pages: ${MAN1_PAGES} ${MAN5_PAGES}
|
||||
install -dm755 $(DESTDIR)$(MAN1DIR)
|
||||
for i in ${MANUAL_PAGES}; do install -m755 $$i $(DESTDIR)$(MAN1DIR)/ ; done
|
||||
for i in ${MAN1_PAGES}; do install -m755 $$i $(DESTDIR)$(MAN1DIR)/ ; done
|
||||
install -dm755 $(DESTDIR)$(MAN5DIR)
|
||||
for i in ${MAN5_PAGES}; do install -m755 $$i $(DESTDIR)$(MAN5DIR)/ ; done
|
||||
|
||||
install_html: html
|
||||
install -dm755 $(DESTDIR)$(DOCDIR)
|
||||
|
@ -90,7 +90,18 @@ class ReflabelMapper(Builder):
|
||||
if hasattr(node, 'expect_referenced_by_id') and len(node['ids']) > 1: # explicit labels
|
||||
filename = self.env.doc2path(docname)
|
||||
filename_html = re.sub('.rst', '.html', filename)
|
||||
labelid = node['ids'][1] # [0] is predefined by sphinx, we need [1] for explicit ones
|
||||
|
||||
# node['ids'][0] contains a normalized version of the
|
||||
# headline. If the ref and headline are the same
|
||||
# (normalized) sphinx will set the node['ids'][1] to a
|
||||
# generic id in the format `idX` where X is numeric. If the
|
||||
# ref and headline are not the same, the ref name will be
|
||||
# stored in node['ids'][1]
|
||||
if re.match('^id[0-9]*$', node['ids'][1]):
|
||||
labelid = node['ids'][0]
|
||||
else:
|
||||
labelid = node['ids'][1]
|
||||
|
||||
title = cast(nodes.title, node[0])
|
||||
logger.info('traversing section {}'.format(title.astext()))
|
||||
ref_name = getattr(title, 'rawsource', title.astext())
|
||||
|
511
docs/api-viewer/PBSAPI.js
Normal file
511
docs/api-viewer/PBSAPI.js
Normal file
@ -0,0 +1,511 @@
|
||||
// avoid errors when running without development tools
|
||||
if (!Ext.isDefined(Ext.global.console)) {
|
||||
var console = {
|
||||
dir: function() {},
|
||||
log: function() {}
|
||||
};
|
||||
}
|
||||
|
||||
Ext.onReady(function() {
|
||||
|
||||
Ext.define('pve-param-schema', {
|
||||
extend: 'Ext.data.Model',
|
||||
fields: [
|
||||
'name', 'type', 'typetext', 'description', 'verbose_description',
|
||||
'enum', 'minimum', 'maximum', 'minLength', 'maxLength',
|
||||
'pattern', 'title', 'requires', 'format', 'default',
|
||||
'disallow', 'extends', 'links',
|
||||
{
|
||||
name: 'optional',
|
||||
type: 'boolean'
|
||||
}
|
||||
]
|
||||
});
|
||||
|
||||
var store = Ext.define('pve-updated-treestore', {
|
||||
extend: 'Ext.data.TreeStore',
|
||||
model: Ext.define('pve-api-doc', {
|
||||
extend: 'Ext.data.Model',
|
||||
fields: [
|
||||
'path', 'info', 'text',
|
||||
]
|
||||
}),
|
||||
proxy: {
|
||||
type: 'memory',
|
||||
data: pbsapi
|
||||
},
|
||||
sorters: [{
|
||||
property: 'leaf',
|
||||
direction: 'ASC'
|
||||
}, {
|
||||
property: 'text',
|
||||
direction: 'ASC'
|
||||
}],
|
||||
filterer: 'bottomup',
|
||||
doFilter: function(node) {
|
||||
this.filterNodes(node, this.getFilters().getFilterFn(), true);
|
||||
},
|
||||
|
||||
filterNodes: function(node, filterFn, parentVisible) {
|
||||
var me = this,
|
||||
bottomUpFiltering = me.filterer === 'bottomup',
|
||||
match = filterFn(node) && parentVisible || (node.isRoot() && !me.getRootVisible()),
|
||||
childNodes = node.childNodes,
|
||||
len = childNodes && childNodes.length, i, matchingChildren;
|
||||
|
||||
if (len) {
|
||||
for (i = 0; i < len; ++i) {
|
||||
matchingChildren = me.filterNodes(childNodes[i], filterFn, match || bottomUpFiltering) || matchingChildren;
|
||||
}
|
||||
if (bottomUpFiltering) {
|
||||
match = matchingChildren || match;
|
||||
}
|
||||
}
|
||||
|
||||
node.set("visible", match, me._silentOptions);
|
||||
return match;
|
||||
},
|
||||
|
||||
}).create();
|
||||
|
||||
var render_description = function(value, metaData, record) {
|
||||
var pdef = record.data;
|
||||
|
||||
value = pdef.verbose_description || value;
|
||||
|
||||
// TODO: try to render asciidoc correctly
|
||||
|
||||
metaData.style = 'white-space:pre-wrap;'
|
||||
|
||||
return Ext.htmlEncode(value);
|
||||
};
|
||||
|
||||
var render_type = function(value, metaData, record) {
|
||||
var pdef = record.data;
|
||||
|
||||
return pdef['enum'] ? 'enum' : (pdef.type || 'string');
|
||||
};
|
||||
|
||||
var render_format = function(value, metaData, record) {
|
||||
var pdef = record.data;
|
||||
|
||||
metaData.style = 'white-space:normal;'
|
||||
|
||||
if (pdef.typetext)
|
||||
return Ext.htmlEncode(pdef.typetext);
|
||||
|
||||
if (pdef['enum'])
|
||||
return pdef['enum'].join(' | ');
|
||||
|
||||
if (pdef.format)
|
||||
return pdef.format;
|
||||
|
||||
if (pdef.pattern)
|
||||
return Ext.htmlEncode(pdef.pattern);
|
||||
|
||||
return '';
|
||||
};
|
||||
|
||||
var real_path = function(path) {
|
||||
return path.replace(/^.*\/_upgrade_(\/)?/, "/");
|
||||
};
|
||||
|
||||
var permission_text = function(permission) {
|
||||
let permhtml = "";
|
||||
|
||||
if (permission.user) {
|
||||
if (!permission.description) {
|
||||
if (permission.user === 'world') {
|
||||
permhtml += "Accessible without any authentication.";
|
||||
} else if (permission.user === 'all') {
|
||||
permhtml += "Accessible by all authenticated users.";
|
||||
} else {
|
||||
permhtml += 'Onyl accessible by user "' +
|
||||
permission.user + '"';
|
||||
}
|
||||
}
|
||||
} else if (permission.check) {
|
||||
permhtml += "<pre>Check: " +
|
||||
Ext.htmlEncode(Ext.JSON.encode(permission.check)) + "</pre>";
|
||||
} else if (permission.userParam) {
|
||||
permhtml += `<div>Check if user matches parameter '${permission.userParam}'`;
|
||||
} else if (permission.or) {
|
||||
permhtml += "<div>Or<div style='padding-left: 10px;'>";
|
||||
Ext.Array.each(permission.or, function(sub_permission) {
|
||||
permhtml += permission_text(sub_permission);
|
||||
})
|
||||
permhtml += "</div></div>";
|
||||
} else if (permission.and) {
|
||||
permhtml += "<div>And<div style='padding-left: 10px;'>";
|
||||
Ext.Array.each(permission.and, function(sub_permission) {
|
||||
permhtml += permission_text(sub_permission);
|
||||
})
|
||||
permhtml += "</div></div>";
|
||||
} else {
|
||||
//console.log(permission);
|
||||
permhtml += "Unknown systax!";
|
||||
}
|
||||
|
||||
return permhtml;
|
||||
};
|
||||
|
||||
var render_docu = function(data) {
|
||||
var md = data.info;
|
||||
|
||||
// console.dir(data);
|
||||
|
||||
var items = [];
|
||||
|
||||
var clicmdhash = {
|
||||
GET: 'get',
|
||||
POST: 'create',
|
||||
PUT: 'set',
|
||||
DELETE: 'delete'
|
||||
};
|
||||
|
||||
Ext.Array.each(['GET', 'POST', 'PUT', 'DELETE'], function(method) {
|
||||
var info = md[method];
|
||||
if (info) {
|
||||
|
||||
var usage = "";
|
||||
|
||||
usage += "<table><tr><td>HTTP: </td><td>"
|
||||
+ method + " " + real_path("/api2/json" + data.path) + "</td></tr>";
|
||||
|
||||
var sections = [
|
||||
{
|
||||
title: 'Description',
|
||||
html: Ext.htmlEncode(info.description),
|
||||
bodyPadding: 10
|
||||
},
|
||||
{
|
||||
title: 'Usage',
|
||||
html: usage,
|
||||
bodyPadding: 10
|
||||
}
|
||||
];
|
||||
|
||||
if (info.parameters && info.parameters.properties) {
|
||||
|
||||
var pstore = Ext.create('Ext.data.Store', {
|
||||
model: 'pve-param-schema',
|
||||
proxy: {
|
||||
type: 'memory'
|
||||
},
|
||||
groupField: 'optional',
|
||||
sorters: [
|
||||
{
|
||||
property: 'name',
|
||||
direction: 'ASC'
|
||||
}
|
||||
]
|
||||
});
|
||||
|
||||
Ext.Object.each(info.parameters.properties, function(name, pdef) {
|
||||
pdef.name = name;
|
||||
pstore.add(pdef);
|
||||
});
|
||||
|
||||
pstore.sort();
|
||||
|
||||
var groupingFeature = Ext.create('Ext.grid.feature.Grouping',{
|
||||
enableGroupingMenu: false,
|
||||
groupHeaderTpl: '<tpl if="groupValue">Optional</tpl><tpl if="!groupValue">Required</tpl>'
|
||||
});
|
||||
|
||||
sections.push({
|
||||
xtype: 'gridpanel',
|
||||
title: 'Parameters',
|
||||
features: [groupingFeature],
|
||||
store: pstore,
|
||||
viewConfig: {
|
||||
trackOver: false,
|
||||
stripeRows: true
|
||||
},
|
||||
columns: [
|
||||
{
|
||||
header: 'Name',
|
||||
dataIndex: 'name',
|
||||
flex: 1
|
||||
},
|
||||
{
|
||||
header: 'Type',
|
||||
dataIndex: 'type',
|
||||
renderer: render_type,
|
||||
flex: 1
|
||||
},
|
||||
{
|
||||
header: 'Default',
|
||||
dataIndex: 'default',
|
||||
flex: 1
|
||||
},
|
||||
{
|
||||
header: 'Format',
|
||||
dataIndex: 'type',
|
||||
renderer: render_format,
|
||||
flex: 2
|
||||
},
|
||||
{
|
||||
header: 'Description',
|
||||
dataIndex: 'description',
|
||||
renderer: render_description,
|
||||
flex: 6
|
||||
}
|
||||
]
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
if (info.returns) {
|
||||
|
||||
var retinf = info.returns;
|
||||
var rtype = retinf.type;
|
||||
if (!rtype && retinf.items)
|
||||
rtype = 'array';
|
||||
if (!rtype)
|
||||
rtype = 'object';
|
||||
|
||||
var rpstore = Ext.create('Ext.data.Store', {
|
||||
model: 'pve-param-schema',
|
||||
proxy: {
|
||||
type: 'memory'
|
||||
},
|
||||
groupField: 'optional',
|
||||
sorters: [
|
||||
{
|
||||
property: 'name',
|
||||
direction: 'ASC'
|
||||
}
|
||||
]
|
||||
});
|
||||
|
||||
var properties;
|
||||
if (rtype === 'array' && retinf.items.properties) {
|
||||
properties = retinf.items.properties;
|
||||
}
|
||||
|
||||
if (rtype === 'object' && retinf.properties) {
|
||||
properties = retinf.properties;
|
||||
}
|
||||
|
||||
Ext.Object.each(properties, function(name, pdef) {
|
||||
pdef.name = name;
|
||||
rpstore.add(pdef);
|
||||
});
|
||||
|
||||
rpstore.sort();
|
||||
|
||||
var groupingFeature = Ext.create('Ext.grid.feature.Grouping',{
|
||||
enableGroupingMenu: false,
|
||||
groupHeaderTpl: '<tpl if="groupValue">Optional</tpl><tpl if="!groupValue">Obligatory</tpl>'
|
||||
});
|
||||
var returnhtml;
|
||||
if (retinf.items) {
|
||||
returnhtml = '<pre>items: ' + Ext.htmlEncode(JSON.stringify(retinf.items, null, 4)) + '</pre>';
|
||||
}
|
||||
|
||||
if (retinf.properties) {
|
||||
returnhtml = returnhtml || '';
|
||||
returnhtml += '<pre>properties:' + Ext.htmlEncode(JSON.stringify(retinf.properties, null, 4)) + '</pre>';
|
||||
}
|
||||
|
||||
var rawSection = Ext.create('Ext.panel.Panel', {
|
||||
bodyPadding: '0px 10px 10px 10px',
|
||||
html: returnhtml,
|
||||
hidden: true
|
||||
});
|
||||
|
||||
sections.push({
|
||||
xtype: 'gridpanel',
|
||||
title: 'Returns: ' + rtype,
|
||||
features: [groupingFeature],
|
||||
store: rpstore,
|
||||
viewConfig: {
|
||||
trackOver: false,
|
||||
stripeRows: true
|
||||
},
|
||||
columns: [
|
||||
{
|
||||
header: 'Name',
|
||||
dataIndex: 'name',
|
||||
flex: 1
|
||||
},
|
||||
{
|
||||
header: 'Type',
|
||||
dataIndex: 'type',
|
||||
renderer: render_type,
|
||||
flex: 1
|
||||
},
|
||||
{
|
||||
header: 'Default',
|
||||
dataIndex: 'default',
|
||||
flex: 1
|
||||
},
|
||||
{
|
||||
header: 'Format',
|
||||
dataIndex: 'type',
|
||||
renderer: render_format,
|
||||
flex: 2
|
||||
},
|
||||
{
|
||||
header: 'Description',
|
||||
dataIndex: 'description',
|
||||
renderer: render_description,
|
||||
flex: 6
|
||||
}
|
||||
],
|
||||
bbar: [
|
||||
{
|
||||
xtype: 'button',
|
||||
text: 'Show RAW',
|
||||
handler: function(btn) {
|
||||
rawSection.setVisible(!rawSection.isVisible());
|
||||
btn.setText(rawSection.isVisible() ? 'Hide RAW' : 'Show RAW');
|
||||
}}
|
||||
]
|
||||
});
|
||||
|
||||
sections.push(rawSection);
|
||||
|
||||
|
||||
}
|
||||
|
||||
if (!data.path.match(/\/_upgrade_/)) {
|
||||
var permhtml = '';
|
||||
|
||||
if (!info.permissions) {
|
||||
permhtml = "Root only.";
|
||||
} else {
|
||||
if (info.permissions.description) {
|
||||
permhtml += "<div style='white-space:pre-wrap;padding-bottom:10px;'>" +
|
||||
Ext.htmlEncode(info.permissions.description) + "</div>";
|
||||
}
|
||||
permhtml += permission_text(info.permissions);
|
||||
}
|
||||
|
||||
// we do not have this information for PBS api
|
||||
//if (!info.allowtoken) {
|
||||
// permhtml += "<br />This API endpoint is not available for API tokens."
|
||||
//}
|
||||
|
||||
sections.push({
|
||||
title: 'Required permissions',
|
||||
bodyPadding: 10,
|
||||
html: permhtml
|
||||
});
|
||||
}
|
||||
|
||||
items.push({
|
||||
title: method,
|
||||
autoScroll: true,
|
||||
defaults: {
|
||||
border: false
|
||||
},
|
||||
items: sections
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
var ct = Ext.getCmp('docview');
|
||||
ct.setTitle("Path: " + real_path(data.path));
|
||||
ct.removeAll(true);
|
||||
ct.add(items);
|
||||
ct.setActiveTab(0);
|
||||
};
|
||||
|
||||
Ext.define('Ext.form.SearchField', {
|
||||
extend: 'Ext.form.field.Text',
|
||||
alias: 'widget.searchfield',
|
||||
|
||||
emptyText: 'Search...',
|
||||
|
||||
flex: 1,
|
||||
|
||||
inputType: 'search',
|
||||
listeners: {
|
||||
'change': function(){
|
||||
|
||||
var value = this.getValue();
|
||||
if (!Ext.isEmpty(value)) {
|
||||
store.filter({
|
||||
property: 'path',
|
||||
value: value,
|
||||
anyMatch: true
|
||||
});
|
||||
} else {
|
||||
store.clearFilter();
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
var tree = Ext.create('Ext.tree.Panel', {
|
||||
title: 'Resource Tree',
|
||||
tbar: [
|
||||
{
|
||||
xtype: 'searchfield',
|
||||
}
|
||||
],
|
||||
tools: [
|
||||
{
|
||||
type: 'expand',
|
||||
tooltip: 'Expand all',
|
||||
tooltipType: 'title',
|
||||
callback: (tree) => tree.expandAll(),
|
||||
},
|
||||
{
|
||||
type: 'collapse',
|
||||
tooltip: 'Collapse all',
|
||||
tooltipType: 'title',
|
||||
callback: (tree) => tree.collapseAll(),
|
||||
},
|
||||
],
|
||||
store: store,
|
||||
width: 200,
|
||||
region: 'west',
|
||||
split: true,
|
||||
margins: '5 0 5 5',
|
||||
rootVisible: false,
|
||||
listeners: {
|
||||
selectionchange: function(v, selections) {
|
||||
if (!selections[0])
|
||||
return;
|
||||
var rec = selections[0];
|
||||
render_docu(rec.data);
|
||||
location.hash = '#' + rec.data.path;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Ext.create('Ext.container.Viewport', {
|
||||
layout: 'border',
|
||||
renderTo: Ext.getBody(),
|
||||
items: [
|
||||
tree,
|
||||
{
|
||||
xtype: 'tabpanel',
|
||||
title: 'Documentation',
|
||||
id: 'docview',
|
||||
region: 'center',
|
||||
margins: '5 5 5 0',
|
||||
layout: 'fit',
|
||||
items: []
|
||||
}
|
||||
]
|
||||
});
|
||||
|
||||
var deepLink = function() {
|
||||
var path = window.location.hash.substring(1).replace(/\/\s*$/, '')
|
||||
var endpoint = store.findNode('path', path);
|
||||
|
||||
if (endpoint) {
|
||||
tree.getSelectionModel().select(endpoint);
|
||||
tree.expandPath(endpoint.getPath());
|
||||
render_docu(endpoint.data);
|
||||
}
|
||||
}
|
||||
window.onhashchange = deepLink;
|
||||
|
||||
deepLink();
|
||||
|
||||
});
|
13
docs/api-viewer/index.html
Normal file
13
docs/api-viewer/index.html
Normal file
@ -0,0 +1,13 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no">
|
||||
<title>Proxmox Backup Server API Documentation</title>
|
||||
|
||||
<link rel="stylesheet" type="text/css" href="extjs/theme-crisp/resources/theme-crisp-all.css">
|
||||
<script type="text/javascript" src="extjs/ext-all.js"></script>
|
||||
<script type="text/javascript" src="apidoc.js"></script>
|
||||
</head>
|
||||
<body></body>
|
||||
</html>
|
@ -60,33 +60,10 @@ Environment Variables
|
||||
Output Format
|
||||
-------------
|
||||
|
||||
Most commands support the ``--output-format`` parameter. It accepts
|
||||
the following values:
|
||||
|
||||
:``text``: Text format (default). Structured data is rendered as a table.
|
||||
|
||||
:``json``: JSON (single line).
|
||||
|
||||
:``json-pretty``: JSON (multiple lines, nicely formatted).
|
||||
.. include:: output-format.rst
|
||||
|
||||
|
||||
Please use the following environment variables to modify output behavior:
|
||||
|
||||
``PROXMOX_OUTPUT_FORMAT``
|
||||
Defines the default output format.
|
||||
|
||||
``PROXMOX_OUTPUT_NO_BORDER``
|
||||
If set (to any value), do not render table borders.
|
||||
|
||||
``PROXMOX_OUTPUT_NO_HEADER``
|
||||
If set (to any value), do not render table headers.
|
||||
|
||||
.. note:: The ``text`` format is designed to be human readable, and
|
||||
not meant to be parsed by automation tools. Please use the ``json``
|
||||
format if you need to process the output.
|
||||
|
||||
|
||||
.. _creating-backups:
|
||||
.. _client_creating_backups:
|
||||
|
||||
Creating Backups
|
||||
----------------
|
||||
@ -246,7 +223,7 @@ Restoring this backup will result in:
|
||||
. .. file2
|
||||
|
||||
|
||||
.. _encryption:
|
||||
.. _client_encryption:
|
||||
|
||||
Encryption
|
||||
----------
|
||||
@ -483,16 +460,15 @@ subdirectory and add the corresponding pattern to the list for subsequent restor
|
||||
all files in the archive matching the patterns to ``/target/path`` on the local
|
||||
host. This will scan the whole archive.
|
||||
|
||||
With ``restore /target/path`` you can restore the sub-archive given by the current
|
||||
working directory to the local target path ``/target/path`` on your host.
|
||||
By additionally passing a glob pattern with ``--pattern <glob>``, the restore is
|
||||
further limited to files matching the pattern.
|
||||
For example:
|
||||
The ``restore`` command can be used to restore all the files contained within
|
||||
the backup archive. This is most helpful when paired with the ``--pattern
|
||||
<glob>`` option, as it allows you to restore all files matching a specific
|
||||
pattern. For example, if you wanted to restore configuration files
|
||||
located in ``/etc``, you could do the following:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
pxar:/ > cd /etc/
|
||||
pxar:/etc/ > restore /target/ --pattern **/*.conf
|
||||
pxar:/ > restore target/ --pattern etc/**/*.conf
|
||||
...
|
||||
|
||||
The above will scan through all the directories below ``/etc`` and restore all
|
||||
@ -657,10 +633,10 @@ shows the list of existing snapshots and what actions prune would take.
|
||||
|
||||
.. note:: Neither the ``prune`` command nor the ``forget`` command free space
|
||||
in the chunk-store. The chunk-store still contains the data blocks. To free
|
||||
space you need to perform :ref:`garbage-collection`.
|
||||
space you need to perform :ref:`client_garbage-collection`.
|
||||
|
||||
|
||||
.. _garbage-collection:
|
||||
.. _client_garbage-collection:
|
||||
|
||||
Garbage Collection
|
||||
------------------
|
||||
@ -721,32 +697,34 @@ benchmark using the ``benchmark`` subcommand of ``proxmox-backup-client``:
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client benchmark
|
||||
Uploaded 656 chunks in 5 seconds.
|
||||
Time per request: 7659 microseconds.
|
||||
TLS speed: 547.60 MB/s
|
||||
SHA256 speed: 585.76 MB/s
|
||||
Compression speed: 1923.96 MB/s
|
||||
Decompress speed: 7885.24 MB/s
|
||||
AES256/GCM speed: 3974.03 MB/s
|
||||
Uploaded 1517 chunks in 5 seconds.
|
||||
Time per request: 3309 microseconds.
|
||||
TLS speed: 1267.41 MB/s
|
||||
SHA256 speed: 2066.73 MB/s
|
||||
Compression speed: 775.11 MB/s
|
||||
Decompress speed: 1233.35 MB/s
|
||||
AES256/GCM speed: 3688.27 MB/s
|
||||
Verify speed: 783.43 MB/s
|
||||
┌───────────────────────────────────┬─────────────────────┐
|
||||
│ Name │ Value │
|
||||
╞═══════════════════════════════════╪═════════════════════╡
|
||||
│ TLS (maximal backup upload speed) │ 547.60 MB/s (93%) │
|
||||
│ TLS (maximal backup upload speed) │ 1267.41 MB/s (103%) │
|
||||
├───────────────────────────────────┼─────────────────────┤
|
||||
│ SHA256 checksum computation speed │ 585.76 MB/s (28%) │
|
||||
│ SHA256 checksum computation speed │ 2066.73 MB/s (102%) │
|
||||
├───────────────────────────────────┼─────────────────────┤
|
||||
│ ZStd level 1 compression speed │ 1923.96 MB/s (89%) │
|
||||
│ ZStd level 1 compression speed │ 775.11 MB/s (103%) │
|
||||
├───────────────────────────────────┼─────────────────────┤
|
||||
│ ZStd level 1 decompression speed │ 7885.24 MB/s (98%) │
|
||||
│ ZStd level 1 decompression speed │ 1233.35 MB/s (103%) │
|
||||
├───────────────────────────────────┼─────────────────────┤
|
||||
│ AES256 GCM encryption speed │ 3974.03 MB/s (104%) │
|
||||
│ Chunk verification speed │ 783.43 MB/s (103%) │
|
||||
├───────────────────────────────────┼─────────────────────┤
|
||||
│ AES256 GCM encryption speed │ 3688.27 MB/s (101%) │
|
||||
└───────────────────────────────────┴─────────────────────┘
|
||||
|
||||
|
||||
.. note:: The percentages given in the output table correspond to a
|
||||
comparison against a Ryzen 7 2700X. The TLS test connects to the
|
||||
local host, so there is no network involved.
|
||||
|
||||
You can also pass the ``--output-format`` parameter to output stats in ``json``,
|
||||
rather than the default table format.
|
||||
|
||||
|
||||
|
@ -1,19 +1,140 @@
|
||||
Backup Protocol
|
||||
===============
|
||||
|
||||
.. todo:: add introduction to HTTP2 based backup protocols
|
||||
Proxmox Backup Server uses a REST based API. While the management
|
||||
interface uses normal HTTP, the actual backup and restore interface uses
|
||||
HTTP/2 for improved performance. Both HTTP and HTTP/2 are well known
|
||||
standards, so the following section assumes that you are familiar with
|
||||
how to use them.
|
||||
|
||||
|
||||
Backup Protocol API
|
||||
-------------------
|
||||
|
||||
.. todo:: describe backup writer protocol
|
||||
To start a new backup, the API call ``GET /api2/json/backup`` needs to
|
||||
be upgraded to a HTTP/2 connection using
|
||||
``proxmox-backup-protocol-v1`` as protocol name::
|
||||
|
||||
.. include:: backup-protocol-api.rst
|
||||
GET /api2/json/backup HTTP/1.1
|
||||
UPGRADE: proxmox-backup-protocol-v1
|
||||
|
||||
The server replies with HTTP 101 Switching Protocol status code,
|
||||
and you can then issue REST commands on that updated HTTP/2 connection.
|
||||
|
||||
The backup protocol allows you to upload three different kinds of files:
|
||||
|
||||
- Chunks and blobs (binary data)
|
||||
|
||||
- Fixed Indexes (List of chunks with fixed size)
|
||||
|
||||
- Dynamic Indexes (List of chunk with variable size)
|
||||
|
||||
The following section gives a short introduction on how to upload such
|
||||
files. Please use the `API Viewer <api-viewer/index.html>`_ for
|
||||
details about available REST commands.
|
||||
|
||||
|
||||
Reader Protocol API
|
||||
-------------------
|
||||
Upload Blobs
|
||||
~~~~~~~~~~~~
|
||||
|
||||
.. todo:: describe backup reader protocol
|
||||
Uploading blobs is done using ``POST /blob``. The HTTP body contains the
|
||||
data encoded as :ref:`Data Blob <data-blob-format>`.
|
||||
|
||||
.. include:: reader-protocol-api.rst
|
||||
The file name needs to end with ``.blob``, and is automatically added
|
||||
to the backup manifest.
|
||||
|
||||
|
||||
Upload Chunks
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
Chunks belong to an index, so you first need to open an index (see
|
||||
below). After that, you can upload chunks using ``POST /fixed_chunk``
|
||||
and ``POST /dynamic_chunk``. The HTTP body contains the chunk data
|
||||
encoded as :ref:`Data Blob <data-blob-format>`.
|
||||
|
||||
|
||||
Upload Fixed Indexes
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Fixed indexes are used to store VM image data. The VM image is split
|
||||
into equally sized chunks, which are uploaded individually. The index
|
||||
file simply contains a list of chunk digests.
|
||||
|
||||
You create a fixed index with ``POST /fixed_index``. Then upload
|
||||
chunks with ``POST /fixed_chunk``, and append them to the index with
|
||||
``PUT /fixed_index``. When finished, you need to close the index using
|
||||
``POST /fixed_close``.
|
||||
|
||||
The file name needs to end with ``.fidx``, and is automatically added
|
||||
to the backup manifest.
|
||||
|
||||
|
||||
Upload Dynamic Indexes
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Dynamic indexes are used to store file archive data. The archive data
|
||||
is split into dynamically sized chunks, which are uploaded
|
||||
individually. The index file simply contains a list of chunk digests
|
||||
and offsets.
|
||||
|
||||
You create a dynamic sized index with ``POST /dynamic_index``. Then
|
||||
upload chunks with ``POST /dynamic_chunk``, and append them to the index with
|
||||
``PUT /dynamic_index``. When finished, you need to close the index using
|
||||
``POST /dynamic_close``.
|
||||
|
||||
The file name needs to end with ``.didx``, and is automatically added
|
||||
to the backup manifest.
|
||||
|
||||
Finish Backup
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
Once you have uploaded all data, you need to call ``POST
|
||||
/finish``. This commits all data and ends the backup protocol.
|
||||
|
||||
|
||||
Restore/Reader Protocol API
|
||||
---------------------------
|
||||
|
||||
To start a new reader, the API call ``GET /api2/json/reader`` needs to
|
||||
be upgraded to a HTTP/2 connection using
|
||||
``proxmox-backup-reader-protocol-v1`` as protocol name::
|
||||
|
||||
GET /api2/json/reader HTTP/1.1
|
||||
UPGRADE: proxmox-backup-reader-protocol-v1
|
||||
|
||||
The server replies with HTTP 101 Switching Protocol status code,
|
||||
and you can then issue REST commands on that updated HTTP/2 connection.
|
||||
|
||||
The reader protocol allows you to download three different kinds of files:
|
||||
|
||||
- Chunks and blobs (binary data)
|
||||
|
||||
- Fixed Indexes (List of chunks with fixed size)
|
||||
|
||||
- Dynamic Indexes (List of chunk with variable size)
|
||||
|
||||
The following section gives a short introduction on how to download such
|
||||
files. Please use the `API Viewer <api-viewer/index.html>`_ for details about
|
||||
available REST commands.
|
||||
|
||||
|
||||
Download Blobs
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
Downloading blobs is done using ``GET /download``. The HTTP body contains the
|
||||
data encoded as :ref:`Data Blob <data-blob-format>`.
|
||||
|
||||
|
||||
Download Chunks
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
Downloading chunks is done using ``GET /chunk``. The HTTP body contains the
|
||||
data encoded as :ref:`Data Blob <data-blob-format>`.
|
||||
|
||||
|
||||
Download Index Files
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Downloading index files is done using ``GET /download``. The HTTP body
|
||||
contains the data encoded as :ref:`Fixed Index <fixed-index-format>`
|
||||
or :ref:`Dynamic Index <dynamic-index-format>`.
|
||||
|
@ -1,5 +1,4 @@
|
||||
|
||||
.. _calendar-events:
|
||||
.. _calendar-event-scheduling:
|
||||
|
||||
Calendar Events
|
||||
===============
|
||||
|
16
docs/conf.py
16
docs/conf.py
@ -107,10 +107,8 @@ today_fmt = '%A, %d %B %Y'
|
||||
# This patterns also effect to html_static_path and html_extra_path
|
||||
exclude_patterns = [
|
||||
'_build', 'Thumbs.db', '.DS_Store',
|
||||
'proxmox-backup-client/man1.rst',
|
||||
'proxmox-backup-manager/man1.rst',
|
||||
'proxmox-backup-proxy/man1.rst',
|
||||
'pxar/man1.rst',
|
||||
'*/man1.rst',
|
||||
'config/*/man5.rst',
|
||||
'epilog.rst',
|
||||
'pbs-copyright.rst',
|
||||
'local-zfs.rst'
|
||||
@ -171,6 +169,7 @@ html_theme_options = {
|
||||
'extra_nav_links': {
|
||||
'Proxmox Homepage': 'https://proxmox.com',
|
||||
'PDF': 'proxmox-backup.pdf',
|
||||
'API Viewer' : 'api-viewer/index.html',
|
||||
'Prune Simulator' : 'prune-simulator/index.html',
|
||||
'LTO Barcode Generator' : 'lto-barcode/index.html',
|
||||
},
|
||||
@ -246,10 +245,8 @@ html_js_files = [
|
||||
#
|
||||
# html_last_updated_fmt = None
|
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||
# typographically correct entities.
|
||||
#
|
||||
# html_use_smartypants = True
|
||||
# We need to disable smartquotes, else Option Lists do not display long options
|
||||
smartquotes = False
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
@ -467,3 +464,6 @@ epub_exclude_files = ['search.html']
|
||||
# If false, no index is generated.
|
||||
#
|
||||
# epub_use_index = True
|
||||
|
||||
# use local mathjax package, symlink comes from debian/proxmox-backup-docs.links
|
||||
mathjax_path = "mathjax/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
|
||||
|
22
docs/config/acl/format.rst
Normal file
22
docs/config/acl/format.rst
Normal file
@ -0,0 +1,22 @@
|
||||
This file contains the access control list for the Proxmox Backup
|
||||
Server API.
|
||||
|
||||
Each line starts with ``acl:``, followed by 4 additional values
|
||||
separated by collon.
|
||||
|
||||
:propagate: Propagate permissions down the hierarchy
|
||||
|
||||
:path: The object path
|
||||
|
||||
:User/Token: List of users and token
|
||||
|
||||
:Role: List of assigned roles
|
||||
|
||||
Here is an example list::
|
||||
|
||||
acl:1:/:root@pam!test:Admin
|
||||
acl:1:/datastore/store1:user1@pbs:DatastoreAdmin
|
||||
|
||||
|
||||
You can use the ``proxmox-backup-manager acl`` command to manipulate
|
||||
this file.
|
35
docs/config/acl/man5.rst
Normal file
35
docs/config/acl/man5.rst
Normal file
@ -0,0 +1,35 @@
|
||||
==========================
|
||||
acl.cfg
|
||||
==========================
|
||||
|
||||
.. include:: ../../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Access Control Configuration
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 5
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
The file /etc/proxmox-backup/acl.cfg is a configuration file for Proxmox
|
||||
Backup Server. It contains the access control configuration for the API.
|
||||
|
||||
File Format
|
||||
===========
|
||||
|
||||
.. include:: format.rst
|
||||
|
||||
|
||||
Roles
|
||||
=====
|
||||
|
||||
The following roles exist:
|
||||
|
||||
.. include:: roles.rst
|
||||
|
||||
|
||||
.. include:: ../../pbs-copyright.rst
|
18
docs/config/datastore/format.rst
Normal file
18
docs/config/datastore/format.rst
Normal file
@ -0,0 +1,18 @@
|
||||
The file contains a list of datastore configuration sections. Each
|
||||
section starts with a header ``datastore: <name>``, followed by the
|
||||
datastore configuration options.
|
||||
|
||||
::
|
||||
|
||||
datastore: <name1>
|
||||
path <path1>
|
||||
<option1> <value1>
|
||||
...
|
||||
|
||||
datastore: <name2>
|
||||
path <path2>
|
||||
...
|
||||
|
||||
|
||||
You can use the ``proxmox-backup-manager datastore`` command to manipulate
|
||||
this file.
|
33
docs/config/datastore/man5.rst
Normal file
33
docs/config/datastore/man5.rst
Normal file
@ -0,0 +1,33 @@
|
||||
==========================
|
||||
datastore.cfg
|
||||
==========================
|
||||
|
||||
.. include:: ../../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Datastore Configuration
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 5
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
The file /etc/proxmox-backup/datastore.cfg is a configuration file for Proxmox
|
||||
Backup Server. It contains the Datastore configuration.
|
||||
|
||||
File Format
|
||||
===========
|
||||
|
||||
.. include:: format.rst
|
||||
|
||||
|
||||
Options
|
||||
=======
|
||||
|
||||
.. include:: config.rst
|
||||
|
||||
|
||||
.. include:: ../../pbs-copyright.rst
|
13
docs/config/media-pool/format.rst
Normal file
13
docs/config/media-pool/format.rst
Normal file
@ -0,0 +1,13 @@
|
||||
Each entry starts with a header ``pool: <name>``, followed by the
|
||||
media pool configuration options.
|
||||
|
||||
::
|
||||
|
||||
pool: company1
|
||||
allocation always
|
||||
retention overwrite
|
||||
|
||||
pool: ...
|
||||
|
||||
|
||||
You can use the ``proxmox-tape pool`` command to manipulate this file.
|
35
docs/config/media-pool/man5.rst
Normal file
35
docs/config/media-pool/man5.rst
Normal file
@ -0,0 +1,35 @@
|
||||
==========================
|
||||
media-pool.cfg
|
||||
==========================
|
||||
|
||||
.. include:: ../../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Media Pool Configuration
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 5
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
The file /etc/proxmox-backup/media-pool.cfg is a configuration file
|
||||
for Proxmox Backup Server. It contains the media pool configuration
|
||||
for tape backups.
|
||||
|
||||
|
||||
File Format
|
||||
===========
|
||||
|
||||
.. include:: format.rst
|
||||
|
||||
|
||||
Options
|
||||
=======
|
||||
|
||||
.. include:: config.rst
|
||||
|
||||
|
||||
.. include:: ../../pbs-copyright.rst
|
17
docs/config/remote/format.rst
Normal file
17
docs/config/remote/format.rst
Normal file
@ -0,0 +1,17 @@
|
||||
This file contains information used to access remote servers.
|
||||
|
||||
Each entry starts with a header ``remote: <name>``, followed by the
|
||||
remote configuration options.
|
||||
|
||||
::
|
||||
|
||||
remote: server1
|
||||
host server1.local
|
||||
auth-id sync@pbs
|
||||
...
|
||||
|
||||
remote: ...
|
||||
|
||||
|
||||
You can use the ``proxmox-backup-manager remote`` command to manipulate
|
||||
this file.
|
35
docs/config/remote/man5.rst
Normal file
35
docs/config/remote/man5.rst
Normal file
@ -0,0 +1,35 @@
|
||||
==========================
|
||||
remote.cfg
|
||||
==========================
|
||||
|
||||
.. include:: ../../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Remote Server Configuration
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 5
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
The file /etc/proxmox-backup/remote.cfg is a configuration file for
|
||||
Proxmox Backup Server. It contains information about remote servers,
|
||||
usable for synchronization jobs.
|
||||
|
||||
|
||||
File Format
|
||||
===========
|
||||
|
||||
.. include:: format.rst
|
||||
|
||||
|
||||
Options
|
||||
=======
|
||||
|
||||
.. include:: config.rst
|
||||
|
||||
|
||||
.. include:: ../../pbs-copyright.rst
|
15
docs/config/sync/format.rst
Normal file
15
docs/config/sync/format.rst
Normal file
@ -0,0 +1,15 @@
|
||||
Each entry starts with a header ``sync: <name>``, followed by the
|
||||
job configuration options.
|
||||
|
||||
::
|
||||
|
||||
sync: job1
|
||||
store store1
|
||||
remote-store store1
|
||||
remote lina
|
||||
|
||||
sync: ...
|
||||
|
||||
|
||||
You can use the ``proxmox-backup-manager sync-job`` command to manipulate
|
||||
this file.
|
35
docs/config/sync/man5.rst
Normal file
35
docs/config/sync/man5.rst
Normal file
@ -0,0 +1,35 @@
|
||||
==========================
|
||||
sync.cfg
|
||||
==========================
|
||||
|
||||
.. include:: ../../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Synchronization Job Configuration
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 5
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
The file /etc/proxmox-backup/sync.cfg is a configuration file for
|
||||
Proxmox Backup Server. It contains the synchronization job
|
||||
configuration.
|
||||
|
||||
|
||||
File Format
|
||||
===========
|
||||
|
||||
.. include:: format.rst
|
||||
|
||||
|
||||
Options
|
||||
=======
|
||||
|
||||
.. include:: config.rst
|
||||
|
||||
|
||||
.. include:: ../../pbs-copyright.rst
|
16
docs/config/tape-job/format.rst
Normal file
16
docs/config/tape-job/format.rst
Normal file
@ -0,0 +1,16 @@
|
||||
Each entry starts with a header ``backup: <name>``, followed by the
|
||||
job configuration options.
|
||||
|
||||
::
|
||||
|
||||
backup: job1
|
||||
drive hh8
|
||||
pool p4
|
||||
store store3
|
||||
schedule daily
|
||||
|
||||
backup: ...
|
||||
|
||||
|
||||
You can use the ``proxmox-tape backup-job`` command to manipulate
|
||||
this file.
|
34
docs/config/tape-job/man5.rst
Normal file
34
docs/config/tape-job/man5.rst
Normal file
@ -0,0 +1,34 @@
|
||||
==========================
|
||||
tape-job.cfg
|
||||
==========================
|
||||
|
||||
.. include:: ../../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Tape Job Configuration
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 5
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
The file ``/etc/proxmox-backup/tape-job.cfg`` is a configuration file for
|
||||
Proxmox Backup Server. It contains the tape job configuration.
|
||||
|
||||
|
||||
File Format
|
||||
===========
|
||||
|
||||
.. include:: format.rst
|
||||
|
||||
|
||||
Options
|
||||
=======
|
||||
|
||||
.. include:: config.rst
|
||||
|
||||
|
||||
.. include:: ../../pbs-copyright.rst
|
22
docs/config/tape/format.rst
Normal file
22
docs/config/tape/format.rst
Normal file
@ -0,0 +1,22 @@
|
||||
Each drive configuration section starts with a header ``linux: <name>``,
|
||||
followed by the drive configuration options.
|
||||
|
||||
Tape changer configurations start with ``changer: <name>``,
|
||||
followed by the changer configuration options.
|
||||
|
||||
::
|
||||
|
||||
linux: hh8
|
||||
changer sl3
|
||||
path /dev/tape/by-id/scsi-10WT065325-nst
|
||||
|
||||
changer: sl3
|
||||
export-slots 14,15,16
|
||||
path /dev/tape/by-id/scsi-CJ0JBE0059
|
||||
|
||||
|
||||
You can use the ``proxmox-tape drive`` and ``proxmox-tape changer``
|
||||
commands to manipulate this file.
|
||||
|
||||
.. NOTE:: The ``virtual:`` drive type is experimental and only used
|
||||
for debugging.
|
33
docs/config/tape/man5.rst
Normal file
33
docs/config/tape/man5.rst
Normal file
@ -0,0 +1,33 @@
|
||||
==========================
|
||||
tape.cfg
|
||||
==========================
|
||||
|
||||
.. include:: ../../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Tape Drive and Changer Configuration
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 5
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
The file /etc/proxmox-backup/tape.cfg is a configuration file for Proxmox
|
||||
Backup Server. It contains the tape drive and changer configuration.
|
||||
|
||||
File Format
|
||||
===========
|
||||
|
||||
.. include:: format.rst
|
||||
|
||||
|
||||
Options
|
||||
=======
|
||||
|
||||
.. include:: config.rst
|
||||
|
||||
|
||||
.. include:: ../../pbs-copyright.rst
|
28
docs/config/user/format.rst
Normal file
28
docs/config/user/format.rst
Normal file
@ -0,0 +1,28 @@
|
||||
This file contains the list of API users and API tokens.
|
||||
|
||||
Each user configuration section starts with a header ``user: <name>``,
|
||||
followed by the user configuration options.
|
||||
|
||||
API token configuration starts with a header ``token:
|
||||
<userid!token_name>``, followed by the token configuration. The data
|
||||
used to authenticate tokens is stored in a separate file
|
||||
(``token.shadow``).
|
||||
|
||||
|
||||
::
|
||||
|
||||
user: root@pam
|
||||
comment Superuser
|
||||
email test@example.local
|
||||
...
|
||||
|
||||
token: root@pam!token1
|
||||
comment API test token
|
||||
enable true
|
||||
expire 0
|
||||
|
||||
user: ...
|
||||
|
||||
|
||||
You can use the ``proxmox-backup-manager user`` command to manipulate
|
||||
this file.
|
33
docs/config/user/man5.rst
Normal file
33
docs/config/user/man5.rst
Normal file
@ -0,0 +1,33 @@
|
||||
==========================
|
||||
user.cfg
|
||||
==========================
|
||||
|
||||
.. include:: ../../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
User Configuration
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 5
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
The file /etc/proxmox-backup/user.cfg is a configuration file for Proxmox
|
||||
Backup Server. It contains the user configuration.
|
||||
|
||||
File Format
|
||||
===========
|
||||
|
||||
.. include:: format.rst
|
||||
|
||||
|
||||
Options
|
||||
=======
|
||||
|
||||
.. include:: config.rst
|
||||
|
||||
|
||||
.. include:: ../../pbs-copyright.rst
|
16
docs/config/verification/format.rst
Normal file
16
docs/config/verification/format.rst
Normal file
@ -0,0 +1,16 @@
|
||||
Each entry starts with a header ``verification: <name>``, followed by the
|
||||
job configuration options.
|
||||
|
||||
::
|
||||
|
||||
verification: verify-store2
|
||||
ignore-verified true
|
||||
outdated-after 7
|
||||
schedule daily
|
||||
store store2
|
||||
|
||||
verification: ...
|
||||
|
||||
|
||||
You can use the ``proxmox-backup-manager verify-job`` command to manipulate
|
||||
this file.
|
35
docs/config/verification/man5.rst
Normal file
35
docs/config/verification/man5.rst
Normal file
@ -0,0 +1,35 @@
|
||||
==========================
|
||||
verification.cfg
|
||||
==========================
|
||||
|
||||
.. include:: ../../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Verification Job Configuration
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 5
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
The file /etc/proxmox-backup/verification.cfg is a configuration file for
|
||||
Proxmox Backup Server. It contains the verification job
|
||||
configuration.
|
||||
|
||||
|
||||
File Format
|
||||
===========
|
||||
|
||||
.. include:: format.rst
|
||||
|
||||
|
||||
Options
|
||||
=======
|
||||
|
||||
.. include:: config.rst
|
||||
|
||||
|
||||
.. include:: ../../pbs-copyright.rst
|
97
docs/configuration-files.rst
Normal file
97
docs/configuration-files.rst
Normal file
@ -0,0 +1,97 @@
|
||||
Configuration Files
|
||||
===================
|
||||
|
||||
All Proxmox Backup Server configuration files reside inside the directory
|
||||
``/etc/proxmox-backup/``.
|
||||
|
||||
|
||||
``acl.cfg``
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
File Format
|
||||
^^^^^^^^^^^
|
||||
|
||||
.. include:: config/acl/format.rst
|
||||
|
||||
|
||||
Roles
|
||||
^^^^^
|
||||
|
||||
The following roles exist:
|
||||
|
||||
.. include:: config/acl/roles.rst
|
||||
|
||||
|
||||
``datastore.cfg``
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
File Format
|
||||
^^^^^^^^^^^
|
||||
|
||||
.. include:: config/datastore/format.rst
|
||||
|
||||
|
||||
Options
|
||||
^^^^^^^
|
||||
|
||||
.. include:: config/datastore/config.rst
|
||||
|
||||
|
||||
``user.cfg``
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
File Format
|
||||
^^^^^^^^^^^
|
||||
|
||||
.. include:: config/user/format.rst
|
||||
|
||||
|
||||
Options
|
||||
^^^^^^^
|
||||
|
||||
.. include:: config/user/config.rst
|
||||
|
||||
|
||||
``remote.cfg``
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
File Format
|
||||
^^^^^^^^^^^
|
||||
|
||||
.. include:: config/remote/format.rst
|
||||
|
||||
|
||||
Options
|
||||
^^^^^^^
|
||||
|
||||
.. include:: config/remote/config.rst
|
||||
|
||||
|
||||
``sync.cfg``
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
File Format
|
||||
^^^^^^^^^^^
|
||||
|
||||
.. include:: config/sync/format.rst
|
||||
|
||||
|
||||
Options
|
||||
^^^^^^^
|
||||
|
||||
.. include:: config/sync/config.rst
|
||||
|
||||
|
||||
``verification.cfg``
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
File Format
|
||||
^^^^^^^^^^^
|
||||
|
||||
.. include:: config/verification/format.rst
|
||||
|
||||
|
||||
Options
|
||||
^^^^^^^
|
||||
|
||||
.. include:: config/verification/config.rst
|
@ -14,6 +14,10 @@ pre {
|
||||
padding: 5px 10px;
|
||||
}
|
||||
|
||||
div.topic {
|
||||
background-color: #FAFAFA;
|
||||
}
|
||||
|
||||
li a.current {
|
||||
font-weight: bold;
|
||||
border-bottom: 1px solid #000;
|
||||
@ -25,6 +29,23 @@ ul li.toctree-l1 > a {
|
||||
color: #000;
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul {
|
||||
color: #444;
|
||||
}
|
||||
div.sphinxsidebar ul ul {
|
||||
list-style: circle;
|
||||
}
|
||||
div.sphinxsidebar ul ul ul {
|
||||
list-style: square;
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul a code {
|
||||
font-weight: normal;
|
||||
}
|
||||
div.sphinxsidebar ul ul a {
|
||||
border-bottom: 1px dotted #CCC;
|
||||
}
|
||||
|
||||
div.sphinxsidebar form.search {
|
||||
margin-bottom: 5px;
|
||||
}
|
||||
|
@ -6,7 +6,113 @@ File Formats
|
||||
Proxmox File Archive Format (``.pxar``)
|
||||
---------------------------------------
|
||||
|
||||
|
||||
.. graphviz:: pxar-format-overview.dot
|
||||
|
||||
|
||||
.. _data-blob-format:
|
||||
|
||||
Data Blob Format (``.blob``)
|
||||
----------------------------
|
||||
|
||||
The data blob format is used to store small binary data. The magic number decides the exact format:
|
||||
|
||||
.. list-table::
|
||||
:widths: auto
|
||||
|
||||
* - ``[66, 171, 56, 7, 190, 131, 112, 161]``
|
||||
- unencrypted
|
||||
- uncompressed
|
||||
* - ``[49, 185, 88, 66, 111, 182, 163, 127]``
|
||||
- unencrypted
|
||||
- compressed
|
||||
* - ``[123, 103, 133, 190, 34, 45, 76, 240]``
|
||||
- encrypted
|
||||
- uncompressed
|
||||
* - ``[230, 89, 27, 191, 11, 191, 216, 11]``
|
||||
- encrypted
|
||||
- compressed
|
||||
|
||||
Compression algorithm is ``zstd``. Encryption cipher is ``AES_256_GCM``.
|
||||
|
||||
Unencrypted blobs use the following format:
|
||||
|
||||
.. list-table::
|
||||
:widths: auto
|
||||
|
||||
* - ``MAGIC: [u8; 8]``
|
||||
* - ``CRC32: [u8; 4]``
|
||||
* - ``Data: (max 16MiB)``
|
||||
|
||||
Encrypted blobs additionally contains a 16 byte IV, followed by a 16
|
||||
byte Authenticated Encyryption (AE) tag, followed by the encrypted
|
||||
data:
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - ``MAGIC: [u8; 8]``
|
||||
* - ``CRC32: [u8; 4]``
|
||||
* - ``ÌV: [u8; 16]``
|
||||
* - ``TAG: [u8; 16]``
|
||||
* - ``Data: (max 16MiB)``
|
||||
|
||||
|
||||
.. _fixed-index-format:
|
||||
|
||||
Fixed Index Format (``.fidx``)
|
||||
-------------------------------
|
||||
|
||||
All numbers are stored as little-endian.
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - ``MAGIC: [u8; 8]``
|
||||
- ``[47, 127, 65, 237, 145, 253, 15, 205]``
|
||||
* - ``uuid: [u8; 16]``,
|
||||
- Unique ID
|
||||
* - ``ctime: i64``,
|
||||
- Creation Time (epoch)
|
||||
* - ``index_csum: [u8; 32]``,
|
||||
- Sha256 over the index (without header) ``SHA256(digest1||digest2||...)``
|
||||
* - ``size: u64``,
|
||||
- Image size
|
||||
* - ``chunk_size: u64``,
|
||||
- Chunk size
|
||||
* - ``reserved: [u8; 4016]``,
|
||||
- overall header size is one page (4096 bytes)
|
||||
* - ``digest1: [u8; 32]``
|
||||
- first chunk digest
|
||||
* - ``digest2: [u8; 32]``
|
||||
- next chunk
|
||||
* - ...
|
||||
- next chunk ...
|
||||
|
||||
|
||||
.. _dynamic-index-format:
|
||||
|
||||
Dynamic Index Format (``.didx``)
|
||||
--------------------------------
|
||||
|
||||
All numbers are stored as little-endian.
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - ``MAGIC: [u8; 8]``
|
||||
- ``[28, 145, 78, 165, 25, 186, 179, 205]``
|
||||
* - ``uuid: [u8; 16]``,
|
||||
- Unique ID
|
||||
* - ``ctime: i64``,
|
||||
- Creation Time (epoch)
|
||||
* - ``index_csum: [u8; 32]``,
|
||||
- Sha256 over the index (without header) ``SHA256(offset1||digest1||offset2||digest2||...)``
|
||||
* - ``reserved: [u8; 4032]``,
|
||||
- Overall header size is one page (4096 bytes)
|
||||
* - ``offset1: u64``
|
||||
- End of first chunk
|
||||
* - ``digest1: [u8; 32]``
|
||||
- first chunk digest
|
||||
* - ``offset2: u64``
|
||||
- End of second chunk
|
||||
* - ``digest2: [u8; 32]``
|
||||
- second chunk digest
|
||||
* - ...
|
||||
- next chunk offset/digest
|
||||
|
@ -129,7 +129,7 @@ top panel to view:
|
||||
* **Content**: Information on the datastore's backup groups and their respective
|
||||
contents
|
||||
* **Prune & GC**: Schedule :ref:`pruning <backup-pruning>` and :ref:`garbage
|
||||
collection <garbage-collection>` operations, and run garbage collection
|
||||
collection <client_garbage-collection>` operations, and run garbage collection
|
||||
manually
|
||||
* **Sync Jobs**: Create, manage and run :ref:`syncjobs` from remote servers
|
||||
* **Verify Jobs**: Create, manage and run :ref:`maintenance_verification` jobs on the
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
Welcome to the Proxmox Backup documentation!
|
||||
============================================
|
||||
| Copyright (C) 2019-2020 Proxmox Server Solutions GmbH
|
||||
| Copyright (C) 2019-2021 Proxmox Server Solutions GmbH
|
||||
| Version |version| -- |today|
|
||||
|
||||
Permission is granted to copy, distribute and/or modify this document under the
|
||||
@ -45,6 +45,7 @@ in the section entitled "GNU Free Documentation License".
|
||||
:caption: Appendix
|
||||
|
||||
command-syntax.rst
|
||||
configuration-files.rst
|
||||
file-formats.rst
|
||||
backup-protocol.rst
|
||||
calendarevents.rst
|
||||
|
@ -15,7 +15,7 @@ encryption (AE_). Using :term:`Rust` as the implementation language guarantees h
|
||||
performance, low resource usage, and a safe, high-quality codebase.
|
||||
|
||||
Proxmox Backup uses state of the art cryptography for both client-server
|
||||
communication and backup content :ref:`encryption <encryption>`. All
|
||||
communication and backup content :ref:`encryption <client_encryption>`. All
|
||||
client-server communication uses `TLS
|
||||
<https://en.wikipedia.org/wiki/Transport_Layer_Security>`_, and backup data can
|
||||
be encrypted on the client-side before sending, making it safer to back up data
|
||||
|
@ -4,7 +4,7 @@
|
||||
// IBM LTO Ultrium Cartridge Label Specification
|
||||
// http://www-01.ibm.com/support/docview.wss?uid=ssg1S7000429
|
||||
|
||||
let code39_codes = {
|
||||
const code39_codes = {
|
||||
"1": ['B', 's', 'b', 'S', 'b', 's', 'b', 's', 'B'],
|
||||
"A": ['B', 's', 'b', 's', 'b', 'S', 'b', 's', 'B'],
|
||||
"K": ['B', 's', 'b', 's', 'b', 's', 'b', 'S', 'B'],
|
||||
@ -53,10 +53,10 @@ let code39_codes = {
|
||||
"0": ['b', 's', 'b', 'S', 'B', 's', 'B', 's', 'b'],
|
||||
"J": ['b', 's', 'b', 's', 'B', 'S', 'B', 's', 'b'],
|
||||
"T": ['b', 's', 'b', 's', 'B', 's', 'B', 'S', 'b'],
|
||||
"*": ['b', 'S', 'b', 's', 'B', 's', 'B', 's', 'b']
|
||||
"*": ['b', 'S', 'b', 's', 'B', 's', 'B', 's', 'b'],
|
||||
};
|
||||
|
||||
let colors = [
|
||||
const colors = [
|
||||
'#BB282E',
|
||||
'#FAE54A',
|
||||
'#9AC653',
|
||||
@ -66,25 +66,22 @@ let colors = [
|
||||
'#E27B99',
|
||||
'#67A945',
|
||||
'#F6B855',
|
||||
'#705A81'
|
||||
'#705A81',
|
||||
];
|
||||
|
||||
let lto_label_width = 70;
|
||||
let lto_label_height = 17;
|
||||
const lto_label_width = 70;
|
||||
const lto_label_height = 16.9;
|
||||
|
||||
function foreach_label(page_layout, callback) {
|
||||
|
||||
let count = 0;
|
||||
let row = 0;
|
||||
let height = page_layout.margin_top;
|
||||
|
||||
while ((height + page_layout.label_height) <= page_layout.page_height) {
|
||||
|
||||
let column = 0;
|
||||
let width = page_layout.margin_left;
|
||||
|
||||
while ((width + page_layout.label_width) <= page_layout.page_width) {
|
||||
|
||||
callback(column, row, count, width, height);
|
||||
count += 1;
|
||||
|
||||
@ -97,11 +94,9 @@ function foreach_label(page_layout, callback) {
|
||||
height += page_layout.label_height;
|
||||
height += page_layout.row_spacing;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
function compute_max_labels(page_layout) {
|
||||
|
||||
let max_labels = 0;
|
||||
foreach_label(page_layout, function() { max_labels += 1; });
|
||||
return max_labels;
|
||||
@ -110,10 +105,10 @@ function compute_max_labels(page_layout) {
|
||||
function svg_label(mode, label, label_type, pagex, pagey, label_borders) {
|
||||
let svg = "";
|
||||
|
||||
if (label.length != 6) {
|
||||
if (label.length !== 6) {
|
||||
throw "wrong label length";
|
||||
}
|
||||
if (label_type.length != 2) {
|
||||
if (label_type.length !== 2) {
|
||||
throw "wrong label_type length";
|
||||
}
|
||||
|
||||
@ -126,20 +121,22 @@ function svg_label(mode, label, label_type, pagex, pagey, label_borders) {
|
||||
let xpos = pagex + code_width;
|
||||
let height = 12;
|
||||
|
||||
let label_rect = `x='${pagex}' y='${pagey}' width='${lto_label_width}' height='${lto_label_height}'`;
|
||||
|
||||
if (mode === 'placeholder') {
|
||||
if (label_borders) {
|
||||
svg += `<rect class='unprintable' x='${pagex}' y='${pagey}' width='${lto_label_width}' height='${lto_label_height}' fill='none' style='stroke:black;stroke-width:0.1;'/>`;
|
||||
svg += `<rect class='unprintable' ${label_rect} fill='none' style='stroke:black;stroke-width:0.1;'/>`;
|
||||
}
|
||||
return svg;
|
||||
}
|
||||
if (label_borders) {
|
||||
svg += `<rect x='${pagex}' y='${pagey}' width='${lto_label_width}' height='${lto_label_height}' fill='none' style='stroke:black;stroke-width:0.1;'/>`;
|
||||
svg += `<rect ${label_rect} fill='none' style='stroke:black;stroke-width:0.1;'/>`;
|
||||
}
|
||||
|
||||
if (mode === "color" || mode == "frame") {
|
||||
if (mode === "color" || mode === "frame") {
|
||||
let w = lto_label_width/8;
|
||||
let h = lto_label_height - height;
|
||||
for (var i = 0; i < 7; i++) {
|
||||
for (let i = 0; i < 7; i++) {
|
||||
let textx = w/2 + pagex + i*w;
|
||||
let texty = pagey;
|
||||
|
||||
@ -168,7 +165,7 @@ function svg_label(mode, label, label_type, pagex, pagey, label_borders) {
|
||||
|
||||
let raw_label = `*${label}${label_type}*`;
|
||||
|
||||
for (var i = 0; i < raw_label.length; i++) {
|
||||
for (let i = 0; i < raw_label.length; i++) {
|
||||
let letter = raw_label.charAt(i);
|
||||
|
||||
let code = code39_codes[letter];
|
||||
@ -186,7 +183,6 @@ function svg_label(mode, label, label_type, pagex, pagey, label_borders) {
|
||||
}
|
||||
|
||||
for (let c of code) {
|
||||
|
||||
if (c === 's') {
|
||||
xpos += small;
|
||||
continue;
|
||||
@ -241,7 +237,6 @@ function printBarcodePage() {
|
||||
}
|
||||
|
||||
function generate_barcode_page(target_id, page_layout, label_list, calibration) {
|
||||
|
||||
let svg = svg_page_header(page_layout.page_width, page_layout.page_height);
|
||||
|
||||
let c = calibration;
|
||||
@ -255,7 +250,6 @@ function generate_barcode_page(target_id, page_layout, label_list, calibration)
|
||||
svg += '>';
|
||||
|
||||
foreach_label(page_layout, function(column, row, count, xpos, ypos) {
|
||||
|
||||
if (count >= label_list.length) { return; }
|
||||
|
||||
let item = label_list[count];
|
||||
@ -297,12 +291,11 @@ function setupPrintFrame(frame, page_width, page_height) {
|
||||
}
|
||||
|
||||
function generate_calibration_page(target_id, page_layout, calibration) {
|
||||
|
||||
let frame = document.getElementById(target_id);
|
||||
|
||||
setupPrintFrame(frame, page_layout.page_width, page_layout.page_height);
|
||||
|
||||
let svg = svg_page_header( page_layout.page_width, page_layout.page_height);
|
||||
let svg = svg_page_header(page_layout.page_width, page_layout.page_height);
|
||||
|
||||
svg += "<defs>";
|
||||
svg += "<marker id='endarrow' markerWidth='10' markerHeight='7' ";
|
||||
|
@ -4,7 +4,7 @@ Ext.define('LabelList', {
|
||||
|
||||
plugins: {
|
||||
ptype: 'cellediting',
|
||||
clicksToEdit: 1
|
||||
clicksToEdit: 1,
|
||||
},
|
||||
|
||||
selModel: 'cellmodel',
|
||||
@ -44,7 +44,7 @@ Ext.define('LabelList', {
|
||||
xtype: 'prefixfield',
|
||||
allowBlank: false,
|
||||
},
|
||||
renderer: function (value, metaData, record) {
|
||||
renderer: function(value, metaData, record) {
|
||||
console.log(record);
|
||||
if (record.data.mode === 'placeholder') {
|
||||
return "-";
|
||||
@ -60,7 +60,7 @@ Ext.define('LabelList', {
|
||||
xtype: 'ltoTapeType',
|
||||
allowBlank: false,
|
||||
},
|
||||
renderer: function (value, metaData, record) {
|
||||
renderer: function(value, metaData, record) {
|
||||
console.log(record);
|
||||
if (record.data.mode === 'placeholder') {
|
||||
return "-";
|
||||
@ -133,7 +133,7 @@ Ext.define('LabelList', {
|
||||
handler: function(grid, rowIndex) {
|
||||
grid.getStore().removeAt(rowIndex);
|
||||
},
|
||||
}
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
|
@ -4,7 +4,6 @@ if (Ext.isFirefox) {
|
||||
}
|
||||
|
||||
function draw_labels(target_id, label_list, page_layout, calibration) {
|
||||
|
||||
let max_labels = compute_max_labels(page_layout);
|
||||
|
||||
let count_fixed = 0;
|
||||
@ -44,20 +43,16 @@ function draw_labels(target_id, label_list, page_layout, calibration) {
|
||||
count = fill_size;
|
||||
}
|
||||
rest -= count;
|
||||
} else {
|
||||
if (item.end <= item.start) {
|
||||
} else if (item.end <= item.start) {
|
||||
count = 1;
|
||||
} else {
|
||||
count = (item.end - item.start) + 1;
|
||||
}
|
||||
}
|
||||
|
||||
for (j = 0; j < count; j++) {
|
||||
|
||||
let id = item.start + j;
|
||||
|
||||
if (item.prefix.length == 6) {
|
||||
|
||||
list.push({
|
||||
label: item.prefix,
|
||||
tape_type: item.tape_type,
|
||||
@ -66,9 +61,7 @@ function draw_labels(target_id, label_list, page_layout, calibration) {
|
||||
});
|
||||
rest += count - j - 1;
|
||||
break;
|
||||
|
||||
} else {
|
||||
|
||||
let pad_len = 6-item.prefix.length;
|
||||
let label = item.prefix + id.toString().padStart(pad_len, 0);
|
||||
|
||||
@ -195,19 +188,18 @@ Ext.define('MainView', {
|
||||
border: false,
|
||||
flex: 1,
|
||||
scrollable: true,
|
||||
tools:[{
|
||||
tools: [{
|
||||
type: 'print',
|
||||
tooltip: 'Open Print Dialog',
|
||||
handler: function(event, toolEl, panelHeader) {
|
||||
printBarcodePage();
|
||||
}
|
||||
},
|
||||
}],
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
Ext.onReady(function() {
|
||||
|
||||
Ext.create('MainView', {
|
||||
renderTo: Ext.getBody(),
|
||||
});
|
||||
|
@ -31,8 +31,8 @@ Ext.define('PageCalibration', {
|
||||
scalex = 100/values.d_x;
|
||||
scaley = 100/values.d_y;
|
||||
|
||||
let offsetx = ((50*scalex) - values.s_x)/scalex;
|
||||
let offsety = ((50*scaley) - values.s_y)/scaley;
|
||||
let offsetx = ((50 - values.s_x) - (50*scalex - 50))/scalex;
|
||||
let offsety = ((50 - values.s_y) - (50*scaley - 50))/scaley;
|
||||
|
||||
return {
|
||||
scalex: scalex,
|
||||
@ -139,4 +139,4 @@ Ext.define('PageCalibration', {
|
||||
],
|
||||
},
|
||||
],
|
||||
})
|
||||
});
|
||||
|
@ -106,7 +106,7 @@ Ext.define('PageLayoutPanel', {
|
||||
xtype: 'numberfield',
|
||||
name: 'label_height',
|
||||
fieldLabel: 'Label height',
|
||||
minValue: 17,
|
||||
minValue: 15,
|
||||
allowBlank: false,
|
||||
value: 17,
|
||||
},
|
||||
|
@ -1,4 +1,4 @@
|
||||
let paper_sizes = {
|
||||
const paper_sizes = {
|
||||
a4: {
|
||||
comment: 'A4 (plain)',
|
||||
page_width: 210,
|
||||
@ -15,13 +15,13 @@ let paper_sizes = {
|
||||
page_width: 210,
|
||||
page_height: 297,
|
||||
label_width: 70,
|
||||
label_height: 17,
|
||||
label_height: 16.9,
|
||||
margin_left: 0,
|
||||
margin_top: 4,
|
||||
margin_top: 5,
|
||||
column_spacing: 0,
|
||||
row_spacing: 0,
|
||||
},
|
||||
}
|
||||
};
|
||||
|
||||
function paper_size_combo_data() {
|
||||
let data = [];
|
||||
|
@ -118,11 +118,11 @@ high, but you cannot recreate backup snapshots from the past.
|
||||
Garbage Collection
|
||||
------------------
|
||||
|
||||
You can monitor and run :ref:`garbage collection <garbage-collection>` on the
|
||||
You can monitor and run :ref:`garbage collection <client_garbage-collection>` on the
|
||||
Proxmox Backup Server using the ``garbage-collection`` subcommand of
|
||||
``proxmox-backup-manager``. You can use the ``start`` subcommand to manually
|
||||
start garbage collection on an entire datastore and the ``status`` subcommand to
|
||||
see attributes relating to the :ref:`garbage collection <garbage-collection>`.
|
||||
see attributes relating to the :ref:`garbage collection <client_garbage-collection>`.
|
||||
|
||||
This functionality can also be accessed in the GUI, by navigating to **Prune &
|
||||
GC** from the top panel. From here, you can edit the schedule at which garbage
|
||||
@ -142,7 +142,7 @@ Verification
|
||||
Proxmox Backup offers various verification options to ensure that backup data is
|
||||
intact. Verification is generally carried out through the creation of verify
|
||||
jobs. These are scheduled tasks that run verification at a given interval (see
|
||||
:ref:`calendar-events`). With these, you can set whether already verified
|
||||
:ref:`calendar-event-scheduling`). With these, you can set whether already verified
|
||||
snapshots are ignored, as well as set a time period, after which verified jobs
|
||||
are checked again. The interface for creating verify jobs can be found under the
|
||||
**Verify Jobs** tab of the datastore.
|
||||
|
@ -65,7 +65,7 @@ the ``proxmox-backup-manager sync-job`` command. The configuration information
|
||||
for sync jobs is stored at ``/etc/proxmox-backup/sync.cfg``. To create a new
|
||||
sync job, click the add button in the GUI, or use the ``create`` subcommand.
|
||||
After creating a sync job, you can either start it manually from the GUI or
|
||||
provide it with a schedule (see :ref:`calendar-events`) to run regularly.
|
||||
provide it with a schedule (see :ref:`calendar-event-scheduling`) to run regularly.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
|
24
docs/output-format.rst
Normal file
24
docs/output-format.rst
Normal file
@ -0,0 +1,24 @@
|
||||
Most commands producing output supports the ``--output-format``
|
||||
parameter. It accepts the following values:
|
||||
|
||||
:``text``: Text format (default). Structured data is rendered as a table.
|
||||
|
||||
:``json``: JSON (single line).
|
||||
|
||||
:``json-pretty``: JSON (multiple lines, nicely formatted).
|
||||
|
||||
|
||||
Also, the following environment variables can modify output behavior:
|
||||
|
||||
``PROXMOX_OUTPUT_FORMAT``
|
||||
Defines the default output format.
|
||||
|
||||
``PROXMOX_OUTPUT_NO_BORDER``
|
||||
If set (to any value), do not render table borders.
|
||||
|
||||
``PROXMOX_OUTPUT_NO_HEADER``
|
||||
If set (to any value), do not render table headers.
|
||||
|
||||
.. note:: The ``text`` format is designed to be human readable, and
|
||||
not meant to be parsed by automation tools. Please use the ``json``
|
||||
format if you need to process the output.
|
@ -1,7 +1,7 @@
|
||||
Copyright and Disclaimer
|
||||
========================
|
||||
|
||||
Copyright (C) 2007-2019 Proxmox Server Solutions GmbH
|
||||
Copyright (C) 2007-2021 Proxmox Server Solutions GmbH
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as
|
||||
|
2
docs/pmt/description.rst
Normal file
2
docs/pmt/description.rst
Normal file
@ -0,0 +1,2 @@
|
||||
The ``pmt`` command controls Linux tape devices.
|
||||
|
42
docs/pmt/man1.rst
Normal file
42
docs/pmt/man1.rst
Normal file
@ -0,0 +1,42 @@
|
||||
==========================
|
||||
pmt
|
||||
==========================
|
||||
|
||||
.. include:: ../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Control Linux Tape Devices
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 1
|
||||
|
||||
|
||||
Synopsis
|
||||
========
|
||||
|
||||
.. include:: synopsis.rst
|
||||
|
||||
|
||||
Common Options
|
||||
==============
|
||||
|
||||
.. include:: options.rst
|
||||
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
.. include:: description.rst
|
||||
|
||||
|
||||
ENVIRONMENT
|
||||
===========
|
||||
|
||||
:TAPE: If set, replaces the `--device` option.
|
||||
|
||||
:PROXMOX_TAPE_DRIVE: If set, replaces the `--drive` option.
|
||||
|
||||
|
||||
.. include:: ../pbs-copyright.rst
|
51
docs/pmt/options.rst
Normal file
51
docs/pmt/options.rst
Normal file
@ -0,0 +1,51 @@
|
||||
All command supports the following parameters to specify the tape device:
|
||||
|
||||
--device <path> Path to the Linux tape device
|
||||
|
||||
--drive <name> Use drive from Proxmox Backup Server configuration.
|
||||
|
||||
|
||||
Commands generating output supports the ``--output-format``
|
||||
parameter. It accepts the following values:
|
||||
|
||||
:``text``: Text format (default). Human readable.
|
||||
|
||||
:``json``: JSON (single line).
|
||||
|
||||
:``json-pretty``: JSON (multiple lines, nicely formatted).
|
||||
|
||||
|
||||
Device driver options can be specified as integer numbers (see
|
||||
``/usr/include/linux/mtio.h``), or using symbolic names:
|
||||
|
||||
:``buffer-writes``: Enable buffered writes
|
||||
|
||||
:``async-writes``: Enable async writes
|
||||
|
||||
:``read-ahead``: Use read-ahead for fixed block size
|
||||
|
||||
:``debugging``: Enable debugging if compiled into the driver
|
||||
|
||||
:``two-fm``: Write two file marks when closing the file
|
||||
|
||||
:``fast-mteom``: Space directly to eod (and lose file number)
|
||||
|
||||
:``auto-lock``: Automatically lock/unlock drive door
|
||||
|
||||
:``def-writes``: Defaults are meant only for writes
|
||||
|
||||
:``can-bsr``: Indicates that the drive can space backwards
|
||||
|
||||
:``no-blklims``: Drive does not support read block limits
|
||||
|
||||
:``can-partitions``: Drive can handle partitioned tapes
|
||||
|
||||
:``scsi2locical``: Seek and tell use SCSI-2 logical block addresses
|
||||
|
||||
:``sysv``: Enable the System V semantics
|
||||
|
||||
:``nowait``: Do not wait for rewind, etc. to complete
|
||||
|
||||
:``sili``: Enables setting the SILI bit in SCSI commands when reading
|
||||
in variable block mode to enhance performance when reading blocks
|
||||
shorter than the byte count
|
@ -1,6 +1,3 @@
|
||||
Description
|
||||
^^^^^^^^^^^
|
||||
|
||||
The ``pmtx`` command controls SCSI media changer devices (tape
|
||||
autoloader).
|
||||
|
||||
|
@ -18,11 +18,40 @@ Synopsis
|
||||
|
||||
.. include:: synopsis.rst
|
||||
|
||||
|
||||
Common Options
|
||||
==============
|
||||
|
||||
All command supports the following parameters to specify the changer device:
|
||||
|
||||
--device <path> Path to Linux generic SCSI device (e.g. '/dev/sg4')
|
||||
|
||||
--changer <name> Use changer from Proxmox Backup Server configuration.
|
||||
|
||||
|
||||
Commands generating output supports the ``--output-format``
|
||||
parameter. It accepts the following values:
|
||||
|
||||
:``text``: Text format (default). Human readable.
|
||||
|
||||
:``json``: JSON (single line).
|
||||
|
||||
:``json-pretty``: JSON (multiple lines, nicely formatted).
|
||||
|
||||
|
||||
Description
|
||||
============
|
||||
|
||||
.. include:: description.rst
|
||||
|
||||
|
||||
.. include:: ../pbs-copyright.rst
|
||||
ENVIRONMENT
|
||||
===========
|
||||
|
||||
:CHANGER: If set, replaces the `--device` option
|
||||
|
||||
:PROXMOX_TAPE_DRIVE: If set, use the Proxmox Backup Server
|
||||
configuration to find the associcated changer device.
|
||||
|
||||
|
||||
.. include:: ../pbs-copyright.rst
|
||||
|
@ -1,4 +1,4 @@
|
||||
This is just a test.
|
||||
|
||||
.. NOTE:: No further info.
|
||||
This tool implements a backup server client, i.e. it can connect to a
|
||||
backup servers to issue management commands and to create or restore
|
||||
backups.
|
||||
|
||||
|
@ -31,6 +31,12 @@ Those command are available when you start an intercative restore shell:
|
||||
.. include:: catalog-shell-synopsis.rst
|
||||
|
||||
|
||||
Common Options
|
||||
==============
|
||||
|
||||
.. include:: ../output-format.rst
|
||||
|
||||
|
||||
Description
|
||||
============
|
||||
|
||||
|
@ -1,4 +1,2 @@
|
||||
This is just a test.
|
||||
|
||||
.. NOTE:: No further info.
|
||||
|
||||
This tool exposes the whole backup server management API on the
|
||||
command line.
|
||||
|
@ -1,4 +1,5 @@
|
||||
This is just a test.
|
||||
|
||||
.. NOTE:: No further info.
|
||||
This daemon exposes the whole Proxmox Backup Server API on TCP port
|
||||
8007 using HTTPS. It runs as user ``backup`` and has very limited
|
||||
permissions. Operation requiring more permissions are forwarded to
|
||||
the local ``proxmox-backup`` service.
|
||||
|
||||
|
7
docs/proxmox-backup/description.rst
Normal file
7
docs/proxmox-backup/description.rst
Normal file
@ -0,0 +1,7 @@
|
||||
This daemon exposes the Proxmox Backup Server management API on
|
||||
``127.0.0.1:82``. It runs as ``root`` and has permission to do all
|
||||
privileged operations.
|
||||
|
||||
NOTE: The daemon listens to a local address only, so you cannot access
|
||||
it from outside. The ``proxmox-backup-proxy`` daemon exposes the API
|
||||
to the outside world.
|
41
docs/proxmox-backup/man1.rst
Normal file
41
docs/proxmox-backup/man1.rst
Normal file
@ -0,0 +1,41 @@
|
||||
==========================
|
||||
proxmox-backup
|
||||
==========================
|
||||
|
||||
.. include:: ../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Proxmox Backup Local API Server
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 1
|
||||
|
||||
|
||||
Synopsis
|
||||
==========
|
||||
|
||||
This daemon is normally started and managed as ``systemd`` service::
|
||||
|
||||
systemctl start proxmox-backup
|
||||
|
||||
systemctl stop proxmox-backup
|
||||
|
||||
systemctl status proxmox-backup
|
||||
|
||||
For debugging, you can start the daemon in foreground using::
|
||||
|
||||
proxmox-backup-api
|
||||
|
||||
.. NOTE:: You need to stop the service before starting the daemon in
|
||||
foreground.
|
||||
|
||||
|
||||
Description
|
||||
============
|
||||
|
||||
.. include:: description.rst
|
||||
|
||||
|
||||
.. include:: ../pbs-copyright.rst
|
1
docs/proxmox-tape/description.rst
Normal file
1
docs/proxmox-tape/description.rst
Normal file
@ -0,0 +1 @@
|
||||
This tool can configure and manage tape backups.
|
28
docs/proxmox-tape/man1.rst
Normal file
28
docs/proxmox-tape/man1.rst
Normal file
@ -0,0 +1,28 @@
|
||||
==========================
|
||||
proxmox-tape
|
||||
==========================
|
||||
|
||||
.. include:: ../epilog.rst
|
||||
|
||||
-------------------------------------------------------------
|
||||
Proxmox Tape Backup Command Line Tool
|
||||
-------------------------------------------------------------
|
||||
|
||||
:Author: |AUTHOR|
|
||||
:Version: Version |VERSION|
|
||||
:Manual section: 1
|
||||
|
||||
|
||||
Synopsis
|
||||
========
|
||||
|
||||
.. include:: synopsis.rst
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
.. include:: description.rst
|
||||
|
||||
|
||||
.. include:: ../pbs-copyright.rst
|
||||
|
@ -1,6 +1,3 @@
|
||||
Description
|
||||
^^^^^^^^^^^
|
||||
|
||||
``pxar`` is a command line utility to create and manipulate archives in the
|
||||
:ref:`pxar-format`.
|
||||
It is inspired by `casync file archive format
|
||||
@ -80,7 +77,7 @@ These files must contain one pattern per line, again later patterns win over
|
||||
previous ones.
|
||||
The patterns control file exclusions of files present within the given directory
|
||||
or further below it in the tree.
|
||||
The behavior is the same as described in :ref:`creating-backups`.
|
||||
The behavior is the same as described in :ref:`client_creating_backups`.
|
||||
|
||||
Extracting an Archive
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
@ -4,6 +4,9 @@ pxar
|
||||
|
||||
.. include:: ../epilog.rst
|
||||
|
||||
.. Avoid errors with sphinx ref role
|
||||
.. role:: ref(emphasis)
|
||||
|
||||
-------------------------------------------------------------
|
||||
Proxmox File Archive Command Line Tool
|
||||
-------------------------------------------------------------
|
||||
@ -25,4 +28,3 @@ Description
|
||||
|
||||
|
||||
.. include:: ../pbs-copyright.rst
|
||||
|
||||
|
@ -6,3 +6,9 @@ Service Daemons
|
||||
|
||||
.. include:: proxmox-backup-proxy/description.rst
|
||||
|
||||
|
||||
``proxmox-backup``
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. include:: proxmox-backup/description.rst
|
||||
|
||||
|
@ -119,8 +119,8 @@ directory on the filesystem. Each datastore also has associated retention
|
||||
settings of how many backup snapshots for each interval of ``hourly``,
|
||||
``daily``, ``weekly``, ``monthly``, ``yearly`` as well as a time-independent
|
||||
number of backups to keep in that store. :ref:`backup-pruning` and
|
||||
:ref:`garbage collection <garbage-collection>` can also be configured to run
|
||||
periodically based on a configured schedule (see :ref:`calendar-events`) per datastore.
|
||||
:ref:`garbage collection <client_garbage-collection>` can also be configured to run
|
||||
periodically based on a configured schedule (see :ref:`calendar-event-scheduling`) per datastore.
|
||||
|
||||
|
||||
.. _storage_datastore_create:
|
||||
|
@ -25,4 +25,7 @@ either explain things which are different on `Proxmox Backup`_, or
|
||||
tasks which are commonly used on `Proxmox Backup`_. For other topics,
|
||||
please refer to the standard Debian documentation.
|
||||
|
||||
|
||||
.. include:: local-zfs.rst
|
||||
|
||||
.. include:: services.rst
|
||||
|
@ -1,12 +1,22 @@
|
||||
.. _tape_backup:
|
||||
|
||||
Tape Backup
|
||||
===========
|
||||
|
||||
.. CAUTION:: Tape Backup is a technical preview feature, not meant for
|
||||
production usage. To enable the GUI, you need to issue the
|
||||
following command (as root user on the console):
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# touch /etc/proxmox-backup/tape.cfg
|
||||
|
||||
Proxmox tape backup provides an easy way to store datastore content
|
||||
onto magnetic tapes. This increases data safety because you get:
|
||||
|
||||
- an additional copy of the data
|
||||
- to a different media type (tape)
|
||||
- to an additional location (you can move tapes offsite)
|
||||
- to an additional location (you can move tapes off-site)
|
||||
|
||||
In most restore jobs, only data from the last backup job is restored.
|
||||
Restore requests further decline the older the data
|
||||
@ -17,8 +27,8 @@ years.
|
||||
|
||||
Tape backups do not provide random access to the stored data. Instead,
|
||||
you need to restore the data to disk before you can access it
|
||||
again. Also, if you store your tapes offsite (using some kind of tape
|
||||
vaulting service), you need to bring them onsite before you can do any
|
||||
again. Also, if you store your tapes off-site (using some kind of tape
|
||||
vaulting service), you need to bring them on-site before you can do any
|
||||
restore. So please consider that restores from tapes can take much
|
||||
longer than restores from disk.
|
||||
|
||||
@ -30,7 +40,7 @@ Tape Technology Primer
|
||||
|
||||
As of 2021, the only broadly available tape technology standard is
|
||||
`Linear Tape Open`_, and different vendors offers LTO Ultrium tape
|
||||
drives, autoloaders and LTO tape cartridges.
|
||||
drives, auto-loaders and LTO tape cartridges.
|
||||
|
||||
There are a few vendors offering proprietary drives with
|
||||
slight advantages in performance and capacity, but they have
|
||||
@ -51,14 +61,14 @@ In general, LTO tapes offer the following advantages:
|
||||
- Multiple vendors (for both media and drives)
|
||||
- Build in AES-CGM Encryption engine
|
||||
|
||||
Please note that `Proxmox Backup Server` already stores compressed
|
||||
data, so we do not need/use the tape compression feature.
|
||||
Note that `Proxmox Backup Server` already stores compressed data, so using the
|
||||
tape compression feature has no advantage.
|
||||
|
||||
|
||||
Supported Hardware
|
||||
------------------
|
||||
|
||||
Proxmox Backup Server supports `Linear Tape Open`_ genertion 4 (LTO4)
|
||||
Proxmox Backup Server supports `Linear Tape Open`_ generation 4 (LTO4)
|
||||
or later. In general, all SCSI2 tape drives supported by the Linux
|
||||
kernel should work, but feature like hardware encryptions needs LTO4
|
||||
or later.
|
||||
@ -70,7 +80,7 @@ tool. So any changer device supported by that tool should work.
|
||||
Drive Performance
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
Current LTO-8 tapes provide read/write speeds up to 360MB/s. This means,
|
||||
Current LTO-8 tapes provide read/write speeds up to 360 MB/s. This means,
|
||||
that it still takes a minimum of 9 hours to completely write or
|
||||
read a single tape (even at maximum speed).
|
||||
|
||||
@ -89,7 +99,7 @@ datastore is able to deliver that performance (e.g, by using SSDs).
|
||||
Terminology
|
||||
-----------
|
||||
|
||||
:Tape Labels: are used to uniquely indentify a tape. You normally use
|
||||
:Tape Labels: are used to uniquely identify a tape. You normally use
|
||||
some sticky paper labels and apply them on the front of the
|
||||
cartridge. We additionally store the label text magnetically on the
|
||||
tape (first file on tape).
|
||||
@ -102,7 +112,7 @@ Terminology
|
||||
|
||||
:Barcodes: are a special form of tape labels, which are electronically
|
||||
readable. Most LTO tape robots use an 8 character string encoded as
|
||||
`Code 39`_, as definded in the `LTO Ultrium Cartridge Label
|
||||
`Code 39`_, as defined in the `LTO Ultrium Cartridge Label
|
||||
Specification`_.
|
||||
|
||||
You can either buy such barcode labels from your cartridge vendor,
|
||||
@ -122,7 +132,7 @@ Terminology
|
||||
:Media Set: A group of continuously written tapes (all from the same
|
||||
media pool).
|
||||
|
||||
:Tape drive: The decive used to read and write data to the tape. There
|
||||
:Tape drive: The device used to read and write data to the tape. There
|
||||
are standalone drives, but drives often ship within tape libraries.
|
||||
|
||||
:Tape changer: A device which can change the tapes inside a tape drive
|
||||
@ -135,7 +145,7 @@ Terminology
|
||||
identify tape cartridges and an automated method for loading tapes
|
||||
(a robot).
|
||||
|
||||
People als call this 'autoloader', 'tape robot' or 'tape jukebox'.
|
||||
This is also commonly known as 'autoloader', 'tape robot' or 'tape jukebox'.
|
||||
|
||||
:Inventory: The inventory stores the list of known tapes (with
|
||||
additional status information).
|
||||
@ -162,6 +172,7 @@ Please note that you can configure anything using the graphical user
|
||||
interface or the command line interface. Both methods result in the
|
||||
same configuration.
|
||||
|
||||
.. _tape_changer_config:
|
||||
|
||||
Tape changers
|
||||
~~~~~~~~~~~~~
|
||||
@ -170,7 +181,9 @@ Tape changers (robots) are part of a `Tape Library`_. You can skip
|
||||
this step if you are using a standalone drive.
|
||||
|
||||
Linux is able to auto detect those devices, and you can get a list
|
||||
of available devices using::
|
||||
of available devices using:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape changer scan
|
||||
┌─────────────────────────────┬─────────┬──────────────┬────────┐
|
||||
@ -180,7 +193,9 @@ of available devices using::
|
||||
└─────────────────────────────┴─────────┴──────────────┴────────┘
|
||||
|
||||
In order to use that device with Proxmox, you need to create a
|
||||
configuration entry::
|
||||
configuration entry:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape changer create sl3 --path /dev/tape/by-id/scsi-CC2C52
|
||||
|
||||
@ -190,7 +205,9 @@ Where ``sl3`` is an arbitrary name you can choose.
|
||||
``/dev/tape/by-id/``. Names like ``/dev/sg0`` may point to a
|
||||
different device after reboot, and that is not what you want.
|
||||
|
||||
You can show the final configuration with::
|
||||
You can show the final configuration with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape changer config sl3
|
||||
┌──────┬─────────────────────────────┐
|
||||
@ -201,7 +218,9 @@ You can show the final configuration with::
|
||||
│ path │ /dev/tape/by-id/scsi-CC2C52 │
|
||||
└──────┴─────────────────────────────┘
|
||||
|
||||
Or simply list all configured changer devices::
|
||||
Or simply list all configured changer devices:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape changer list
|
||||
┌──────┬─────────────────────────────┬─────────┬──────────────┬────────────┐
|
||||
@ -213,7 +232,9 @@ Or simply list all configured changer devices::
|
||||
The Vendor, Model and Serial number are auto detected, but only shown
|
||||
if the device is online.
|
||||
|
||||
To test your setup, please query the status of the changer device with::
|
||||
To test your setup, please query the status of the changer device with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape changer status sl3
|
||||
┌───────────────┬──────────┬────────────┬─────────────┐
|
||||
@ -231,7 +252,7 @@ To test your setup, please query the status of the changer device with::
|
||||
└───────────────┴──────────┴────────────┴─────────────┘
|
||||
|
||||
Tape libraries usually provide some special import/export slots (also
|
||||
called "mail slots"). Tapes inside those slots are acessible from
|
||||
called "mail slots"). Tapes inside those slots are accessible from
|
||||
outside, making it easy to add/remove tapes to/from the library. Those
|
||||
tapes are considered to be "offline", so backup jobs will not use
|
||||
them. Those special slots are auto-detected and marked as
|
||||
@ -247,12 +268,16 @@ the status output.
|
||||
As a workaround, you can mark some of the normal slots as export
|
||||
slot. The software treats those slots like real ``import-export``
|
||||
slots, and the media inside those slots is considered to be 'offline'
|
||||
(not available for backup)::
|
||||
(not available for backup):
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape changer update sl3 --export-slots 15,16
|
||||
|
||||
After that, you can see those artificial ``import-export`` slots in
|
||||
the status output::
|
||||
the status output:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape changer status sl3
|
||||
┌───────────────┬──────────┬────────────┬─────────────┐
|
||||
@ -273,12 +298,15 @@ the status output::
|
||||
│ slot │ 14 │ │ │
|
||||
└───────────────┴──────────┴────────────┴─────────────┘
|
||||
|
||||
.. _tape_drive_config:
|
||||
|
||||
Tape drives
|
||||
~~~~~~~~~~~
|
||||
|
||||
Linux is able to auto detect tape drives, and you can get a list
|
||||
of available tape drives using::
|
||||
of available tape drives using:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape drive scan
|
||||
┌────────────────────────────────┬────────┬─────────────┬────────┐
|
||||
@ -288,7 +316,9 @@ of available tape drives using::
|
||||
└────────────────────────────────┴────────┴─────────────┴────────┘
|
||||
|
||||
In order to use that drive with Proxmox, you need to create a
|
||||
configuration entry::
|
||||
configuration entry:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape drive create mydrive --path /dev/tape/by-id/scsi-12345-nst
|
||||
|
||||
@ -297,15 +327,19 @@ configuration entry::
|
||||
different device after reboot, and that is not what you want.
|
||||
|
||||
If you have a tape library, you also need to set the associated
|
||||
changer device::
|
||||
changer device:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape drive update mydrive --changer sl3 --changer-drivenum 0
|
||||
|
||||
The ``--changer-drivenum`` is only necessary if the tape library
|
||||
includes more than one drive (The changer status command lists all
|
||||
drivenums).
|
||||
drive numbers).
|
||||
|
||||
You can show the final configuration with::
|
||||
You can show the final configuration with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape drive config mydrive
|
||||
┌─────────┬────────────────────────────────┐
|
||||
@ -321,7 +355,9 @@ You can show the final configuration with::
|
||||
.. NOTE:: The ``changer-drivenum`` value 0 is not stored in the
|
||||
configuration, because that is the default.
|
||||
|
||||
To list all configured drives use::
|
||||
To list all configured drives use:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape drive list
|
||||
┌──────────┬────────────────────────────────┬─────────┬────────┬─────────────┬────────┐
|
||||
@ -333,7 +369,9 @@ To list all configured drives use::
|
||||
The Vendor, Model and Serial number are auto detected, but only shown
|
||||
if the device is online.
|
||||
|
||||
For testing, you can simply query the drive status with::
|
||||
For testing, you can simply query the drive status with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape status --drive mydrive
|
||||
┌───────────┬────────────────────────┐
|
||||
@ -348,6 +386,8 @@ For testing, you can simply query the drive status with::
|
||||
mode). This is the default anyways.
|
||||
|
||||
|
||||
.. _tape_media_pool_config:
|
||||
|
||||
Media Pools
|
||||
~~~~~~~~~~~
|
||||
|
||||
@ -417,7 +457,7 @@ one media pool, so a job only uses tapes from that pool.
|
||||
For example, the value ``weekly`` (or ``Mon *-*-* 00:00:00``)
|
||||
will create a new set each week.
|
||||
|
||||
This balances between space efficency and media count.
|
||||
This balances between space efficiency and media count.
|
||||
|
||||
.. NOTE:: Retention period starts when the calendar event
|
||||
triggers.
|
||||
@ -426,7 +466,7 @@ one media pool, so a job only uses tapes from that pool.
|
||||
|
||||
- Required tape is offline (and you use a tape library).
|
||||
|
||||
- Current set contains damaged of retired tapes.
|
||||
- Current set contains damaged or retired tapes.
|
||||
|
||||
- Media pool encryption changed
|
||||
|
||||
@ -460,29 +500,36 @@ one media pool, so a job only uses tapes from that pool.
|
||||
will be double encrypted.
|
||||
|
||||
The password protected key is stored on each media, so it is
|
||||
possbible to `restore the key <restore_encryption_key_>`_ using the password. Please make sure
|
||||
possible to `restore the key <tape_restore_encryption_key_>`_ using the password. Please make sure
|
||||
you remember the password in case you need to restore the key.
|
||||
|
||||
|
||||
.. NOTE:: FIXME: Add note about global content namespace. (We do not store
|
||||
the source datastore, so it is impossible to distinguish
|
||||
store1:/vm/100 from store2:/vm/100. Please use different media
|
||||
pools if the source is from a different name space)
|
||||
.. NOTE:: We use global content namespace, i.e. we do not store the
|
||||
source datastore, so it is impossible to distinguish store1:/vm/100
|
||||
from store2:/vm/100. Please use different media pools if the
|
||||
sources are from different name spaces with conflicting names
|
||||
(E.g. if the sources are from different Proxmox VE clusters).
|
||||
|
||||
|
||||
The following command creates a new media pool::
|
||||
The following command creates a new media pool:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
// proxmox-tape pool create <name> --drive <string> [OPTIONS]
|
||||
|
||||
# proxmox-tape pool create daily --drive mydrive
|
||||
|
||||
|
||||
Additional option can be set later using the update command::
|
||||
Additional options can be set later using the update command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape pool update daily --allocation daily --retention 7days
|
||||
|
||||
|
||||
To list all configured pools use::
|
||||
To list all configured pools use:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape pool list
|
||||
┌───────┬──────────┬────────────┬───────────┬──────────┐
|
||||
@ -491,9 +538,85 @@ To list all configured pools use::
|
||||
│ daily │ mydrive │ daily │ 7days │ │
|
||||
└───────┴──────────┴────────────┴───────────┴──────────┘
|
||||
|
||||
.. _tape_backup_job_config:
|
||||
|
||||
Tape Jobs
|
||||
~~~~~~~~~
|
||||
Tape Backup Jobs
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
To automate tape backup, you can configure tape backup jobs which
|
||||
store datastore content to a media pool at a specific time
|
||||
schedule. Required settings are:
|
||||
|
||||
- ``store``: The datastore you want to backup
|
||||
|
||||
- ``pool``: The media pool - only tape cartridges from that pool are
|
||||
used.
|
||||
|
||||
- ``drive``: The tape drive.
|
||||
|
||||
- ``schedule``: Job schedule (see :ref:`calendar-event-scheduling`)
|
||||
|
||||
For example, to configure a tape backup job for datastore ``vmstore1``
|
||||
use:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape backup-job create job2 --store vmstore1 \
|
||||
--pool yourpool --drive yourdrive --schedule daily
|
||||
|
||||
Backup includes all snapshots from a backup group by default. You can
|
||||
set the ``latest-only`` flag to include only the latest snapshots:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape backup-job update job2 --latest-only
|
||||
|
||||
Backup jobs can use email to send tape request notifications or
|
||||
report errors. You can set the notification user with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape backup-job update job2 --notify-user root@pam
|
||||
|
||||
.. Note:: The email address is a property of the user (see :ref:`user_mgmt`).
|
||||
|
||||
It is sometimes useful to eject the tape from the drive after a
|
||||
backup. For a standalone drive, the ``eject-media`` option ejects the
|
||||
tape, making sure that the following backup cannot use the tape
|
||||
(unless someone manually loads the tape again). For tape libraries,
|
||||
this option unloads the tape to a free slot, which provides better
|
||||
dust protection than inside a drive:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape backup-job update job2 --eject-media
|
||||
|
||||
.. Note:: For failed jobs, the tape remains in the drive.
|
||||
|
||||
For tape libraries, the ``export-media`` options moves all tapes from
|
||||
the media set to an export slot, making sure that the following backup
|
||||
cannot use the tapes. An operator can pick up those tapes and move them
|
||||
to a vault.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape backup-job update job2 --export-media
|
||||
|
||||
.. Note:: The ``export-media`` option can be used to force the start
|
||||
of a new media set, because tapes from the current set are no
|
||||
longer online.
|
||||
|
||||
It is also possible to run backup jobs manually:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape backup-job run job2
|
||||
|
||||
To remove a job, please use:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape backup-job remove job2
|
||||
|
||||
|
||||
Administration
|
||||
@ -502,13 +625,17 @@ Administration
|
||||
Many sub-commands of the ``proxmox-tape`` command line tool take a
|
||||
parameter called ``--drive``, which specifies the tape drive you want
|
||||
to work on. For convenience, you can set that in an environment
|
||||
variable::
|
||||
variable:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# export PROXMOX_TAPE_DRIVE=mydrive
|
||||
|
||||
You can then omit the ``--drive`` parameter from the command. If the
|
||||
drive has an associated changer device, you may also omit the changer
|
||||
parameter from commands that needs a changer device, for example::
|
||||
parameter from commands that needs a changer device, for example:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape changer status
|
||||
|
||||
@ -533,16 +660,20 @@ Next, you need to write that same label text to the tape, so that the
|
||||
software can uniquely identify the tape too.
|
||||
|
||||
For a standalone drive, manually insert the new tape cartridge into the
|
||||
drive and run::
|
||||
drive and run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape label --changer-id <label-text> [--pool <pool-name>]
|
||||
|
||||
You may omit the ``--pool`` argument to allow the tape to be used by any pool.
|
||||
|
||||
.. Note:: For safety reasons, this command fails if the tape contain
|
||||
any data. If you want to overwrite it anways, erase the tape first.
|
||||
any data. If you want to overwrite it anyway, erase the tape first.
|
||||
|
||||
You can verify success by reading back the label::
|
||||
You can verify success by reading back the label:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape read-label
|
||||
┌─────────────────┬──────────────────────────────────────┐
|
||||
@ -566,7 +697,9 @@ You can verify success by reading back the label::
|
||||
|
||||
If you have a tape library, apply the sticky barcode label to the tape
|
||||
cartridges first. Then load those empty tapes into the library. You
|
||||
can then label all unlabeled tapes with a single command::
|
||||
can then label all unlabeled tapes with a single command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape barcode-label [--pool <pool-name>]
|
||||
|
||||
@ -574,7 +707,9 @@ can then label all unlabeled tapes with a single command::
|
||||
Run Tape Backups
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
To manually run a backup job use::
|
||||
To manually run a backup job use:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape backup <store> <pool> [OPTIONS]
|
||||
|
||||
@ -587,7 +722,7 @@ The following options are available:
|
||||
|
||||
--export-media-set Export media set upon job completion.
|
||||
|
||||
After a sucessful backup job, this moves all tapes from the used
|
||||
After a successful backup job, this moves all tapes from the used
|
||||
media set into import-export slots. The operator can then pick up
|
||||
those tapes and move them to a media vault.
|
||||
|
||||
@ -602,7 +737,9 @@ catalogs, you need to restore them first. Please note that you need
|
||||
the catalog to find your data, but restoring a complete media-set does
|
||||
not need media catalogs.
|
||||
|
||||
The following command shows the media content (from catalog)::
|
||||
The following command shows the media content (from catalog):
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape media content
|
||||
┌────────────┬──────┬──────────────────────────┬────────┬────────────────────────────────┬──────────────────────────────────────┐
|
||||
@ -615,7 +752,9 @@ The following command shows the media content (from catalog)::
|
||||
|
||||
|
||||
A restore job reads the data from the media set and moves it back to
|
||||
data disk (datastore)::
|
||||
data disk (datastore):
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
// proxmox-tape restore <media-set-uuid> <datastore>
|
||||
|
||||
@ -633,14 +772,18 @@ Restore Catalog
|
||||
Encryption Key Management
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Creating a new encryption key::
|
||||
Creating a new encryption key:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape key create --hint "tape pw 2020"
|
||||
Tape Encryption Key Password: **********
|
||||
Verify Password: **********
|
||||
"14:f8:79:b9:f5:13:e5:dc:bf:b6:f9:88:48:51:81:dc:79:bf:a0:22:68:47:d1:73:35:2d:b6:20:e1:7f:f5:0f"
|
||||
|
||||
List existing encryption keys::
|
||||
List existing encryption keys:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape key list
|
||||
┌───────────────────────────────────────────────────┬───────────────┐
|
||||
@ -649,7 +792,9 @@ List existing encryption keys::
|
||||
│ 14:f8:79:b9:f5:13:e5:dc: ... :b6:20:e1:7f:f5:0f │ tape pw 2020 │
|
||||
└───────────────────────────────────────────────────┴───────────────┘
|
||||
|
||||
To show encryption key details::
|
||||
To show encryption key details:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape key show 14:f8:79:b9:f5:13:e5:dc:...:b6:20:e1:7f:f5:0f
|
||||
┌─────────────┬───────────────────────────────────────────────┐
|
||||
@ -668,25 +813,29 @@ To show encryption key details::
|
||||
|
||||
The ``paperkey`` subcommand can be used to create a QR encoded
|
||||
version of a tape encryption key. The following command sends the output of the
|
||||
``paperkey`` command to a text file, for easy printing::
|
||||
``paperkey`` command to a text file, for easy printing:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
proxmox-tape key paperkey <fingerprint> --output-format text > qrkey.txt
|
||||
|
||||
|
||||
.. _restore_encryption_key:
|
||||
.. _tape_restore_encryption_key:
|
||||
|
||||
Restoring Encryption Keys
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
You can restore the encryption key from the tape, using the password
|
||||
used to generate the key. First, load the tape you want to restore
|
||||
into the drive. Then run::
|
||||
into the drive. Then run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape key restore
|
||||
Tape Encryption Key Password: ***********
|
||||
|
||||
If the password is correct, the key will get imported to the
|
||||
database. Further restore jobs automatically use any availbale key.
|
||||
database. Further restore jobs automatically use any available key.
|
||||
|
||||
|
||||
Tape Cleaning
|
||||
@ -698,7 +847,9 @@ standalone drives.
|
||||
|
||||
For tape libraries, cleaning cartridges are identified using special
|
||||
labels starting with letters "CLN". For example, our tape library has a
|
||||
cleaning cartridge inside slot 3::
|
||||
cleaning cartridge inside slot 3:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape changer status sl3
|
||||
┌───────────────┬──────────┬────────────┬─────────────┐
|
||||
@ -715,7 +866,9 @@ cleaning cartridge inside slot 3::
|
||||
│ ... │ ... │ │ │
|
||||
└───────────────┴──────────┴────────────┴─────────────┘
|
||||
|
||||
To initiate a cleaning operation simply run::
|
||||
To initiate a cleaning operation simply run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-tape clean
|
||||
|
||||
@ -730,3 +883,78 @@ This command does the following:
|
||||
- run drive cleaning operation
|
||||
|
||||
- unload the cleaning tape (to slot 3)
|
||||
|
||||
|
||||
|
||||
Configuration Files
|
||||
-------------------
|
||||
|
||||
``media-pool.cfg``
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
File Format
|
||||
^^^^^^^^^^^
|
||||
|
||||
.. include:: config/media-pool/format.rst
|
||||
|
||||
|
||||
Options
|
||||
^^^^^^^
|
||||
|
||||
.. include:: config/media-pool/config.rst
|
||||
|
||||
|
||||
``tape.cfg``
|
||||
~~~~~~~~~~~~
|
||||
|
||||
File Format
|
||||
^^^^^^^^^^^
|
||||
|
||||
.. include:: config/tape/format.rst
|
||||
|
||||
|
||||
Options
|
||||
^^^^^^^
|
||||
|
||||
.. include:: config/tape/config.rst
|
||||
|
||||
|
||||
``tape-job.cfg``
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
File Format
|
||||
^^^^^^^^^^^
|
||||
|
||||
.. include:: config/tape-job/format.rst
|
||||
|
||||
|
||||
Options
|
||||
^^^^^^^
|
||||
|
||||
.. include:: config/tape-job/config.rst
|
||||
|
||||
|
||||
|
||||
Command Syntax
|
||||
--------------
|
||||
|
||||
``proxmox-tape``
|
||||
----------------
|
||||
|
||||
.. include:: proxmox-tape/synopsis.rst
|
||||
|
||||
|
||||
``pmt``
|
||||
-------
|
||||
|
||||
.. include:: pmt/options.rst
|
||||
|
||||
....
|
||||
|
||||
.. include:: pmt/synopsis.rst
|
||||
|
||||
|
||||
``pmtx``
|
||||
--------
|
||||
|
||||
.. include:: pmtx/synopsis.rst
|
||||
|
@ -1,14 +1,14 @@
|
||||
.. _tech_design_overview:
|
||||
|
||||
Technical Overview
|
||||
==================
|
||||
|
||||
.. _technical_overview:
|
||||
|
||||
Datastores
|
||||
----------
|
||||
|
||||
A Datastore is the logical place where :ref:`Backup Snapshots
|
||||
<backup_snapshot>` and their chunks are stored. Snapshots consist of a
|
||||
manifest, blobs, dynamic- and fixed-indexes (see :ref:`terminology`), and are
|
||||
<term_backup_snapshot>` and their chunks are stored. Snapshots consist of a
|
||||
manifest, blobs, dynamic- and fixed-indexes (see :ref:`terms`), and are
|
||||
stored in the following directory structure:
|
||||
|
||||
<datastore-root>/<type>/<id>/<time>/
|
||||
|
@ -1,4 +1,4 @@
|
||||
.. _terminology:
|
||||
.. _terms:
|
||||
|
||||
Terminology
|
||||
===========
|
||||
@ -101,7 +101,7 @@ Backup Group
|
||||
The tuple ``<type>/<ID>`` is called a backup group. Such a group
|
||||
may contain one or more backup snapshots.
|
||||
|
||||
.. _backup_snapshot:
|
||||
.. _term_backup_snapshot:
|
||||
|
||||
Backup Snapshot
|
||||
---------------
|
||||
|
@ -286,26 +286,26 @@ you can use the ``proxmox-backup-manager user permission`` command:
|
||||
- Datastore.Backup (*)
|
||||
|
||||
.. _user_tfa:
|
||||
|
||||
Two-factor authentication
|
||||
-------------------------
|
||||
|
||||
Introduction
|
||||
~~~~~~~~~~~~
|
||||
|
||||
Simple authentication requires only secret piece of evidence (one factor) that
|
||||
a user can successfully claim a identiy (authenticate), for example, that you
|
||||
are allowed to login as `root@pam` on a specific Proxmox Backup Server.
|
||||
If the password gets stolen, or leaked in another way, anybody can use it to
|
||||
login - even if they should not be allowed to do so.
|
||||
With simple authentication, only a password (single factor) is required to
|
||||
successfully claim an identity (authenticate), for example, to be able to log in
|
||||
as `root@pam` on a specific instance of Proxmox Backup Server. In this case, if
|
||||
the password gets stolen or leaked, anybody can use it to log in - even if they
|
||||
should not be allowed to do so.
|
||||
|
||||
With Two-factor authentication (TFA) a user is asked for an additional factor,
|
||||
to proof his authenticity. The extra factor is different from a password
|
||||
(something only the user knows), it is something only the user has, for example
|
||||
a piece of hardware (security key) or an secret saved on the users smartphone.
|
||||
|
||||
This means that a remote user can never get hold on such a physical object. So,
|
||||
even if that user would know your password they cannot successfully
|
||||
authenticate as you, as your second factor is missing.
|
||||
With two-factor authentication (TFA), a user is asked for an additional factor
|
||||
to verify their authenticity. Rather than relying on something only the user
|
||||
knows (a password), this extra factor requires something only the user has, for
|
||||
example, a piece of hardware (security key) or a secret saved on the user's
|
||||
smartphone. This prevents a remote user from gaining unauthorized access to an
|
||||
account, as even if they have the password, they will not have access to the
|
||||
physical object (second factor).
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-tfa-login.png
|
||||
:align: right
|
||||
@ -314,30 +314,33 @@ authenticate as you, as your second factor is missing.
|
||||
Available Second Factors
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
You can setup more than one second factor to avoid that losing your smartphone
|
||||
or security key permanently locks you out from your account.
|
||||
You can set up multiple second factors, in order to avoid a situation in which
|
||||
losing your smartphone or security key locks you out of your account
|
||||
permanently.
|
||||
|
||||
There are three different two-factor authentication methods supported:
|
||||
Proxmox Backup Server supports three different two-factor authentication
|
||||
methods:
|
||||
|
||||
* TOTP (`Time-based One-Time Password <https://en.wikipedia.org/wiki/Time-based_One-Time_Password>`_).
|
||||
A short code derived from a shared secret and the current time, it switches
|
||||
A short code derived from a shared secret and the current time, it changes
|
||||
every 30 seconds.
|
||||
|
||||
* WebAuthn (`Web Authentication <https://en.wikipedia.org/wiki/WebAuthn>`_).
|
||||
A general standard for authentication. It is implemented by various security
|
||||
devices like hardware keys or trusted platform modules (TPM) from a computer
|
||||
devices, like hardware keys or trusted platform modules (TPM) from a computer
|
||||
or smart phone.
|
||||
|
||||
* Single use Recovery Keys. A list of keys which should either be printed out
|
||||
and locked in a secure fault or saved digitally in a electronic vault.
|
||||
Each key can be used only once, they are perfect for ensuring you are not
|
||||
locked out even if all of your other second factors are lost or corrupt.
|
||||
and locked in a secure place or saved digitally in an electronic vault.
|
||||
Each key can be used only once. These are perfect for ensuring that you are
|
||||
not locked out, even if all of your other second factors are lost or corrupt.
|
||||
|
||||
|
||||
Setup
|
||||
~~~~~
|
||||
|
||||
.. _user_tfa_setup_totp:
|
||||
|
||||
TOTP
|
||||
^^^^
|
||||
|
||||
@ -345,15 +348,16 @@ TOTP
|
||||
:align: right
|
||||
:alt: Add a new user
|
||||
|
||||
There is not server setup required, simply install a TOTP app on your
|
||||
There is no server setup required. Simply install a TOTP app on your
|
||||
smartphone (for example, `FreeOTP <https://freeotp.github.io/>`_) and use the
|
||||
Proxmox Backup Server web-interface to add a TOTP factor.
|
||||
|
||||
.. _user_tfa_setup_webauthn:
|
||||
|
||||
WebAuthn
|
||||
^^^^^^^^
|
||||
|
||||
For WebAuthn to work you need to have two things:
|
||||
For WebAuthn to work, you need to have two things:
|
||||
|
||||
* a trusted HTTPS certificate (for example, by using `Let's Encrypt
|
||||
<https://pbs.proxmox.com/wiki/index.php/HTTPS_Certificate_Configuration>`_)
|
||||
@ -361,10 +365,11 @@ For WebAuthn to work you need to have two things:
|
||||
* setup the WebAuthn configuration (see *Configuration -> Authentication* in the
|
||||
Proxmox Backup Server web-interface). This can be auto-filled in most setups.
|
||||
|
||||
Once you fullfilled both of those requirements, you can add a WebAuthn
|
||||
Once you have fulfilled both of these requirements, you can add a WebAuthn
|
||||
configuration in the *Access Control* panel.
|
||||
|
||||
.. _user_tfa_setup_recovery_keys:
|
||||
|
||||
Recovery Keys
|
||||
^^^^^^^^^^^^^
|
||||
|
||||
@ -372,7 +377,7 @@ Recovery Keys
|
||||
:align: right
|
||||
:alt: Add a new user
|
||||
|
||||
Recovery key codes do not need any preparation, you can simply create a set of
|
||||
Recovery key codes do not need any preparation; you can simply create a set of
|
||||
recovery keys in the *Access Control* panel.
|
||||
|
||||
.. note:: There can only be one set of single-use recovery keys per user at any
|
||||
@ -381,7 +386,7 @@ recovery keys in the *Access Control* panel.
|
||||
TFA and Automated Access
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Two-factor authentication is only implemented for the web-interface, you should
|
||||
Two-factor authentication is only implemented for the web-interface. You should
|
||||
use :ref:`API Tokens <user_tokens>` for all other use cases, especially
|
||||
non-interactive ones (for example, adding a Proxmox Backup server to Proxmox VE
|
||||
non-interactive ones (for example, adding a Proxmox Backup Server to Proxmox VE
|
||||
as a storage).
|
||||
|
@ -15,19 +15,19 @@ fn extract_acl_node_data(
|
||||
path: &str,
|
||||
list: &mut Vec<AclListItem>,
|
||||
exact: bool,
|
||||
token_user: &Option<Authid>,
|
||||
auth_id_filter: &Option<Authid>,
|
||||
) {
|
||||
// tokens can't have tokens, so we can early return
|
||||
if let Some(token_user) = token_user {
|
||||
if token_user.is_token() {
|
||||
if let Some(auth_id_filter) = auth_id_filter {
|
||||
if auth_id_filter.is_token() {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
for (user, roles) in &node.users {
|
||||
if let Some(token_user) = token_user {
|
||||
if let Some(auth_id_filter) = auth_id_filter {
|
||||
if !user.is_token()
|
||||
|| user.user() != token_user.user() {
|
||||
|| user.user() != auth_id_filter.user() {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
@ -43,7 +43,7 @@ fn extract_acl_node_data(
|
||||
}
|
||||
}
|
||||
for (group, roles) in &node.groups {
|
||||
if token_user.is_some() {
|
||||
if auth_id_filter.is_some() {
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -62,7 +62,7 @@ fn extract_acl_node_data(
|
||||
}
|
||||
for (comp, child) in &node.children {
|
||||
let new_path = format!("{}/{}", path, comp);
|
||||
extract_acl_node_data(child, &new_path, list, exact, token_user);
|
||||
extract_acl_node_data(child, &new_path, list, exact, auth_id_filter);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -240,7 +240,7 @@ fn get_tfa_entry(userid: Userid, id: String) -> Result<TypedTfaInfo, Error> {
|
||||
]),
|
||||
},
|
||||
)]
|
||||
/// Get a single TFA entry.
|
||||
/// Delete a single TFA entry.
|
||||
fn delete_tfa(
|
||||
userid: Userid,
|
||||
id: String,
|
||||
|
@ -3,8 +3,6 @@
|
||||
use std::collections::HashSet;
|
||||
use std::ffi::OsStr;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::pin::Pin;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use futures::*;
|
||||
@ -22,19 +20,20 @@ use proxmox::api::schema::*;
|
||||
use proxmox::tools::fs::{replace_file, CreateOptions};
|
||||
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
|
||||
|
||||
use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
|
||||
use pxar::accessor::aio::Accessor;
|
||||
use pxar::EntryKind;
|
||||
|
||||
use crate::api2::types::*;
|
||||
use crate::api2::node::rrd::create_value_from_rrd;
|
||||
use crate::api2::helpers;
|
||||
use crate::backup::*;
|
||||
use crate::config::datastore;
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
use crate::pxar::create_zip;
|
||||
|
||||
use crate::server::{jobstate::Job, WorkerTask};
|
||||
use crate::tools::{
|
||||
self,
|
||||
zip::{ZipEncoder, ZipEntry},
|
||||
AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
|
||||
};
|
||||
|
||||
@ -1294,7 +1293,7 @@ pub fn catalog(
|
||||
backup_time: i64,
|
||||
filepath: String,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
) -> Result<Vec<ArchiveEntry>, Error> {
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
@ -1326,112 +1325,14 @@ pub fn catalog(
|
||||
let reader = BufferedDynamicReader::new(index, chunk_reader);
|
||||
|
||||
let mut catalog_reader = CatalogReader::new(reader);
|
||||
let mut current = catalog_reader.root()?;
|
||||
let mut components = vec![];
|
||||
|
||||
|
||||
if filepath != "root" {
|
||||
components = base64::decode(filepath)?;
|
||||
if !components.is_empty() && components[0] == b'/' {
|
||||
components.remove(0);
|
||||
}
|
||||
for component in components.split(|c| *c == b'/') {
|
||||
if let Some(entry) = catalog_reader.lookup(¤t, component)? {
|
||||
current = entry;
|
||||
let path = if filepath != "root" && filepath != "/" {
|
||||
base64::decode(filepath)?
|
||||
} else {
|
||||
bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut res = Vec::new();
|
||||
|
||||
for direntry in catalog_reader.read_dir(¤t)? {
|
||||
let mut components = components.clone();
|
||||
components.push(b'/');
|
||||
components.extend(&direntry.name);
|
||||
let path = base64::encode(components);
|
||||
let text = String::from_utf8_lossy(&direntry.name);
|
||||
let mut entry = json!({
|
||||
"filepath": path,
|
||||
"text": text,
|
||||
"type": CatalogEntryType::from(&direntry.attr).to_string(),
|
||||
"leaf": true,
|
||||
});
|
||||
match direntry.attr {
|
||||
DirEntryAttribute::Directory { start: _ } => {
|
||||
entry["leaf"] = false.into();
|
||||
},
|
||||
DirEntryAttribute::File { size, mtime } => {
|
||||
entry["size"] = size.into();
|
||||
entry["mtime"] = mtime.into();
|
||||
},
|
||||
_ => {},
|
||||
}
|
||||
res.push(entry);
|
||||
}
|
||||
|
||||
Ok(res.into())
|
||||
}
|
||||
|
||||
fn recurse_files<'a, T, W>(
|
||||
zip: &'a mut ZipEncoder<W>,
|
||||
decoder: &'a mut Accessor<T>,
|
||||
prefix: &'a Path,
|
||||
file: FileEntry<T>,
|
||||
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
|
||||
where
|
||||
T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
|
||||
W: tokio::io::AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
Box::pin(async move {
|
||||
let metadata = file.entry().metadata();
|
||||
let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();
|
||||
|
||||
match file.kind() {
|
||||
EntryKind::File { .. } => {
|
||||
let entry = ZipEntry::new(
|
||||
path,
|
||||
metadata.stat.mtime.secs,
|
||||
metadata.stat.mode as u16,
|
||||
true,
|
||||
);
|
||||
zip.add_entry(entry, Some(file.contents().await?))
|
||||
.await
|
||||
.map_err(|err| format_err!("could not send file entry: {}", err))?;
|
||||
}
|
||||
EntryKind::Hardlink(_) => {
|
||||
let realfile = decoder.follow_hardlink(&file).await?;
|
||||
let entry = ZipEntry::new(
|
||||
path,
|
||||
metadata.stat.mtime.secs,
|
||||
metadata.stat.mode as u16,
|
||||
true,
|
||||
);
|
||||
zip.add_entry(entry, Some(realfile.contents().await?))
|
||||
.await
|
||||
.map_err(|err| format_err!("could not send file entry: {}", err))?;
|
||||
}
|
||||
EntryKind::Directory => {
|
||||
let dir = file.enter_directory().await?;
|
||||
let mut readdir = dir.read_dir();
|
||||
let entry = ZipEntry::new(
|
||||
path,
|
||||
metadata.stat.mtime.secs,
|
||||
metadata.stat.mode as u16,
|
||||
false,
|
||||
);
|
||||
zip.add_entry::<FileContents<T>>(entry, None).await?;
|
||||
while let Some(entry) = readdir.next().await {
|
||||
let entry = entry?.decode_entry().await?;
|
||||
recurse_files(zip, decoder, prefix, entry).await?;
|
||||
}
|
||||
}
|
||||
_ => {} // ignore all else
|
||||
vec![b'/']
|
||||
};
|
||||
|
||||
Ok(())
|
||||
})
|
||||
helpers::list_dir_content(&mut catalog_reader, &path)
|
||||
}
|
||||
|
||||
#[sortable]
|
||||
@ -1509,9 +1410,10 @@ pub fn pxar_file_download(
|
||||
|
||||
let decoder = Accessor::new(reader, archive_size).await?;
|
||||
let root = decoder.open_root().await?;
|
||||
let path = OsStr::from_bytes(file_path).to_os_string();
|
||||
let file = root
|
||||
.lookup(OsStr::from_bytes(file_path)).await?
|
||||
.ok_or_else(|| format_err!("error opening '{:?}'", file_path))?;
|
||||
.lookup(&path).await?
|
||||
.ok_or_else(|| format_err!("error opening '{:?}'", path))?;
|
||||
|
||||
let body = match file.kind() {
|
||||
EntryKind::File { .. } => Body::wrap_stream(
|
||||
@ -1525,37 +1427,19 @@ pub fn pxar_file_download(
|
||||
.map_err(move |err| {
|
||||
eprintln!(
|
||||
"error during streaming of hardlink '{:?}' - {}",
|
||||
filepath, err
|
||||
path, err
|
||||
);
|
||||
err
|
||||
}),
|
||||
),
|
||||
EntryKind::Directory => {
|
||||
let (sender, receiver) = tokio::sync::mpsc::channel(100);
|
||||
let mut prefix = PathBuf::new();
|
||||
let mut components = file.entry().path().components();
|
||||
components.next_back(); // discar last
|
||||
for comp in components {
|
||||
prefix.push(comp);
|
||||
}
|
||||
|
||||
let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
|
||||
|
||||
crate::server::spawn_internal_task(async move {
|
||||
let mut zipencoder = ZipEncoder::new(channelwriter);
|
||||
let mut decoder = decoder;
|
||||
recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
|
||||
.await
|
||||
.map_err(|err| eprintln!("error during creating of zip: {}", err))?;
|
||||
|
||||
zipencoder
|
||||
.finish()
|
||||
.await
|
||||
.map_err(|err| eprintln!("error during finishing of zip: {}", err))
|
||||
});
|
||||
|
||||
crate::server::spawn_internal_task(
|
||||
create_zip(channelwriter, decoder, path.clone(), false)
|
||||
);
|
||||
Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
|
||||
eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
|
||||
eprintln!("error during streaming of zip '{:?}' - {}", path, err);
|
||||
err
|
||||
}))
|
||||
}
|
||||
|
@ -7,16 +7,35 @@ use proxmox::api::{api, ApiMethod, Permission, Router, RpcEnvironment};
|
||||
use proxmox::api::router::SubdirMap;
|
||||
use proxmox::{list_subdirs_api_method, sortable};
|
||||
|
||||
use crate::api2::types::*;
|
||||
use crate::api2::pull::do_sync_job;
|
||||
use crate::api2::config::sync::{check_sync_job_modify_access, check_sync_job_read_access};
|
||||
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
|
||||
use crate::server::UPID;
|
||||
use crate::server::jobstate::{Job, JobState};
|
||||
use crate::tools::systemd::time::{
|
||||
parse_calendar_event, compute_next_event};
|
||||
use crate::{
|
||||
api2::{
|
||||
types::{
|
||||
DATASTORE_SCHEMA,
|
||||
JOB_ID_SCHEMA,
|
||||
Authid,
|
||||
},
|
||||
pull::do_sync_job,
|
||||
config::sync::{
|
||||
check_sync_job_modify_access,
|
||||
check_sync_job_read_access,
|
||||
},
|
||||
},
|
||||
config::{
|
||||
cached_user_info::CachedUserInfo,
|
||||
sync::{
|
||||
self,
|
||||
SyncJobStatus,
|
||||
SyncJobConfig,
|
||||
},
|
||||
},
|
||||
server::{
|
||||
jobstate::{
|
||||
Job,
|
||||
JobState,
|
||||
compute_schedule_status,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
@ -30,7 +49,7 @@ use crate::tools::systemd::time::{
|
||||
returns: {
|
||||
description: "List configured jobs and their status.",
|
||||
type: Array,
|
||||
items: { type: sync::SyncJobStatus },
|
||||
items: { type: SyncJobStatus },
|
||||
},
|
||||
access: {
|
||||
description: "Limited to sync jobs where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
|
||||
@ -49,48 +68,29 @@ pub fn list_sync_jobs(
|
||||
|
||||
let (config, digest) = sync::config()?;
|
||||
|
||||
let mut list: Vec<SyncJobStatus> = config
|
||||
let job_config_iter = config
|
||||
.convert_to_typed_array("sync")?
|
||||
.into_iter()
|
||||
.filter(|job: &SyncJobStatus| {
|
||||
.filter(|job: &SyncJobConfig| {
|
||||
if let Some(store) = &store {
|
||||
&job.store == store
|
||||
} else {
|
||||
true
|
||||
}
|
||||
})
|
||||
.filter(|job: &SyncJobStatus| {
|
||||
let as_config: SyncJobConfig = job.into();
|
||||
check_sync_job_read_access(&user_info, &auth_id, &as_config)
|
||||
}).collect();
|
||||
.filter(|job: &SyncJobConfig| {
|
||||
check_sync_job_read_access(&user_info, &auth_id, &job)
|
||||
});
|
||||
|
||||
for job in &mut list {
|
||||
let mut list = Vec::new();
|
||||
|
||||
for job in job_config_iter {
|
||||
let last_state = JobState::load("syncjob", &job.id)
|
||||
.map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
|
||||
let (upid, endtime, state, starttime) = match last_state {
|
||||
JobState::Created { time } => (None, None, None, time),
|
||||
JobState::Started { upid } => {
|
||||
let parsed_upid: UPID = upid.parse()?;
|
||||
(Some(upid), None, None, parsed_upid.starttime)
|
||||
},
|
||||
JobState::Finished { upid, state } => {
|
||||
let parsed_upid: UPID = upid.parse()?;
|
||||
(Some(upid), Some(state.endtime()), Some(state.to_string()), parsed_upid.starttime)
|
||||
},
|
||||
};
|
||||
|
||||
job.last_run_upid = upid;
|
||||
job.last_run_state = state;
|
||||
job.last_run_endtime = endtime;
|
||||
let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;
|
||||
|
||||
let last = job.last_run_endtime.unwrap_or(starttime);
|
||||
|
||||
job.next_run = (|| -> Option<i64> {
|
||||
let schedule = job.schedule.as_ref()?;
|
||||
let event = parse_calendar_event(&schedule).ok()?;
|
||||
// ignore errors
|
||||
compute_next_event(&event, last, false).unwrap_or(None)
|
||||
})();
|
||||
list.push(SyncJobStatus { config: job, status });
|
||||
}
|
||||
|
||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||
|
@ -1,24 +1,40 @@
|
||||
//! Datastore Verify Job Management
|
||||
|
||||
use anyhow::{format_err, Error};
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox::api::router::SubdirMap;
|
||||
use proxmox::{list_subdirs_api_method, sortable};
|
||||
use proxmox::api::{api, ApiMethod, Permission, Router, RpcEnvironment};
|
||||
|
||||
use crate::api2::types::*;
|
||||
use crate::server::do_verification_job;
|
||||
use crate::server::jobstate::{Job, JobState};
|
||||
use crate::config::acl::{
|
||||
use crate::{
|
||||
api2::types::{
|
||||
DATASTORE_SCHEMA,
|
||||
JOB_ID_SCHEMA,
|
||||
Authid,
|
||||
},
|
||||
server::{
|
||||
do_verification_job,
|
||||
jobstate::{
|
||||
Job,
|
||||
JobState,
|
||||
compute_schedule_status,
|
||||
},
|
||||
},
|
||||
config::{
|
||||
acl::{
|
||||
PRIV_DATASTORE_AUDIT,
|
||||
PRIV_DATASTORE_VERIFY,
|
||||
},
|
||||
cached_user_info::CachedUserInfo,
|
||||
verify::{
|
||||
self,
|
||||
VerificationJobConfig,
|
||||
VerificationJobStatus,
|
||||
},
|
||||
},
|
||||
};
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
use crate::config::verify;
|
||||
use crate::config::verify::{VerificationJobConfig, VerificationJobStatus};
|
||||
use serde_json::Value;
|
||||
use crate::tools::systemd::time::{parse_calendar_event, compute_next_event};
|
||||
use crate::server::UPID;
|
||||
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
@ -52,10 +68,10 @@ pub fn list_verification_jobs(
|
||||
|
||||
let (config, digest) = verify::config()?;
|
||||
|
||||
let mut list: Vec<VerificationJobStatus> = config
|
||||
let job_config_iter = config
|
||||
.convert_to_typed_array("verification")?
|
||||
.into_iter()
|
||||
.filter(|job: &VerificationJobStatus| {
|
||||
.filter(|job: &VerificationJobConfig| {
|
||||
let privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
|
||||
if privs & required_privs == 0 {
|
||||
return false;
|
||||
@ -66,36 +82,17 @@ pub fn list_verification_jobs(
|
||||
} else {
|
||||
true
|
||||
}
|
||||
}).collect();
|
||||
});
|
||||
|
||||
for job in &mut list {
|
||||
let mut list = Vec::new();
|
||||
|
||||
for job in job_config_iter {
|
||||
let last_state = JobState::load("verificationjob", &job.id)
|
||||
.map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
|
||||
|
||||
let (upid, endtime, state, starttime) = match last_state {
|
||||
JobState::Created { time } => (None, None, None, time),
|
||||
JobState::Started { upid } => {
|
||||
let parsed_upid: UPID = upid.parse()?;
|
||||
(Some(upid), None, None, parsed_upid.starttime)
|
||||
},
|
||||
JobState::Finished { upid, state } => {
|
||||
let parsed_upid: UPID = upid.parse()?;
|
||||
(Some(upid), Some(state.endtime()), Some(state.to_string()), parsed_upid.starttime)
|
||||
},
|
||||
};
|
||||
let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;
|
||||
|
||||
job.last_run_upid = upid;
|
||||
job.last_run_state = state;
|
||||
job.last_run_endtime = endtime;
|
||||
|
||||
let last = job.last_run_endtime.unwrap_or(starttime);
|
||||
|
||||
job.next_run = (|| -> Option<i64> {
|
||||
let schedule = job.schedule.as_ref()?;
|
||||
let event = parse_calendar_event(&schedule).ok()?;
|
||||
// ignore errors
|
||||
compute_next_event(&event, last, false).unwrap_or(None)
|
||||
})();
|
||||
list.push(VerificationJobStatus { config: job, status });
|
||||
}
|
||||
|
||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||
|
@ -268,7 +268,7 @@ async move {
|
||||
}.boxed()
|
||||
}
|
||||
|
||||
pub const BACKUP_API_SUBDIRS: SubdirMap = &[
|
||||
const BACKUP_API_SUBDIRS: SubdirMap = &[
|
||||
(
|
||||
"blob", &Router::new()
|
||||
.upload(&API_METHOD_UPLOAD_BLOB)
|
||||
|
@ -12,6 +12,7 @@ pub mod drive;
|
||||
pub mod changer;
|
||||
pub mod media_pool;
|
||||
pub mod tape_encryption_keys;
|
||||
pub mod tape_backup_job;
|
||||
|
||||
const SUBDIRS: SubdirMap = &[
|
||||
("access", &access::ROUTER),
|
||||
@ -21,6 +22,7 @@ const SUBDIRS: SubdirMap = &[
|
||||
("media-pool", &media_pool::ROUTER),
|
||||
("remote", &remote::ROUTER),
|
||||
("sync", &sync::ROUTER),
|
||||
("tape-backup-job", &tape_backup_job::ROUTER),
|
||||
("tape-encryption-keys", &tape_encryption_keys::ROUTER),
|
||||
("verify", &verify::ROUTER),
|
||||
];
|
||||
|
@ -5,6 +5,7 @@ use anyhow::Error;
|
||||
|
||||
use crate::api2::types::PROXMOX_CONFIG_DIGEST_SCHEMA;
|
||||
use proxmox::api::{api, Permission, Router, RpcEnvironment, SubdirMap};
|
||||
use proxmox::api::schema::Updatable;
|
||||
use proxmox::list_subdirs_api_method;
|
||||
|
||||
use crate::config::tfa::{self, WebauthnConfig, WebauthnConfigUpdater};
|
||||
@ -73,9 +74,9 @@ pub fn update_webauthn_config(
|
||||
let digest = proxmox::tools::hex_to_digest(digest)?;
|
||||
crate::tools::detect_modified_configuration_file(&digest, &wa.digest()?)?;
|
||||
}
|
||||
webauthn.apply_to(wa);
|
||||
wa.update_from::<&str>(webauthn, &[])?;
|
||||
} else {
|
||||
tfa.webauthn = Some(webauthn.build()?);
|
||||
tfa.webauthn = Some(WebauthnConfig::try_build_from(webauthn)?);
|
||||
}
|
||||
|
||||
tfa::write(&tfa)?;
|
||||
|
@ -6,15 +6,24 @@ use proxmox::api::{
|
||||
api,
|
||||
Router,
|
||||
RpcEnvironment,
|
||||
Permission,
|
||||
schema::parse_property_string,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
config,
|
||||
config::{
|
||||
self,
|
||||
cached_user_info::CachedUserInfo,
|
||||
acl::{
|
||||
PRIV_TAPE_AUDIT,
|
||||
PRIV_TAPE_MODIFY,
|
||||
},
|
||||
},
|
||||
api2::types::{
|
||||
Authid,
|
||||
PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||
CHANGER_NAME_SCHEMA,
|
||||
LINUX_DRIVE_PATH_SCHEMA,
|
||||
SCSI_CHANGER_PATH_SCHEMA,
|
||||
SLOT_ARRAY_SCHEMA,
|
||||
EXPORT_SLOT_LIST_SCHEMA,
|
||||
ScsiTapeChanger,
|
||||
@ -34,7 +43,7 @@ use crate::{
|
||||
schema: CHANGER_NAME_SCHEMA,
|
||||
},
|
||||
path: {
|
||||
schema: LINUX_DRIVE_PATH_SCHEMA,
|
||||
schema: SCSI_CHANGER_PATH_SCHEMA,
|
||||
},
|
||||
"export-slots": {
|
||||
schema: EXPORT_SLOT_LIST_SCHEMA,
|
||||
@ -42,6 +51,9 @@ use crate::{
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device"], PRIV_TAPE_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Create a new changer device
|
||||
pub fn create_changer(
|
||||
@ -94,7 +106,9 @@ pub fn create_changer(
|
||||
returns: {
|
||||
type: ScsiTapeChanger,
|
||||
},
|
||||
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// Get tape changer configuration
|
||||
pub fn get_config(
|
||||
@ -123,17 +137,31 @@ pub fn get_config(
|
||||
type: ScsiTapeChanger,
|
||||
},
|
||||
},
|
||||
access: {
|
||||
description: "List configured tape changer filtered by Tape.Audit privileges",
|
||||
permission: &Permission::Anybody,
|
||||
},
|
||||
)]
|
||||
/// List changers
|
||||
pub fn list_changers(
|
||||
_param: Value,
|
||||
mut rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Vec<ScsiTapeChanger>, Error> {
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
|
||||
let (config, digest) = config::drive::config()?;
|
||||
|
||||
let list: Vec<ScsiTapeChanger> = config.convert_to_typed_array("changer")?;
|
||||
|
||||
let list = list
|
||||
.into_iter()
|
||||
.filter(|changer| {
|
||||
let privs = user_info.lookup_privs(&auth_id, &["tape", "device", &changer.name]);
|
||||
privs & PRIV_TAPE_AUDIT != 0
|
||||
})
|
||||
.collect();
|
||||
|
||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||
|
||||
Ok(list)
|
||||
@ -156,7 +184,7 @@ pub enum DeletableProperty {
|
||||
schema: CHANGER_NAME_SCHEMA,
|
||||
},
|
||||
path: {
|
||||
schema: LINUX_DRIVE_PATH_SCHEMA,
|
||||
schema: SCSI_CHANGER_PATH_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"export-slots": {
|
||||
@ -177,6 +205,9 @@ pub enum DeletableProperty {
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Update a tape changer configuration
|
||||
pub fn update_changer(
|
||||
@ -251,6 +282,9 @@ pub fn update_changer(
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Delete a tape changer configuration
|
||||
pub fn delete_changer(name: String, _param: Value) -> Result<(), Error> {
|
||||
|
@ -2,11 +2,19 @@ use anyhow::{bail, Error};
|
||||
use ::serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox::api::{api, Router, RpcEnvironment};
|
||||
use proxmox::api::{api, Router, RpcEnvironment, Permission};
|
||||
|
||||
use crate::{
|
||||
config,
|
||||
config::{
|
||||
self,
|
||||
cached_user_info::CachedUserInfo,
|
||||
acl::{
|
||||
PRIV_TAPE_AUDIT,
|
||||
PRIV_TAPE_MODIFY,
|
||||
},
|
||||
},
|
||||
api2::types::{
|
||||
Authid,
|
||||
PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||
DRIVE_NAME_SCHEMA,
|
||||
CHANGER_NAME_SCHEMA,
|
||||
@ -41,6 +49,9 @@ use crate::{
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device"], PRIV_TAPE_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Create a new drive
|
||||
pub fn create_drive(param: Value) -> Result<(), Error> {
|
||||
@ -84,6 +95,9 @@ pub fn create_drive(param: Value) -> Result<(), Error> {
|
||||
returns: {
|
||||
type: LinuxTapeDrive,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// Get drive configuration
|
||||
pub fn get_config(
|
||||
@ -112,17 +126,31 @@ pub fn get_config(
|
||||
type: LinuxTapeDrive,
|
||||
},
|
||||
},
|
||||
access: {
|
||||
description: "List configured tape drives filtered by Tape.Audit privileges",
|
||||
permission: &Permission::Anybody,
|
||||
},
|
||||
)]
|
||||
/// List drives
|
||||
pub fn list_drives(
|
||||
_param: Value,
|
||||
mut rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Vec<LinuxTapeDrive>, Error> {
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
|
||||
let (config, digest) = config::drive::config()?;
|
||||
|
||||
let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
|
||||
|
||||
let drive_list = drive_list
|
||||
.into_iter()
|
||||
.filter(|drive| {
|
||||
let privs = user_info.lookup_privs(&auth_id, &["tape", "device", &drive.name]);
|
||||
privs & PRIV_TAPE_AUDIT != 0
|
||||
})
|
||||
.collect();
|
||||
|
||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||
|
||||
Ok(drive_list)
|
||||
@ -173,6 +201,9 @@ pub enum DeletableProperty {
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Update a drive configuration
|
||||
pub fn update_drive(
|
||||
@ -246,6 +277,9 @@ pub fn update_drive(
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Delete a drive configuration
|
||||
pub fn delete_drive(name: String, _param: Value) -> Result<(), Error> {
|
||||
|
@ -6,75 +6,61 @@ use proxmox::{
|
||||
api,
|
||||
Router,
|
||||
RpcEnvironment,
|
||||
Permission,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
api2::types::{
|
||||
Authid,
|
||||
MEDIA_POOL_NAME_SCHEMA,
|
||||
MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
|
||||
MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
|
||||
MEDIA_RETENTION_POLICY_SCHEMA,
|
||||
TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
||||
SINGLE_LINE_COMMENT_SCHEMA,
|
||||
MediaPoolConfig,
|
||||
},
|
||||
config,
|
||||
config::{
|
||||
self,
|
||||
cached_user_info::CachedUserInfo,
|
||||
acl::{
|
||||
PRIV_TAPE_AUDIT,
|
||||
PRIV_TAPE_MODIFY,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
name: {
|
||||
schema: MEDIA_POOL_NAME_SCHEMA,
|
||||
},
|
||||
allocation: {
|
||||
schema: MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
retention: {
|
||||
schema: MEDIA_RETENTION_POLICY_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
template: {
|
||||
schema: MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
encrypt: {
|
||||
schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
||||
optional: true,
|
||||
config: {
|
||||
type: MediaPoolConfig,
|
||||
flatten: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "pool"], PRIV_TAPE_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Create a new media pool
|
||||
pub fn create_pool(
|
||||
name: String,
|
||||
allocation: Option<String>,
|
||||
retention: Option<String>,
|
||||
template: Option<String>,
|
||||
encrypt: Option<String>,
|
||||
config: MediaPoolConfig,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let _lock = config::media_pool::lock()?;
|
||||
|
||||
let (mut config, _digest) = config::media_pool::config()?;
|
||||
let (mut section_config, _digest) = config::media_pool::config()?;
|
||||
|
||||
if config.sections.get(&name).is_some() {
|
||||
bail!("Media pool '{}' already exists", name);
|
||||
if section_config.sections.get(&config.name).is_some() {
|
||||
bail!("Media pool '{}' already exists", config.name);
|
||||
}
|
||||
|
||||
let item = MediaPoolConfig {
|
||||
name: name.clone(),
|
||||
allocation,
|
||||
retention,
|
||||
template,
|
||||
encrypt,
|
||||
};
|
||||
section_config.set_data(&config.name, "pool", &config)?;
|
||||
|
||||
config.set_data(&name, "pool", &item)?;
|
||||
|
||||
config::media_pool::save_config(&config)?;
|
||||
config::media_pool::save_config(§ion_config)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@ -87,15 +73,29 @@ pub fn create_pool(
|
||||
type: MediaPoolConfig,
|
||||
},
|
||||
},
|
||||
access: {
|
||||
description: "List configured media pools filtered by Tape.Audit privileges",
|
||||
permission: &Permission::Anybody,
|
||||
},
|
||||
)]
|
||||
/// List media pools
|
||||
pub fn list_pools(
|
||||
mut rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Vec<MediaPoolConfig>, Error> {
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
|
||||
let (config, digest) = config::media_pool::config()?;
|
||||
|
||||
let list = config.convert_to_typed_array("pool")?;
|
||||
let list = config.convert_to_typed_array::<MediaPoolConfig>("pool")?;
|
||||
|
||||
let list = list
|
||||
.into_iter()
|
||||
.filter(|pool| {
|
||||
let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool.name]);
|
||||
privs & PRIV_TAPE_AUDIT != 0
|
||||
})
|
||||
.collect();
|
||||
|
||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||
|
||||
@ -113,6 +113,9 @@ pub fn list_pools(
|
||||
returns: {
|
||||
type: MediaPoolConfig,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "pool", "{name}"], PRIV_TAPE_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// Get media pool configuration
|
||||
pub fn get_config(name: String) -> Result<MediaPoolConfig, Error> {
|
||||
@ -137,6 +140,8 @@ pub enum DeletableProperty {
|
||||
template,
|
||||
/// Delete encryption fingerprint
|
||||
encrypt,
|
||||
/// Delete comment
|
||||
comment,
|
||||
}
|
||||
|
||||
#[api(
|
||||
@ -162,6 +167,10 @@ pub enum DeletableProperty {
|
||||
schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
comment: {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
delete: {
|
||||
description: "List of properties to delete.",
|
||||
type: Array,
|
||||
@ -172,6 +181,9 @@ pub enum DeletableProperty {
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "pool", "{name}"], PRIV_TAPE_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Update media pool settings
|
||||
pub fn update_pool(
|
||||
@ -180,6 +192,7 @@ pub fn update_pool(
|
||||
retention: Option<String>,
|
||||
template: Option<String>,
|
||||
encrypt: Option<String>,
|
||||
comment: Option<String>,
|
||||
delete: Option<Vec<DeletableProperty>>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
@ -196,6 +209,7 @@ pub fn update_pool(
|
||||
DeletableProperty::retention => { data.retention = None; },
|
||||
DeletableProperty::template => { data.template = None; },
|
||||
DeletableProperty::encrypt => { data.encrypt = None; },
|
||||
DeletableProperty::comment => { data.comment = None; },
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -205,6 +219,15 @@ pub fn update_pool(
|
||||
if template.is_some() { data.template = template; }
|
||||
if encrypt.is_some() { data.encrypt = encrypt; }
|
||||
|
||||
if let Some(comment) = comment {
|
||||
let comment = comment.trim();
|
||||
if comment.is_empty() {
|
||||
data.comment = None;
|
||||
} else {
|
||||
data.comment = Some(comment.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
config.set_data(&name, "pool", &data)?;
|
||||
|
||||
config::media_pool::save_config(&config)?;
|
||||
@ -221,6 +244,9 @@ pub fn update_pool(
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "pool", "{name}"], PRIV_TAPE_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Delete a media pool configuration
|
||||
pub fn delete_pool(name: String) -> Result<(), Error> {
|
||||
|
341
src/api2/config/tape_backup_job.rs
Normal file
341
src/api2/config/tape_backup_job.rs
Normal file
@ -0,0 +1,341 @@
|
||||
use anyhow::{bail, Error};
|
||||
use serde_json::Value;
|
||||
use ::serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::{api, Router, RpcEnvironment, Permission};
|
||||
use proxmox::tools::fs::open_file_locked;
|
||||
|
||||
use crate::{
|
||||
api2::types::{
|
||||
Authid,
|
||||
Userid,
|
||||
JOB_ID_SCHEMA,
|
||||
DATASTORE_SCHEMA,
|
||||
DRIVE_NAME_SCHEMA,
|
||||
PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||
SINGLE_LINE_COMMENT_SCHEMA,
|
||||
MEDIA_POOL_NAME_SCHEMA,
|
||||
SYNC_SCHEDULE_SCHEMA,
|
||||
},
|
||||
config::{
|
||||
self,
|
||||
cached_user_info::CachedUserInfo,
|
||||
acl::{
|
||||
PRIV_TAPE_AUDIT,
|
||||
PRIV_TAPE_MODIFY,
|
||||
},
|
||||
tape_job::{
|
||||
TAPE_JOB_CFG_LOCKFILE,
|
||||
TapeBackupJobConfig,
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {},
|
||||
},
|
||||
returns: {
|
||||
description: "List configured jobs.",
|
||||
type: Array,
|
||||
items: { type: TapeBackupJobConfig },
|
||||
},
|
||||
access: {
|
||||
description: "List configured tape jobs filtered by Tape.Audit privileges",
|
||||
permission: &Permission::Anybody,
|
||||
},
|
||||
)]
|
||||
/// List all tape backup jobs
|
||||
pub fn list_tape_backup_jobs(
|
||||
_param: Value,
|
||||
mut rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Vec<TapeBackupJobConfig>, Error> {
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
|
||||
let (config, digest) = config::tape_job::config()?;
|
||||
|
||||
let list = config.convert_to_typed_array::<TapeBackupJobConfig>("backup")?;
|
||||
|
||||
let list = list
|
||||
.into_iter()
|
||||
.filter(|job| {
|
||||
let privs = user_info.lookup_privs(&auth_id, &["tape", "job", &job.id]);
|
||||
privs & PRIV_TAPE_AUDIT != 0
|
||||
})
|
||||
.collect();
|
||||
|
||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||
|
||||
Ok(list)
|
||||
}
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
job: {
|
||||
type: TapeBackupJobConfig,
|
||||
flatten: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "job"], PRIV_TAPE_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Create a new tape backup job.
|
||||
pub fn create_tape_backup_job(
|
||||
job: TapeBackupJobConfig,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let _lock = open_file_locked(TAPE_JOB_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||
|
||||
let (mut config, _digest) = config::tape_job::config()?;
|
||||
|
||||
if config.sections.get(&job.id).is_some() {
|
||||
bail!("job '{}' already exists.", job.id);
|
||||
}
|
||||
|
||||
config.set_data(&job.id, "backup", &job)?;
|
||||
|
||||
config::tape_job::save_config(&config)?;
|
||||
|
||||
crate::server::jobstate::create_state_file("tape-backup-job", &job.id)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
id: {
|
||||
schema: JOB_ID_SCHEMA,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: { type: TapeBackupJobConfig },
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "job", "{id}"], PRIV_TAPE_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// Read a tape backup job configuration.
|
||||
pub fn read_tape_backup_job(
|
||||
id: String,
|
||||
mut rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<TapeBackupJobConfig, Error> {
|
||||
|
||||
let (config, digest) = config::tape_job::config()?;
|
||||
|
||||
let job = config.lookup("backup", &id)?;
|
||||
|
||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||
|
||||
Ok(job)
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all="kebab-case")]
|
||||
/// Deletable property name
|
||||
pub enum DeletableProperty {
|
||||
/// Delete the comment property.
|
||||
Comment,
|
||||
/// Delete the job schedule.
|
||||
Schedule,
|
||||
/// Delete the eject-media property
|
||||
EjectMedia,
|
||||
/// Delete the export-media-set property
|
||||
ExportMediaSet,
|
||||
/// Delete the 'latest-only' property
|
||||
LatestOnly,
|
||||
/// Delete the 'notify-user' property
|
||||
NotifyUser,
|
||||
}
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
id: {
|
||||
schema: JOB_ID_SCHEMA,
|
||||
},
|
||||
store: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
pool: {
|
||||
schema: MEDIA_POOL_NAME_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
drive: {
|
||||
schema: DRIVE_NAME_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"eject-media": {
|
||||
description: "Eject media upon job completion.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
},
|
||||
"export-media-set": {
|
||||
description: "Export media set upon job completion.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
},
|
||||
"latest-only": {
|
||||
description: "Backup latest snapshots only.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
},
|
||||
"notify-user": {
|
||||
optional: true,
|
||||
type: Userid,
|
||||
},
|
||||
comment: {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
schedule: {
|
||||
optional: true,
|
||||
schema: SYNC_SCHEDULE_SCHEMA,
|
||||
},
|
||||
delete: {
|
||||
description: "List of properties to delete.",
|
||||
type: Array,
|
||||
optional: true,
|
||||
items: {
|
||||
type: DeletableProperty,
|
||||
}
|
||||
},
|
||||
digest: {
|
||||
optional: true,
|
||||
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "job", "{id}"], PRIV_TAPE_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Update the tape backup job
|
||||
pub fn update_tape_backup_job(
|
||||
id: String,
|
||||
store: Option<String>,
|
||||
pool: Option<String>,
|
||||
drive: Option<String>,
|
||||
eject_media: Option<bool>,
|
||||
export_media_set: Option<bool>,
|
||||
latest_only: Option<bool>,
|
||||
notify_user: Option<Userid>,
|
||||
comment: Option<String>,
|
||||
schedule: Option<String>,
|
||||
delete: Option<Vec<DeletableProperty>>,
|
||||
digest: Option<String>,
|
||||
) -> Result<(), Error> {
|
||||
let _lock = open_file_locked(TAPE_JOB_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||
|
||||
let (mut config, expected_digest) = config::tape_job::config()?;
|
||||
|
||||
let mut data: TapeBackupJobConfig = config.lookup("backup", &id)?;
|
||||
|
||||
if let Some(ref digest) = digest {
|
||||
let digest = proxmox::tools::hex_to_digest(digest)?;
|
||||
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
|
||||
}
|
||||
|
||||
if let Some(delete) = delete {
|
||||
for delete_prop in delete {
|
||||
match delete_prop {
|
||||
DeletableProperty::EjectMedia => { data.setup.eject_media = None; },
|
||||
DeletableProperty::ExportMediaSet => { data.setup.export_media_set = None; },
|
||||
DeletableProperty::LatestOnly => { data.setup.latest_only = None; },
|
||||
DeletableProperty::NotifyUser => { data.setup.notify_user = None; },
|
||||
DeletableProperty::Schedule => { data.schedule = None; },
|
||||
DeletableProperty::Comment => { data.comment = None; },
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(store) = store { data.setup.store = store; }
|
||||
if let Some(pool) = pool { data.setup.pool = pool; }
|
||||
if let Some(drive) = drive { data.setup.drive = drive; }
|
||||
|
||||
if eject_media.is_some() { data.setup.eject_media = eject_media; };
|
||||
if export_media_set.is_some() { data.setup.export_media_set = export_media_set; }
|
||||
if latest_only.is_some() { data.setup.latest_only = latest_only; }
|
||||
if notify_user.is_some() { data.setup.notify_user = notify_user; }
|
||||
|
||||
if schedule.is_some() { data.schedule = schedule; }
|
||||
|
||||
if let Some(comment) = comment {
|
||||
let comment = comment.trim();
|
||||
if comment.is_empty() {
|
||||
data.comment = None;
|
||||
} else {
|
||||
data.comment = Some(comment.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
config.set_data(&id, "backup", &data)?;
|
||||
|
||||
config::tape_job::save_config(&config)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
id: {
|
||||
schema: JOB_ID_SCHEMA,
|
||||
},
|
||||
digest: {
|
||||
optional: true,
|
||||
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "job", "{id}"], PRIV_TAPE_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Remove a tape backup job configuration
|
||||
pub fn delete_tape_backup_job(
|
||||
id: String,
|
||||
digest: Option<String>,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<(), Error> {
|
||||
let _lock = open_file_locked(TAPE_JOB_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||
|
||||
let (mut config, expected_digest) = config::tape_job::config()?;
|
||||
|
||||
if let Some(ref digest) = digest {
|
||||
let digest = proxmox::tools::hex_to_digest(digest)?;
|
||||
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
|
||||
}
|
||||
|
||||
match config.lookup::<TapeBackupJobConfig>("backup", &id) {
|
||||
Ok(_job) => {
|
||||
config.sections.remove(&id);
|
||||
},
|
||||
Err(_) => { bail!("job '{}' does not exist.", id) },
|
||||
};
|
||||
|
||||
config::tape_job::save_config(&config)?;
|
||||
|
||||
crate::server::jobstate::remove_state_file("tape-backup-job", &id)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
const ITEM_ROUTER: Router = Router::new()
|
||||
.get(&API_METHOD_READ_TAPE_BACKUP_JOB)
|
||||
.put(&API_METHOD_UPDATE_TAPE_BACKUP_JOB)
|
||||
.delete(&API_METHOD_DELETE_TAPE_BACKUP_JOB);
|
||||
|
||||
pub const ROUTER: Router = Router::new()
|
||||
.get(&API_METHOD_LIST_TAPE_BACKUP_JOBS)
|
||||
.post(&API_METHOD_CREATE_TAPE_BACKUP_JOB)
|
||||
.match_all("id", &ITEM_ROUTER);
|
@ -7,12 +7,17 @@ use proxmox::{
|
||||
ApiMethod,
|
||||
Router,
|
||||
RpcEnvironment,
|
||||
Permission,
|
||||
},
|
||||
tools::fs::open_file_locked,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
config::{
|
||||
acl::{
|
||||
PRIV_TAPE_AUDIT,
|
||||
PRIV_TAPE_MODIFY,
|
||||
},
|
||||
tape_encryption_keys::{
|
||||
TAPE_KEYS_LOCKFILE,
|
||||
load_keys,
|
||||
@ -44,6 +49,9 @@ use crate::{
|
||||
type: Array,
|
||||
items: { type: KeyInfo },
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "pool"], PRIV_TAPE_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// List existing keys
|
||||
pub fn list_keys(
|
||||
@ -93,6 +101,9 @@ pub fn list_keys(
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "pool"], PRIV_TAPE_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Change the encryption key's password (and password hint).
|
||||
pub fn change_passphrase(
|
||||
@ -161,6 +172,9 @@ pub fn change_passphrase(
|
||||
returns: {
|
||||
schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "pool"], PRIV_TAPE_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Create a new encryption key
|
||||
pub fn create_key(
|
||||
@ -198,6 +212,9 @@ pub fn create_key(
|
||||
returns: {
|
||||
type: KeyInfo,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "pool"], PRIV_TAPE_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// Get key config (public key part)
|
||||
pub fn read_key(
|
||||
@ -232,6 +249,9 @@ pub fn read_key(
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "pool"], PRIV_TAPE_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Remove a encryption key from the database
|
||||
///
|
||||
|
@ -1,3 +1,4 @@
|
||||
use std::io::{Read, Seek};
|
||||
use std::path::PathBuf;
|
||||
|
||||
use anyhow::Error;
|
||||
@ -6,6 +7,9 @@ use hyper::{Body, Response, StatusCode, header};
|
||||
|
||||
use proxmox::http_bail;
|
||||
|
||||
use crate::api2::types::ArchiveEntry;
|
||||
use crate::backup::{CatalogReader, DirEntryAttribute};
|
||||
|
||||
pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, Error> {
|
||||
let file = match tokio::fs::File::open(path.clone()).await {
|
||||
Ok(file) => file,
|
||||
@ -27,3 +31,30 @@ pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, E
|
||||
.body(body)
|
||||
.unwrap())
|
||||
}
|
||||
|
||||
/// Returns the list of content of the given path
|
||||
pub fn list_dir_content<R: Read + Seek>(
|
||||
reader: &mut CatalogReader<R>,
|
||||
path: &[u8],
|
||||
) -> Result<Vec<ArchiveEntry>, Error> {
|
||||
let dir = reader.lookup_recursive(path)?;
|
||||
let mut res = vec![];
|
||||
let mut path = path.to_vec();
|
||||
if !path.is_empty() && path[0] == b'/' {
|
||||
path.remove(0);
|
||||
}
|
||||
|
||||
for direntry in reader.read_dir(&dir)? {
|
||||
let mut components = path.clone();
|
||||
components.push(b'/');
|
||||
components.extend(&direntry.name);
|
||||
let mut entry = ArchiveEntry::new(&components, &direntry.attr);
|
||||
if let DirEntryAttribute::File { size, mtime } = direntry.attr {
|
||||
entry.size = size.into();
|
||||
entry.mtime = mtime.into();
|
||||
}
|
||||
res.push(entry);
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
|
@ -7,19 +7,61 @@ use hyper::http::request::Parts;
|
||||
use hyper::{Body, Response, Request, StatusCode};
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox::{sortable, identity};
|
||||
use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, Permission};
|
||||
use proxmox::api::schema::*;
|
||||
use proxmox::http_err;
|
||||
use proxmox::{
|
||||
http_err,
|
||||
sortable,
|
||||
identity,
|
||||
list_subdirs_api_method,
|
||||
api::{
|
||||
ApiResponseFuture,
|
||||
ApiHandler,
|
||||
ApiMethod,
|
||||
Router,
|
||||
RpcEnvironment,
|
||||
Permission,
|
||||
router::SubdirMap,
|
||||
schema::{
|
||||
ObjectSchema,
|
||||
BooleanSchema,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
use crate::api2::types::*;
|
||||
use crate::backup::*;
|
||||
use crate::server::{WorkerTask, H2Service};
|
||||
use crate::tools;
|
||||
use crate::config::acl::{PRIV_DATASTORE_READ, PRIV_DATASTORE_BACKUP};
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
use crate::api2::helpers;
|
||||
use crate::tools::fs::lock_dir_noblock_shared;
|
||||
use crate::{
|
||||
api2::{
|
||||
helpers,
|
||||
types::{
|
||||
DATASTORE_SCHEMA,
|
||||
BACKUP_TYPE_SCHEMA,
|
||||
BACKUP_TIME_SCHEMA,
|
||||
BACKUP_ID_SCHEMA,
|
||||
CHUNK_DIGEST_SCHEMA,
|
||||
Authid,
|
||||
},
|
||||
},
|
||||
backup::{
|
||||
DataStore,
|
||||
ArchiveType,
|
||||
BackupDir,
|
||||
IndexFile,
|
||||
archive_type,
|
||||
},
|
||||
server::{
|
||||
WorkerTask,
|
||||
H2Service,
|
||||
},
|
||||
tools::{
|
||||
self,
|
||||
fs::lock_dir_noblock_shared,
|
||||
},
|
||||
config::{
|
||||
acl::{
|
||||
PRIV_DATASTORE_READ,
|
||||
PRIV_DATASTORE_BACKUP,
|
||||
},
|
||||
cached_user_info::CachedUserInfo,
|
||||
},
|
||||
};
|
||||
|
||||
mod environment;
|
||||
use environment::*;
|
||||
@ -171,8 +213,7 @@ fn upgrade_to_backup_reader_protocol(
|
||||
}.boxed()
|
||||
}
|
||||
|
||||
pub const READER_API_ROUTER: Router = Router::new()
|
||||
.subdirs(&[
|
||||
const READER_API_SUBDIRS: SubdirMap = &[
|
||||
(
|
||||
"chunk", &Router::new()
|
||||
.download(&API_METHOD_DOWNLOAD_CHUNK)
|
||||
@ -185,7 +226,11 @@ pub const READER_API_ROUTER: Router = Router::new()
|
||||
"speedtest", &Router::new()
|
||||
.download(&API_METHOD_SPEEDTEST)
|
||||
),
|
||||
]);
|
||||
];
|
||||
|
||||
pub const READER_API_ROUTER: Router = Router::new()
|
||||
.get(&list_subdirs_api_method!(READER_API_SUBDIRS))
|
||||
.subdirs(READER_API_SUBDIRS);
|
||||
|
||||
#[sortable]
|
||||
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
|
||||
|
@ -160,12 +160,11 @@ pub fn datastore_status(
|
||||
|
||||
// we skip the calculation for datastores with not enough data
|
||||
if usage_list.len() >= 7 {
|
||||
entry["estimated-full-date"] = Value::from(0);
|
||||
if let Some((a,b)) = linear_regression(&time_list, &usage_list) {
|
||||
if b != 0.0 {
|
||||
let estimate = (1.0 - a) / b;
|
||||
entry["estimated-full-date"] = Value::from(estimate.floor() as u64);
|
||||
} else {
|
||||
entry["estimated-full-date"] = Value::from(0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,7 +1,7 @@
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox::{
|
||||
@ -10,6 +10,7 @@ use proxmox::{
|
||||
RpcEnvironment,
|
||||
RpcEnvironmentType,
|
||||
Router,
|
||||
Permission,
|
||||
},
|
||||
};
|
||||
|
||||
@ -17,7 +18,25 @@ use crate::{
|
||||
task_log,
|
||||
config::{
|
||||
self,
|
||||
drive::check_drive_exists,
|
||||
cached_user_info::CachedUserInfo,
|
||||
acl::{
|
||||
PRIV_DATASTORE_READ,
|
||||
PRIV_TAPE_AUDIT,
|
||||
PRIV_TAPE_WRITE,
|
||||
},
|
||||
tape_job::{
|
||||
TapeBackupJobConfig,
|
||||
TapeBackupJobSetup,
|
||||
TapeBackupJobStatus,
|
||||
},
|
||||
},
|
||||
server::{
|
||||
lookup_user_email,
|
||||
jobstate::{
|
||||
Job,
|
||||
JobState,
|
||||
compute_schedule_status,
|
||||
},
|
||||
},
|
||||
backup::{
|
||||
DataStore,
|
||||
@ -26,11 +45,10 @@ use crate::{
|
||||
},
|
||||
api2::types::{
|
||||
Authid,
|
||||
DATASTORE_SCHEMA,
|
||||
MEDIA_POOL_NAME_SCHEMA,
|
||||
DRIVE_NAME_SCHEMA,
|
||||
UPID_SCHEMA,
|
||||
JOB_ID_SCHEMA,
|
||||
MediaPoolConfig,
|
||||
Userid,
|
||||
},
|
||||
server::WorkerTask,
|
||||
task::TaskState,
|
||||
@ -40,90 +58,320 @@ use crate::{
|
||||
PoolWriter,
|
||||
MediaPool,
|
||||
SnapshotReader,
|
||||
drive::media_changer,
|
||||
drive::{
|
||||
media_changer,
|
||||
lock_tape_device,
|
||||
set_tape_device_state,
|
||||
},
|
||||
changer::update_changer_online_status,
|
||||
},
|
||||
};
|
||||
|
||||
const TAPE_BACKUP_JOB_ROUTER: Router = Router::new()
|
||||
.post(&API_METHOD_RUN_TAPE_BACKUP_JOB);
|
||||
|
||||
pub const ROUTER: Router = Router::new()
|
||||
.get(&API_METHOD_LIST_TAPE_BACKUP_JOBS)
|
||||
.post(&API_METHOD_BACKUP)
|
||||
.match_all("id", &TAPE_BACKUP_JOB_ROUTER);
|
||||
|
||||
fn check_backup_permission(
|
||||
auth_id: &Authid,
|
||||
store: &str,
|
||||
pool: &str,
|
||||
drive: &str,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
|
||||
let privs = user_info.lookup_privs(auth_id, &["datastore", store]);
|
||||
if (privs & PRIV_DATASTORE_READ) == 0 {
|
||||
bail!("no permissions on /datastore/{}", store);
|
||||
}
|
||||
|
||||
let privs = user_info.lookup_privs(auth_id, &["tape", "drive", drive]);
|
||||
if (privs & PRIV_TAPE_WRITE) == 0 {
|
||||
bail!("no permissions on /tape/drive/{}", drive);
|
||||
}
|
||||
|
||||
let privs = user_info.lookup_privs(auth_id, &["tape", "pool", pool]);
|
||||
if (privs & PRIV_TAPE_WRITE) == 0 {
|
||||
bail!("no permissions on /tape/pool/{}", pool);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[api(
|
||||
returns: {
|
||||
description: "List configured thape backup jobs and their status",
|
||||
type: Array,
|
||||
items: { type: TapeBackupJobStatus },
|
||||
},
|
||||
access: {
|
||||
description: "List configured tape jobs filtered by Tape.Audit privileges",
|
||||
permission: &Permission::Anybody,
|
||||
},
|
||||
)]
|
||||
/// List all tape backup jobs
|
||||
pub fn list_tape_backup_jobs(
|
||||
_param: Value,
|
||||
mut rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Vec<TapeBackupJobStatus>, Error> {
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
|
||||
let (config, digest) = config::tape_job::config()?;
|
||||
|
||||
let job_list_iter = config
|
||||
.convert_to_typed_array("backup")?
|
||||
.into_iter()
|
||||
.filter(|_job: &TapeBackupJobConfig| {
|
||||
// fixme: check access permission
|
||||
true
|
||||
});
|
||||
|
||||
let mut list = Vec::new();
|
||||
|
||||
for job in job_list_iter {
|
||||
let privs = user_info.lookup_privs(&auth_id, &["tape", "job", &job.id]);
|
||||
if (privs & PRIV_TAPE_AUDIT) == 0 {
|
||||
continue;
|
||||
}
|
||||
|
||||
let last_state = JobState::load("tape-backup-job", &job.id)
|
||||
.map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
|
||||
|
||||
let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;
|
||||
|
||||
list.push(TapeBackupJobStatus { config: job, status });
|
||||
}
|
||||
|
||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||
|
||||
Ok(list)
|
||||
}
|
||||
|
||||
pub fn do_tape_backup_job(
|
||||
mut job: Job,
|
||||
setup: TapeBackupJobSetup,
|
||||
auth_id: &Authid,
|
||||
schedule: Option<String>,
|
||||
) -> Result<String, Error> {
|
||||
|
||||
let job_id = format!("{}:{}:{}:{}",
|
||||
setup.store,
|
||||
setup.pool,
|
||||
setup.drive,
|
||||
job.jobname());
|
||||
|
||||
let worker_type = job.jobtype().to_string();
|
||||
|
||||
let datastore = DataStore::lookup_datastore(&setup.store)?;
|
||||
|
||||
let (config, _digest) = config::media_pool::config()?;
|
||||
let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;
|
||||
|
||||
let (drive_config, _digest) = config::drive::config()?;
|
||||
|
||||
// early check/lock before starting worker
|
||||
let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;
|
||||
|
||||
let upid_str = WorkerTask::new_thread(
|
||||
&worker_type,
|
||||
Some(job_id.clone()),
|
||||
auth_id.clone(),
|
||||
false,
|
||||
move |worker| {
|
||||
let _drive_lock = drive_lock; // keep lock guard
|
||||
|
||||
set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
|
||||
job.start(&worker.upid().to_string())?;
|
||||
|
||||
task_log!(worker,"Starting tape backup job '{}'", job_id);
|
||||
if let Some(event_str) = schedule {
|
||||
task_log!(worker,"task triggered by schedule '{}'", event_str);
|
||||
}
|
||||
|
||||
let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
|
||||
let email = lookup_user_email(notify_user);
|
||||
|
||||
let job_result = backup_worker(
|
||||
&worker,
|
||||
datastore,
|
||||
&pool_config,
|
||||
&setup,
|
||||
email.clone(),
|
||||
);
|
||||
|
||||
let status = worker.create_state(&job_result);
|
||||
|
||||
if let Some(email) = email {
|
||||
if let Err(err) = crate::server::send_tape_backup_status(
|
||||
&email,
|
||||
Some(job.jobname()),
|
||||
&setup,
|
||||
&job_result,
|
||||
) {
|
||||
eprintln!("send tape backup notification failed: {}", err);
|
||||
}
|
||||
}
|
||||
|
||||
if let Err(err) = job.finish(status) {
|
||||
eprintln!(
|
||||
"could not finish job state for {}: {}",
|
||||
job.jobtype().to_string(),
|
||||
err
|
||||
);
|
||||
}
|
||||
|
||||
if let Err(err) = set_tape_device_state(&setup.drive, "") {
|
||||
eprintln!(
|
||||
"could not unset drive state for {}: {}",
|
||||
setup.drive,
|
||||
err
|
||||
);
|
||||
}
|
||||
|
||||
job_result
|
||||
}
|
||||
)?;
|
||||
|
||||
Ok(upid_str)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
store: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
id: {
|
||||
schema: JOB_ID_SCHEMA,
|
||||
},
|
||||
pool: {
|
||||
schema: MEDIA_POOL_NAME_SCHEMA,
|
||||
},
|
||||
drive: {
|
||||
schema: DRIVE_NAME_SCHEMA,
|
||||
},
|
||||
"eject-media": {
|
||||
description: "Eject media upon job completion.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
access: {
|
||||
// Note: parameters are from job config, so we need to test inside function body
|
||||
description: "The user needs Tape.Write privilege on /tape/pool/{pool} \
|
||||
and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
|
||||
permission: &Permission::Anybody,
|
||||
},
|
||||
"export-media-set": {
|
||||
description: "Export media set upon job completion.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
)]
|
||||
/// Runs a tape backup job manually.
|
||||
pub fn run_tape_backup_job(
|
||||
id: String,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<String, Error> {
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
|
||||
let (config, _digest) = config::tape_job::config()?;
|
||||
let backup_job: TapeBackupJobConfig = config.lookup("backup", &id)?;
|
||||
|
||||
check_backup_permission(
|
||||
&auth_id,
|
||||
&backup_job.setup.store,
|
||||
&backup_job.setup.pool,
|
||||
&backup_job.setup.drive,
|
||||
)?;
|
||||
|
||||
let job = Job::new("tape-backup-job", &id)?;
|
||||
|
||||
let upid_str = do_tape_backup_job(job, backup_job.setup, &auth_id, None)?;
|
||||
|
||||
Ok(upid_str)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
setup: {
|
||||
type: TapeBackupJobSetup,
|
||||
flatten: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
// Note: parameters are no uri parameter, so we need to test inside function body
|
||||
description: "The user needs Tape.Write privilege on /tape/pool/{pool} \
|
||||
and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
|
||||
permission: &Permission::Anybody,
|
||||
},
|
||||
)]
|
||||
/// Backup datastore to tape media pool
|
||||
pub fn backup(
|
||||
store: String,
|
||||
pool: String,
|
||||
drive: String,
|
||||
eject_media: Option<bool>,
|
||||
export_media_set: Option<bool>,
|
||||
setup: TapeBackupJobSetup,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
check_backup_permission(
|
||||
&auth_id,
|
||||
&setup.store,
|
||||
&setup.pool,
|
||||
&setup.drive,
|
||||
)?;
|
||||
|
||||
let datastore = DataStore::lookup_datastore(&setup.store)?;
|
||||
|
||||
let (config, _digest) = config::media_pool::config()?;
|
||||
let pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;
|
||||
let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;
|
||||
|
||||
let (drive_config, _digest) = config::drive::config()?;
|
||||
// early check before starting worker
|
||||
check_drive_exists(&drive_config, &drive)?;
|
||||
|
||||
// early check/lock before starting worker
|
||||
let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;
|
||||
|
||||
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
|
||||
|
||||
let eject_media = eject_media.unwrap_or(false);
|
||||
let export_media_set = export_media_set.unwrap_or(false);
|
||||
let job_id = format!("{}:{}:{}", setup.store, setup.pool, setup.drive);
|
||||
|
||||
let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
|
||||
let email = lookup_user_email(notify_user);
|
||||
|
||||
let upid_str = WorkerTask::new_thread(
|
||||
"tape-backup",
|
||||
Some(store),
|
||||
Some(job_id),
|
||||
auth_id,
|
||||
to_stdout,
|
||||
move |worker| {
|
||||
backup_worker(&worker, datastore, &drive, &pool_config, eject_media, export_media_set)?;
|
||||
Ok(())
|
||||
let _drive_lock = drive_lock; // keep lock guard
|
||||
set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
|
||||
let job_result = backup_worker(
|
||||
&worker,
|
||||
datastore,
|
||||
&pool_config,
|
||||
&setup,
|
||||
email.clone(),
|
||||
);
|
||||
|
||||
if let Some(email) = email {
|
||||
if let Err(err) = crate::server::send_tape_backup_status(
|
||||
&email,
|
||||
None,
|
||||
&setup,
|
||||
&job_result,
|
||||
) {
|
||||
eprintln!("send tape backup notification failed: {}", err);
|
||||
}
|
||||
}
|
||||
|
||||
// ignore errors
|
||||
let _ = set_tape_device_state(&setup.drive, "");
|
||||
job_result
|
||||
}
|
||||
)?;
|
||||
|
||||
Ok(upid_str.into())
|
||||
}
|
||||
|
||||
pub const ROUTER: Router = Router::new()
|
||||
.post(&API_METHOD_BACKUP);
|
||||
|
||||
|
||||
fn backup_worker(
|
||||
worker: &WorkerTask,
|
||||
datastore: Arc<DataStore>,
|
||||
drive: &str,
|
||||
pool_config: &MediaPoolConfig,
|
||||
eject_media: bool,
|
||||
export_media_set: bool,
|
||||
setup: &TapeBackupJobSetup,
|
||||
email: Option<String>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let status_path = Path::new(TAPE_STATUS_DIR);
|
||||
@ -131,20 +379,36 @@ fn backup_worker(
|
||||
let _lock = MediaPool::lock(status_path, &pool_config.name)?;
|
||||
|
||||
task_log!(worker, "update media online status");
|
||||
let changer_name = update_media_online_status(drive)?;
|
||||
let changer_name = update_media_online_status(&setup.drive)?;
|
||||
|
||||
let pool = MediaPool::with_config(status_path, &pool_config, changer_name)?;
|
||||
|
||||
let mut pool_writer = PoolWriter::new(pool, drive)?;
|
||||
let mut pool_writer = PoolWriter::new(pool, &setup.drive, worker, email)?;
|
||||
|
||||
let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;
|
||||
|
||||
group_list.sort_unstable();
|
||||
|
||||
let latest_only = setup.latest_only.unwrap_or(false);
|
||||
|
||||
if latest_only {
|
||||
task_log!(worker, "latest-only: true (only considering latest snapshots)");
|
||||
}
|
||||
|
||||
for group in group_list {
|
||||
let mut snapshot_list = group.list_backups(&datastore.base_path())?;
|
||||
|
||||
BackupInfo::sort_list(&mut snapshot_list, true); // oldest first
|
||||
|
||||
if latest_only {
|
||||
if let Some(info) = snapshot_list.pop() {
|
||||
if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
|
||||
continue;
|
||||
}
|
||||
task_log!(worker, "backup snapshot {}", info.backup_dir);
|
||||
backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)?;
|
||||
}
|
||||
} else {
|
||||
for info in snapshot_list {
|
||||
if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
|
||||
continue;
|
||||
@ -153,12 +417,13 @@ fn backup_worker(
|
||||
backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pool_writer.commit()?;
|
||||
|
||||
if export_media_set {
|
||||
if setup.export_media_set.unwrap_or(false) {
|
||||
pool_writer.export_media_set(worker)?;
|
||||
} else if eject_media {
|
||||
} else if setup.eject_media.unwrap_or(false) {
|
||||
pool_writer.eject_media(worker)?;
|
||||
}
|
||||
|
||||
|
@ -1,16 +1,26 @@
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::Error;
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox::api::{api, Router, SubdirMap};
|
||||
use proxmox::api::{api, Router, SubdirMap, RpcEnvironment, Permission};
|
||||
use proxmox::list_subdirs_api_method;
|
||||
|
||||
use crate::{
|
||||
config,
|
||||
config::{
|
||||
self,
|
||||
cached_user_info::CachedUserInfo,
|
||||
acl::{
|
||||
PRIV_TAPE_AUDIT,
|
||||
PRIV_TAPE_READ,
|
||||
},
|
||||
},
|
||||
api2::types::{
|
||||
Authid,
|
||||
CHANGER_NAME_SCHEMA,
|
||||
ChangerListEntry,
|
||||
LinuxTapeDrive,
|
||||
MtxEntryKind,
|
||||
MtxStatusEntry,
|
||||
ScsiTapeChanger,
|
||||
@ -25,6 +35,7 @@ use crate::{
|
||||
ScsiMediaChange,
|
||||
mtx_status_to_online_set,
|
||||
},
|
||||
drive::get_tape_device_state,
|
||||
lookup_device_identification,
|
||||
},
|
||||
};
|
||||
@ -36,6 +47,11 @@ use crate::{
|
||||
name: {
|
||||
schema: CHANGER_NAME_SCHEMA,
|
||||
},
|
||||
cache: {
|
||||
description: "Use cached value.",
|
||||
optional: true,
|
||||
default: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
@ -45,16 +61,22 @@ use crate::{
|
||||
type: MtxStatusEntry,
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// Get tape changer status
|
||||
pub async fn get_status(name: String) -> Result<Vec<MtxStatusEntry>, Error> {
|
||||
pub async fn get_status(
|
||||
name: String,
|
||||
cache: bool,
|
||||
) -> Result<Vec<MtxStatusEntry>, Error> {
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
let mut changer_config: ScsiTapeChanger = config.lookup("changer", &name)?;
|
||||
|
||||
let status = tokio::task::spawn_blocking(move || {
|
||||
changer_config.status()
|
||||
changer_config.status(cache)
|
||||
}).await??;
|
||||
|
||||
let state_path = Path::new(TAPE_STATUS_DIR);
|
||||
@ -66,9 +88,26 @@ pub async fn get_status(name: String) -> Result<Vec<MtxStatusEntry>, Error> {
|
||||
|
||||
inventory.update_online_status(&map)?;
|
||||
|
||||
let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
|
||||
let mut drive_map: HashMap<u64, String> = HashMap::new();
|
||||
|
||||
for drive in drive_list {
|
||||
if let Some(changer) = drive.changer {
|
||||
if changer != name {
|
||||
continue;
|
||||
}
|
||||
let num = drive.changer_drivenum.unwrap_or(0);
|
||||
drive_map.insert(num, drive.name.clone());
|
||||
}
|
||||
}
|
||||
|
||||
let mut list = Vec::new();
|
||||
|
||||
for (id, drive_status) in status.drives.iter().enumerate() {
|
||||
let mut state = None;
|
||||
if let Some(drive) = drive_map.get(&(id as u64)) {
|
||||
state = get_tape_device_state(&config, &drive)?;
|
||||
}
|
||||
let entry = MtxStatusEntry {
|
||||
entry_kind: MtxEntryKind::Drive,
|
||||
entry_id: id as u64,
|
||||
@ -78,6 +117,7 @@ pub async fn get_status(name: String) -> Result<Vec<MtxStatusEntry>, Error> {
|
||||
ElementStatus::VolumeTag(tag) => Some(tag.to_string()),
|
||||
},
|
||||
loaded_slot: drive_status.loaded_slot,
|
||||
state,
|
||||
};
|
||||
list.push(entry);
|
||||
}
|
||||
@ -96,6 +136,7 @@ pub async fn get_status(name: String) -> Result<Vec<MtxStatusEntry>, Error> {
|
||||
ElementStatus::VolumeTag(tag) => Some(tag.to_string()),
|
||||
},
|
||||
loaded_slot: None,
|
||||
state: None,
|
||||
};
|
||||
list.push(entry);
|
||||
}
|
||||
@ -119,6 +160,9 @@ pub async fn get_status(name: String) -> Result<Vec<MtxStatusEntry>, Error> {
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_READ, false),
|
||||
},
|
||||
)]
|
||||
/// Transfers media from one slot to another
|
||||
pub async fn transfer(
|
||||
@ -132,7 +176,8 @@ pub async fn transfer(
|
||||
let mut changer_config: ScsiTapeChanger = config.lookup("changer", &name)?;
|
||||
|
||||
tokio::task::spawn_blocking(move || {
|
||||
changer_config.transfer(from, to)
|
||||
changer_config.transfer(from, to)?;
|
||||
Ok(())
|
||||
}).await?
|
||||
}
|
||||
|
||||
@ -147,11 +192,18 @@ pub async fn transfer(
|
||||
type: ChangerListEntry,
|
||||
},
|
||||
},
|
||||
access: {
|
||||
description: "List configured tape changer filtered by Tape.Audit privileges",
|
||||
permission: &Permission::Anybody,
|
||||
},
|
||||
)]
|
||||
/// List changers
|
||||
pub fn list_changers(
|
||||
_param: Value,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Vec<ChangerListEntry>, Error> {
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
@ -162,6 +214,11 @@ pub fn list_changers(
|
||||
let mut list = Vec::new();
|
||||
|
||||
for changer in changer_list {
|
||||
let privs = user_info.lookup_privs(&auth_id, &["tape", "changer", &changer.name]);
|
||||
if (privs & PRIV_TAPE_AUDIT) == 0 {
|
||||
continue;
|
||||
}
|
||||
|
||||
let info = lookup_device_identification(&linux_changers, &changer.path);
|
||||
let entry = ChangerListEntry { config: changer, info };
|
||||
list.push(entry);
|
||||
|
@ -1,7 +1,8 @@
|
||||
use std::panic::UnwindSafe;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox::{
|
||||
@ -12,17 +13,25 @@ use proxmox::{
|
||||
sys::error::SysError,
|
||||
api::{
|
||||
api,
|
||||
section_config::SectionConfigData,
|
||||
RpcEnvironment,
|
||||
RpcEnvironmentType,
|
||||
Permission,
|
||||
Router,
|
||||
SubdirMap,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
task_log,
|
||||
config::{
|
||||
self,
|
||||
drive::check_drive_exists,
|
||||
cached_user_info::CachedUserInfo,
|
||||
acl::{
|
||||
PRIV_TAPE_AUDIT,
|
||||
PRIV_TAPE_READ,
|
||||
PRIV_TAPE_WRITE,
|
||||
},
|
||||
},
|
||||
api2::{
|
||||
types::{
|
||||
@ -62,11 +71,67 @@ use crate::{
|
||||
media_changer,
|
||||
required_media_changer,
|
||||
open_drive,
|
||||
lock_tape_device,
|
||||
set_tape_device_state,
|
||||
get_tape_device_state,
|
||||
tape_alert_flags_critical,
|
||||
},
|
||||
changer::update_changer_online_status,
|
||||
},
|
||||
};
|
||||
|
||||
fn run_drive_worker<F>(
|
||||
rpcenv: &dyn RpcEnvironment,
|
||||
drive: String,
|
||||
worker_type: &str,
|
||||
job_id: Option<String>,
|
||||
f: F,
|
||||
) -> Result<String, Error>
|
||||
where
|
||||
F: Send
|
||||
+ UnwindSafe
|
||||
+ 'static
|
||||
+ FnOnce(Arc<WorkerTask>, SectionConfigData) -> Result<(), Error>,
|
||||
{
|
||||
// early check/lock before starting worker
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
let lock_guard = lock_tape_device(&config, &drive)?;
|
||||
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
|
||||
|
||||
WorkerTask::new_thread(worker_type, job_id, auth_id, to_stdout, move |worker| {
|
||||
let _lock_guard = lock_guard;
|
||||
set_tape_device_state(&drive, &worker.upid().to_string())
|
||||
.map_err(|err| format_err!("could not set tape device state: {}", err))?;
|
||||
|
||||
let result = f(worker, config);
|
||||
set_tape_device_state(&drive, "")
|
||||
.map_err(|err| format_err!("could not unset tape device state: {}", err))?;
|
||||
result
|
||||
})
|
||||
}
|
||||
|
||||
async fn run_drive_blocking_task<F, R>(drive: String, state: String, f: F) -> Result<R, Error>
|
||||
where
|
||||
F: Send + 'static + FnOnce(SectionConfigData) -> Result<R, Error>,
|
||||
R: Send + 'static,
|
||||
{
|
||||
// early check/lock before starting worker
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
let lock_guard = lock_tape_device(&config, &drive)?;
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let _lock_guard = lock_guard;
|
||||
set_tape_device_state(&drive, &state)
|
||||
.map_err(|err| format_err!("could not set tape device state: {}", err))?;
|
||||
let result = f(config);
|
||||
set_tape_device_state(&drive, "")
|
||||
.map_err(|err| format_err!("could not unset tape device state: {}", err))?;
|
||||
result
|
||||
})
|
||||
.await?
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
@ -78,18 +143,37 @@ use crate::{
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_READ, false),
|
||||
},
|
||||
)]
|
||||
/// Load media with specified label
|
||||
///
|
||||
/// Issue a media load request to the associated changer device.
|
||||
pub async fn load_media(drive: String, label_text: String) -> Result<(), Error> {
|
||||
pub fn load_media(
|
||||
drive: String,
|
||||
label_text: String,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
let job_id = format!("{}:{}", drive, label_text);
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let upid_str = run_drive_worker(
|
||||
rpcenv,
|
||||
drive.clone(),
|
||||
"load-media",
|
||||
Some(job_id),
|
||||
move |worker, config| {
|
||||
task_log!(worker, "loading media '{}' into drive '{}'", label_text, drive);
|
||||
let (mut changer, _) = required_media_changer(&config, &drive)?;
|
||||
changer.load_media(&label_text)
|
||||
}).await?
|
||||
changer.load_media(&label_text)?;
|
||||
Ok(())
|
||||
},
|
||||
)?;
|
||||
|
||||
Ok(upid_str.into())
|
||||
}
|
||||
|
||||
#[api(
|
||||
@ -104,18 +188,24 @@ pub async fn load_media(drive: String, label_text: String) -> Result<(), Error>
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_READ, false),
|
||||
},
|
||||
)]
|
||||
/// Load media from the specified slot
|
||||
///
|
||||
/// Issue a media load request to the associated changer device.
|
||||
pub async fn load_slot(drive: String, source_slot: u64) -> Result<(), Error> {
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
tokio::task::spawn_blocking(move || {
|
||||
run_drive_blocking_task(
|
||||
drive.clone(),
|
||||
format!("load from slot {}", source_slot),
|
||||
move |config| {
|
||||
let (mut changer, _) = required_media_changer(&config, &drive)?;
|
||||
changer.load_media_from_slot(source_slot)
|
||||
}).await?
|
||||
changer.load_media_from_slot(source_slot)?;
|
||||
Ok(())
|
||||
},
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
#[api(
|
||||
@ -134,19 +224,28 @@ pub async fn load_slot(drive: String, source_slot: u64) -> Result<(), Error> {
|
||||
type: u64,
|
||||
minimum: 1,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_READ, false),
|
||||
},
|
||||
)]
|
||||
/// Export media with specified label
|
||||
pub async fn export_media(drive: String, label_text: String) -> Result<u64, Error> {
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
tokio::task::spawn_blocking(move || {
|
||||
run_drive_blocking_task(
|
||||
drive.clone(),
|
||||
format!("export media {}", label_text),
|
||||
move |config| {
|
||||
let (mut changer, changer_name) = required_media_changer(&config, &drive)?;
|
||||
match changer.export_media(&label_text)? {
|
||||
Some(slot) => Ok(slot),
|
||||
None => bail!("media '{}' is not online (via changer '{}')", label_text, changer_name),
|
||||
None => bail!(
|
||||
"media '{}' is not online (via changer '{}')",
|
||||
label_text,
|
||||
changer_name
|
||||
),
|
||||
}
|
||||
}).await?
|
||||
}
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
#[api(
|
||||
@ -162,20 +261,34 @@ pub async fn export_media(drive: String, label_text: String) -> Result<u64, Erro
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_READ, false),
|
||||
},
|
||||
)]
|
||||
/// Unload media via changer
|
||||
pub async fn unload(
|
||||
pub fn unload(
|
||||
drive: String,
|
||||
target_slot: Option<u64>,
|
||||
_param: Value,
|
||||
) -> Result<(), Error> {
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
let upid_str = run_drive_worker(
|
||||
rpcenv,
|
||||
drive.clone(),
|
||||
"unload-media",
|
||||
Some(drive.clone()),
|
||||
move |worker, config| {
|
||||
task_log!(worker, "unloading media from drive '{}'", drive);
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let (mut changer, _) = required_media_changer(&config, &drive)?;
|
||||
changer.unload_media(target_slot)
|
||||
}).await?
|
||||
changer.unload_media(target_slot)?;
|
||||
Ok(())
|
||||
},
|
||||
)?;
|
||||
|
||||
Ok(upid_str.into())
|
||||
}
|
||||
|
||||
#[api(
|
||||
@ -190,37 +303,86 @@ pub async fn unload(
|
||||
optional: true,
|
||||
default: true,
|
||||
},
|
||||
"label-text": {
|
||||
schema: MEDIA_LABEL_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_WRITE, false),
|
||||
},
|
||||
)]
|
||||
/// Erase media
|
||||
/// Erase media. Check for label-text if given (cancels if wrong media).
|
||||
pub fn erase_media(
|
||||
drive: String,
|
||||
fast: Option<bool>,
|
||||
label_text: Option<String>,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
check_drive_exists(&config, &drive)?; // early check before starting worker
|
||||
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
|
||||
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
|
||||
|
||||
let upid_str = WorkerTask::new_thread(
|
||||
let upid_str = run_drive_worker(
|
||||
rpcenv,
|
||||
drive.clone(),
|
||||
"erase-media",
|
||||
Some(drive.clone()),
|
||||
auth_id,
|
||||
to_stdout,
|
||||
move |_worker| {
|
||||
let mut drive = open_drive(&config, &drive)?;
|
||||
drive.erase_media(fast.unwrap_or(true))?;
|
||||
Ok(())
|
||||
move |worker, config| {
|
||||
if let Some(ref label) = label_text {
|
||||
task_log!(worker, "try to load media '{}'", label);
|
||||
if let Some((mut changer, _)) = media_changer(&config, &drive)? {
|
||||
changer.load_media(label)?;
|
||||
}
|
||||
}
|
||||
|
||||
let mut handle = open_drive(&config, &drive)?;
|
||||
|
||||
match handle.read_label() {
|
||||
Err(err) => {
|
||||
if let Some(label) = label_text {
|
||||
bail!("expected label '{}', found unrelated data", label);
|
||||
}
|
||||
/* assume drive contains no or unrelated data */
|
||||
task_log!(worker, "unable to read media label: {}", err);
|
||||
task_log!(worker, "erase anyways");
|
||||
handle.erase_media(fast.unwrap_or(true))?;
|
||||
}
|
||||
Ok((None, _)) => {
|
||||
if let Some(label) = label_text {
|
||||
bail!("expected label '{}', found empty tape", label);
|
||||
}
|
||||
task_log!(worker, "found empty media - erase anyways");
|
||||
handle.erase_media(fast.unwrap_or(true))?;
|
||||
}
|
||||
Ok((Some(media_id), _key_config)) => {
|
||||
if let Some(label_text) = label_text {
|
||||
if media_id.label.label_text != label_text {
|
||||
bail!(
|
||||
"expected label '{}', found '{}', aborting",
|
||||
label_text,
|
||||
media_id.label.label_text
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
task_log!(
|
||||
worker,
|
||||
"found media '{}' with uuid '{}'",
|
||||
media_id.label.label_text, media_id.label.uuid,
|
||||
);
|
||||
|
||||
let status_path = Path::new(TAPE_STATUS_DIR);
|
||||
let mut inventory = Inventory::load(status_path)?;
|
||||
|
||||
MediaCatalog::destroy(status_path, &media_id.label.uuid)?;
|
||||
inventory.remove_media(&media_id.label.uuid)?;
|
||||
handle.erase_media(fast.unwrap_or(true))?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
},
|
||||
)?;
|
||||
|
||||
Ok(upid_str.into())
|
||||
@ -237,31 +399,25 @@ pub fn erase_media(
|
||||
returns: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_READ, false),
|
||||
},
|
||||
)]
|
||||
/// Rewind tape
|
||||
pub fn rewind(
|
||||
drive: String,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
check_drive_exists(&config, &drive)?; // early check before starting worker
|
||||
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
|
||||
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
|
||||
|
||||
let upid_str = WorkerTask::new_thread(
|
||||
let upid_str = run_drive_worker(
|
||||
rpcenv,
|
||||
drive.clone(),
|
||||
"rewind-media",
|
||||
Some(drive.clone()),
|
||||
auth_id,
|
||||
to_stdout,
|
||||
move |_worker| {
|
||||
move |_worker, config| {
|
||||
let mut drive = open_drive(&config, &drive)?;
|
||||
drive.rewind()?;
|
||||
Ok(())
|
||||
}
|
||||
},
|
||||
)?;
|
||||
|
||||
Ok(upid_str.into())
|
||||
@ -278,27 +434,21 @@ pub fn rewind(
|
||||
returns: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_READ, false),
|
||||
},
|
||||
)]
|
||||
/// Eject/Unload drive media
|
||||
pub fn eject_media(
|
||||
drive: String,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
check_drive_exists(&config, &drive)?; // early check before starting worker
|
||||
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
|
||||
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
|
||||
|
||||
let upid_str = WorkerTask::new_thread(
|
||||
let upid_str = run_drive_worker(
|
||||
rpcenv,
|
||||
drive.clone(),
|
||||
"eject-media",
|
||||
Some(drive.clone()),
|
||||
auth_id,
|
||||
to_stdout,
|
||||
move |_worker| {
|
||||
move |_worker, config| {
|
||||
if let Some((mut changer, _)) = media_changer(&config, &drive)? {
|
||||
changer.unload_media(None)?;
|
||||
} else {
|
||||
@ -306,7 +456,8 @@ pub fn eject_media(
|
||||
drive.eject_media()?;
|
||||
}
|
||||
Ok(())
|
||||
})?;
|
||||
},
|
||||
)?;
|
||||
|
||||
Ok(upid_str.into())
|
||||
}
|
||||
@ -329,6 +480,9 @@ pub fn eject_media(
|
||||
returns: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_WRITE, false),
|
||||
},
|
||||
)]
|
||||
/// Label media
|
||||
///
|
||||
@ -342,9 +496,6 @@ pub fn label_media(
|
||||
label_text: String,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
|
||||
if let Some(ref pool) = pool {
|
||||
let (pool_config, _digest) = config::media_pool::config()?;
|
||||
|
||||
@ -352,18 +503,12 @@ pub fn label_media(
|
||||
bail!("no such pool ('{}')", pool);
|
||||
}
|
||||
}
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
|
||||
|
||||
let upid_str = WorkerTask::new_thread(
|
||||
let upid_str = run_drive_worker(
|
||||
rpcenv,
|
||||
drive.clone(),
|
||||
"label-media",
|
||||
Some(drive.clone()),
|
||||
auth_id,
|
||||
to_stdout,
|
||||
move |worker| {
|
||||
|
||||
move |worker, config| {
|
||||
let mut drive = open_drive(&config, &drive)?;
|
||||
|
||||
drive.rewind()?;
|
||||
@ -388,7 +533,7 @@ pub fn label_media(
|
||||
};
|
||||
|
||||
write_media_label(worker, &mut drive, label, pool)
|
||||
}
|
||||
},
|
||||
)?;
|
||||
|
||||
Ok(upid_str.into())
|
||||
@ -470,16 +615,19 @@ fn write_media_label(
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_READ, false),
|
||||
},
|
||||
)]
|
||||
/// Try to restore a tape encryption key
|
||||
pub async fn restore_key(
|
||||
drive: String,
|
||||
password: String,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
tokio::task::spawn_blocking(move || {
|
||||
run_drive_blocking_task(
|
||||
drive.clone(),
|
||||
"restore key".to_string(),
|
||||
move |config| {
|
||||
let mut drive = open_drive(&config, &drive)?;
|
||||
|
||||
let (_media_id, key_config) = drive.read_label()?;
|
||||
@ -493,7 +641,9 @@ pub async fn restore_key(
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}).await?
|
||||
}
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
#[api(
|
||||
@ -511,16 +661,19 @@ pub async fn restore_key(
|
||||
returns: {
|
||||
type: MediaIdFlat,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_READ, false),
|
||||
},
|
||||
)]
|
||||
/// Read media label (optionally inventorize media)
|
||||
pub async fn read_label(
|
||||
drive: String,
|
||||
inventorize: Option<bool>,
|
||||
) -> Result<MediaIdFlat, Error> {
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
tokio::task::spawn_blocking(move || {
|
||||
run_drive_blocking_task(
|
||||
drive.clone(),
|
||||
"reading label".to_string(),
|
||||
move |config| {
|
||||
let mut drive = open_drive(&config, &drive)?;
|
||||
|
||||
let (media_id, _key_config) = drive.read_label()?;
|
||||
@ -570,7 +723,9 @@ pub async fn read_label(
|
||||
};
|
||||
|
||||
Ok(media_id)
|
||||
}).await?
|
||||
}
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
#[api(
|
||||
@ -584,38 +739,54 @@ pub async fn read_label(
|
||||
returns: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_READ, false),
|
||||
},
|
||||
)]
|
||||
/// Clean drive
|
||||
pub fn clean_drive(
|
||||
drive: String,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
check_drive_exists(&config, &drive)?; // early check before starting worker
|
||||
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
|
||||
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
|
||||
|
||||
let upid_str = WorkerTask::new_thread(
|
||||
let upid_str = run_drive_worker(
|
||||
rpcenv,
|
||||
drive.clone(),
|
||||
"clean-drive",
|
||||
Some(drive.clone()),
|
||||
auth_id,
|
||||
to_stdout,
|
||||
move |worker| {
|
||||
|
||||
move |worker, config| {
|
||||
let (mut changer, _changer_name) = required_media_changer(&config, &drive)?;
|
||||
|
||||
worker.log("Starting drive clean");
|
||||
|
||||
changer.clean_drive()?;
|
||||
|
||||
if let Ok(drive_config) = config.lookup::<LinuxTapeDrive>("linux", &drive) {
|
||||
// Note: clean_drive unloads the cleaning media, so we cannot use drive_config.open
|
||||
let mut handle = LinuxTapeHandle::new(open_linux_tape_device(&drive_config.path)?);
|
||||
|
||||
// test for critical tape alert flags
|
||||
if let Ok(alert_flags) = handle.tape_alert_flags() {
|
||||
if !alert_flags.is_empty() {
|
||||
worker.log(format!("TapeAlertFlags: {:?}", alert_flags));
|
||||
if tape_alert_flags_critical(alert_flags) {
|
||||
bail!("found critical tape alert flags: {:?}", alert_flags);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// test wearout (max. 50 mounts)
|
||||
if let Ok(volume_stats) = handle.volume_statistics() {
|
||||
worker.log(format!("Volume mounts: {}", volume_stats.volume_mounts));
|
||||
let wearout = volume_stats.volume_mounts * 2; // (*100.0/50.0);
|
||||
worker.log(format!("Cleaning tape wearout: {}%", wearout));
|
||||
}
|
||||
}
|
||||
|
||||
worker.log("Drive cleaned sucessfully");
|
||||
|
||||
Ok(())
|
||||
})?;
|
||||
},
|
||||
)?;
|
||||
|
||||
Ok(upid_str.into())
|
||||
}
|
||||
@ -635,6 +806,9 @@ pub fn clean_drive(
|
||||
type: LabelUuidMap,
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_READ, false),
|
||||
},
|
||||
)]
|
||||
/// List known media labels (Changer Inventory)
|
||||
///
|
||||
@ -646,10 +820,10 @@ pub fn clean_drive(
|
||||
pub async fn inventory(
|
||||
drive: String,
|
||||
) -> Result<Vec<LabelUuidMap>, Error> {
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
tokio::task::spawn_blocking(move || {
|
||||
run_drive_blocking_task(
|
||||
drive.clone(),
|
||||
"inventorize".to_string(),
|
||||
move |config| {
|
||||
let (mut changer, changer_name) = required_media_changer(&config, &drive)?;
|
||||
|
||||
let label_text_list = changer.online_media_label_texts()?;
|
||||
@ -683,7 +857,9 @@ pub async fn inventory(
|
||||
}
|
||||
|
||||
Ok(list)
|
||||
}).await?
|
||||
}
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
#[api(
|
||||
@ -702,6 +878,9 @@ pub async fn inventory(
|
||||
returns: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_READ, false),
|
||||
},
|
||||
)]
|
||||
/// Update inventory
|
||||
///
|
||||
@ -717,22 +896,12 @@ pub fn update_inventory(
|
||||
read_all_labels: Option<bool>,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
check_drive_exists(&config, &drive)?; // early check before starting worker
|
||||
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
|
||||
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
|
||||
|
||||
let upid_str = WorkerTask::new_thread(
|
||||
let upid_str = run_drive_worker(
|
||||
rpcenv,
|
||||
drive.clone(),
|
||||
"inventory-update",
|
||||
Some(drive.clone()),
|
||||
auth_id,
|
||||
to_stdout,
|
||||
move |worker| {
|
||||
|
||||
move |worker, config| {
|
||||
let (mut changer, changer_name) = required_media_changer(&config, &drive)?;
|
||||
|
||||
let label_text_list = changer.online_media_label_texts()?;
|
||||
@ -784,7 +953,7 @@ pub fn update_inventory(
|
||||
changer.unload_media(None)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
},
|
||||
)?;
|
||||
|
||||
Ok(upid_str.into())
|
||||
@ -806,6 +975,9 @@ pub fn update_inventory(
|
||||
returns: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_WRITE, false),
|
||||
},
|
||||
)]
|
||||
/// Label media with barcodes from changer device
|
||||
pub fn barcode_label_media(
|
||||
@ -813,7 +985,6 @@ pub fn barcode_label_media(
|
||||
pool: Option<String>,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
if let Some(ref pool) = pool {
|
||||
let (pool_config, _digest) = config::media_pool::config()?;
|
||||
|
||||
@ -822,18 +993,12 @@ pub fn barcode_label_media(
|
||||
}
|
||||
}
|
||||
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
|
||||
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
|
||||
|
||||
let upid_str = WorkerTask::new_thread(
|
||||
let upid_str = run_drive_worker(
|
||||
rpcenv,
|
||||
drive.clone(),
|
||||
"barcode-label-media",
|
||||
Some(drive.clone()),
|
||||
auth_id,
|
||||
to_stdout,
|
||||
move |worker| {
|
||||
barcode_label_media_worker(worker, drive, pool)
|
||||
}
|
||||
move |worker, config| barcode_label_media_worker(worker, drive, &config, pool),
|
||||
)?;
|
||||
|
||||
Ok(upid_str.into())
|
||||
@ -842,12 +1007,10 @@ pub fn barcode_label_media(
|
||||
fn barcode_label_media_worker(
|
||||
worker: Arc<WorkerTask>,
|
||||
drive: String,
|
||||
drive_config: &SectionConfigData,
|
||||
pool: Option<String>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
let (mut changer, changer_name) = required_media_changer(&config, &drive)?;
|
||||
let (mut changer, changer_name) = required_media_changer(drive_config, &drive)?;
|
||||
|
||||
let label_text_list = changer.online_media_label_texts()?;
|
||||
|
||||
@ -855,7 +1018,7 @@ fn barcode_label_media_worker(
|
||||
|
||||
let mut inventory = Inventory::load(state_path)?;
|
||||
|
||||
update_changer_online_status(&config, &mut inventory, &changer_name, &label_text_list)?;
|
||||
update_changer_online_status(drive_config, &mut inventory, &changer_name, &label_text_list)?;
|
||||
|
||||
if label_text_list.is_empty() {
|
||||
bail!("changer device does not list any media labels");
|
||||
@ -877,7 +1040,7 @@ fn barcode_label_media_worker(
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut drive = open_drive(&config, &drive)?;
|
||||
let mut drive = open_drive(drive_config, &drive)?;
|
||||
drive.rewind()?;
|
||||
|
||||
match drive.read_next_file() {
|
||||
@ -924,16 +1087,23 @@ fn barcode_label_media_worker(
|
||||
type: MamAttribute,
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// Read Cartridge Memory (Medium auxiliary memory attributes)
|
||||
pub fn cartridge_memory(drive: String) -> Result<Vec<MamAttribute>, Error> {
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
pub async fn cartridge_memory(drive: String) -> Result<Vec<MamAttribute>, Error> {
|
||||
run_drive_blocking_task(
|
||||
drive.clone(),
|
||||
"reading cartridge memory".to_string(),
|
||||
move |config| {
|
||||
let drive_config: LinuxTapeDrive = config.lookup("linux", &drive)?;
|
||||
let mut handle = drive_config.open()?;
|
||||
|
||||
handle.cartridge_memory()
|
||||
}
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
#[api(
|
||||
@ -947,16 +1117,23 @@ pub fn cartridge_memory(drive: String) -> Result<Vec<MamAttribute>, Error> {
|
||||
returns: {
|
||||
type: Lp17VolumeStatistics,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// Read Volume Statistics (SCSI log page 17h)
|
||||
pub fn volume_statistics(drive: String) -> Result<Lp17VolumeStatistics, Error> {
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
pub async fn volume_statistics(drive: String) -> Result<Lp17VolumeStatistics, Error> {
|
||||
run_drive_blocking_task(
|
||||
drive.clone(),
|
||||
"reading volume statistics".to_string(),
|
||||
move |config| {
|
||||
let drive_config: LinuxTapeDrive = config.lookup("linux", &drive)?;
|
||||
let mut handle = drive_config.open()?;
|
||||
|
||||
handle.volume_statistics()
|
||||
}
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
#[api(
|
||||
@ -970,12 +1147,16 @@ pub fn volume_statistics(drive: String) -> Result<Lp17VolumeStatistics, Error> {
|
||||
returns: {
|
||||
type: LinuxDriveAndMediaStatus,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// Get drive/media status
|
||||
pub fn status(drive: String) -> Result<LinuxDriveAndMediaStatus, Error> {
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
pub async fn status(drive: String) -> Result<LinuxDriveAndMediaStatus, Error> {
|
||||
run_drive_blocking_task(
|
||||
drive.clone(),
|
||||
"reading drive status".to_string(),
|
||||
move |config| {
|
||||
let drive_config: LinuxTapeDrive = config.lookup("linux", &drive)?;
|
||||
|
||||
// Note: use open_linux_tape_device, because this also works if no medium loaded
|
||||
@ -984,6 +1165,9 @@ pub fn status(drive: String) -> Result<LinuxDriveAndMediaStatus, Error> {
|
||||
let mut handle = LinuxTapeHandle::new(file);
|
||||
|
||||
handle.get_drive_and_media_status()
|
||||
}
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
#[api(
|
||||
@ -1007,6 +1191,9 @@ pub fn status(drive: String) -> Result<LinuxDriveAndMediaStatus, Error> {
|
||||
returns: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_READ, false),
|
||||
},
|
||||
)]
|
||||
/// Scan media and record content
|
||||
pub fn catalog_media(
|
||||
@ -1015,25 +1202,15 @@ pub fn catalog_media(
|
||||
verbose: Option<bool>,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let verbose = verbose.unwrap_or(false);
|
||||
let force = force.unwrap_or(false);
|
||||
|
||||
let (config, _digest) = config::drive::config()?;
|
||||
|
||||
check_drive_exists(&config, &drive)?; // early check before starting worker
|
||||
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
|
||||
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
|
||||
|
||||
let upid_str = WorkerTask::new_thread(
|
||||
let upid_str = run_drive_worker(
|
||||
rpcenv,
|
||||
drive.clone(),
|
||||
"catalog-media",
|
||||
Some(drive.clone()),
|
||||
auth_id,
|
||||
to_stdout,
|
||||
move |worker| {
|
||||
|
||||
move |worker, config| {
|
||||
let mut drive = open_drive(&config, &drive)?;
|
||||
|
||||
drive.rewind()?;
|
||||
@ -1090,8 +1267,7 @@ pub fn catalog_media(
|
||||
restore_media(&worker, &mut drive, &media_id, None, verbose)?;
|
||||
|
||||
Ok(())
|
||||
|
||||
}
|
||||
},
|
||||
)?;
|
||||
|
||||
Ok(upid_str.into())
|
||||
@ -1113,12 +1289,19 @@ pub fn catalog_media(
|
||||
type: DriveListEntry,
|
||||
},
|
||||
},
|
||||
access: {
|
||||
description: "List configured tape drives filtered by Tape.Audit privileges",
|
||||
permission: &Permission::Anybody,
|
||||
},
|
||||
)]
|
||||
/// List drives
|
||||
pub fn list_drives(
|
||||
changer: Option<String>,
|
||||
_param: Value,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Vec<DriveListEntry>, Error> {
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
|
||||
let (config, _) = config::drive::config()?;
|
||||
|
||||
@ -1133,8 +1316,14 @@ pub fn list_drives(
|
||||
continue;
|
||||
}
|
||||
|
||||
let privs = user_info.lookup_privs(&auth_id, &["tape", "drive", &drive.name]);
|
||||
if (privs & PRIV_TAPE_AUDIT) == 0 {
|
||||
continue;
|
||||
}
|
||||
|
||||
let info = lookup_device_identification(&linux_drives, &drive.path);
|
||||
let entry = DriveListEntry { config: drive, info };
|
||||
let state = get_tape_device_state(&config, &drive.name)?;
|
||||
let entry = DriveListEntry { config: drive, info, state };
|
||||
list.push(entry);
|
||||
}
|
||||
|
||||
@ -1187,7 +1376,7 @@ pub const SUBDIRS: SubdirMap = &sorted!([
|
||||
(
|
||||
"load-media",
|
||||
&Router::new()
|
||||
.put(&API_METHOD_LOAD_MEDIA)
|
||||
.post(&API_METHOD_LOAD_MEDIA)
|
||||
),
|
||||
(
|
||||
"load-slot",
|
||||
@ -1209,6 +1398,11 @@ pub const SUBDIRS: SubdirMap = &sorted!([
|
||||
&Router::new()
|
||||
.get(&API_METHOD_READ_LABEL)
|
||||
),
|
||||
(
|
||||
"restore-key",
|
||||
&Router::new()
|
||||
.post(&API_METHOD_RESTORE_KEY)
|
||||
),
|
||||
(
|
||||
"rewind",
|
||||
&Router::new()
|
||||
@ -1222,7 +1416,7 @@ pub const SUBDIRS: SubdirMap = &sorted!([
|
||||
(
|
||||
"unload",
|
||||
&Router::new()
|
||||
.put(&API_METHOD_UNLOAD)
|
||||
.post(&API_METHOD_UNLOAD)
|
||||
),
|
||||
]);
|
||||
|
||||
|
@ -4,7 +4,7 @@ use anyhow::{bail, format_err, Error};
|
||||
use serde::{Serialize, Deserialize};
|
||||
|
||||
use proxmox::{
|
||||
api::{api, Router, SubdirMap},
|
||||
api::{api, Router, SubdirMap, RpcEnvironment, Permission},
|
||||
list_subdirs_api_method,
|
||||
tools::Uuid,
|
||||
};
|
||||
@ -12,18 +12,25 @@ use proxmox::{
|
||||
use crate::{
|
||||
config::{
|
||||
self,
|
||||
cached_user_info::CachedUserInfo,
|
||||
acl::{
|
||||
PRIV_TAPE_AUDIT,
|
||||
},
|
||||
},
|
||||
api2::types::{
|
||||
Authid,
|
||||
BACKUP_ID_SCHEMA,
|
||||
BACKUP_TYPE_SCHEMA,
|
||||
MEDIA_POOL_NAME_SCHEMA,
|
||||
MEDIA_LABEL_SCHEMA,
|
||||
MEDIA_UUID_SCHEMA,
|
||||
MEDIA_SET_UUID_SCHEMA,
|
||||
CHANGER_NAME_SCHEMA,
|
||||
MediaPoolConfig,
|
||||
MediaListEntry,
|
||||
MediaStatus,
|
||||
MediaContentEntry,
|
||||
VAULT_NAME_SCHEMA,
|
||||
},
|
||||
backup::{
|
||||
BackupDir,
|
||||
@ -44,6 +51,16 @@ use crate::{
|
||||
schema: MEDIA_POOL_NAME_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"update-status": {
|
||||
description: "Try to update tape library status (check what tapes are online).",
|
||||
optional: true,
|
||||
default: true,
|
||||
},
|
||||
"update-status-changer": {
|
||||
// only update status for a single changer
|
||||
schema: CHANGER_NAME_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
@ -53,20 +70,33 @@ use crate::{
|
||||
type: MediaListEntry,
|
||||
},
|
||||
},
|
||||
access: {
|
||||
description: "List of registered backup media filtered by Tape.Audit privileges on pool",
|
||||
permission: &Permission::Anybody,
|
||||
},
|
||||
)]
|
||||
/// List pool media
|
||||
pub async fn list_media(pool: Option<String>) -> Result<Vec<MediaListEntry>, Error> {
|
||||
pub async fn list_media(
|
||||
pool: Option<String>,
|
||||
update_status: bool,
|
||||
update_status_changer: Option<String>,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Vec<MediaListEntry>, Error> {
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
|
||||
let (config, _digest) = config::media_pool::config()?;
|
||||
|
||||
let status_path = Path::new(TAPE_STATUS_DIR);
|
||||
|
||||
let catalogs = tokio::task::spawn_blocking(move || {
|
||||
if update_status {
|
||||
// update online media status
|
||||
if let Err(err) = update_online_status(status_path) {
|
||||
if let Err(err) = update_online_status(status_path, update_status_changer.as_deref()) {
|
||||
eprintln!("{}", err);
|
||||
eprintln!("update online media status failed - using old state");
|
||||
}
|
||||
}
|
||||
// test what catalog files we have
|
||||
MediaCatalog::media_with_catalogs(status_path)
|
||||
}).await??;
|
||||
@ -84,13 +114,23 @@ pub async fn list_media(pool: Option<String>) -> Result<Vec<MediaListEntry>, Err
|
||||
}
|
||||
}
|
||||
|
||||
let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", pool_name]);
|
||||
if (privs & PRIV_TAPE_AUDIT) == 0 {
|
||||
continue;
|
||||
}
|
||||
|
||||
let config: MediaPoolConfig = config.lookup("pool", pool_name)?;
|
||||
|
||||
let changer_name = None; // does not matter here
|
||||
let pool = MediaPool::with_config(status_path, &config, changer_name)?;
|
||||
let changer_name = None; // assume standalone drive
|
||||
let mut pool = MediaPool::with_config(status_path, &config, changer_name)?;
|
||||
|
||||
let current_time = proxmox::tools::time::epoch_i64();
|
||||
|
||||
// Call start_write_session, so that we show the same status a
|
||||
// backup job would see.
|
||||
pool.force_media_availability();
|
||||
pool.start_write_session(current_time)?;
|
||||
|
||||
for media in pool.list_media() {
|
||||
let expired = pool.media_is_expired(&media, current_time);
|
||||
|
||||
@ -130,10 +170,12 @@ pub async fn list_media(pool: Option<String>) -> Result<Vec<MediaListEntry>, Err
|
||||
}
|
||||
}
|
||||
|
||||
if pool.is_none() {
|
||||
|
||||
let inventory = Inventory::load(status_path)?;
|
||||
|
||||
let privs = user_info.lookup_privs(&auth_id, &["tape", "pool"]);
|
||||
if (privs & PRIV_TAPE_AUDIT) != 0 {
|
||||
if pool.is_none() {
|
||||
|
||||
for media_id in inventory.list_unassigned_media() {
|
||||
|
||||
let (mut status, location) = inventory.status_and_location(&media_id.label.uuid);
|
||||
@ -158,10 +200,88 @@ pub async fn list_media(pool: Option<String>) -> Result<Vec<MediaListEntry>, Err
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// add media with missing pool configuration
|
||||
// set status to MediaStatus::Unknown
|
||||
for uuid in inventory.media_list() {
|
||||
let media_id = inventory.lookup_media(uuid).unwrap();
|
||||
let media_set_label = match media_id.media_set_label {
|
||||
Some(ref set) => set,
|
||||
None => continue,
|
||||
};
|
||||
|
||||
if config.sections.get(&media_set_label.pool).is_some() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &media_set_label.pool]);
|
||||
if (privs & PRIV_TAPE_AUDIT) == 0 {
|
||||
continue;
|
||||
}
|
||||
|
||||
let (_status, location) = inventory.status_and_location(uuid);
|
||||
|
||||
let media_set_name = inventory.generate_media_set_name(&media_set_label.uuid, None)?;
|
||||
|
||||
list.push(MediaListEntry {
|
||||
uuid: media_id.label.uuid.clone(),
|
||||
label_text: media_id.label.label_text.clone(),
|
||||
ctime: media_id.label.ctime,
|
||||
pool: Some(media_set_label.pool.clone()),
|
||||
location,
|
||||
status: MediaStatus::Unknown,
|
||||
catalog: catalogs.contains(uuid),
|
||||
expired: false,
|
||||
media_set_ctime: Some(media_set_label.ctime),
|
||||
media_set_uuid: Some(media_set_label.uuid.clone()),
|
||||
media_set_name: Some(media_set_name),
|
||||
seq_nr: Some(media_set_label.seq_nr),
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
|
||||
Ok(list)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
"label-text": {
|
||||
schema: MEDIA_LABEL_SCHEMA,
|
||||
},
|
||||
"vault-name": {
|
||||
schema: VAULT_NAME_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Change Tape location to vault (if given), or offline.
|
||||
pub fn move_tape(
|
||||
label_text: String,
|
||||
vault_name: Option<String>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let status_path = Path::new(TAPE_STATUS_DIR);
|
||||
let mut inventory = Inventory::load(status_path)?;
|
||||
|
||||
let uuid = inventory.find_media_by_label_text(&label_text)
|
||||
.ok_or_else(|| format_err!("no such media '{}'", label_text))?
|
||||
.label
|
||||
.uuid
|
||||
.clone();
|
||||
|
||||
if let Some(vault_name) = vault_name {
|
||||
inventory.set_media_location_vault(&uuid, &vault_name)?;
|
||||
} else {
|
||||
inventory.set_media_location_offline(&uuid)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
@ -259,11 +379,18 @@ pub struct MediaContentListFilter {
|
||||
type: MediaContentEntry,
|
||||
},
|
||||
},
|
||||
access: {
|
||||
description: "List content filtered by Tape.Audit privilege on pool",
|
||||
permission: &Permission::Anybody,
|
||||
},
|
||||
)]
|
||||
/// List media content
|
||||
pub fn list_content(
|
||||
filter: MediaContentListFilter,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Vec<MediaContentEntry>, Error> {
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
|
||||
let (config, _digest) = config::media_pool::config()?;
|
||||
|
||||
@ -283,6 +410,11 @@ pub fn list_content(
|
||||
if &set.pool != pool { continue; }
|
||||
}
|
||||
|
||||
let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &set.pool]);
|
||||
if (privs & PRIV_TAPE_AUDIT) == 0 {
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(ref media_uuid) = filter.media {
|
||||
if &media_id.label.uuid != media_uuid { continue; }
|
||||
}
|
||||
@ -291,10 +423,13 @@ pub fn list_content(
|
||||
if &set.uuid != media_set_uuid { continue; }
|
||||
}
|
||||
|
||||
let config: MediaPoolConfig = config.lookup("pool", &set.pool)?;
|
||||
let template = match config.lookup::<MediaPoolConfig>("pool", &set.pool) {
|
||||
Ok(pool_config) => pool_config.template.clone(),
|
||||
_ => None, // simply use default if there is no pool config
|
||||
};
|
||||
|
||||
let media_set_name = inventory
|
||||
.generate_media_set_name(&set.uuid, config.template.clone())
|
||||
.generate_media_set_name(&set.uuid, template)
|
||||
.unwrap_or_else(|_| set.uuid.to_string());
|
||||
|
||||
let catalog = MediaCatalog::open(status_path, &media_id.label.uuid, false, false)?;
|
||||
@ -326,6 +461,76 @@ pub fn list_content(
|
||||
Ok(list)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
uuid: {
|
||||
schema: MEDIA_UUID_SCHEMA,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Get current media status
|
||||
pub fn get_media_status(uuid: Uuid) -> Result<MediaStatus, Error> {
|
||||
|
||||
let status_path = Path::new(TAPE_STATUS_DIR);
|
||||
let inventory = Inventory::load(status_path)?;
|
||||
|
||||
let (status, _location) = inventory.status_and_location(&uuid);
|
||||
|
||||
Ok(status)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
uuid: {
|
||||
schema: MEDIA_UUID_SCHEMA,
|
||||
},
|
||||
status: {
|
||||
type: MediaStatus,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Update media status (None, 'full', 'damaged' or 'retired')
|
||||
///
|
||||
/// It is not allowed to set status to 'writable' or 'unknown' (those
|
||||
/// are internaly managed states).
|
||||
pub fn update_media_status(uuid: Uuid, status: Option<MediaStatus>) -> Result<(), Error> {
|
||||
|
||||
let status_path = Path::new(TAPE_STATUS_DIR);
|
||||
let mut inventory = Inventory::load(status_path)?;
|
||||
|
||||
match status {
|
||||
None => inventory.clear_media_status(&uuid)?,
|
||||
Some(MediaStatus::Retired) => inventory.set_media_status_retired(&uuid)?,
|
||||
Some(MediaStatus::Damaged) => inventory.set_media_status_damaged(&uuid)?,
|
||||
Some(MediaStatus::Full) => inventory.set_media_status_full(&uuid)?,
|
||||
Some(status) => bail!("setting media status '{:?}' is not allowed", status),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
const MEDIA_SUBDIRS: SubdirMap = &[
|
||||
(
|
||||
"status",
|
||||
&Router::new()
|
||||
.get(&API_METHOD_GET_MEDIA_STATUS)
|
||||
.post(&API_METHOD_UPDATE_MEDIA_STATUS)
|
||||
),
|
||||
];
|
||||
|
||||
pub const MEDIA_ROUTER: Router = Router::new()
|
||||
.get(&list_subdirs_api_method!(MEDIA_SUBDIRS))
|
||||
.subdirs(MEDIA_SUBDIRS);
|
||||
|
||||
pub const MEDIA_LIST_ROUTER: Router = Router::new()
|
||||
.get(&API_METHOD_LIST_MEDIA)
|
||||
.match_all("uuid", &MEDIA_ROUTER);
|
||||
|
||||
const SUBDIRS: SubdirMap = &[
|
||||
(
|
||||
"content",
|
||||
@ -337,10 +542,11 @@ const SUBDIRS: SubdirMap = &[
|
||||
&Router::new()
|
||||
.get(&API_METHOD_DESTROY_MEDIA)
|
||||
),
|
||||
( "list", &MEDIA_LIST_ROUTER ),
|
||||
(
|
||||
"list",
|
||||
"move",
|
||||
&Router::new()
|
||||
.get(&API_METHOD_LIST_MEDIA)
|
||||
.post(&API_METHOD_MOVE_TAPE)
|
||||
),
|
||||
];
|
||||
|
||||
|
@ -11,6 +11,7 @@ use proxmox::{
|
||||
RpcEnvironment,
|
||||
RpcEnvironmentType,
|
||||
Router,
|
||||
Permission,
|
||||
section_config::SectionConfigData,
|
||||
},
|
||||
tools::{
|
||||
@ -32,11 +33,15 @@ use crate::{
|
||||
DRIVE_NAME_SCHEMA,
|
||||
UPID_SCHEMA,
|
||||
Authid,
|
||||
MediaPoolConfig,
|
||||
Userid,
|
||||
},
|
||||
config::{
|
||||
self,
|
||||
drive::check_drive_exists,
|
||||
cached_user_info::CachedUserInfo,
|
||||
acl::{
|
||||
PRIV_DATASTORE_BACKUP,
|
||||
PRIV_TAPE_READ,
|
||||
},
|
||||
},
|
||||
backup::{
|
||||
archive_type,
|
||||
@ -51,7 +56,10 @@ use crate::{
|
||||
DynamicIndexReader,
|
||||
FixedIndexReader,
|
||||
},
|
||||
server::WorkerTask,
|
||||
server::{
|
||||
lookup_user_email,
|
||||
WorkerTask,
|
||||
},
|
||||
tape::{
|
||||
TAPE_STATUS_DIR,
|
||||
TapeRead,
|
||||
@ -71,14 +79,15 @@ use crate::{
|
||||
drive::{
|
||||
TapeDriver,
|
||||
request_and_load_media,
|
||||
}
|
||||
lock_tape_device,
|
||||
set_tape_device_state,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
pub const ROUTER: Router = Router::new()
|
||||
.post(&API_METHOD_RESTORE);
|
||||
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
@ -92,23 +101,43 @@ pub const ROUTER: Router = Router::new()
|
||||
description: "Media set UUID.",
|
||||
type: String,
|
||||
},
|
||||
"notify-user": {
|
||||
type: Userid,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
// Note: parameters are no uri parameter, so we need to test inside function body
|
||||
description: "The user needs Tape.Read privilege on /tape/pool/{pool} \
|
||||
and /tape/drive/{drive}, Datastore.Backup privilege on /datastore/{store}.",
|
||||
permission: &Permission::Anybody,
|
||||
},
|
||||
)]
|
||||
/// Restore data from media-set
|
||||
pub fn restore(
|
||||
store: String,
|
||||
drive: String,
|
||||
media_set: String,
|
||||
notify_user: Option<Userid>,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
|
||||
let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
|
||||
if (privs & PRIV_DATASTORE_BACKUP) == 0 {
|
||||
bail!("no permissions on /datastore/{}", store);
|
||||
}
|
||||
|
||||
let privs = user_info.lookup_privs(&auth_id, &["tape", "drive", &drive]);
|
||||
if (privs & PRIV_TAPE_READ) == 0 {
|
||||
bail!("no permissions on /tape/drive/{}", drive);
|
||||
}
|
||||
|
||||
let status_path = Path::new(TAPE_STATUS_DIR);
|
||||
let inventory = Inventory::load(status_path)?;
|
||||
@ -117,13 +146,17 @@ pub fn restore(
|
||||
|
||||
let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;
|
||||
|
||||
// check if pool exists
|
||||
let (config, _digest) = config::media_pool::config()?;
|
||||
let _pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;
|
||||
let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool]);
|
||||
if (privs & PRIV_TAPE_READ) == 0 {
|
||||
bail!("no permissions on /tape/pool/{}", pool);
|
||||
}
|
||||
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
let (drive_config, _digest) = config::drive::config()?;
|
||||
// early check before starting worker
|
||||
check_drive_exists(&drive_config, &drive)?;
|
||||
|
||||
// early check/lock before starting worker
|
||||
let drive_lock = lock_tape_device(&drive_config, &drive)?;
|
||||
|
||||
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
|
||||
|
||||
@ -133,6 +166,9 @@ pub fn restore(
|
||||
auth_id.clone(),
|
||||
to_stdout,
|
||||
move |worker| {
|
||||
let _drive_lock = drive_lock; // keep lock guard
|
||||
|
||||
set_tape_device_state(&drive, &worker.upid().to_string())?;
|
||||
|
||||
let _lock = MediaPool::lock(status_path, &pool)?;
|
||||
|
||||
@ -185,10 +221,21 @@ pub fn restore(
|
||||
&drive,
|
||||
&datastore,
|
||||
&auth_id,
|
||||
¬ify_user,
|
||||
)?;
|
||||
}
|
||||
|
||||
task_log!(worker, "Restore mediaset '{}' done", media_set);
|
||||
|
||||
if let Err(err) = set_tape_device_state(&drive, "") {
|
||||
task_log!(
|
||||
worker,
|
||||
"could not unset drive state for {}: {}",
|
||||
drive,
|
||||
err
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
)?;
|
||||
@ -204,6 +251,7 @@ pub fn request_and_restore_media(
|
||||
drive_name: &str,
|
||||
datastore: &DataStore,
|
||||
authid: &Authid,
|
||||
notify_user: &Option<Userid>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let media_set_uuid = match media_id.media_set_label {
|
||||
@ -211,7 +259,12 @@ pub fn request_and_restore_media(
|
||||
Some(ref set) => &set.uuid,
|
||||
};
|
||||
|
||||
let (mut drive, info) = request_and_load_media(worker, &drive_config, &drive_name, &media_id.label)?;
|
||||
let email = notify_user
|
||||
.as_ref()
|
||||
.and_then(|userid| lookup_user_email(userid))
|
||||
.or_else(|| lookup_user_email(&authid.clone().into()));
|
||||
|
||||
let (mut drive, info) = request_and_load_media(worker, &drive_config, &drive_name, &media_id.label, &email)?;
|
||||
|
||||
match info.media_set_label {
|
||||
None => {
|
||||
|
@ -12,6 +12,8 @@ use crate::{
|
||||
CryptMode,
|
||||
Fingerprint,
|
||||
BACKUP_ID_REGEX,
|
||||
DirEntryAttribute,
|
||||
CatalogEntryType,
|
||||
},
|
||||
server::UPID,
|
||||
config::acl::Role,
|
||||
@ -367,21 +369,25 @@ pub const MEDIA_UUID_SCHEMA: Schema =
|
||||
pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||
"Run sync job at specified schedule.")
|
||||
.format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
|
||||
.type_text("<calendar-event>")
|
||||
.schema();
|
||||
|
||||
pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||
"Run garbage collection job at specified schedule.")
|
||||
.format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
|
||||
.type_text("<calendar-event>")
|
||||
.schema();
|
||||
|
||||
pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||
"Run prune job at specified schedule.")
|
||||
.format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
|
||||
.type_text("<calendar-event>")
|
||||
.schema();
|
||||
|
||||
pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||
"Run verify job at specified schedule.")
|
||||
.format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
|
||||
.type_text("<calendar-event>")
|
||||
.schema();
|
||||
|
||||
pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
|
||||
@ -1299,6 +1305,47 @@ pub struct DatastoreNotify {
|
||||
pub sync: Option<Notify>,
|
||||
}
|
||||
|
||||
/// An entry in a hierarchy of files for restore and listing.
|
||||
#[api()]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct ArchiveEntry {
|
||||
/// Base64-encoded full path to the file, including the filename
|
||||
pub filepath: String,
|
||||
/// Displayable filename text for UIs
|
||||
pub text: String,
|
||||
/// File or directory type of this entry
|
||||
#[serde(rename = "type")]
|
||||
pub entry_type: String,
|
||||
/// Is this entry a leaf node, or does it have children (i.e. a directory)?
|
||||
pub leaf: bool,
|
||||
/// The file size, if entry_type is 'f' (file)
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub size: Option<u64>,
|
||||
/// The file "last modified" time stamp, if entry_type is 'f' (file)
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub mtime: Option<i64>,
|
||||
}
|
||||
|
||||
impl ArchiveEntry {
|
||||
pub fn new(filepath: &[u8], entry_type: &DirEntryAttribute) -> Self {
|
||||
Self {
|
||||
filepath: base64::encode(filepath),
|
||||
text: String::from_utf8_lossy(filepath.split(|x| *x == b'/').last().unwrap())
|
||||
.to_string(),
|
||||
entry_type: CatalogEntryType::from(entry_type).to_string(),
|
||||
leaf: !matches!(entry_type, DirEntryAttribute::Directory { .. }),
|
||||
size: match entry_type {
|
||||
DirEntryAttribute::File { size, .. } => Some(*size),
|
||||
_ => None
|
||||
},
|
||||
mtime: match entry_type {
|
||||
DirEntryAttribute::File { mtime, .. } => Some(*mtime),
|
||||
_ => None
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
|
||||
"Datastore notification setting")
|
||||
.format(&ApiStringFormat::PropertyString(&DatastoreNotify::API_SCHEMA))
|
||||
@ -1360,3 +1407,73 @@ pub struct KeyInfo {
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub hint: Option<String>,
|
||||
}
|
||||
|
||||
#[api]
|
||||
#[derive(Deserialize, Serialize)]
|
||||
/// RSA public key information
|
||||
pub struct RsaPubKeyInfo {
|
||||
/// Path to key (if stored in a file)
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub path: Option<String>,
|
||||
/// RSA exponent
|
||||
pub exponent: String,
|
||||
/// Hex-encoded RSA modulus
|
||||
pub modulus: String,
|
||||
/// Key (modulus) length in bits
|
||||
pub length: usize,
|
||||
}
|
||||
|
||||
impl std::convert::TryFrom<openssl::rsa::Rsa<openssl::pkey::Public>> for RsaPubKeyInfo {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn try_from(value: openssl::rsa::Rsa<openssl::pkey::Public>) -> Result<Self, Self::Error> {
|
||||
let modulus = value.n().to_hex_str()?.to_string();
|
||||
let exponent = value.e().to_dec_str()?.to_string();
|
||||
let length = value.size() as usize * 8;
|
||||
|
||||
Ok(Self {
|
||||
path: None,
|
||||
exponent,
|
||||
modulus,
|
||||
length,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
"next-run": {
|
||||
description: "Estimated time of the next run (UNIX epoch).",
|
||||
optional: true,
|
||||
type: Integer,
|
||||
},
|
||||
"last-run-state": {
|
||||
description: "Result of the last run.",
|
||||
optional: true,
|
||||
type: String,
|
||||
},
|
||||
"last-run-upid": {
|
||||
description: "Task UPID of the last run.",
|
||||
optional: true,
|
||||
type: String,
|
||||
},
|
||||
"last-run-endtime": {
|
||||
description: "Endtime of the last run.",
|
||||
optional: true,
|
||||
type: Integer,
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[serde(rename_all="kebab-case")]
|
||||
#[derive(Serialize,Deserialize,Default)]
|
||||
/// Job Scheduling Status
|
||||
pub struct JobScheduleStatus {
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub next_run: Option<i64>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub last_run_state: Option<String>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub last_run_upid: Option<String>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub last_run_endtime: Option<i64>,
|
||||
}
|
||||
|
@ -40,11 +40,11 @@ pub const SLOT_ARRAY_SCHEMA: Schema = ArraySchema::new(
|
||||
.schema())
|
||||
.schema();
|
||||
|
||||
pub const EXPORT_SLOT_LIST_SCHEMA: Schema = StringSchema::new(r###"\
|
||||
pub const EXPORT_SLOT_LIST_SCHEMA: Schema = StringSchema::new("\
|
||||
A list of slot numbers, comma separated. Those slots are reserved for
|
||||
Import/Export, i.e. any media in those slots are considered to be
|
||||
'offline'.
|
||||
"###)
|
||||
")
|
||||
.format(&ApiStringFormat::PropertyString(&SLOT_ARRAY_SCHEMA))
|
||||
.schema();
|
||||
|
||||
@ -129,4 +129,7 @@ pub struct MtxStatusEntry {
|
||||
/// The slot the drive was loaded from
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub loaded_slot: Option<u64>,
|
||||
/// The current state of the drive
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub state: Option<String>,
|
||||
}
|
||||
|
@ -99,6 +99,9 @@ pub struct DriveListEntry {
|
||||
pub config: LinuxTapeDrive,
|
||||
#[serde(flatten)]
|
||||
pub info: OptionalDeviceIdentification,
|
||||
/// the state of the drive if locked
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub state: Option<String>,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user