Compare commits
244 Commits
SHA1 (author and date columns not shown in this view):

c002d48b0c
15998ed12a
9d8ab62769
3526a76ef3
b9e0fcbdcd
a7188b3a75
b6c06dce9d
4adf47b606
4d0dc29951
1011fb552b
2fd2d29281
9104152a83
02a58862dd
26153589ba
17b3e4451f
a2072cc346
fea23d0323
71e83e1b1f
28570d19a6
1369bcdbba
5e4d81e957
0f4721f305
5547f90ba7
2e1b63fb25
7b2d3a5fe9
0216f56241
80acdd71fa
26af61debc
e7f94010d3
a4e871f52c
bc3072ef7a
f4bb2510b9
2ab12cd0cb
c894909e17
7f394c807b
7afb98a912
3847008e1b
f6ed2eff47
23eed6755a
384a2b4d4f
910177a388
54311a38c6
983edbc54a
10439718e2
ebddccef5f
9cfe0ff350
295bae14b7
53939bb438
329c2cbe66
55334cf45a
a2e30cd51d
4bf2ab1109
1dd1c9eb5c
6dde015f8c
5f3b2330c8
4ba5d3b3dd
e7e3d7360a
fd8b00aed7
2631e57d20
90461b76fb
629103d60c
dc232b8946
6fed819dc2
646fc7f086
ecc5602c88
6a15cce540
f281b8d3a9
4465b76812
61df02cda1
3b0321365b
0dfce17a43
a38dccf0e8
f05085ab22
bc42bb3c6e
94b7f56e65
0417e9af1b
ce5327badc
368f4c5416
318b310638
164ad7b706
a5322f3c50
fa29d7eb49
a21f9852fd
79e2473c63
375b1f6150
109ccd300f
c287b28725
c560cfddca
44f6bb019c
d6d42702d1
3fafd0e2a1
59648eac3d
5b6b5bba68
b13089cdf5
1f03196c0b
edf0940649
801ec1dbf9
34ac5cd889
58421ec112
a5bdc987dc
d32a8652bd
a26ebad5f9
dd9cef56fc
26858dba84
9fe3358ce6
76425d84b3
42355b11a4
511e4f6987
3f0e344bc1
a316178768
dff8ea92aa
88e1f7997c
4c3eabeaf3
4c7be5f59d
6d4fbbc3ea
1a23132262
48c4193f7c
8204d9b095
fad95a334a
973e985d73
e5a13382b2
81c0b90447
ee9fa953de
09acf0a70d
15d1435789
80ea23e1b9
5d6379f8db
566b946f9b
7f7459677d
0892a512bc
b717871d2a
7b11a8098d
8b2c6f5dbc
d26985a600
e29f456efc
a79082a0dd
1336ae8249
0db5712493
c47609fedb
b84e8aaee9
d84e4073af
e8656da70d
59477ad252
2f29f1c765
4d84e869bf
79d841014e
ea62611d8e
f3c867a034
aae5db916e
a417c8a93e
79e58a903e
9f40e09d0a
553e57f914
2200a38671
ba39ab20fb
ff8945fd2f
4876393562
971bc6f94b
cab92acb3c
a1d90719e4
eeff085d9d
d43c407a00
6bc87d3952
04c1c68f31
94b17c804a
94352256b7
b3bed7e41f
a4672dd0b1
17bbcb57d7
843146479a
cf1e117fc7
03eac20b87
11f5d59396
6f63c29306
c0e365fd49
93fb2e0d21
c553407e98
4830de408b
7f78528308
2843ba9017
e244b9d03d
657c47db35
a32bb86df9
654c56e05d
589c4dad9e
0320deb0a9
4c4e5c2b1e
924373d2df
3b60b5098f
4abb3edd9f
932e69a837
ef6d49670b
52ea00e9df
870681013a
c046739461
8b1289f3e4
f1d76ecf6c
074503f288
c6f55139f8
20cc25d749
30316192b3
e93263be1e
2ab2ca9c24
54fcb7f5d8
4abd4dbe38
eac1beef3c
166a48f903
82775c4764
88bc9635aa
1037f2bc2d
f24cbee77d
25b4d52dce
2729d134bd
32b75d36a8
c4430a937d
237314ad0d
caf76ec592
0af8c26b74
825dfe7e0d
30a0809553
6ee3035523
b627ebbf40
ef4bdf6b8b
54722acada
0e2bf3aa1d
365126efa9
03d4c9217d
8498290848
654db565cb
51f83548ed
5847a6bdb5
313e5e2047
7914e62b10
84d3284609
70fab5b46e
e36135031d
5a5ee0326e
776dabfb2e
5c4755ad08
7c1666289d
cded320e92
b31cdec225
591b120d35
e8913fea12
Cargo.toml (13 changed lines)

@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "1.0.10"
+version = "1.1.3"
 authors = [
     "Dietmar Maurer <dietmar@proxmox.com>",
     "Dominik Csapak <d.csapak@proxmox.com>",
@@ -29,7 +29,10 @@ bitflags = "1.2.1"
 bytes = "1.0"
 crc32fast = "1"
 endian_trait = { version = "0.6", features = ["arrays"] }
+env_logger = "0.7"
+flate2 = "1.0"
 anyhow = "1.0"
+thiserror = "1.0"
 futures = "0.3"
 h2 = { version = "0.3", features = [ "stream" ] }
 handlebars = "3.0"
@@ -48,11 +51,11 @@ percent-encoding = "2.1"
 pin-utils = "0.1.0"
 pin-project = "1.0"
 pathpatterns = "0.1.2"
-proxmox = { version = "0.11.0", features = [ "sortable-macro", "api-macro", "websocket" ] }
+proxmox = { version = "0.11.1", features = [ "sortable-macro", "api-macro", "websocket" ] }
 #proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
 proxmox-fuse = "0.1.1"
-pxar = { version = "0.9.0", features = [ "tokio-io" ] }
+pxar = { version = "0.10.1", features = [ "tokio-io" ] }
 #pxar = { path = "../pxar", features = [ "tokio-io" ] }
 regex = "1.2"
 rustyline = "7"
@@ -60,10 +63,10 @@ serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 siphasher = "0.3"
 syslog = "4.0"
-tokio = { version = "1.0", features = [ "fs", "io-util", "macros", "net", "parking_lot", "process", "rt", "rt-multi-thread", "signal", "time" ] }
+tokio = { version = "1.0", features = [ "fs", "io-util", "io-std", "macros", "net", "parking_lot", "process", "rt", "rt-multi-thread", "signal", "time" ] }
 tokio-openssl = "0.6.1"
 tokio-stream = "0.1.0"
-tokio-util = { version = "0.6", features = [ "codec" ] }
+tokio-util = { version = "0.6", features = [ "codec", "io" ] }
 tower-service = "0.3.0"
 udev = ">= 0.3, <0.5"
 url = "2.1"
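
The two tokio-related feature additions above (`io-std` on tokio, `io` on tokio-util) are what make async stdin/stdout handles and AsyncRead-to-Stream adapters available. A minimal, hypothetical sketch of what these features enable — not code from this repository:

    // Requires tokio features "io-std", "io-util", "macros", "rt-multi-thread"
    // and the tokio-util "io" feature, as enabled in the Cargo.toml above.
    use futures::StreamExt;
    use tokio::io::{self, AsyncWriteExt};
    use tokio_util::io::ReaderStream;

    #[tokio::main]
    async fn main() -> io::Result<()> {
        let stdin = io::stdin();                   // "io-std": async stdin handle
        let mut chunks = ReaderStream::new(stdin); // tokio-util "io": AsyncRead -> Stream
        let mut stdout = io::stdout();             // "io-std": async stdout handle
        while let Some(chunk) = chunks.next().await {
            stdout.write_all(&chunk?).await?;      // copy stdin to stdout chunk-wise
        }
        Ok(())
    }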
Makefile (27 changed lines)

@@ -9,6 +9,7 @@ SUBDIRS := etc www docs
 # Binaries usable by users
 USR_BIN := \
     proxmox-backup-client \
+    proxmox-file-restore \
     pxar \
     proxmox-tape \
     pmtx \
@@ -25,6 +26,10 @@ SERVICE_BIN := \
     proxmox-backup-proxy \
     proxmox-daily-update

+# Single file restore daemon
+RESTORE_BIN := \
+    proxmox-restore-daemon
+
 ifeq ($(BUILD_MODE), release)
 CARGO_BUILD_ARGS += --release
 COMPILEDIR := target/release
@@ -39,7 +44,7 @@ endif
 CARGO ?= cargo

 COMPILED_BINS := \
-    $(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN))
+    $(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN) $(RESTORE_BIN))

 export DEB_VERSION DEB_VERSION_UPSTREAM

@@ -47,9 +52,12 @@ SERVER_DEB=${PACKAGE}-server_${DEB_VERSION}_${ARCH}.deb
 SERVER_DBG_DEB=${PACKAGE}-server-dbgsym_${DEB_VERSION}_${ARCH}.deb
 CLIENT_DEB=${PACKAGE}-client_${DEB_VERSION}_${ARCH}.deb
 CLIENT_DBG_DEB=${PACKAGE}-client-dbgsym_${DEB_VERSION}_${ARCH}.deb
+RESTORE_DEB=proxmox-backup-file-restore_${DEB_VERSION}_${ARCH}.deb
+RESTORE_DBG_DEB=proxmox-backup-file-restore-dbgsym_${DEB_VERSION}_${ARCH}.deb
 DOC_DEB=${PACKAGE}-docs_${DEB_VERSION}_all.deb

-DEBS=${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB}
+DEBS=${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB} \
+    ${RESTORE_DEB} ${RESTORE_DBG_DEB}

 DSC = rust-${PACKAGE}_${DEB_VERSION}.dsc

@@ -117,8 +125,8 @@ clean:
     find . -name '*~' -exec rm {} ';'

 .PHONY: dinstall
-dinstall: ${DEBS}
-    dpkg -i ${DEBS}
+dinstall: ${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB}
+    dpkg -i $^

 # make sure we build binaries before docs
 docs: cargo-build
@@ -144,6 +152,9 @@ install: $(COMPILED_BINS)
       install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(SBINDIR)/ ; \
       install -m644 zsh-completions/_$(i) $(DESTDIR)$(ZSH_COMPL_DEST)/ ;)
     install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup
+    install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/file-restore
+    $(foreach i,$(RESTORE_BIN), \
+      install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/file-restore/ ;)
     # install sg-tape-cmd as setuid binary
     install -m4755 -o root -g root $(COMPILEDIR)/sg-tape-cmd $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/sg-tape-cmd
     $(foreach i,$(SERVICE_BIN), \
@@ -152,8 +163,10 @@ install: $(COMPILED_BINS)
     $(MAKE) -C docs install

 .PHONY: upload
-upload: ${SERVER_DEB} ${CLIENT_DEB} ${DOC_DEB}
+upload: ${SERVER_DEB} ${CLIENT_DEB} ${RESTORE_DEB} ${DOC_DEB}
     # check if working directory is clean
     git diff --exit-code --stat && git diff --exit-code --stat --staged
-    tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
-    tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve,pmg" --dist buster
+    tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB} | \
+      ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
+    tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg" --dist buster
+    tar cf - ${RESTORE_DEB} ${RESTORE_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist buster
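
A note on the reworked dinstall target: `$^` is make's automatic variable for the full prerequisite list, so the recipe now installs exactly the four packages named as prerequisites rather than the (now longer) ${DEBS} list. A tiny illustration with hypothetical file names:

    # $^ expands to every prerequisite of the rule, in order.
    bundle.tar: a.deb b.deb
        tar cf $@ $^    # same as: tar cf bundle.tar a.deb b.deb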
debian/changelog (119 changed lines)

@@ -1,3 +1,122 @@
+rust-proxmox-backup (1.1.3-1) unstable; urgency=medium
+
+  * tape restore: improve datastore locking when GC runs at the same time
+
+  * tape restore: always do quick chunk verification
+
+  * tape: improve compatibility with some changers
+
+  * tape: work around missing format command on LTO-4 drives, fall back to
+    slower rewind erase
+
+  * fix #3393: pxar: allow and save the 'security.NTACL' extended attribute
+
+  * file-restore: support encrypted VM backups
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 22 Apr 2021 20:14:58 +0200
+
+rust-proxmox-backup (1.1.2-1) unstable; urgency=medium
+
+  * backup verify: always re-check if we can skip a chunk in the actual verify
+    loop
+
+  * tape: do not try to back up unfinished backups
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 15 Apr 2021 13:26:52 +0200
+
+rust-proxmox-backup (1.1.1-1) unstable; urgency=medium
+
+  * docs: include tape in table of contents
+
+  * docs: tape: improve definition-list format and add screenshots
+
+  * docs: reorder maintenance and network chapters after client-usage/tools
+    chapters
+
+  * ui: tape changer status: add Format button to drive grid
+
+  * backup/verify: improve speed on disks with slow random IO (spinners) by
+    iterating over chunks sorted by inode
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 14 Apr 2021 14:50:29 +0200
+
+rust-proxmox-backup (1.1.0-1) unstable; urgency=medium
+
+  * enable tape backup as technology preview by default
+
+  * tape: read drive status: clear deferred error or media changed events
+
+  * tape: improve end-of-tape (EOT) error handling
+
+  * tape: clean up media catalog on tape reuse
+
+  * zfs: re-use underlying pool-wide IO stats for datasets
+
+  * api daemon: only log errors from accepting new connections, to avoid
+    opening too many file descriptors
+
+  * api/datastore: allow downloading the entire archive as ZIP archive, not
+    only sub-paths
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 13 Apr 2021 14:42:18 +0200
+
+rust-proxmox-backup (1.0.14-1) unstable; urgency=medium
+
+  * server: compress API call responses and static files if the client
+    accepts that
+
+  * compress generated ZIP archives with deflate
+
+  * tape: implement LTO userspace driver
+
+  * docs: mention new user-space tape driver, adapt device path names
+
+  * tape: always clear encryption key after backup (for security reasons)
+
+  * ui: improve changer status view
+
+  * add proxmox-file-restore package, providing a central file-restore binary
+    with preparations for restoring files also from block-level backups,
+    using QEMU for safe encapsulation
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 08 Apr 2021 16:35:11 +0200
+
+rust-proxmox-backup (1.0.13-1) unstable; urgency=medium
+
+  * pxar: improve handling of ACL entries on create and restore
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 02 Apr 2021 15:32:01 +0200
+
+rust-proxmox-backup (1.0.12-1) unstable; urgency=medium
+
+  * tape: write catalogs to tape (speeds up catalog restore)
+
+  * tape: add --scan option for catalog restore
+
+  * tape: improve locking (lock media-sets)
+
+  * tape: ui: enable datastore mappings
+
+  * fix #3359: fix blocking writes in async code during pxar create
+
+  * api2/tape/backup: wait indefinitely for the lock in scheduled backup jobs
+
+  * documentation improvements
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 26 Mar 2021 14:08:47 +0100
+
+rust-proxmox-backup (1.0.11-1) unstable; urgency=medium
+
+  * fix feature flag logic in pxar create
+
+  * tools/zip: add missing start_disk field for the zip64 extension to
+    improve compatibility with some strict archive tools
+
+  * tape: speed up backup by doing read/write in parallel
+
+  * tape: store datastore name in tape archives and media catalog
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 18 Mar 2021 12:36:01 +0100
+
 rust-proxmox-backup (1.0.10-1) unstable; urgency=medium

   * tape: improve MediaPool allocation by sorting tapes by creation time and
debian/control (32 changed lines)

@@ -15,6 +15,8 @@ Build-Depends: debhelper (>= 11),
 librust-crossbeam-channel-0.5+default-dev,
 librust-endian-trait-0.6+arrays-dev,
 librust-endian-trait-0.6+default-dev,
+librust-env-logger-0.7+default-dev,
+librust-flate2-1+default-dev,
 librust-futures-0.3+default-dev,
 librust-h2-0.3+default-dev,
 librust-h2-0.3+stream-dev,
@@ -36,13 +38,13 @@ Build-Depends: debhelper (>= 11),
 librust-percent-encoding-2+default-dev (>= 2.1-~~),
 librust-pin-project-1+default-dev,
 librust-pin-utils-0.1+default-dev,
-librust-proxmox-0.11+api-macro-dev,
-librust-proxmox-0.11+default-dev,
-librust-proxmox-0.11+sortable-macro-dev,
-librust-proxmox-0.11+websocket-dev,
+librust-proxmox-0.11+api-macro-dev (>= 0.11.1-~~),
+librust-proxmox-0.11+default-dev (>= 0.11.1-~~),
+librust-proxmox-0.11+sortable-macro-dev (>= 0.11.1-~~),
+librust-proxmox-0.11+websocket-dev (>= 0.11.1-~~),
 librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
-librust-pxar-0.9+default-dev,
-librust-pxar-0.9+tokio-io-dev,
+librust-pxar-0.10+default-dev (>= 0.10.1-~~),
+librust-pxar-0.10+tokio-io-dev (>= 0.10.1-~~),
 librust-regex-1+default-dev (>= 1.2-~~),
 librust-rustyline-7+default-dev,
 librust-serde-1+default-dev,
@@ -50,8 +52,10 @@ Build-Depends: debhelper (>= 11),
 librust-serde-json-1+default-dev,
 librust-siphasher-0.3+default-dev,
 librust-syslog-4+default-dev,
+librust-thiserror-1+default-dev,
 librust-tokio-1+default-dev,
 librust-tokio-1+fs-dev,
+librust-tokio-1+io-std-dev,
 librust-tokio-1+io-util-dev,
 librust-tokio-1+macros-dev,
 librust-tokio-1+net-dev,
@@ -65,6 +69,7 @@ Build-Depends: debhelper (>= 11),
 librust-tokio-stream-0.1+default-dev,
 librust-tokio-util-0.6+codec-dev,
 librust-tokio-util-0.6+default-dev,
+librust-tokio-util-0.6+io-dev,
 librust-tower-service-0.3+default-dev,
 librust-udev-0.4+default-dev | librust-udev-0.3+default-dev,
 librust-url-2+default-dev (>= 2.1-~~),
@@ -109,14 +114,12 @@ Depends: fonts-font-awesome,
 libsgutils2-2,
 libzstd1 (>= 1.3.8),
 lvm2,
-mt-st,
-mtx,
 openssh-server,
 pbs-i18n,
 postfix | mail-transport-agent,
 proxmox-backup-docs,
 proxmox-mini-journalreader,
-proxmox-widget-toolkit (>= 2.3-6),
+proxmox-widget-toolkit (>= 2.5-1),
 pve-xtermjs (>= 4.7.0-1),
 sg3-utils,
 smartmontools,
@@ -146,3 +149,14 @@ Depends: libjs-extjs,
 Architecture: all
 Description: Proxmox Backup Documentation
  This package contains the Proxmox Backup Documentation files.
+
+Package: proxmox-backup-file-restore
+Architecture: any
+Depends: ${misc:Depends},
+         ${shlibs:Depends},
+Recommends: pve-qemu-kvm (>= 5.0.0-9),
+            proxmox-backup-restore-image,
+Description: Proxmox Backup single file restore tools for pxar and block device backups
+ This package contains the Proxmox Backup single file restore client for
+ restoring individual files and folders from both host/container and VM/block
+ device backups. It includes a block device restore driver using QEMU.
debian/control.in (15 changed lines)

@@ -6,14 +6,12 @@ Depends: fonts-font-awesome,
 libsgutils2-2,
 libzstd1 (>= 1.3.8),
 lvm2,
-mt-st,
-mtx,
 openssh-server,
 pbs-i18n,
 postfix | mail-transport-agent,
 proxmox-backup-docs,
 proxmox-mini-journalreader,
-proxmox-widget-toolkit (>= 2.3-6),
+proxmox-widget-toolkit (>= 2.5-1),
 pve-xtermjs (>= 4.7.0-1),
 sg3-utils,
 smartmontools,
@@ -43,3 +41,14 @@ Depends: libjs-extjs,
 Architecture: all
 Description: Proxmox Backup Documentation
  This package contains the Proxmox Backup Documentation files.
+
+Package: proxmox-backup-file-restore
+Architecture: any
+Depends: ${misc:Depends},
+         ${shlibs:Depends},
+Recommends: pve-qemu-kvm (>= 5.0.0-9),
+            proxmox-backup-restore-image,
+Description: Proxmox Backup single file restore tools for pxar and block device backups
+ This package contains the Proxmox Backup single file restore client for
+ restoring individual files and folders from both host/container and VM/block
+ device backups. It includes a block device restore driver using QEMU.
debian/postinst (10 changed lines)

@@ -48,6 +48,16 @@ case "$1" in
                 /etc/proxmox-backup/remote.cfg || true
             fi
         fi
+        if dpkg --compare-versions "$2" 'le' '1.0.14-1'; then
+            # FIXME: Remove with 2.0
+            if grep -s -q -P -e '^linux:' /etc/proxmox-backup/tape.cfg; then
+                echo "========="
+                echo "= NOTE: You have now unsupported 'linux' tape drives configured."
+                echo "= * Execute 'udevadm control --reload-rules && udevadm trigger' to update /dev"
+                echo "= * Edit '/etc/proxmox-backup/tape.cfg', remove 'linux' entries and re-add over CLI/GUI"
+                echo "========="
+            fi
+        fi
         # FIXME: remove with 2.0
         if [ -d "/var/lib/proxmox-backup/tape" ] &&
             [ "$(stat --printf '%a' '/var/lib/proxmox-backup/tape')" != "750" ]; then
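
The new upgrade guard relies on dpkg's version comparator, which signals its result through the exit status. A quick sketch of the semantics:

    # dpkg --compare-versions exits 0 when the relation holds, non-zero otherwise.
    if dpkg --compare-versions "1.0.13-1" 'le' '1.0.14-1'; then
        echo "old enough: the tape.cfg migration check would run"
    fi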
debian/proxmox-backup-file-restore.bash-completion (new file, 1 line)

@@ -0,0 +1 @@
+debian/proxmox-file-restore.bc proxmox-file-restore
debian/proxmox-backup-file-restore.bc (new file, 8 lines)

@@ -0,0 +1,8 @@
+# proxmox-file-restore bash completion
+
+# see http://tiswww.case.edu/php/chet/bash/FAQ
+# and __ltrim_colon_completions() in /usr/share/bash-completion/bash_completion
+# this modifies global var, but I found no better way
+COMP_WORDBREAKS=${COMP_WORDBREAKS//:}
+
+complete -C 'proxmox-file-restore bashcomplete' proxmox-file-restore
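
For context on the last line: `complete -C '<command>'` makes bash run that command on tab-completion — passing the command name, the word being completed, and the preceding word as arguments — and reads one completion per stdout line. A minimal sketch with a hypothetical generator script:

    # Hypothetical stand-in for 'proxmox-file-restore bashcomplete':
    # $2 holds the word currently being completed.
    cat > /tmp/my-completer <<'EOF'
    #!/bin/sh
    for word in list extract; do
        case "$word" in "$2"*) echo "$word" ;; esac
    done
    EOF
    chmod +x /tmp/my-completer
    complete -C /tmp/my-completer mytool   # "mytool l<TAB>" completes to "list"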
debian/proxmox-backup-file-restore.install (new file, 4 lines)

@@ -0,0 +1,4 @@
+usr/bin/proxmox-file-restore
+usr/share/man/man1/proxmox-file-restore.1
+usr/share/zsh/vendor-completions/_proxmox-file-restore
+usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore/proxmox-restore-daemon
debian/proxmox-backup-file-restore.postinst (new executable file, 64 lines)

@@ -0,0 +1,64 @@
+#!/bin/sh
+
+set -e
+
+update_initramfs() {
+    # regenerate initramfs for single file restore VM
+    INST_PATH="/usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore"
+    CACHE_PATH="/var/cache/proxmox-backup/file-restore-initramfs.img"
+
+    # cleanup first, in case proxmox-file-restore was uninstalled, since we do
+    # not want an unusable image lying around
+    rm -f "$CACHE_PATH"
+
+    if [ ! -f "$INST_PATH/initramfs.img" ]; then
+        echo "proxmox-backup-restore-image is not installed correctly, skipping update" >&2
+        exit 0
+    fi
+
+    echo "Updating file-restore initramfs..."
+
+    # avoid leftover temp file
+    cleanup() {
+        rm -f "$CACHE_PATH.tmp"
+    }
+    trap cleanup EXIT
+
+    mkdir -p "/var/cache/proxmox-backup"
+    cp "$INST_PATH/initramfs.img" "$CACHE_PATH.tmp"
+
+    # cpio uses the passed-in path as offset inside the archive as well, so we
+    # need to be in the same dir as the daemon binary to ensure it's placed in /
+    ( cd "$INST_PATH"; \
+        printf "./proxmox-restore-daemon" \
+        | cpio -o --format=newc -A -F "$CACHE_PATH.tmp" )
+    mv -f "$CACHE_PATH.tmp" "$CACHE_PATH"
+
+    trap - EXIT
+}
+
+case "$1" in
+    configure)
+        # in case restore daemon was updated
+        update_initramfs
+        ;;
+
+    triggered)
+        if [ "$2" = "proxmox-backup-restore-image-update" ]; then
+            # in case base-image was updated
+            update_initramfs
+        else
+            echo "postinst called with unknown trigger name: \`$2'" >&2
+        fi
+        ;;
+
+    abort-upgrade|abort-remove|abort-deconfigure)
+        ;;
+
+    *)
+        echo "postinst called with unknown argument \`$1'" >&2
+        exit 1
+        ;;
+esac
+
+exit 0
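
Since `cpio -A` can only append to a valid uncompressed archive, the cached image remains a plain newc cpio archive, which makes the result easy to verify after installation. A quick check, using the path from the script above:

    # The appended daemon should appear as a root-level ./proxmox-restore-daemon entry.
    cpio -t < /var/cache/proxmox-backup/file-restore-initramfs.img | tail -n 3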
debian/proxmox-backup-file-restore.triggers (new file, 1 line)

@@ -0,0 +1 @@
+interest-noawait proxmox-backup-restore-image-update
debian/proxmox-backup-server.udev (new file, 18 lines)

@@ -0,0 +1,18 @@
+# do not edit this file, it will be overwritten on update
+
+# persistent storage links: /dev/tape/{by-id,by-path}
+
+ACTION=="remove", GOTO="persistent_storage_tape_end"
+ENV{UDEV_DISABLE_PERSISTENT_STORAGE_RULES_FLAG}=="1", GOTO="persistent_storage_tape_end"
+
+# also see: /lib/udev/rules.d/60-persistent-storage-tape.rules
+
+SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", IMPORT{program}="scsi_id --sg-version=3 --export --whitelisted -d $devnode", \
+  SYMLINK+="tape/by-id/scsi-$env{ID_SERIAL}-sg"
+
+# iSCSI devices from the same host have all the same ID_SERIAL,
+# but additionally a property named ID_SCSI_SERIAL.
+SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", ENV{ID_SCSI_SERIAL}=="?*", \
+  SYMLINK+="tape/by-id/scsi-$env{ID_SCSI_SERIAL}-sg"
+
+LABEL="persistent_storage_tape_end"
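
Once these rules are active, each SCSI tape drive should get a stable `-sg` symlink. One way to confirm (the serial number below is hypothetical):

    udevadm control --reload-rules && udevadm trigger
    ls -l /dev/tape/by-id/
    # e.g.: scsi-10WT065325-sg -> ../../sg1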
debian/rules (7 changed lines)

@@ -52,8 +52,11 @@ override_dh_dwz:

 override_dh_strip:
     dh_strip
-    for exe in $$(find debian/proxmox-backup-client/usr \
-        debian/proxmox-backup-server/usr -executable -type f); do \
+    for exe in $$(find \
+        debian/proxmox-backup-client/usr \
+        debian/proxmox-backup-server/usr \
+        debian/proxmox-backup-file-restore \
+        -executable -type f); do \
         debian/scripts/elf-strip-unused-dependencies.sh "$$exe" || true; \
     done
docs/Makefile

@@ -5,6 +5,7 @@ GENERATED_SYNOPSIS := \
     proxmox-backup-client/synopsis.rst \
     proxmox-backup-client/catalog-shell-synopsis.rst \
     proxmox-backup-manager/synopsis.rst \
+    proxmox-file-restore/synopsis.rst \
     pxar/synopsis.rst \
     pmtx/synopsis.rst \
     pmt/synopsis.rst \
@@ -25,7 +26,8 @@ MAN1_PAGES := \
     proxmox-tape.1 \
     proxmox-backup-proxy.1 \
     proxmox-backup-client.1 \
-    proxmox-backup-manager.1
+    proxmox-backup-manager.1 \
+    proxmox-file-restore.1

 MAN5_PAGES := \
     media-pool.cfg.5 \
@@ -179,6 +181,12 @@ proxmox-backup-manager.1: proxmox-backup-manager/man1.rst proxmox-backup-manage
 proxmox-backup-proxy.1: proxmox-backup-proxy/man1.rst proxmox-backup-proxy/description.rst
     rst2man $< >$@

+proxmox-file-restore/synopsis.rst: ${COMPILEDIR}/proxmox-file-restore
+    ${COMPILEDIR}/proxmox-file-restore printdoc > proxmox-file-restore/synopsis.rst
+
+proxmox-file-restore.1: proxmox-file-restore/man1.rst proxmox-file-restore/description.rst proxmox-file-restore/synopsis.rst
+    rst2man $< >$@
+
 .PHONY: onlinehelpinfo
 onlinehelpinfo:
     @echo "Generating OnlineHelpInfo.js..."
@@ -143,7 +143,7 @@ Ext.onReady(function() {
         permhtml += "</div></div>";
     } else {
         //console.log(permission);
-        permhtml += "Unknown systax!";
+        permhtml += "Unknown syntax!";
     }

     return permhtml;
docs/backup-client.rst

@@ -3,9 +3,10 @@ Backup Client Usage

 The command line client is called :command:`proxmox-backup-client`.

-Repository Locations
---------------------
+.. _client_repository:
+
+Backup Repository Locations
+---------------------------

 The client uses the following notation to specify a datastore repository
 on the backup server.
@@ -471,7 +472,7 @@ located in ``/etc``, you could do the following:
 pxar:/ > restore target/ --pattern etc/**/*.conf
 ...

-The above will scan trough all the directories below ``/etc`` and restore all
+The above will scan through all the directories below ``/etc`` and restore all
 files ending in ``.conf``.

 .. todo:: Explain interactive restore in more detail
@@ -691,8 +692,15 @@ Benchmarking
 ------------

 The backup client also comes with a benchmarking tool. This tool measures
-various metrics relating to compression and encryption speeds. You can run a
-benchmark using the ``benchmark`` subcommand of ``proxmox-backup-client``:
+various metrics relating to compression and encryption speeds. If a Proxmox
+Backup repository (remote or local) is specified, the TLS upload speed will get
+measured too.
+
+You can run a benchmark using the ``benchmark`` subcommand of
+``proxmox-backup-client``:
+
+.. note:: The TLS speed test is only included if a :ref:`backup server
+   repository is specified <client_repository>`.

 .. code-block:: console

@@ -723,8 +731,7 @@ benchmark using the ``benchmark`` subcommand of ``proxmox-backup-client``:

 .. note:: The percentages given in the output table correspond to a
-   comparison against a Ryzen 7 2700X. The TLS test connects to the
-   local host, so there is no network involved.
+   comparison against a Ryzen 7 2700X.

 You can also pass the ``--output-format`` parameter to output stats in ``json``,
 rather than the default table format.
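
As a usage illustration for the amended benchmark text (host and datastore below are placeholders), the TLS upload test only runs when a repository is given:

    # local-only tests (compression, hashing, encryption):
    proxmox-backup-client benchmark
    # additionally measures TLS upload speed to the given repository:
    proxmox-backup-client benchmark --repository backup@pbs@pbs.example.com:store1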
docs/command-line-tools.rst

@@ -6,6 +6,11 @@ Command Line Tools

 .. include:: proxmox-backup-client/description.rst

+``proxmox-file-restore``
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. include:: proxmox-file-restore/description.rst
+
 ``proxmox-backup-manager``
 ~~~~~~~~~~~~~~~~~~~~~~~~~~

docs/command-syntax.rst

@@ -26,6 +26,27 @@ Those commands are available when you start an interactive restore shell:
 .. include:: proxmox-backup-manager/synopsis.rst


+``proxmox-tape``
+----------------
+
+.. include:: proxmox-tape/synopsis.rst
+
+``pmt``
+-------
+
+.. include:: pmt/options.rst
+
+....
+
+.. include:: pmt/synopsis.rst
+
+
+``pmtx``
+--------
+
+.. include:: pmtx/synopsis.rst
+
+
 ``pxar``
 --------
docs/conf.py

@@ -49,7 +49,7 @@ PygmentsBridge.latex_formatter = CustomLatexFormatter
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.

-extensions = ["sphinx.ext.graphviz", "sphinx.ext.todo", "proxmox-scanrefs"]
+extensions = ["sphinx.ext.graphviz", 'sphinx.ext.mathjax', "sphinx.ext.todo", "proxmox-scanrefs"]

 todo_link_only = True

@@ -307,6 +307,9 @@ html_show_sourcelink = False
 # Output file base name for HTML help builder.
 htmlhelp_basename = 'ProxmoxBackupdoc'

+# use local mathjax package, symlink comes from debian/proxmox-backup-docs.links
+mathjax_path = "mathjax/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
+
 # -- Options for LaTeX output ---------------------------------------------

 latex_engine = 'xelatex'
@@ -464,6 +467,3 @@ epub_exclude_files = ['search.html']
 # If false, no index is generated.
 #
 # epub_use_index = True
-
-# use local mathjax package, symlink comes from debian/proxmox-backup-docs.links
-mathjax_path = "mathjax/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
docs/config/tape/format.rst

@@ -1,4 +1,4 @@
-Each drive configuration section starts with a header ``linux: <name>``,
+Each LTO drive configuration section starts with a header ``lto: <name>``,
 followed by the drive configuration options.

 Tape changer configurations starts with ``changer: <name>``,
@@ -6,7 +6,7 @@ followed by the changer configuration options.

 ::

-  linux: hh8
+  lto: hh8
       changer sl3
       path /dev/tape/by-id/scsi-10WT065325-nst
docs/configuration-files.rst

@@ -37,8 +37,53 @@ Options

 .. include:: config/datastore/config.rst


+``media-pool.cfg``
+~~~~~~~~~~~~~~~~~~
+
+File Format
+^^^^^^^^^^^
+
+.. include:: config/media-pool/format.rst
+
+
+Options
+^^^^^^^
+
+.. include:: config/media-pool/config.rst
+
+
+``tape.cfg``
+~~~~~~~~~~~~
+
+File Format
+^^^^^^^^^^^
+
+.. include:: config/tape/format.rst
+
+
+Options
+^^^^^^^
+
+.. include:: config/tape/config.rst
+
+
+``tape-job.cfg``
+~~~~~~~~~~~~~~~~
+
+File Format
+^^^^^^^^^^^
+
+.. include:: config/tape-job/format.rst
+
+
+Options
+^^^^^^^
+
+.. include:: config/tape-job/config.rst
+
+
 ``user.cfg``
-~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~

 File Format
 ^^^^^^^^^^^
|
@ -57,6 +57,11 @@ div.sphinxsidebar h3 {
|
|||||||
div.sphinxsidebar h1.logo-name {
|
div.sphinxsidebar h1.logo-name {
|
||||||
display: none;
|
display: none;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
div.document, div.footer {
|
||||||
|
width: min(100%, 1320px);
|
||||||
|
}
|
||||||
|
|
||||||
@media screen and (max-width: 875px) {
|
@media screen and (max-width: 875px) {
|
||||||
div.sphinxsidebar p.logo {
|
div.sphinxsidebar p.logo {
|
||||||
display: initial;
|
display: initial;
|
||||||
@ -65,9 +70,19 @@ div.sphinxsidebar h1.logo-name {
|
|||||||
display: block;
|
display: block;
|
||||||
}
|
}
|
||||||
div.sphinxsidebar span {
|
div.sphinxsidebar span {
|
||||||
color: #AAA;
|
color: #EEE;
|
||||||
}
|
}
|
||||||
ul li.toctree-l1 > a {
|
.sphinxsidebar ul li.toctree-l1 > a, div.sphinxsidebar a {
|
||||||
color: #FFF;
|
color: #FFF;
|
||||||
}
|
}
|
||||||
|
div.sphinxsidebar {
|
||||||
|
background-color: #555;
|
||||||
|
}
|
||||||
|
div.body {
|
||||||
|
min-width: 300px;
|
||||||
|
}
|
||||||
|
div.footer {
|
||||||
|
display: block;
|
||||||
|
margin: 15px auto 0px auto;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
docs/faq.rst

@@ -61,9 +61,7 @@ attacker gains access to the server or any point of the network, they will not
 be able to read the data.

 .. note:: Encryption is not enabled by default. To set up encryption, see the
-   `Encryption
-   <https://pbs.proxmox.com/docs/administration-guide.html#encryption>`_ section
-   of the Proxmox Backup Server Administration Guide.
+   :ref:`backup client encryption section <client_encryption>`.


 Is the backup incremental/deduplicated?
docs/gui.rst (12 changed lines)

@@ -112,6 +112,18 @@ The administration menu item also contains a disk management subsection:
 * **Directory**: Create and view information on *ext4* and *xfs* disks
 * **ZFS**: Create and view information on *ZFS* disks

+Tape Backup
+^^^^^^^^^^^
+
+.. image:: images/screenshots/pbs-gui-tape-changer-overview.png
+  :align: right
+  :alt: Tape Backup: Tape changer overview
+
+The `Tape Backup`_ section contains a top panel, managing tape media sets,
+inventories, drives, changers and the tape backup jobs themselves.
+
+It also contains a subsection per standalone drive and per changer, with a
+status and management view for those devices.
+
 Datastore
 ^^^^^^^^^
|
BIN
docs/images/screenshots/pbs-gui-tape-backup-jobs-add.png
Normal file
After Width: | Height: | Size: 28 KiB |
BIN
docs/images/screenshots/pbs-gui-tape-backup-jobs.png
Normal file
After Width: | Height: | Size: 75 KiB |
BIN
docs/images/screenshots/pbs-gui-tape-changer-overview.png
Normal file
After Width: | Height: | Size: 117 KiB |
BIN
docs/images/screenshots/pbs-gui-tape-changers-add.png
Normal file
After Width: | Height: | Size: 12 KiB |
BIN
docs/images/screenshots/pbs-gui-tape-changers.png
Normal file
After Width: | Height: | Size: 79 KiB |
BIN
docs/images/screenshots/pbs-gui-tape-crypt-keys.png
Normal file
After Width: | Height: | Size: 72 KiB |
BIN
docs/images/screenshots/pbs-gui-tape-drives-add.png
Normal file
After Width: | Height: | Size: 13 KiB |
BIN
docs/images/screenshots/pbs-gui-tape-drives.png
Normal file
After Width: | Height: | Size: 112 KiB |
BIN
docs/images/screenshots/pbs-gui-tape-pools-add.png
Normal file
After Width: | Height: | Size: 18 KiB |
BIN
docs/images/screenshots/pbs-gui-tape-pools.png
Normal file
After Width: | Height: | Size: 70 KiB |
docs/index.rst

@@ -25,14 +25,15 @@ in the section entitled "GNU Free Documentation License".
    terminology.rst
    gui.rst
    storage.rst
-   network-management.rst
    user-management.rst
-   managing-remotes.rst
-   maintenance.rst
    backup-client.rst
    pve-integration.rst
    pxar-tool.rst
+   tape-backup.rst
+   managing-remotes.rst
+   maintenance.rst
    sysadmin.rst
+   network-management.rst
    technical-overview.rst
    faq.rst
docs/installation.rst

@@ -113,9 +113,9 @@ Client Installation
 Install `Proxmox Backup`_ Client on Debian
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-Proxmox ships as a set of Debian packages to be installed on
-top of a standard Debian installation. After configuring the
-:ref:`sysadmin_package_repositories`, you need to run:
+Proxmox ships as a set of Debian packages to be installed on top of a standard
+Debian installation. After configuring the :ref:`package_repositories_client_only_apt`,
+you need to run:

 .. code-block:: console

@@ -123,12 +123,6 @@ top of a standard Debian installation. After configuring the
 # apt-get install proxmox-backup-client


-Installing from source
-~~~~~~~~~~~~~~~~~~~~~~
-
-.. todo:: Add section "Installing from source"
-
-Installing statically linked binary
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. todo:: Add section "Installing statically linked binary"
+.. note:: The client-only repository should be usable by most recent Debian and
+   Ubuntu derivatives.
docs/introduction.rst

@@ -65,10 +65,10 @@ Main Features
 :Compression: The ultra-fast Zstandard_ compression is able to compress
    several gigabytes of data per second.

-:Encryption: Backups can be encrypted on the client-side, using AES-256 in
-   Galois/Counter Mode (GCM_). This authenticated encryption (AE_) mode
-   provides very high performance on modern hardware. In addition to client-side
-   encryption, all data is transferred via a secure TLS connection.
+:Encryption: Backups can be encrypted on the client-side, using AES-256 GCM_.
+   This authenticated encryption (AE_) mode provides very high performance on
+   modern hardware. In addition to client-side encryption, all data is
+   transferred via a secure TLS connection.

 :Web interface: Manage the Proxmox Backup Server with the integrated, web-based
    user interface.
@@ -76,8 +76,16 @@ Main Features
 :Open Source: No secrets. Proxmox Backup Server is free and open-source
    software. The source code is licensed under AGPL, v3.

-:Support: Enterprise support will be available from `Proxmox`_ once the beta
-   phase is over.
+:No Limits: Proxmox Backup Server has no artificial limits for backup storage
+   or backup clients.
+
+:Enterprise Support: Proxmox Server Solutions GmbH offers enterprise support in
+   the form of `Proxmox Backup Server Subscription Plans
+   <https://www.proxmox.com/en/proxmox-backup-server/pricing>`_. Users at every
+   subscription level get access to the Proxmox Backup :ref:`Enterprise
+   Repository <sysadmin_package_repos_enterprise>`. In addition, with a Basic,
+   Standard or Premium subscription, users have access to the :ref:`Proxmox
+   Customer Portal <get_help_enterprise_support>`.

 Reasons for Data Backup?
@@ -117,8 +125,8 @@ Proxmox Backup Server consists of multiple components:
 * A client CLI tool (`proxmox-backup-client`) to access the server easily from
   any `Linux amd64` environment

-Aside from the web interface, everything is written in the Rust programming
-language.
+Aside from the web interface, most parts of Proxmox Backup Server are written in
+the Rust programming language.

 "The Rust programming language helps you write faster, more reliable software.
 High-level ergonomics and low-level control are often at odds in programming
@@ -134,6 +142,17 @@ language.
 Getting Help
 ------------

+.. _get_help_enterprise_support:
+
+Enterprise Support
+~~~~~~~~~~~~~~~~~~
+
+Users with a `Proxmox Backup Server Basic, Standard or Premium Subscription Plan
+<https://www.proxmox.com/en/proxmox-backup-server/pricing>`_ have access to the
+`Proxmox Customer Portal <https://my.proxmox.com>`_. The customer portal
+provides support with guaranteed response times from the Proxmox developers.
+For more information or for volume discounts, please contact office@proxmox.com.
+
 Community Support Forum
 ~~~~~~~~~~~~~~~~~~~~~~~
docs/maintenance.rst

@@ -148,7 +148,7 @@ are checked again. The interface for creating verify jobs can be found under the
 **Verify Jobs** tab of the datastore.

 .. Note:: It is recommended that you reverify all backups at least monthly, even
-   if a previous verification was successful. This is becuase physical drives
+   if a previous verification was successful. This is because physical drives
    are susceptible to damage over time, which can cause an old, working backup
    to become corrupted in a process known as `bit rot/data degradation
    <https://en.wikipedia.org/wiki/Data_degradation>`_. It is good practice to
docs/package-repositories.rst

@@ -29,6 +29,8 @@ update``.
 In addition, you need a package repository from Proxmox to get Proxmox Backup
 updates.

+.. _package_repos_secure_apt:
+
 SecureApt
 ~~~~~~~~~

@@ -69,10 +71,12 @@ Here, the output should be:

 f3f6c5a3a67baf38ad178e5ff1ee270c /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg

+.. _sysadmin_package_repos_enterprise:
+
 `Proxmox Backup`_ Enterprise Repository
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-This will be the default, stable, and recommended repository. It is available for
+This is the stable, recommended repository. It is available for
 all `Proxmox Backup`_ subscription users. It contains the most stable packages,
 and is suitable for production use. The ``pbs-enterprise`` repository is
 enabled by default:
@@ -137,3 +141,40 @@ You can access this repository by adding the following line to
    :caption: sources.list entry for ``pbstest``

    deb http://download.proxmox.com/debian/pbs buster pbstest
+
+.. _package_repositories_client_only:
+
+Proxmox Backup Client-only Repository
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you want to :ref:`use the Proxmox Backup Client <client_creating_backups>`
+on systems using a Linux distribution not based on Proxmox projects, you can
+use the client-only repository.
+
+Currently there is only a client repository for APT-based systems.
+
+.. _package_repositories_client_only_apt:
+
+APT-based Proxmox Backup Client Repository
+++++++++++++++++++++++++++++++++++++++++++
+
+For modern Linux distributions using `apt` as package manager, as all Debian
+and Ubuntu derivatives do, you may be able to use the APT-based repository.
+
+This repository is tested with:
+
+- Debian Buster
+- Ubuntu 20.04 LTS
+
+It may work with older, and should work with more recent, released versions.
+
+In order to configure this repository, you need to first :ref:`set up the
+Proxmox release key <package_repos_secure_apt>`. After that, add the repository
+URL to the APT sources lists. Edit the file
+``/etc/apt/sources.list.d/pbs-client.list`` and add the following snippet:
+
+.. code-block:: sources.list
+   :caption: File: ``/etc/apt/sources.list``
+
+   deb http://download.proxmox.com/debian/pbs-client buster main
|
|||||||
:``json``: JSON (single line).
|
:``json``: JSON (single line).
|
||||||
|
|
||||||
:``json-pretty``: JSON (multiple lines, nicely formatted).
|
:``json-pretty``: JSON (multiple lines, nicely formatted).
|
||||||
|
|
||||||
|
|
||||||
Device driver options can be specified as integer numbers (see
|
|
||||||
``/usr/include/linux/mtio.h``), or using symbolic names:
|
|
||||||
|
|
||||||
:``buffer-writes``: Enable buffered writes
|
|
||||||
|
|
||||||
:``async-writes``: Enable async writes
|
|
||||||
|
|
||||||
:``read-ahead``: Use read-ahead for fixed block size
|
|
||||||
|
|
||||||
:``debugging``: Enable debugging if compiled into the driver
|
|
||||||
|
|
||||||
:``two-fm``: Write two file marks when closing the file
|
|
||||||
|
|
||||||
:``fast-mteom``: Space directly to eod (and lose file number)
|
|
||||||
|
|
||||||
:``auto-lock``: Automatically lock/unlock drive door
|
|
||||||
|
|
||||||
:``def-writes``: Defaults are meant only for writes
|
|
||||||
|
|
||||||
:``can-bsr``: Indicates that the drive can space backwards
|
|
||||||
|
|
||||||
:``no-blklims``: Drive does not support read block limits
|
|
||||||
|
|
||||||
:``can-partitions``: Drive can handle partitioned tapes
|
|
||||||
|
|
||||||
:``scsi2locical``: Seek and tell use SCSI-2 logical block addresses
|
|
||||||
|
|
||||||
:``sysv``: Enable the System V semantics
|
|
||||||
|
|
||||||
:``nowait``: Do not wait for rewind, etc. to complete
|
|
||||||
|
|
||||||
:``sili``: Enables setting the SILI bit in SCSI commands when reading
|
|
||||||
in variable block mode to enhance performance when reading blocks
|
|
||||||
shorter than the byte count
|
|
||||||
|
docs/proxmox-file-restore/description.rst (new file, 3 lines)

@@ -0,0 +1,3 @@
+Command line tool for restoring files and directories from PBS archives. In contrast to
+proxmox-backup-client, this supports both container/host and VM backups.
+
docs/proxmox-file-restore/man1.rst (new file, 28 lines)

@@ -0,0 +1,28 @@
+==========================
+proxmox-file-restore
+==========================
+
+.. include:: ../epilog.rst
+
+-----------------------------------------------------------------------
+Command line tool for restoring files and directories from PBS archives
+-----------------------------------------------------------------------
+
+:Author: |AUTHOR|
+:Version: Version |VERSION|
+:Manual section: 1
+
+
+Synopsis
+==========
+
+.. include:: synopsis.rst
+
+
+Description
+============
+
+.. include:: description.rst
+
+
+.. include:: ../pbs-copyright.rst
@ -3,6 +3,26 @@
`Proxmox VE`_ Integration
-------------------------

A Proxmox Backup Server can be integrated into a Proxmox VE setup by adding the
former as a storage in a Proxmox VE standalone or cluster setup.

See also the `Proxmox VE Storage - Proxmox Backup Server
<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#storage_pbs>`_ section
of the Proxmox VE Administration Guide for Proxmox VE specific documentation.


Using the Proxmox VE Web-Interface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Proxmox VE has native API and web-interface integration of Proxmox Backup
Server since the `Proxmox VE 6.3 release
<https://pve.proxmox.com/wiki/Roadmap#Proxmox_VE_6.3>`_.

A Proxmox Backup Server can be added under ``Datacenter -> Storage``.


Using the Proxmox VE Command-Line
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You need to define a new storage with type 'pbs' on your `Proxmox VE`_
node. The following example uses ``store2`` as storage name, and
assumes the server address is ``localhost``, and you want to connect
as the user shown in the sketch below.
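The storage definition itself is elided from this hunk; a sketch of what it
would resemble (user name and fingerprint are placeholder values):

.. code-block:: console

   # pvesm add pbs store2 --server localhost --datastore store2 \
       --username archiver@pbs --password --fingerprint 09:54:ef:...:fe:04:6f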
@ -41,9 +61,9 @@ After that you should be able to see storage status with:

   Name             Type     Status           Total            Used       Available        %
   store2           pbs      active      3905109820      1336687816      2568422004   34.23%

Having added the Proxmox Backup Server datastore to `Proxmox VE`_, you can
back up VMs and containers in the same way you would for any other storage
device within the environment (see `Proxmox VE Admin Guide: Backup and Restore
<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_vzdump>`_).
@ -1,5 +1,5 @@
Backup Storage
==============

.. _storage_disk_management:
@ -4,12 +4,11 @@ Tape Backup
===========

.. CAUTION:: Tape Backup is a technical preview feature, not meant for
   production use.

.. image:: images/screenshots/pbs-gui-tape-changer-overview.png
  :align: right
  :alt: Tape Backup: Tape changer overview

Proxmox tape backup provides an easy way to store datastore content
onto magnetic tapes. This increases data safety because you get:

@ -59,7 +58,7 @@ In general, LTO tapes offer the following advantages:

- Cold Media
- Movable (storable inside vault)
- Multiple vendors (for both media and drives)
- Built-in AES-GCM encryption engine

Note that `Proxmox Backup Server` already stores compressed data, so using the
tape compression feature has no advantage.
@ -69,12 +68,16 @@ Supported Hardware
------------------

Proxmox Backup Server supports `Linear Tape-Open`_ generation 4 (LTO-4)
or later.

Tape changing is carried out using the SCSI Medium Changer protocol,
so all modern tape libraries should work.

.. Note:: We use a custom user space tape driver written in Rust_. This
   driver directly communicates with the tape drive using the SCSI
   generic interface. This may have negative side effects when used with
   the old Linux kernel tape driver, so you should not use that driver
   with Proxmox tape backup.
Drive Performance
~~~~~~~~~~~~~~~~~

@ -84,7 +87,7 @@ Current LTO-8 tapes provide read/write speeds of up to 360 MB/s. This means
that it still takes a minimum of 9 hours to completely write or
read a single tape (even at maximum speed).

The only way to speed up that data rate is to use more than one
drive. That way, you can run several backup jobs in parallel, or run
restore jobs while the other drives are used for backups.
@ -93,15 +96,16 @@ Also consider that you first need to read data from your datastore
rate. We measured a maximum rate of about 60 MB/s to 100 MB/s in practice, so
it takes 33 hours to read the 12 TB needed to fill up an LTO-8 tape. If you
want to write to your tape at full speed, please make sure that the source
datastore is able to deliver that performance (for example, by using SSDs).
Terminology
-----------

**Tape Labels:**
are used to uniquely identify a tape. You would normally apply a
sticky paper label to the front of the cartridge. We additionally
store the label text magnetically on the tape (first file on tape).

.. _Code 39: https://en.wikipedia.org/wiki/Code_39

@ -109,51 +113,59 @@ Terminology

.. _LTO Barcode Generator: lto-barcode/index.html

**Barcodes:**
are a special form of tape labels, which are electronically
readable. Most LTO tape robots use an 8 character string encoded as
`Code 39`_, as defined in the `LTO Ultrium Cartridge Label
Specification`_.

You can either buy such barcode labels from your cartridge vendor, or print
them yourself; our `LTO Barcode Generator`_ app can be used for the latter.

.. Note:: Physical labels and the associated adhesive should have an
   environmental performance to match or exceed the environmental
   specifications of the cartridge to which it is applied.

**Media Pools:**
A media pool is a logical container for tapes. A backup job targets
one media pool, so a job only uses tapes from that pool. The pool
additionally defines how long a backup job can append data to tapes
(allocation policy) and how long you want to keep the data
(retention policy).

**Media Set:**
A group of continuously written tapes (all from the same media pool).

**Tape drive:**
The device used to read and write data to the tape. There are
standalone drives, but drives are usually shipped within tape
libraries.

**Tape changer:**
A device which can change the tapes inside a tape drive (tape
robot). They are usually part of a tape library.

.. _Tape Library: https://en.wikipedia.org/wiki/Tape_library

`Tape library`_:
A storage device that contains one or more tape drives, a number of
slots to hold tape cartridges, a barcode reader to identify tape
cartridges, and an automated method for loading tapes (a robot).

This is also commonly known as an 'autoloader', 'tape robot' or
'tape jukebox'.

**Inventory:**
The inventory stores the list of known tapes (with additional status
information).

**Catalog:**
A media catalog stores information about the media content.
Tape Quick Start
----------------

1. Configure your tape hardware (drives and changers)
@ -176,8 +188,15 @@ same configuration.
Tape changers
~~~~~~~~~~~~~

.. image:: images/screenshots/pbs-gui-tape-changers.png
  :align: right
  :alt: Tape Backup: Tape Changers

Tape changers (robots) are part of a `Tape Library`_. They contain a number of
slots to hold tape cartridges, a barcode reader to identify tape cartridges,
and an automated method for loading tapes.

You can skip this step if you are using a standalone drive.

Linux is able to auto-detect these devices, and you can get a list
of available devices using:
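The scan command and its output are elided here; the subsequent configuration
step would look roughly like this (a sketch; the device path is taken from the
configuration shown further below):

.. code-block:: console

   # proxmox-tape changer scan
   # proxmox-tape changer create sl3 --path /dev/tape/by-id/scsi-CC2C52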
@ -204,6 +223,13 @@ Where ``sl3`` is an arbitrary name you can choose.
   ``/dev/tape/by-id/``. Names like ``/dev/sg0`` may point to a
   different device after reboot, and that is not what you want.

.. image:: images/screenshots/pbs-gui-tape-changers-add.png
  :align: right
  :alt: Tape Backup: Add a new tape changer

This operation can also be carried out from the GUI, by navigating to the
**Changers** tab of **Tape Backup** and clicking **Add**.

You can display the final configuration with:
.. code-block:: console

@ -217,7 +243,8 @@ You can display the final configuration with:
   │ path │ /dev/tape/by-id/scsi-CC2C52 │
   └──────┴─────────────────────────────┘

Or simply list all configured changer devices (as seen in the **Changers** tab
of the GUI):

.. code-block:: console

@ -228,7 +255,7 @@ Or simply list all configured changer devices:
   │ sl3  │ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
   └──────┴─────────────────────────────┴─────────┴──────────────┴────────────┘

The Vendor, Model and Serial number are auto-detected, but only shown
if the device is online.

To test your setup, please query the status of the changer device with:
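The status query itself is elided from this hunk; a sketch:

.. code-block:: console

   # proxmox-tape changer status sl3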
@ -261,12 +288,12 @@ It's worth noting that some of the smaller tape libraries don't have
such slots. While they have something called a "Mail Slot", that slot
is just a way to grab the tape from the gripper. They are unable
to hold media while the robot does other things. They also do not
expose that "Mail Slot" over the SCSI interface, so you won't see them in
the status output.

As a workaround, you can mark some of the normal slots as export
slots. The software treats those slots like real ``import-export``
slots, and the media inside those slots are considered to be 'offline'
(not available for backup), as sketched below:
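A sketch of the slot assignment (the ``--export-slots`` option name is assumed;
the slot numbers are examples):

.. code-block:: console

   # proxmox-tape changer update sl3 --export-slots 15,16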
@ -302,6 +329,10 @@ the status output:
Tape drives
~~~~~~~~~~~

.. image:: images/screenshots/pbs-gui-tape-drives.png
  :align: right
  :alt: Tape Backup: Drive list

Linux is able to auto-detect tape drives, and you can get a list
of available tape drives using:
@ -311,18 +342,23 @@ of available tape drives using:
   ┌────────────────────────────────┬────────┬─────────────┬────────┐
   │ path                           │ vendor │ model       │ serial │
   ╞════════════════════════════════╪════════╪═════════════╪════════╡
   │ /dev/tape/by-id/scsi-12345-sg  │ IBM    │ ULT3580-TD4 │ 12345  │
   └────────────────────────────────┴────────┴─────────────┴────────┘

.. image:: images/screenshots/pbs-gui-tape-drives-add.png
  :align: right
  :alt: Tape Backup: Add a tape drive

In order to use that drive with Proxmox, you need to create a
configuration entry. This can be done through **Tape Backup -> Drives** in the
GUI or by using the command below:

.. code-block:: console

   # proxmox-tape drive create mydrive --path /dev/tape/by-id/scsi-12345-sg

.. Note:: Please use the persistent device path names from inside
   ``/dev/tape/by-id/``. Names like ``/dev/sg0`` may point to a
   different device after reboot, and that is not what you want.

If you have a tape library, you also need to set the associated
changer device, as sketched below:
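A sketch of associating the drive with its changer (option names assumed from
the schema fields shown in the code hunks further below):

.. code-block:: console

   # proxmox-tape drive update mydrive --changer sl3 --changer-drivenum 0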
@ -346,7 +382,7 @@ You can display the final configuration with:
   ╞═════════╪════════════════════════════════╡
   │ name    │ mydrive                        │
   ├─────────┼────────────────────────────────┤
   │ path    │ /dev/tape/by-id/scsi-12345-sg  │
   ├─────────┼────────────────────────────────┤
   │ changer │ sl3                            │
   └─────────┴────────────────────────────────┘
@ -362,10 +398,10 @@ To list all configured drives use:
   ┌──────────┬────────────────────────────────┬─────────┬────────┬─────────────┬────────┐
   │ name     │ path                           │ changer │ vendor │ model       │ serial │
   ╞══════════╪════════════════════════════════╪═════════╪════════╪═════════════╪════════╡
   │ mydrive  │ /dev/tape/by-id/scsi-12345-sg  │ sl3     │ IBM    │ ULT3580-TD4 │ 12345  │
   └──────────┴────────────────────────────────┴─────────┴────────┴─────────────┴────────┘

The Vendor, Model and Serial number are auto-detected and only shown
if the device is online.

For testing, you can simply query the drive status with:
@ -373,13 +409,35 @@ For testing, you can simply query the drive status with:
.. code-block:: console

   # proxmox-tape status --drive mydrive
   ┌────────────────┬──────────────────────────┐
   │ Name           │ Value                    │
   ╞════════════════╪══════════════════════════╡
   │ blocksize      │ 0                        │
   ├────────────────┼──────────────────────────┤
   │ density        │ LTO4                     │
   ├────────────────┼──────────────────────────┤
   │ compression    │ 1                        │
   ├────────────────┼──────────────────────────┤
   │ buffer-mode    │ 1                        │
   ├────────────────┼──────────────────────────┤
   │ alert-flags    │ (empty)                  │
   ├────────────────┼──────────────────────────┤
   │ file-number    │ 0                        │
   ├────────────────┼──────────────────────────┤
   │ block-number   │ 0                        │
   ├────────────────┼──────────────────────────┤
   │ manufactured   │ Fri Dec 13 01:00:00 2019 │
   ├────────────────┼──────────────────────────┤
   │ bytes-written  │ 501.80 GiB               │
   ├────────────────┼──────────────────────────┤
   │ bytes-read     │ 4.00 MiB                 │
   ├────────────────┼──────────────────────────┤
   │ medium-passes  │ 20                       │
   ├────────────────┼──────────────────────────┤
   │ medium-wearout │ 0.12%                    │
   ├────────────────┼──────────────────────────┤
   │ volume-mounts  │ 2                        │
   └────────────────┴──────────────────────────┘

.. NOTE:: Blocksize should always be 0 (variable block size
   mode). This is the default anyway.
@ -390,8 +448,12 @@ For testing, you can simply query the drive status with:
Media Pools
~~~~~~~~~~~

.. image:: images/screenshots/pbs-gui-tape-pools.png
  :align: right
  :alt: Tape Backup: Media Pools

A media pool is a logical container for tapes. A backup job targets
a single media pool, so a job only uses tapes from that pool.

.. topic:: Media Set
@ -411,7 +473,7 @@ one media pool, so a job only uses tapes from that pool.
The pool additionally defines how long backup jobs can append data
to a media set. The following settings are possible:

- Try to use the current media set (``continue``).

  This setting produces one large media set. While this is very
  space efficient (deduplication, no unused space), it can lead to

@ -433,7 +495,7 @@ one media pool, so a job only uses tapes from that pool.
  .. NOTE:: Retention period starts with the existence of a newer
     media set.

- Always create a new media set (``always``).

  With this setting, each backup job creates a new media set. This
  is less space efficient, because the media from the last set
@ -510,8 +572,12 @@ one media pool, so a job only uses tapes from that pool.
  if the sources are from different namespaces with conflicting names
  (for example, if the sources are from different Proxmox VE clusters).

.. image:: images/screenshots/pbs-gui-tape-pools-add.png
  :align: right
  :alt: Tape Backup: Add a media pool

To create a new media pool, add one from **Tape Backup -> Media Pools** in the
GUI, or enter the following command:

.. code-block:: console
@ -520,7 +586,7 @@ The following command creates a new media pool:

   # proxmox-tape pool create daily --drive mydrive

Additional options can be set later, using the update command, as sketched
below:
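A sketch of such an update (the option names and values are illustrative):

.. code-block:: console

   # proxmox-tape pool update daily --retention 7days --allocation daily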
@ -543,6 +609,10 @@ To list all configured pools use:
Tape Backup Jobs
~~~~~~~~~~~~~~~~

.. image:: images/screenshots/pbs-gui-tape-backup-jobs.png
  :align: right
  :alt: Tape Backup: Tape Backup Jobs

To automate tape backup, you can configure tape backup jobs which
write datastore content to a media pool, based on a specific time schedule.
The required settings are:
@ -618,6 +688,14 @@ To remove a job, please use:

   # proxmox-tape backup-job remove job2

.. image:: images/screenshots/pbs-gui-tape-backup-jobs-add.png
  :align: right
  :alt: Tape Backup: Add a backup job

This same functionality also exists in the GUI, under the **Backup Jobs** tab
of **Tape Backup**, where *Local Datastore* relates to the datastore you want
to back up and *Media Pool* is the pool to back up to.
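For reference, creating such a job on the command line might look like this (a
sketch; the datastore, pool, and schedule values are examples):

.. code-block:: console

   # proxmox-tape backup-job create job2 --store vmstore1 \
       --pool daily --drive mydrive --schedule daily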
Administration
--------------

@ -633,7 +711,7 @@ variable:

You can then omit the ``--drive`` parameter from the command. If the
drive has an associated changer device, you may also omit the changer
parameter from commands that need a changer device, for example, as sketched
below:
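A sketch (assuming the environment variable is named ``PROXMOX_TAPE_DRIVE``;
its name is elided from this hunk):

.. code-block:: console

   # export PROXMOX_TAPE_DRIVE=mydrive
   # proxmox-tape changer status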
@ -707,7 +785,7 @@ can then label all unlabeled tapes with a single command:
Run Tape Backups
~~~~~~~~~~~~~~~~

To manually run a backup job, click *Run Now* in the GUI or use the command
sketched below:
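A sketch of the manual run (using the job name configured above):

.. code-block:: console

   # proxmox-tape backup-job run job2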
@ -772,7 +850,14 @@ Restore Catalog
Encryption Key Management
~~~~~~~~~~~~~~~~~~~~~~~~~

.. image:: images/screenshots/pbs-gui-tape-crypt-keys.png
  :align: right
  :alt: Tape Backup: Encryption Keys

Proxmox Backup Server also provides an interface for handling encryption keys
on the backup server. Encryption keys can be managed from the **Tape Backup ->
Encryption Keys** section of the GUI or through the ``proxmox-tape key``
command line tool. To create a new encryption key from the command line:
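A sketch of the key creation (the hint text is an example):

.. code-block:: console

   # proxmox-tape key create --hint "tape pw 2020"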
@ -883,78 +968,3 @@ This command does the following:

- run drive cleaning operation

- unload the cleaning tape (to slot 3)


Configuration Files
-------------------

``media-pool.cfg``
~~~~~~~~~~~~~~~~~~

File Format
^^^^^^^^^^^

.. include:: config/media-pool/format.rst


Options
^^^^^^^

.. include:: config/media-pool/config.rst


``tape.cfg``
~~~~~~~~~~~~

File Format
^^^^^^^^^^^

.. include:: config/tape/format.rst


Options
^^^^^^^

.. include:: config/tape/config.rst


``tape-job.cfg``
~~~~~~~~~~~~~~~~

File Format
^^^^^^^^^^^

.. include:: config/tape-job/format.rst


Options
^^^^^^^

.. include:: config/tape-job/config.rst


Command Syntax
--------------

``proxmox-tape``
----------------

.. include:: proxmox-tape/synopsis.rst


``pmt``
-------

.. include:: pmt/options.rst

....

.. include:: pmt/synopsis.rst


``pmtx``
--------

.. include:: pmtx/synopsis.rst
@ -100,7 +100,7 @@ can be encrypted, and they are handled in a slightly different manner than
normal chunks.

The hashes of encrypted chunks are calculated not with the actual (encrypted)
chunk content, but with the plain-text content concatenated with the encryption
key. This way, two chunks of the same data encrypted with different keys
generate two different checksums and no collisions occur for multiple
encryption keys.
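To illustrate the principle on the shell (this is not the server's actual
chunk format or key handling; the file names are made up):

.. code-block:: console

   # cat chunk.plain key-A.bin | sha256sum   # digest indexing the chunk under key A
   # cat chunk.plain key-B.bin | sha256sum   # same data, other key: different digest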
@ -138,7 +138,7 @@ will see that the probability of a collision in that scenario is:

For context, in a lottery game of guessing 6 out of 45, the chance to correctly
guess all 6 numbers is only :math:`1.2277 * 10^{-7}`; that means the chance of
a collision is about the same as winning 13 such lotto games *in a row*.
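Spelling that comparison out:

.. math::

   \left(1.2277 \cdot 10^{-7}\right)^{13} \approx 1.4 \cdot 10^{-90}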
In conclusion, it is extremely unlikely that such a collision would occur by
accident in a normal datastore.
@ -12,7 +12,7 @@ pub mod version;
pub mod ping;
pub mod pull;
pub mod tape;
pub mod helpers;

use proxmox::api::router::SubdirMap;
use proxmox::api::Router;
@ -477,6 +477,17 @@ pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error>

    user::save_config(&config)?;

    let authenticator = crate::auth::lookup_authenticator(userid.realm())?;
    match authenticator.remove_password(userid.name()) {
        Ok(()) => {},
        Err(err) => {
            eprintln!(
                "error removing password after deleting user {:?}: {}",
                userid, err
            );
        }
    }

    match crate::config::tfa::read().and_then(|mut cfg| {
        let _: bool = cfg.remove_user(&userid);
        crate::config::tfa::write(&cfg)
@ -1385,7 +1385,7 @@ pub fn pxar_file_download(

    let mut split = components.splitn(2, |c| *c == b'/');
    let pxar_name = std::str::from_utf8(split.next().unwrap())?;
    let file_path = split.next().unwrap_or(b"/");
    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
    for file in files {
        if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
@ -27,7 +27,7 @@ use crate::{
        SLOT_ARRAY_SCHEMA,
        EXPORT_SLOT_LIST_SCHEMA,
        ScsiTapeChanger,
        LtoTapeDrive,
    },
    tape::{
        linux_tape_changer_list,
@ -303,7 +303,7 @@ pub fn delete_changer(name: String, _param: Value) -> Result<(), Error> {
        None => bail!("Delete changer '{}' failed - no such entry", name),
    }

    let drive_list: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;
    for drive in drive_list {
        if let Some(changer) = drive.changer {
            if changer == name {
@ -19,12 +19,12 @@ use crate::{
        DRIVE_NAME_SCHEMA,
        CHANGER_NAME_SCHEMA,
        CHANGER_DRIVENUM_SCHEMA,
        LTO_DRIVE_PATH_SCHEMA,
        LtoTapeDrive,
        ScsiTapeChanger,
    },
    tape::{
        lto_tape_device_list,
        check_drive_path,
    },
};
@ -37,7 +37,7 @@ use crate::{
            schema: DRIVE_NAME_SCHEMA,
        },
        path: {
            schema: LTO_DRIVE_PATH_SCHEMA,
        },
        changer: {
            schema: CHANGER_NAME_SCHEMA,
@ -60,13 +60,13 @@ pub fn create_drive(param: Value) -> Result<(), Error> {

    let (mut config, _digest) = config::drive::config()?;

    let item: LtoTapeDrive = serde_json::from_value(param)?;

    let lto_drives = lto_tape_device_list();

    check_drive_path(&lto_drives, &item.path)?;

    let existing: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;

    for drive in existing {
        if drive.name == item.name {
@ -77,7 +77,7 @@ pub fn create_drive(param: Value) -> Result<(), Error> {
        }
    }

    config.set_data(&item.name, "lto", &item)?;

    config::drive::save_config(&config)?;

@ -93,7 +93,7 @@ pub fn create_drive(param: Value) -> Result<(), Error> {
        },
    },
    returns: {
        type: LtoTapeDrive,
    },
    access: {
        permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_AUDIT, false),
@ -104,11 +104,11 @@ pub fn get_config(
    name: String,
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<LtoTapeDrive, Error> {

    let (config, digest) = config::drive::config()?;

    let data: LtoTapeDrive = config.lookup("lto", &name)?;

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

@ -123,7 +123,7 @@ pub fn get_config(
        description: "The list of configured drives (with config digest).",
        type: Array,
        items: {
            type: LtoTapeDrive,
        },
    },
    access: {
@ -135,13 +135,13 @@ pub fn get_config(
pub fn list_drives(
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<LtoTapeDrive>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let (config, digest) = config::drive::config()?;

    let drive_list: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;

    let drive_list = drive_list
        .into_iter()
@ -176,7 +176,7 @@ pub enum DeletableProperty {
            schema: DRIVE_NAME_SCHEMA,
        },
        path: {
            schema: LTO_DRIVE_PATH_SCHEMA,
            optional: true,
        },
        changer: {
@ -225,7 +225,7 @@ pub fn update_drive(
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    let mut data: LtoTapeDrive = config.lookup("lto", &name)?;

    if let Some(delete) = delete {
        for delete_prop in delete {
@ -240,8 +240,8 @@ pub fn update_drive(
    }

    if let Some(path) = path {
        let lto_drives = lto_tape_device_list();
        check_drive_path(&lto_drives, &path)?;
        data.path = path;
    }

@ -261,7 +261,7 @@ pub fn update_drive(
        }
    }

    config.set_data(&name, "lto", &data)?;

    config::drive::save_config(&config)?;

@ -290,8 +290,8 @@ pub fn delete_drive(name: String, _param: Value) -> Result<(), Error> {

    match config.sections.get(&name) {
        Some((section_type, _)) => {
            if section_type != "lto" {
                bail!("Entry '{}' exists, but is not a lto tape drive", name);
            }
            config.sections.remove(&name);
        },
@ -48,7 +48,7 @@ pub fn list_dir_content<R: Read + Seek>(
        let mut components = path.clone();
        components.push(b'/');
        components.extend(&direntry.name);
        let mut entry = ArchiveEntry::new(&components, Some(&direntry.attr));
        if let DirEntryAttribute::File { size, mtime } = direntry.attr {
            entry.size = size.into();
            entry.mtime = mtime.into();
@ -7,7 +7,7 @@ use proxmox::api::{api, RpcEnvironment, RpcEnvironmentType, Permission};
use proxmox::api::router::{Router, SubdirMap};

use crate::server::WorkerTask;
use crate::tools::{apt, http::SimpleHttp, subscription};

use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::api2::types::{Authid, APTUpdateInfo, NODE_SCHEMA, UPID_SCHEMA};
@ -194,10 +194,12 @@ fn apt_get_changelog(
        bail!("Package '{}' not found", name);
    }

    let mut client = SimpleHttp::new(None); // TODO: pass proxy_config

    let changelog_url = &pkg_info[0].change_log_url;
    // FIXME: use 'apt-get changelog' for proxmox packages as well, once repo supports it
    if changelog_url.starts_with("http://download.proxmox.com/") {
        let changelog = crate::tools::runtime::block_on(client.get_string(changelog_url, None))
            .map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
        Ok(json!(changelog))

@ -221,7 +223,7 @@ fn apt_get_changelog(
        auth_header.insert("Authorization".to_owned(),
            format!("Basic {}", base64::encode(format!("{}:{}", key, id))));

        let changelog = crate::tools::runtime::block_on(client.get_string(changelog_url, Some(&auth_header)))
            .map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
        Ok(json!(changelog))
@ -32,9 +32,6 @@ use crate::api2::types::{NODE_SCHEMA, SUBSCRIPTION_KEY_SCHEMA, Authid};
pub fn check_subscription(
    force: bool,
) -> Result<(), Error> {

    let info = match subscription::read_subscription() {
        Err(err) => bail!("could not read subscription status: {}", err),
        Ok(Some(info)) => info,
@ -1,10 +1,11 @@
use std::path::Path;
use std::sync::{Mutex, Arc};

use anyhow::{bail, format_err, Error};
use serde_json::Value;

use proxmox::{
    try_block,
    api::{
        api,
        RpcEnvironment,
@ -33,6 +34,7 @@ use crate::{
    },
    server::{
        lookup_user_email,
        TapeBackupJobSummary,
        jobstate::{
            Job,
            JobState,
@ -176,8 +178,15 @@ pub fn do_tape_backup_job(

    let (drive_config, _digest) = config::drive::config()?;

    // for scheduled jobs we acquire the lock later in the worker
    let drive_lock = if schedule.is_some() {
        None
    } else {
        Some(lock_tape_device(&drive_config, &setup.drive)?)
    };

    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
    let email = lookup_user_email(notify_user);

    let upid_str = WorkerTask::new_thread(
        &worker_type,
@ -185,26 +194,40 @@ pub fn do_tape_backup_job(
        auth_id.clone(),
        false,
        move |worker| {
            job.start(&worker.upid().to_string())?;
            let mut drive_lock = drive_lock;

            let (job_result, summary) = match try_block!({
                if schedule.is_some() {
                    // for scheduled tape backup jobs, we wait indefinitely for the lock
                    task_log!(worker, "waiting for drive lock...");
                    loop {
                        if let Ok(lock) = lock_tape_device(&drive_config, &setup.drive) {
                            drive_lock = Some(lock);
                            break;
                        } // ignore errors

                        worker.check_abort()?;
                    }
                }
                set_tape_device_state(&setup.drive, &worker.upid().to_string())?;

                task_log!(worker,"Starting tape backup job '{}'", job_id);
                if let Some(event_str) = schedule {
                    task_log!(worker,"task triggered by schedule '{}'", event_str);
                }

                backup_worker(
                    &worker,
                    datastore,
                    &pool_config,
                    &setup,
                    email.clone(),
                )
            }) {
                Ok(summary) => (Ok(()), summary),
                Err(err) => (Err(err), Default::default()),
            };

            let status = worker.create_state(&job_result);
|
|||||||
Some(job.jobname()),
|
Some(job.jobname()),
|
||||||
&setup,
|
&setup,
|
||||||
&job_result,
|
&job_result,
|
||||||
|
summary,
|
||||||
) {
|
) {
|
||||||
eprintln!("send tape backup notification failed: {}", err);
|
eprintln!("send tape backup notification failed: {}", err);
|
||||||
}
|
}
|
||||||
@ -340,13 +364,17 @@ pub fn backup(
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard
            set_tape_device_state(&setup.drive, &worker.upid().to_string())?;

            let (job_result, summary) = match backup_worker(
                &worker,
                datastore,
                &pool_config,
                &setup,
                email.clone(),
            ) {
                Ok(summary) => (Ok(()), summary),
                Err(err) => (Err(err), Default::default()),
            };

            if let Some(email) = email {
                if let Err(err) = crate::server::send_tape_backup_status(
@ -354,6 +382,7 @@ pub fn backup(
                    None,
                    &setup,
                    &job_result,
                    summary,
                ) {
                    eprintln!("send tape backup notification failed: {}", err);
                }
@ -374,16 +403,16 @@ fn backup_worker(
    pool_config: &MediaPoolConfig,
    setup: &TapeBackupJobSetup,
    email: Option<String>,
) -> Result<TapeBackupJobSummary, Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);
    let start = std::time::Instant::now();
    let mut summary: TapeBackupJobSummary = Default::default();

    task_log!(worker, "update media online status");
    let changer_name = update_media_online_status(&setup.drive)?;

    let pool = MediaPool::with_config(status_path, &pool_config, changer_name, false)?;

    let mut pool_writer = PoolWriter::new(pool, &setup.drive, worker, email)?;
@ -402,26 +431,42 @@ fn backup_worker(
        task_log!(worker, "latest-only: true (only considering latest snapshots)");
    }

    let datastore_name = datastore.name();

    let mut errors = false;

    let mut need_catalog = false; // avoid writing catalog for empty jobs

    for (group_number, group) in group_list.into_iter().enumerate() {
        progress.done_groups = group_number as u64;
        progress.done_snapshots = 0;
        progress.group_snapshots = 0;

        let snapshot_list = group.list_backups(&datastore.base_path())?;

        // filter out unfinished backups
        let mut snapshot_list = snapshot_list
            .into_iter()
            .filter(|item| item.is_finished())
            .collect();

        BackupInfo::sort_list(&mut snapshot_list, true); // oldest first

        if latest_only {
            progress.group_snapshots = 1;
            if let Some(info) = snapshot_list.pop() {
                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                    continue;
                }

                need_catalog = true;

                let snapshot_name = info.backup_dir.to_string();
                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                    errors = true;
                } else {
                    summary.snapshot_list.push(snapshot_name);
                }
                progress.done_snapshots = 1;
                task_log!(
@ -433,12 +478,18 @@ fn backup_worker(
        } else {
            progress.group_snapshots = snapshot_list.len() as u64;
            for (snapshot_number, info) in snapshot_list.into_iter().enumerate() {
                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                    continue;
                }

                need_catalog = true;

                let snapshot_name = info.backup_dir.to_string();
                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                    errors = true;
                } else {
                    summary.snapshot_list.push(snapshot_name);
                }
                progress.done_snapshots = snapshot_number as u64 + 1;
                task_log!(
@ -452,6 +503,22 @@ fn backup_worker(
|
|||||||
|
|
||||||
pool_writer.commit()?;
|
pool_writer.commit()?;
|
||||||
|
|
||||||
|
if need_catalog {
|
||||||
|
task_log!(worker, "append media catalog");
|
||||||
|
|
||||||
|
let uuid = pool_writer.load_writable_media(worker)?;
|
||||||
|
let done = pool_writer.append_catalog_archive(worker)?;
|
||||||
|
if !done {
|
||||||
|
task_log!(worker, "catalog does not fit on tape, writing to next volume");
|
||||||
|
pool_writer.set_media_status_full(&uuid)?;
|
||||||
|
pool_writer.load_writable_media(worker)?;
|
||||||
|
let done = pool_writer.append_catalog_archive(worker)?;
|
||||||
|
if !done {
|
||||||
|
bail!("write_catalog_archive failed on second media");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if setup.export_media_set.unwrap_or(false) {
|
if setup.export_media_set.unwrap_or(false) {
|
||||||
pool_writer.export_media_set(worker)?;
|
pool_writer.export_media_set(worker)?;
|
||||||
} else if setup.eject_media.unwrap_or(false) {
|
} else if setup.eject_media.unwrap_or(false) {
|
||||||
@ -462,7 +529,9 @@ fn backup_worker(
|
|||||||
bail!("Tape backup finished with some errors. Please check the task log.");
|
bail!("Tape backup finished with some errors. Please check the task log.");
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
summary.duration = start.elapsed();
|
||||||
|
|
||||||
|
Ok(summary)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Try to update the the media online status
|
// Try to update the the media online status
|
||||||
@ -508,33 +577,48 @@ pub fn backup_snapshot(
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut chunk_iter = snapshot_reader.chunk_iterator()?.peekable();
|
let snapshot_reader = Arc::new(Mutex::new(snapshot_reader));
|
||||||
|
|
||||||
|
let (reader_thread, chunk_iter) = pool_writer.spawn_chunk_reader_thread(
|
||||||
|
datastore.clone(),
|
||||||
|
snapshot_reader.clone(),
|
||||||
|
)?;
|
||||||
|
|
||||||
|
let mut chunk_iter = chunk_iter.peekable();
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
worker.check_abort()?;
|
worker.check_abort()?;
|
||||||
|
|
||||||
// test is we have remaining chunks
|
// test is we have remaining chunks
|
||||||
if chunk_iter.peek().is_none() {
|
match chunk_iter.peek() {
|
||||||
break;
|
None => break,
|
||||||
|
Some(Ok(_)) => { /* Ok */ },
|
||||||
|
Some(Err(err)) => bail!("{}", err),
|
||||||
}
|
}
|
||||||
|
|
||||||
let uuid = pool_writer.load_writable_media(worker)?;
|
let uuid = pool_writer.load_writable_media(worker)?;
|
||||||
|
|
||||||
worker.check_abort()?;
|
worker.check_abort()?;
|
||||||
|
|
||||||
let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &datastore, &mut chunk_iter)?;
|
let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &mut chunk_iter, datastore.name())?;
|
||||||
|
|
||||||
if leom {
|
if leom {
|
||||||
pool_writer.set_media_status_full(&uuid)?;
|
pool_writer.set_media_status_full(&uuid)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if let Err(_) = reader_thread.join() {
|
||||||
|
bail!("chunk reader thread failed");
|
||||||
|
}
|
||||||
|
|
||||||
worker.check_abort()?;
|
worker.check_abort()?;
|
||||||
|
|
||||||
let uuid = pool_writer.load_writable_media(worker)?;
|
let uuid = pool_writer.load_writable_media(worker)?;
|
||||||
|
|
||||||
worker.check_abort()?;
|
worker.check_abort()?;
|
||||||
|
|
||||||
|
let snapshot_reader = snapshot_reader.lock().unwrap();
|
||||||
|
|
||||||
let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;
|
let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;
|
||||||
|
|
||||||
if !done {
|
if !done {
|
||||||
|
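The `match chunk_iter.peek()` rework above is needed because the iterator now yields `Result` items produced by the new chunk-reader thread, so a read error from the producer has to surface before the writer commits anything to tape. A minimal sketch of that pattern, with stand-in item and error types rather than the PBS ones:

use std::iter::Peekable;

// Sketch only: u32 stands in for a chunk, String for the real error type.
fn next_chunk_ready<I>(iter: &mut Peekable<I>) -> Result<bool, String>
where
    I: Iterator<Item = Result<u32, String>>,
{
    match iter.peek() {
        None => Ok(false),                  // stream exhausted
        Some(Ok(_)) => Ok(true),            // data available, safe to consume
        Some(Err(err)) => Err(err.clone()), // propagate the producer's error
    }
}

fn main() {
    let chunks: Vec<Result<u32, String>> = vec![Ok(1), Ok(2), Err("read failed".into())];
    let mut iter = chunks.into_iter().peekable();
    while next_chunk_ready(&mut iter).unwrap_or(false) {
        let chunk = iter.next().unwrap().unwrap();
        println!("write chunk {}", chunk);
    }
}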
@@ -20,7 +20,7 @@ use crate::{
     Authid,
     CHANGER_NAME_SCHEMA,
     ChangerListEntry,
-    LinuxTapeDrive,
+    LtoTapeDrive,
     MtxEntryKind,
     MtxStatusEntry,
     ScsiTapeChanger,
@@ -88,7 +88,7 @@ pub async fn get_status(

     inventory.update_online_status(&map)?;

-    let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
+    let drive_list: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;
     let mut drive_map: HashMap<u64, String> = HashMap::new();

     for drive in drive_list {
@@ -1,6 +1,7 @@
 use std::panic::UnwindSafe;
 use std::path::Path;
 use std::sync::Arc;
+use std::collections::HashMap;

 use anyhow::{bail, format_err, Error};
 use serde_json::Value;
@@ -10,7 +11,6 @@ use proxmox::{
     identity,
     list_subdirs_api_method,
     tools::Uuid,
-    sys::error::SysError,
     api::{
         api,
         section_config::SectionConfigData,
@@ -42,22 +42,29 @@ use crate::{
         MEDIA_POOL_NAME_SCHEMA,
         Authid,
         DriveListEntry,
-        LinuxTapeDrive,
+        LtoTapeDrive,
         MediaIdFlat,
         LabelUuidMap,
         MamAttribute,
-        LinuxDriveAndMediaStatus,
+        LtoDriveAndMediaStatus,
+        Lp17VolumeStatistics,
+    },
+    tape::restore::{
+        fast_catalog_restore,
+        restore_media,
     },
-    tape::restore::restore_media,
     },
     server::WorkerTask,
     tape::{
         TAPE_STATUS_DIR,
-        MediaPool,
         Inventory,
         MediaCatalog,
         MediaId,
-        linux_tape_device_list,
+        BlockReadError,
+        lock_media_set,
+        lock_media_pool,
+        lock_unassigned_media_pool,
+        lto_tape_device_list,
         lookup_device_identification,
         file_formats::{
             MediaLabel,
@@ -65,9 +72,8 @@ use crate::{
         },
         drive::{
             TapeDriver,
-            LinuxTapeHandle,
-            Lp17VolumeStatistics,
-            open_linux_tape_device,
+            LtoTapeHandle,
+            open_lto_tape_device,
             media_changer,
             required_media_changer,
             open_drive,
@@ -316,8 +322,8 @@ pub fn unload(
         permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_WRITE, false),
     },
 )]
-/// Erase media. Check for label-text if given (cancels if wrong media).
-pub fn erase_media(
+/// Format media. Check for label-text if given (cancels if wrong media).
+pub fn format_media(
     drive: String,
     fast: Option<bool>,
     label_text: Option<String>,
@@ -326,7 +332,7 @@ pub fn erase_media(
     let upid_str = run_drive_worker(
         rpcenv,
         drive.clone(),
-        "erase-media",
+        "format-media",
         Some(drive.clone()),
         move |worker, config| {
             if let Some(ref label) = label_text {
@@ -345,15 +351,15 @@ pub fn erase_media(
                 }
                 /* assume drive contains no or unrelated data */
                 task_log!(worker, "unable to read media label: {}", err);
-                task_log!(worker, "erase anyways");
-                handle.erase_media(fast.unwrap_or(true))?;
+                task_log!(worker, "format anyways");
+                handle.format_media(fast.unwrap_or(true))?;
             }
             Ok((None, _)) => {
                 if let Some(label) = label_text {
                     bail!("expected label '{}', found empty tape", label);
                 }
-                task_log!(worker, "found empty media - erase anyways");
-                handle.erase_media(fast.unwrap_or(true))?;
+                task_log!(worker, "found empty media - format anyways");
+                handle.format_media(fast.unwrap_or(true))?;
             }
             Ok((Some(media_id), _key_config)) => {
                 if let Some(label_text) = label_text {
@@ -373,11 +379,20 @@ pub fn erase_media(
                 );

                 let status_path = Path::new(TAPE_STATUS_DIR);
-                let mut inventory = Inventory::load(status_path)?;
+                let mut inventory = Inventory::new(status_path);

-                MediaCatalog::destroy(status_path, &media_id.label.uuid)?;
-                inventory.remove_media(&media_id.label.uuid)?;
-                handle.erase_media(fast.unwrap_or(true))?;
+                if let Some(MediaSetLabel { ref pool, ref uuid, ..}) = media_id.media_set_label {
+                    let _pool_lock = lock_media_pool(status_path, pool)?;
+                    let _media_set_lock = lock_media_set(status_path, uuid, None)?;
+                    MediaCatalog::destroy(status_path, &media_id.label.uuid)?;
+                    inventory.remove_media(&media_id.label.uuid)?;
+                } else {
+                    let _lock = lock_unassigned_media_pool(status_path)?;
+                    MediaCatalog::destroy(status_path, &media_id.label.uuid)?;
+                    inventory.remove_media(&media_id.label.uuid)?;
+                };
+
+                handle.format_media(fast.unwrap_or(true))?;
             }
         }

@@ -489,7 +504,7 @@ pub fn eject_media(
 /// Write a new media label to the media in 'drive'. The media is
 /// assigned to the specified 'pool', or else to the free media pool.
 ///
-/// Note: The media need to be empty (you may want to erase it first).
+/// Note: The media need to be empty (you may want to format it first).
 pub fn label_media(
     drive: String,
     pool: Option<String>,
@@ -514,14 +529,11 @@ pub fn label_media(
             drive.rewind()?;

             match drive.read_next_file() {
-                Ok(Some(_file)) => bail!("media is not empty (erase first)"),
-                Ok(None) => { /* EOF mark at BOT, assume tape is empty */ },
+                Ok(_reader) => bail!("media is not empty (format it first)"),
+                Err(BlockReadError::EndOfFile) => { /* EOF mark at BOT, assume tape is empty */ },
+                Err(BlockReadError::EndOfStream) => { /* tape is empty */ },
                 Err(err) => {
-                    if err.is_errno(nix::errno::Errno::ENOSPC) || err.is_errno(nix::errno::Errno::EIO) {
-                        /* assume tape is empty */
-                    } else {
-                        bail!("media read error - {}", err);
-                    }
+                    bail!("media read error - {}", err);
                 }
             }

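Both `label_media` above and `barcode_label_media_worker` further down now classify the result of `read_next_file` the same way instead of probing errno values. A sketch of that three-way classification with a stand-in enum (the real `BlockReadError` lives in the tape module):

// Stand-in enum, not the PBS definition: the point is the classification.
enum BlockReadError {
    EndOfFile,              // filemark hit immediately
    EndOfStream,            // no recorded data at all
    Io(std::io::Error),     // genuine read failure
}

fn tape_is_empty(read_result: Result<(), BlockReadError>) -> Result<bool, String> {
    match read_result {
        Ok(()) => Ok(false),                          // a file exists => not empty
        Err(BlockReadError::EndOfFile) => Ok(true),   // EOF mark at BOT, assume empty
        Err(BlockReadError::EndOfStream) => Ok(true), // tape is empty
        Err(BlockReadError::Io(err)) => Err(format!("media read error - {}", err)),
    }
}

fn main() {
    assert_eq!(tape_is_empty(Err(BlockReadError::EndOfFile)), Ok(true));
}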
@@ -548,28 +560,37 @@ fn write_media_label(

     drive.label_tape(&label)?;

-    let mut media_set_label = None;
+    let status_path = Path::new(TAPE_STATUS_DIR);

-    if let Some(ref pool) = pool {
+    let media_id = if let Some(ref pool) = pool {
         // assign media to pool by writing special media set label
         worker.log(format!("Label media '{}' for pool '{}'", label.label_text, pool));
         let set = MediaSetLabel::with_data(&pool, [0u8; 16].into(), 0, label.ctime, None);

         drive.write_media_set_label(&set, None)?;
-        media_set_label = Some(set);
+
+        let media_id = MediaId { label, media_set_label: Some(set) };
+
+        // Create the media catalog
+        MediaCatalog::overwrite(status_path, &media_id, false)?;
+
+        let mut inventory = Inventory::new(status_path);
+        inventory.store(media_id.clone(), false)?;
+
+        media_id
     } else {
         worker.log(format!("Label media '{}' (no pool assignment)", label.label_text));
-    }

-    let media_id = MediaId { label, media_set_label };
+        let media_id = MediaId { label, media_set_label: None };

-    let status_path = Path::new(TAPE_STATUS_DIR);
+        // Create the media catalog
+        MediaCatalog::overwrite(status_path, &media_id, false)?;

-    // Create the media catalog
-    MediaCatalog::overwrite(status_path, &media_id, false)?;
+        let mut inventory = Inventory::new(status_path);
+        inventory.store(media_id.clone(), false)?;

-    let mut inventory = Inventory::load(status_path)?;
-    inventory.store(media_id.clone(), false)?;
+        media_id
+    };

     drive.rewind()?;

@@ -705,14 +726,24 @@ pub async fn read_label(

             if let Err(err) = drive.set_encryption(encrypt_fingerprint) {
                 // try, but ignore errors. just log to stderr
-                eprintln!("uable to load encryption key: {}", err);
+                eprintln!("unable to load encryption key: {}", err);
             }
         }

         if let Some(true) = inventorize {
             let state_path = Path::new(TAPE_STATUS_DIR);
-            let mut inventory = Inventory::load(state_path)?;
-            inventory.store(media_id, false)?;
+            let mut inventory = Inventory::new(state_path);
+
+            if let Some(MediaSetLabel { ref pool, ref uuid, ..}) = media_id.media_set_label {
+                let _pool_lock = lock_media_pool(state_path, pool)?;
+                let _lock = lock_media_set(state_path, uuid, None)?;
+                MediaCatalog::destroy_unrelated_catalog(state_path, &media_id)?;
+                inventory.store(media_id, false)?;
+            } else {
+                let _lock = lock_unassigned_media_pool(state_path)?;
+                MediaCatalog::destroy(state_path, &media_id.label.uuid)?;
+                inventory.store(media_id, false)?;
+            };
         }

         flat
@@ -760,9 +791,9 @@ pub fn clean_drive(

             changer.clean_drive()?;

-            if let Ok(drive_config) = config.lookup::<LinuxTapeDrive>("linux", &drive) {
+            if let Ok(drive_config) = config.lookup::<LtoTapeDrive>("lto", &drive) {
                 // Note: clean_drive unloads the cleaning media, so we cannot use drive_config.open
-                let mut handle = LinuxTapeHandle::new(open_linux_tape_device(&drive_config.path)?);
+                let mut handle = LtoTapeHandle::new(open_lto_tape_device(&drive_config.path)?)?;

                 // test for critical tape alert flags
                 if let Ok(alert_flags) = handle.tape_alert_flags() {
@@ -947,7 +978,17 @@ pub fn update_inventory(
                     continue;
                 }
                 worker.log(format!("inventorize media '{}' with uuid '{}'", label_text, media_id.label.uuid));
-                inventory.store(media_id, false)?;
+
+                if let Some(MediaSetLabel { ref pool, ref uuid, ..}) = media_id.media_set_label {
+                    let _pool_lock = lock_media_pool(state_path, pool)?;
+                    let _lock = lock_media_set(state_path, uuid, None)?;
+                    MediaCatalog::destroy_unrelated_catalog(state_path, &media_id)?;
+                    inventory.store(media_id, false)?;
+                } else {
+                    let _lock = lock_unassigned_media_pool(state_path)?;
+                    MediaCatalog::destroy(state_path, &media_id.label.uuid)?;
+                    inventory.store(media_id, false)?;
+                };
             }
         }
         changer.unload_media(None)?;
@@ -1047,18 +1088,15 @@ fn barcode_label_media_worker(
         drive.rewind()?;

         match drive.read_next_file() {
-            Ok(Some(_file)) => {
-                worker.log(format!("media '{}' is not empty (erase first)", label_text));
+            Ok(_reader) => {
+                worker.log(format!("media '{}' is not empty (format it first)", label_text));
                 continue;
             }
-            Ok(None) => { /* EOF mark at BOT, assume tape is empty */ },
-            Err(err) => {
-                if err.is_errno(nix::errno::Errno::ENOSPC) || err.is_errno(nix::errno::Errno::EIO) {
-                    /* assume tape is empty */
-                } else {
-                    worker.warn(format!("media '{}' read error (maybe not empty - erase first)", label_text));
-                    continue;
-                }
+            Err(BlockReadError::EndOfFile) => { /* EOF mark at BOT, assume tape is empty */ },
+            Err(BlockReadError::EndOfStream) => { /* tape is empty */ },
+            Err(_err) => {
+                worker.warn(format!("media '{}' read error (maybe not empty - format it first)", label_text));
+                continue;
             }
         }

@@ -1100,7 +1138,7 @@ pub async fn cartridge_memory(drive: String) -> Result<Vec<MamAttribute>, Error>
         drive.clone(),
         "reading cartridge memory".to_string(),
         move |config| {
-            let drive_config: LinuxTapeDrive = config.lookup("linux", &drive)?;
+            let drive_config: LtoTapeDrive = config.lookup("lto", &drive)?;
             let mut handle = drive_config.open()?;

             handle.cartridge_memory()
@@ -1130,7 +1168,7 @@ pub async fn volume_statistics(drive: String) -> Result<Lp17VolumeStatistics, Er
         drive.clone(),
         "reading volume statistics".to_string(),
         move |config| {
-            let drive_config: LinuxTapeDrive = config.lookup("linux", &drive)?;
+            let drive_config: LtoTapeDrive = config.lookup("lto", &drive)?;
             let mut handle = drive_config.open()?;

             handle.volume_statistics()
@@ -1148,24 +1186,24 @@ pub async fn volume_statistics(drive: String) -> Result<Lp17VolumeStatistics, Er
     },
     },
     returns: {
-        type: LinuxDriveAndMediaStatus,
+        type: LtoDriveAndMediaStatus,
     },
     access: {
         permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_AUDIT, false),
     },
 )]
 /// Get drive/media status
-pub async fn status(drive: String) -> Result<LinuxDriveAndMediaStatus, Error> {
+pub async fn status(drive: String) -> Result<LtoDriveAndMediaStatus, Error> {
     run_drive_blocking_task(
         drive.clone(),
         "reading drive status".to_string(),
         move |config| {
-            let drive_config: LinuxTapeDrive = config.lookup("linux", &drive)?;
+            let drive_config: LtoTapeDrive = config.lookup("lto", &drive)?;

-            // Note: use open_linux_tape_device, because this also works if no medium loaded
-            let file = open_linux_tape_device(&drive_config.path)?;
+            // Note: use open_lto_tape_device, because this also works if no medium loaded
+            let file = open_lto_tape_device(&drive_config.path)?;

-            let mut handle = LinuxTapeHandle::new(file);
+            let mut handle = LtoTapeHandle::new(file)?;

             handle.get_drive_and_media_status()
         }
@@ -1184,6 +1222,11 @@ pub async fn status(drive: String) -> Result<LtoDriveAndMediaStatus, Error> {
             type: bool,
             optional: true,
         },
+        scan: {
+            description: "Re-read the whole tape to reconstruct the catalog instead of restoring saved versions.",
+            type: bool,
+            optional: true,
+        },
         verbose: {
             description: "Verbose mode - log all found chunks.",
             type: bool,
@@ -1202,11 +1245,13 @@ pub async fn status(drive: String) -> Result<LtoDriveAndMediaStatus, Error> {
 pub fn catalog_media(
     drive: String,
     force: Option<bool>,
+    scan: Option<bool>,
     verbose: Option<bool>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
     let verbose = verbose.unwrap_or(false);
     let force = force.unwrap_or(false);
+    let scan = scan.unwrap_or(false);

     let upid_str = run_drive_worker(
         rpcenv,
@@ -1237,19 +1282,22 @@ pub fn catalog_media(

             let status_path = Path::new(TAPE_STATUS_DIR);

-            let mut inventory = Inventory::load(status_path)?;
-            inventory.store(media_id.clone(), false)?;
+            let mut inventory = Inventory::new(status_path);

-            let pool = match media_id.media_set_label {
+            let (_media_set_lock, media_set_uuid) = match media_id.media_set_label {
                 None => {
                     worker.log("media is empty");
+                    let _lock = lock_unassigned_media_pool(status_path)?;
                     MediaCatalog::destroy(status_path, &media_id.label.uuid)?;
+                    inventory.store(media_id.clone(), false)?;
                     return Ok(());
                 }
                 Some(ref set) => {
                     if set.uuid.as_ref() == [0u8;16] { // media is empty
                         worker.log("media is empty");
+                        let _lock = lock_unassigned_media_pool(status_path)?;
                         MediaCatalog::destroy(status_path, &media_id.label.uuid)?;
+                        inventory.store(media_id.clone(), false)?;
                         return Ok(());
                     }
                     let encrypt_fingerprint = set.encryption_key_fingerprint.clone()
@@ -1257,17 +1305,38 @@ pub fn catalog_media(

                     drive.set_encryption(encrypt_fingerprint)?;

-                    set.pool.clone()
+                    let _pool_lock = lock_media_pool(status_path, &set.pool)?;
+                    let media_set_lock = lock_media_set(status_path, &set.uuid, None)?;
+
+                    MediaCatalog::destroy_unrelated_catalog(status_path, &media_id)?;
+
+                    inventory.store(media_id.clone(), false)?;
+
+                    (media_set_lock, &set.uuid)
                 }
             };

-            let _lock = MediaPool::lock(status_path, &pool)?;
-
             if MediaCatalog::exists(status_path, &media_id.label.uuid) && !force {
                 bail!("media catalog exists (please use --force to overwrite)");
             }

-            restore_media(&worker, &mut drive, &media_id, None, verbose)?;
+            if !scan {
+                let media_set = inventory.compute_media_set_members(media_set_uuid)?;
+
+                if fast_catalog_restore(&worker, &mut drive, &media_set, &media_id.label.uuid)? {
+                    return Ok(())
+                }
+
+                task_log!(worker, "no catalog found");
+            }
+
+            task_log!(worker, "scanning entire media to reconstruct catalog");
+
+            drive.rewind()?;
+            drive.read_label()?; // skip over labels - we already read them above
+
+            let mut checked_chunks = HashMap::new();
+            restore_media(&worker, &mut drive, &media_id, None, &mut checked_chunks, verbose)?;

             Ok(())
         },
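The reworked `catalog_media` only re-reads the whole tape when `scan` is requested or when no saved catalog is found; otherwise it restores the catalog archives already written to the media set. A control-flow sketch under that assumption (both helpers here are stubs standing in for the real `fast_catalog_restore` and full `restore_media` passes):

// Sketch of the two-phase catalog strategy; helper names are stand-ins.
fn rebuild_catalog(scan: bool) -> Result<(), String> {
    if !scan {
        if try_fast_catalog_restore()? {
            return Ok(()); // catalog archive found on tape, done
        }
        println!("no catalog found");
    }
    println!("scanning entire media to reconstruct catalog");
    full_media_scan()
}

fn try_fast_catalog_restore() -> Result<bool, String> { Ok(false) } // stub
fn full_media_scan() -> Result<(), String> { Ok(()) }               // stub

fn main() {
    rebuild_catalog(false).unwrap();
}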
@@ -1308,9 +1377,9 @@ pub fn list_drives(

     let (config, _) = config::drive::config()?;

-    let linux_drives = linux_tape_device_list();
+    let lto_drives = lto_tape_device_list();

-    let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
+    let drive_list: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;

     let mut list = Vec::new();

@@ -1324,7 +1393,7 @@ pub fn list_drives(
             continue;
         }

-        let info = lookup_device_identification(&linux_drives, &drive.path);
+        let info = lookup_device_identification(&lto_drives, &drive.path);
         let state = get_tape_device_state(&config, &drive.name)?;
         let entry = DriveListEntry { config: drive, info, state };
         list.push(entry);
@@ -1356,9 +1425,9 @@ pub const SUBDIRS: SubdirMap = &sorted!([
             .post(&API_METHOD_EJECT_MEDIA)
     ),
     (
-        "erase-media",
+        "format-media",
         &Router::new()
-            .post(&API_METHOD_ERASE_MEDIA)
+            .post(&API_METHOD_FORMAT_MEDIA)
     ),
     (
         "export-media",
@@ -122,7 +122,7 @@ pub async fn list_media(
         let config: MediaPoolConfig = config.lookup("pool", pool_name)?;

         let changer_name = None; // assume standalone drive
-        let mut pool = MediaPool::with_config(status_path, &config, changer_name)?;
+        let mut pool = MediaPool::with_config(status_path, &config, changer_name, true)?;

         let current_time = proxmox::tools::time::epoch_i64();

@@ -432,29 +432,32 @@ pub fn list_content(
             .generate_media_set_name(&set.uuid, template)
             .unwrap_or_else(|_| set.uuid.to_string());

-        let catalog = MediaCatalog::open(status_path, &media_id.label.uuid, false, false)?;
+        let catalog = MediaCatalog::open(status_path, &media_id, false, false)?;

-        for snapshot in catalog.snapshot_index().keys() {
-            let backup_dir: BackupDir = snapshot.parse()?;
+        for (store, content) in catalog.content() {
+            for snapshot in content.snapshot_index.keys() {
+                let backup_dir: BackupDir = snapshot.parse()?;

-            if let Some(ref backup_type) = filter.backup_type {
-                if backup_dir.group().backup_type() != backup_type { continue; }
-            }
-            if let Some(ref backup_id) = filter.backup_id {
-                if backup_dir.group().backup_id() != backup_id { continue; }
-            }
+                if let Some(ref backup_type) = filter.backup_type {
+                    if backup_dir.group().backup_type() != backup_type { continue; }
+                }
+                if let Some(ref backup_id) = filter.backup_id {
+                    if backup_dir.group().backup_id() != backup_id { continue; }
+                }

-            list.push(MediaContentEntry {
-                uuid: media_id.label.uuid.clone(),
-                label_text: media_id.label.label_text.to_string(),
-                pool: set.pool.clone(),
-                media_set_name: media_set_name.clone(),
-                media_set_uuid: set.uuid.clone(),
-                media_set_ctime: set.ctime,
-                seq_nr: set.seq_nr,
-                snapshot: snapshot.to_owned(),
-                backup_time: backup_dir.backup_time(),
-            });
+                list.push(MediaContentEntry {
+                    uuid: media_id.label.uuid.clone(),
+                    label_text: media_id.label.label_text.to_string(),
+                    pool: set.pool.clone(),
+                    media_set_name: media_set_name.clone(),
+                    media_set_uuid: set.uuid.clone(),
+                    media_set_ctime: set.ctime,
+                    seq_nr: set.seq_nr,
+                    snapshot: snapshot.to_owned(),
+                    store: store.to_owned(),
+                    backup_time: backup_dir.backup_time(),
+                });
+            }
         }
     }

@@ -15,7 +15,7 @@ use proxmox::{
 use crate::{
     api2::types::TapeDeviceInfo,
     tape::{
-        linux_tape_device_list,
+        lto_tape_device_list,
         linux_tape_changer_list,
     },
 };
@@ -41,7 +41,7 @@ pub mod restore;
 /// Scan tape drives
 pub fn scan_drives(_param: Value) -> Result<Vec<TapeDeviceInfo>, Error> {

-    let list = linux_tape_device_list();
+    let list = lto_tape_device_list();

     Ok(list)
 }
@@ -1,6 +1,9 @@
 use std::path::Path;
 use std::ffi::OsStr;
+use std::collections::{HashMap, HashSet};
 use std::convert::TryFrom;
+use std::io::{Seek, SeekFrom};
+use std::sync::Arc;

 use anyhow::{bail, format_err, Error};
 use serde_json::Value;
@@ -12,6 +15,7 @@ use proxmox::{
         RpcEnvironmentType,
         Router,
         Permission,
+        schema::parse_property_string,
         section_config::SectionConfigData,
     },
     tools::{
@@ -26,10 +30,12 @@ use proxmox::{

 use crate::{
     task_log,
+    task_warn,
     task::TaskState,
     tools::compute_file_csum,
     api2::types::{
-        DATASTORE_SCHEMA,
+        DATASTORE_MAP_ARRAY_SCHEMA,
+        DATASTORE_MAP_LIST_SCHEMA,
         DRIVE_NAME_SCHEMA,
         UPID_SCHEMA,
         Authid,
@@ -40,6 +46,7 @@ use crate::{
     cached_user_info::CachedUserInfo,
     acl::{
         PRIV_DATASTORE_BACKUP,
+        PRIV_DATASTORE_MODIFY,
         PRIV_TAPE_READ,
     },
 },
@@ -63,18 +70,26 @@ use crate::{
     tape::{
         TAPE_STATUS_DIR,
         TapeRead,
+        BlockReadError,
         MediaId,
+        MediaSet,
         MediaCatalog,
-        MediaPool,
         Inventory,
+        lock_media_set,
         file_formats::{
             PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
             PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
+            PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
             PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
             PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
             PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
+            PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1,
+            PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0,
             MediaContentHeader,
+            ChunkArchiveHeader,
             ChunkArchiveDecoder,
+            SnapshotArchiveHeader,
+            CatalogArchiveHeader,
         },
         drive::{
             TapeDriver,
@@ -85,14 +100,75 @@ use crate::{
     },
 };

-pub const ROUTER: Router = Router::new()
-    .post(&API_METHOD_RESTORE);
+pub struct DataStoreMap {
+    map: HashMap<String, Arc<DataStore>>,
+    default: Option<Arc<DataStore>>,
+}
+
+impl TryFrom<String> for DataStoreMap {
+    type Error = Error;
+
+    fn try_from(value: String) -> Result<Self, Error> {
+        let value = parse_property_string(&value, &DATASTORE_MAP_ARRAY_SCHEMA)?;
+        let mut mapping: Vec<String> = value
+            .as_array()
+            .unwrap()
+            .iter()
+            .map(|v| v.as_str().unwrap().to_string())
+            .collect();
+
+        let mut map = HashMap::new();
+        let mut default = None;
+        while let Some(mut store) = mapping.pop() {
+            if let Some(index) = store.find('=') {
+                let mut target = store.split_off(index);
+                target.remove(0); // remove '='
+                let datastore = DataStore::lookup_datastore(&target)?;
+                map.insert(store, datastore);
+            } else if default.is_none() {
+                default = Some(DataStore::lookup_datastore(&store)?);
+            } else {
+                bail!("multiple default stores given");
+            }
+        }
+
+        Ok(Self { map, default })
+    }
+}
+
+impl DataStoreMap {
+    fn used_datastores<'a>(&self) -> HashSet<&str> {
+        let mut set = HashSet::new();
+        for store in self.map.values() {
+            set.insert(store.name());
+        }
+
+        if let Some(ref store) = self.default {
+            set.insert(store.name());
+        }
+
+        set
+    }
+
+    fn get_datastore(&self, source: &str) -> Option<&DataStore> {
+        if let Some(store) = self.map.get(source) {
+            return Some(&store);
+        }
+        if let Some(ref store) = self.default {
+            return Some(&store);
+        }
+
+        return None;
+    }
+}
+
+pub const ROUTER: Router = Router::new().post(&API_METHOD_RESTORE);

 #[api(
     input: {
         properties: {
             store: {
-                schema: DATASTORE_SCHEMA,
+                schema: DATASTORE_MAP_LIST_SCHEMA,
             },
             drive: {
                 schema: DRIVE_NAME_SCHEMA,
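For context on the new `DataStoreMap` above: the restore API's `store` parameter is now a list such as `store1,store2=targetstore`, where a `source=target` entry maps a source datastore by name and a single bare entry acts as the default target. A simplified sketch of just the parsing rule, with plain strings instead of `Arc<DataStore>` handles:

use std::collections::HashMap;

// Simplified stand-in for DataStoreMap::try_from: values are target
// names rather than DataStore handles, and the property-string schema
// is replaced by a plain comma split.
fn parse_store_map(value: &str) -> Result<(HashMap<String, String>, Option<String>), String> {
    let mut map = HashMap::new();
    let mut default = None;
    for entry in value.split(',') {
        match entry.split_once('=') {
            Some((source, target)) => {
                map.insert(source.to_string(), target.to_string());
            }
            None if default.is_none() => default = Some(entry.to_string()),
            None => return Err("multiple default stores given".to_string()),
        }
    }
    Ok((map, default))
}

fn main() {
    // "store2" restores into "targetstore"; unmapped sources fall back
    // to the default target "store1".
    let (map, default) = parse_store_map("store1,store2=targetstore").unwrap();
    assert_eq!(default.as_deref(), Some("store1"));
    assert_eq!(map.get("store2").map(String::as_str), Some("targetstore"));
}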
@@ -105,6 +181,10 @@ pub const ROUTER: Router = Router::new()
             type: Userid,
             optional: true,
         },
+        owner: {
+            type: Authid,
+            optional: true,
+        },
     },
 },
 returns: {
@@ -123,15 +203,34 @@ pub fn restore(
     drive: String,
     media_set: String,
     notify_user: Option<Userid>,
+    owner: Option<Authid>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;

-    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
-    if (privs & PRIV_DATASTORE_BACKUP) == 0 {
-        bail!("no permissions on /datastore/{}", store);
+    let store_map = DataStoreMap::try_from(store)
+        .map_err(|err| format_err!("cannot parse store mapping: {}", err))?;
+    let used_datastores = store_map.used_datastores();
+    if used_datastores.len() == 0 {
+        bail!("no datastores given");
+    }
+
+    for store in used_datastores.iter() {
+        let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
+        if (privs & PRIV_DATASTORE_BACKUP) == 0 {
+            bail!("no permissions on /datastore/{}", store);
+        }
+
+        if let Some(ref owner) = owner {
+            let correct_owner = owner == &auth_id
+                || (owner.is_token() && !auth_id.is_token() && owner.user() == auth_id.user());
+
+            // same permission as changing ownership after syncing
+            if !correct_owner && privs & PRIV_DATASTORE_MODIFY == 0 {
+                bail!("no permission to restore as '{}'", owner);
+            }
+        }
     }

     let privs = user_info.lookup_privs(&auth_id, &["tape", "drive", &drive]);
@@ -139,11 +238,14 @@ pub fn restore(
         bail!("no permissions on /tape/drive/{}", drive);
     }

-    let status_path = Path::new(TAPE_STATUS_DIR);
-    let inventory = Inventory::load(status_path)?;
-
     let media_set_uuid = media_set.parse()?;

+    let status_path = Path::new(TAPE_STATUS_DIR);
+
+    let _lock = lock_media_set(status_path, &media_set_uuid, None)?;
+
+    let inventory = Inventory::load(status_path)?;
+
     let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;

     let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool]);
@@ -151,8 +253,6 @@ pub fn restore(
         bail!("no permissions on /tape/pool/{}", pool);
     }

-    let datastore = DataStore::lookup_datastore(&store)?;
-
     let (drive_config, _digest) = config::drive::config()?;

     // early check/lock before starting worker
@@ -160,9 +260,14 @@ pub fn restore(

     let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

+    let taskid = used_datastores
+        .iter()
+        .map(|s| s.to_string())
+        .collect::<Vec<String>>()
+        .join(", ");
     let upid_str = WorkerTask::new_thread(
         "tape-restore",
-        Some(store.clone()),
+        Some(taskid),
         auth_id.clone(),
         to_stdout,
         move |worker| {
@@ -170,8 +275,6 @@ pub fn restore(

             set_tape_device_state(&drive, &worker.upid().to_string())?;

-            let _lock = MediaPool::lock(status_path, &pool)?;
-
             let members = inventory.compute_media_set_members(&media_set_uuid)?;

             let media_list = members.media_list();
@@ -202,7 +305,17 @@ pub fn restore(
                 task_log!(worker, "Encryption key fingerprint: {}", fingerprint);
             }
             task_log!(worker, "Pool: {}", pool);
-            task_log!(worker, "Datastore: {}", store);
+            task_log!(
+                worker,
+                "Datastore(s): {}",
+                store_map
+                    .used_datastores()
+                    .into_iter()
+                    .map(String::from)
+                    .collect::<Vec<String>>()
+                    .join(", "),
+            );
+
             task_log!(worker, "Drive: {}", drive);
             task_log!(
                 worker,
@@ -213,18 +326,33 @@ pub fn restore(
                     .join(";")
             );

+            let mut datastore_locks = Vec::new();
+            for store_name in store_map.used_datastores() {
+                // explicit create shared lock to prevent GC on newly created chunks
+                if let Some(store) = store_map.get_datastore(store_name) {
+                    let shared_store_lock = store.try_shared_chunk_store_lock()?;
+                    datastore_locks.push(shared_store_lock);
+                }
+            }
+
+            let mut checked_chunks_map = HashMap::new();
+
             for media_id in media_id_list.iter() {
                 request_and_restore_media(
                     &worker,
                     media_id,
                     &drive_config,
                     &drive,
-                    &datastore,
+                    &store_map,
+                    &mut checked_chunks_map,
                     &auth_id,
                     &notify_user,
+                    &owner,
                 )?;
             }

+            drop(datastore_locks);
+
             task_log!(worker, "Restore mediaset '{}' done", media_set);

             if let Err(err) = set_tape_device_state(&drive, "") {
@@ -249,11 +377,12 @@ pub fn request_and_restore_media(
     media_id: &MediaId,
     drive_config: &SectionConfigData,
     drive_name: &str,
-    datastore: &DataStore,
+    store_map: &DataStoreMap,
+    checked_chunks_map: &mut HashMap<String, HashSet<[u8;32]>>,
     authid: &Authid,
     notify_user: &Option<Userid>,
+    owner: &Option<Authid>,
 ) -> Result<(), Error> {

     let media_set_uuid = match media_id.media_set_label {
         None => bail!("restore_media: no media set - internal error"),
         Some(ref set) => &set.uuid,
@@ -284,7 +413,16 @@ pub fn request_and_restore_media(
         }
     }

-    restore_media(worker, &mut drive, &info, Some((datastore, authid)), false)
+    let restore_owner = owner.as_ref().unwrap_or(authid);
+
+    restore_media(
+        worker,
+        &mut drive,
+        &info,
+        Some((&store_map, restore_owner)),
+        checked_chunks_map,
+        false,
+    )
 }

 /// Restore complete media content and catalog
@@ -294,7 +432,8 @@ pub fn restore_media(
     worker: &WorkerTask,
     drive: &mut Box<dyn TapeDriver>,
     media_id: &MediaId,
-    target: Option<(&DataStore, &Authid)>,
+    target: Option<(&DataStoreMap, &Authid)>,
+    checked_chunks_map: &mut HashMap<String, HashSet<[u8;32]>>,
     verbose: bool,
 ) -> Result<(), Error> {

@@ -303,15 +442,22 @@ pub fn restore_media(

     loop {
         let current_file_number = drive.current_file_number()?;
-        let reader = match drive.read_next_file()? {
-            None => {
+        let reader = match drive.read_next_file() {
+            Err(BlockReadError::EndOfFile) => {
+                task_log!(worker, "skip unexpected filemark at pos {}", current_file_number);
+                continue;
+            }
+            Err(BlockReadError::EndOfStream) => {
                 task_log!(worker, "detected EOT after {} files", current_file_number);
                 break;
             }
-            Some(reader) => reader,
+            Err(BlockReadError::Error(err)) => {
+                return Err(err.into());
+            }
+            Ok(reader) => reader,
         };

-        restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?;
+        restore_archive(worker, reader, current_file_number, target, &mut catalog, checked_chunks_map, verbose)?;
     }

     MediaCatalog::finish_temporary_database(status_path, &media_id.label.uuid, true)?;
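The `checked_chunks_map` threaded through the restore path keeps one set of already-verified chunk digests per target datastore, so a chunk that appears in several archives on the media set is not verified and inserted twice. A sketch of that caching shape (the helper name is made up; digests are raw 32-byte arrays as in the diff):

use std::collections::{HashMap, HashSet};

// One verified-digest set per datastore name; entry() creates the set
// lazily, mirroring checked_chunks_map.entry(store).or_insert(HashSet::new()).
fn mark_checked(
    checked: &mut HashMap<String, HashSet<[u8; 32]>>,
    store: &str,
    digest: [u8; 32],
) -> bool {
    // returns true only on first insertion, i.e. when verification is needed
    checked.entry(store.to_string()).or_insert_with(HashSet::new).insert(digest)
}

fn main() {
    let mut checked = HashMap::new();
    let digest = [0u8; 32];
    assert!(mark_checked(&mut checked, "store1", digest));  // first time: verify
    assert!(!mark_checked(&mut checked, "store1", digest)); // cached: skip
}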
@@ -323,11 +469,11 @@ fn restore_archive<'a>(
     worker: &WorkerTask,
     mut reader: Box<dyn 'a + TapeRead>,
     current_file_number: u64,
-    target: Option<(&DataStore, &Authid)>,
+    target: Option<(&DataStoreMap, &Authid)>,
     catalog: &mut MediaCatalog,
+    checked_chunks_map: &mut HashMap<String, HashSet<[u8;32]>>,
     verbose: bool,
 ) -> Result<(), Error> {

     let header: MediaContentHeader = unsafe { reader.read_le_value()? };
     if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
         bail!("missing MediaContentHeader");
@@ -340,67 +486,129 @@ fn restore_archive<'a>(
             bail!("unexpected content magic (label)");
         }
         PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
-            let snapshot = reader.read_exact_allocated(header.size as usize)?;
-            let snapshot = std::str::from_utf8(&snapshot)
-                .map_err(|_| format_err!("found snapshot archive with non-utf8 characters in name"))?;
-            task_log!(worker, "Found snapshot archive: {} {}", current_file_number, snapshot);
+            bail!("unexpected snapshot archive version (v1.0)");
+        }
+        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => {
+            let header_data = reader.read_exact_allocated(header.size as usize)?;
+
+            let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
+                .map_err(|err| format_err!("unable to parse snapshot archive header - {}", err))?;
+
+            let datastore_name = archive_header.store;
+            let snapshot = archive_header.snapshot;
+
+            let checked_chunks = checked_chunks_map.entry(datastore_name.clone()).or_insert(HashSet::new());
+
+            task_log!(worker, "File {}: snapshot archive {}:{}", current_file_number, datastore_name, snapshot);

             let backup_dir: BackupDir = snapshot.parse()?;

-            if let Some((datastore, authid)) = target.as_ref() {
-                let (owner, _group_lock) = datastore.create_locked_backup_group(backup_dir.group(), authid)?;
-                if *authid != &owner { // only the owner is allowed to create additional snapshots
-                    bail!("restore '{}' failed - owner check failed ({} != {})", snapshot, authid, owner);
-                }
-
-                let (rel_path, is_new, _snap_lock) = datastore.create_locked_backup_dir(&backup_dir)?;
-                let mut path = datastore.base_path();
-                path.push(rel_path);
-
-                if is_new {
-                    task_log!(worker, "restore snapshot {}", backup_dir);
-
-                    match restore_snapshot_archive(worker, reader, &path) {
-                        Err(err) => {
-                            std::fs::remove_dir_all(&path)?;
-                            bail!("restore snapshot {} failed - {}", backup_dir, err);
-                        }
-                        Ok(false) => {
-                            std::fs::remove_dir_all(&path)?;
-                            task_log!(worker, "skip incomplete snapshot {}", backup_dir);
-                        }
-                        Ok(true) => {
-                            catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
-                            catalog.commit_if_large()?;
-                        }
+            if let Some((store_map, authid)) = target.as_ref() {
+                if let Some(datastore) = store_map.get_datastore(&datastore_name) {
+                    let (owner, _group_lock) =
+                        datastore.create_locked_backup_group(backup_dir.group(), authid)?;
+                    if *authid != &owner {
+                        // only the owner is allowed to create additional snapshots
+                        bail!(
+                            "restore '{}' failed - owner check failed ({} != {})",
+                            snapshot,
+                            authid,
+                            owner
+                        );
                     }
-                    return Ok(());
+
+                    let (rel_path, is_new, _snap_lock) =
+                        datastore.create_locked_backup_dir(&backup_dir)?;
+                    let mut path = datastore.base_path();
+                    path.push(rel_path);
+
+                    if is_new {
+                        task_log!(worker, "restore snapshot {}", backup_dir);
+
+                        match restore_snapshot_archive(worker, reader, &path, &datastore, checked_chunks) {
+                            Err(err) => {
+                                std::fs::remove_dir_all(&path)?;
+                                bail!("restore snapshot {} failed - {}", backup_dir, err);
+                            }
+                            Ok(false) => {
+                                std::fs::remove_dir_all(&path)?;
+                                task_log!(worker, "skip incomplete snapshot {}", backup_dir);
+                            }
+                            Ok(true) => {
+                                catalog.register_snapshot(
+                                    Uuid::from(header.uuid),
+                                    current_file_number,
+                                    &datastore_name,
+                                    &snapshot,
+                                )?;
+                                catalog.commit_if_large()?;
+                            }
+                        }
+                        return Ok(());
+                    }
+                } else {
+                    task_log!(worker, "skipping...");
                 }
             }

-            reader.skip_to_end()?; // read all data
+            reader.skip_data()?; // read all data
             if let Ok(false) = reader.is_incomplete() {
-                catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
+                catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, &datastore_name, &snapshot)?;
                 catalog.commit_if_large()?;
             }
         }
         PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {
-            task_log!(worker, "Found chunk archive: {}", current_file_number);
-            let datastore = target.as_ref().map(|t| t.0);
-
-            if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
-                catalog.start_chunk_archive(Uuid::from(header.uuid), current_file_number)?;
-                for digest in chunks.iter() {
-                    catalog.register_chunk(&digest)?;
-                }
-                task_log!(worker, "register {} chunks", chunks.len());
-                catalog.end_chunk_archive()?;
-                catalog.commit_if_large()?;
-            }
+            bail!("unexpected chunk archive version (v1.0)");
         }
-        _ => bail!("unknown content magic {:?}", header.content_magic),
+        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1 => {
+            let header_data = reader.read_exact_allocated(header.size as usize)?;
+
+            let archive_header: ChunkArchiveHeader = serde_json::from_slice(&header_data)
+                .map_err(|err| format_err!("unable to parse chunk archive header - {}", err))?;
+
+            let source_datastore = archive_header.store;
+
+            task_log!(worker, "File {}: chunk archive for datastore '{}'", current_file_number, source_datastore);
+            let datastore = target
+                .as_ref()
+                .and_then(|t| t.0.get_datastore(&source_datastore));
+
+            if datastore.is_some() || target.is_none() {
+                let checked_chunks = checked_chunks_map
+                    .entry(datastore.map(|d| d.name()).unwrap_or("_unused_").to_string())
+                    .or_insert(HashSet::new());
+
+                if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, checked_chunks, verbose)? {
+                    catalog.start_chunk_archive(
+                        Uuid::from(header.uuid),
+                        current_file_number,
+                        &source_datastore,
+                    )?;
+                    for digest in chunks.iter() {
+                        catalog.register_chunk(&digest)?;
+                    }
+                    task_log!(worker, "register {} chunks", chunks.len());
+                    catalog.end_chunk_archive()?;
+                    catalog.commit_if_large()?;
+                }
+                return Ok(());
+            } else if target.is_some() {
+                task_log!(worker, "skipping...");
+            }
+
+            reader.skip_data()?; // read all data
+        }
+        PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 => {
+            let header_data = reader.read_exact_allocated(header.size as usize)?;
+
+            let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
+                .map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?;
+
+            task_log!(worker, "File {}: skip catalog '{}'", current_file_number, archive_header.uuid);
+
+            reader.skip_data()?; // read all data
+        }
+        _ => bail!("unknown content magic {:?}", header.content_magic),
     }

     catalog.commit()?;
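The dispatch above now rejects the v1.0 snapshot and chunk archives outright and handles the v1.1 headers that name their source datastore, plus the new catalog archive type. A toy sketch of magic-based dispatch; the byte values here are invented, the real constants live in `file_formats`:

// Hypothetical magic values; PBS uses its own 8-byte constants.
const SNAPSHOT_V1_0: [u8; 8] = *b"SNAP_1.0";
const SNAPSHOT_V1_1: [u8; 8] = *b"SNAP_1.1";

fn dispatch(magic: [u8; 8]) -> Result<(), String> {
    match magic {
        SNAPSHOT_V1_0 => Err("unexpected snapshot archive version (v1.0)".into()),
        SNAPSHOT_V1_1 => Ok(()), // parse the JSON header, then restore
        _ => Err(format!("unknown content magic {:?}", magic)),
    }
}

fn main() {
    assert!(dispatch(*b"SNAP_1.1").is_ok());
    assert!(dispatch(*b"XXXXXXXX").is_err());
}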
@@ -412,71 +620,74 @@ fn restore_chunk_archive<'a>(
     worker: &WorkerTask,
     reader: Box<dyn 'a + TapeRead>,
     datastore: Option<&DataStore>,
+    checked_chunks: &mut HashSet<[u8;32]>,
     verbose: bool,
 ) -> Result<Option<Vec<[u8;32]>>, Error> {

     let mut chunks = Vec::new();

     let mut decoder = ChunkArchiveDecoder::new(reader);

-    let result: Result<_, Error> = proxmox::try_block!({
-        while let Some((digest, blob)) = decoder.next_chunk()? {
-            worker.check_abort()?;
-
-            if let Some(datastore) = datastore {
-                let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
-                if !chunk_exists {
-                    blob.verify_crc()?;
-
-                    if blob.crypt_mode()? == CryptMode::None {
-                        blob.decode(None, Some(&digest))?; // verify digest
-                    }
-                    if verbose {
-                        task_log!(worker, "Insert chunk: {}", proxmox::tools::digest_to_hex(&digest));
-                    }
-                    datastore.insert_chunk(&blob, &digest)?;
-                } else if verbose {
-                    task_log!(worker, "Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest));
-                }
-            } else if verbose {
-                task_log!(worker, "Found chunk: {}", proxmox::tools::digest_to_hex(&digest));
-            }
-            chunks.push(digest);
-        }
-        Ok(())
-    });
-
-    match result {
-        Ok(()) => Ok(Some(chunks)),
-        Err(err) => {
-            let reader = decoder.reader();
-
-            // check if this stream is marked incomplete
-            if let Ok(true) = reader.is_incomplete() {
-                return Ok(Some(chunks));
-            }
-
-            // check if this is an aborted stream without end marker
-            if let Ok(false) = reader.has_end_marker() {
-                worker.log("missing stream end marker".to_string());
-                return Ok(None);
-            }
-
-            // else the archive is corrupt
-            Err(err)
-        }
-    }
+    loop {
+        let (digest, blob) = match decoder.next_chunk() {
+            Ok(Some((digest, blob))) => (digest, blob),
+            Ok(None) => break,
+            Err(err) => {
+                let reader = decoder.reader();
+
+                // check if this stream is marked incomplete
+                if let Ok(true) = reader.is_incomplete() {
+                    return Ok(Some(chunks));
+                }
+
+                // check if this is an aborted stream without end marker
+                if let Ok(false) = reader.has_end_marker() {
+                    worker.log("missing stream end marker".to_string());
+                    return Ok(None);
+                }
+
+                // else the archive is corrupt
+                return Err(err);
+            }
+        };
+
+        worker.check_abort()?;
+
+        if let Some(datastore) = datastore {
+            let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
+            if !chunk_exists {
+                blob.verify_crc()?;
+
+                if blob.crypt_mode()? == CryptMode::None {
+                    blob.decode(None, Some(&digest))?; // verify digest
+                }
+                if verbose {
+                    task_log!(worker, "Insert chunk: {}", proxmox::tools::digest_to_hex(&digest));
+                }
+                datastore.insert_chunk(&blob, &digest)?;
+            } else if verbose {
+                task_log!(worker, "Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest));
+            }
+            checked_chunks.insert(digest.clone());
+        } else if verbose {
+            task_log!(worker, "Found chunk: {}", proxmox::tools::digest_to_hex(&digest));
+        }
+        chunks.push(digest);
+    }
+
+    Ok(Some(chunks))
 }
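
Note how the rewritten loop distinguishes three failure modes on a read error: an incomplete-but-marked stream (keep the chunks read so far), an aborted stream without end marker (return None), and genuine corruption (propagate the error). A compile-checked sketch of that control-flow shape, with plain closures standing in for the tape reader:

    fn restore_loop(
        mut next_chunk: impl FnMut() -> Result<Option<u32>, String>,
        is_incomplete: impl Fn() -> bool,
        has_end_marker: impl Fn() -> bool,
    ) -> Result<Option<Vec<u32>>, String> {
        let mut chunks = Vec::new();
        loop {
            match next_chunk() {
                Ok(Some(digest)) => chunks.push(digest),
                Ok(None) => break, // clean end-of-archive marker
                Err(err) => {
                    if is_incomplete() {
                        return Ok(Some(chunks)); // keep the chunks read so far
                    }
                    if !has_end_marker() {
                        return Ok(None); // aborted stream - nothing trustworthy
                    }
                    return Err(err); // genuinely corrupt archive
                }
            }
        }
        Ok(Some(chunks))
    }

    fn main() {
        // simulate a stream that yields two chunks, then fails while marked incomplete
        let mut data = vec![Ok(Some(1)), Ok(Some(2)), Err("read error".to_string())].into_iter();
        let result = restore_loop(move || data.next().unwrap(), || true, || false);
        assert_eq!(result, Ok(Some(vec![1, 2])));
    }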
 fn restore_snapshot_archive<'a>(
     worker: &WorkerTask,
     reader: Box<dyn 'a + TapeRead>,
     snapshot_path: &Path,
+    datastore: &DataStore,
+    checked_chunks: &mut HashSet<[u8;32]>,
 ) -> Result<bool, Error> {

     let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
-    match try_restore_snapshot_archive(worker, &mut decoder, snapshot_path) {
+    match try_restore_snapshot_archive(worker, &mut decoder, snapshot_path, datastore, checked_chunks) {
         Ok(()) => Ok(true),
         Err(err) => {
             let reader = decoder.input();

@@ -501,6 +712,8 @@ fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
     worker: &WorkerTask,
     decoder: &mut pxar::decoder::sync::Decoder<R>,
     snapshot_path: &Path,
+    datastore: &DataStore,
+    checked_chunks: &mut HashSet<[u8;32]>,
 ) -> Result<(), Error> {

     let _root = match decoder.next() {

@@ -591,11 +804,13 @@ fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
                 let index = DynamicIndexReader::open(&archive_path)?;
                 let (csum, size) = index.compute_csum();
                 manifest.verify_file(&item.filename, &csum, size)?;
+                datastore.fast_index_verification(&index, checked_chunks)?;
             }
             ArchiveType::FixedIndex => {
                 let index = FixedIndexReader::open(&archive_path)?;
                 let (csum, size) = index.compute_csum();
                 manifest.verify_file(&item.filename, &csum, size)?;
+                datastore.fast_index_verification(&index, checked_chunks)?;
             }
             ArchiveType::Blob => {
                 let mut tmpfile = std::fs::File::open(&archive_path)?;
@@ -617,3 +832,144 @@ fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(

     Ok(())
 }
+
+/// Try to restore media catalogs (from catalog_archives)
+pub fn fast_catalog_restore(
+    worker: &WorkerTask,
+    drive: &mut Box<dyn TapeDriver>,
+    media_set: &MediaSet,
+    uuid: &Uuid, // current media Uuid
+) -> Result<bool, Error> {
+
+    let status_path = Path::new(TAPE_STATUS_DIR);
+
+    let current_file_number = drive.current_file_number()?;
+    if current_file_number != 2 {
+        bail!("fast_catalog_restore: wrong media position - internal error");
+    }
+
+    let mut found_catalog = false;
+
+    let mut moved_to_eom = false;
+
+    loop {
+        let current_file_number = drive.current_file_number()?;
+
+        { // limit reader scope
+            let mut reader = match drive.read_next_file() {
+                Err(BlockReadError::EndOfFile) => {
+                    task_log!(worker, "skip unexpected filemark at pos {}", current_file_number);
+                    continue;
+                }
+                Err(BlockReadError::EndOfStream) => {
+                    task_log!(worker, "detected EOT after {} files", current_file_number);
+                    break;
+                }
+                Err(BlockReadError::Error(err)) => {
+                    return Err(err.into());
+                }
+                Ok(reader) => reader,
+            };
+
+            let header: MediaContentHeader = unsafe { reader.read_le_value()? };
+            if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
+                bail!("missing MediaContentHeader");
+            }
+
+            if header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 {
+                task_log!(worker, "found catalog at pos {}", current_file_number);
+
+                let header_data = reader.read_exact_allocated(header.size as usize)?;
+
+                let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
+                    .map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?;
+
+                if &archive_header.media_set_uuid != media_set.uuid() {
+                    task_log!(worker, "skipping unrelated catalog at pos {}", current_file_number);
+                    reader.skip_data()?; // read all data
+                    continue;
+                }
+
+                let catalog_uuid = &archive_header.uuid;
+
+                let wanted = media_set
+                    .media_list()
+                    .iter()
+                    .find(|e| {
+                        match e {
+                            None => false,
+                            Some(uuid) => uuid == catalog_uuid,
+                        }
+                    })
+                    .is_some();
+
+                if !wanted {
+                    task_log!(worker, "skip catalog because media '{}' not inventarized", catalog_uuid);
+                    reader.skip_data()?; // read all data
+                    continue;
+                }
+
+                if catalog_uuid == uuid {
+                    // always restore and overwrite catalog
+                } else {
+                    // only restore if catalog does not exist
+                    if MediaCatalog::exists(status_path, catalog_uuid) {
+                        task_log!(worker, "catalog for media '{}' already exists", catalog_uuid);
+                        reader.skip_data()?; // read all data
+                        continue;
+                    }
+                }
+
+                let mut file = MediaCatalog::create_temporary_database_file(status_path, catalog_uuid)?;
+
+                std::io::copy(&mut reader, &mut file)?;
+
+                file.seek(SeekFrom::Start(0))?;
+
+                match MediaCatalog::parse_catalog_header(&mut file)? {
+                    (true, Some(media_uuid), Some(media_set_uuid)) => {
+                        if &media_uuid != catalog_uuid {
+                            task_log!(worker, "catalog uuid missmatch at pos {}", current_file_number);
+                            continue;
+                        }
+                        if media_set_uuid != archive_header.media_set_uuid {
+                            task_log!(worker, "catalog media_set missmatch at pos {}", current_file_number);
+                            continue;
+                        }
+
+                        MediaCatalog::finish_temporary_database(status_path, &media_uuid, true)?;
+
+                        if catalog_uuid == uuid {
+                            task_log!(worker, "successfully restored catalog");
+                            found_catalog = true
+                        } else {
+                            task_log!(worker, "successfully restored related catalog {}", media_uuid);
+                        }
+                    }
+                    _ => {
+                        task_warn!(worker, "got incomplete catalog header - skip file");
+                        continue;
+                    }
+                }
+
+                continue;
+            }
+        }
+
+        if moved_to_eom {
+            break; // already done - stop
+        }
+        moved_to_eom = true;
+
+        task_log!(worker, "searching for catalog at EOT (moving to EOT)");
+        drive.move_to_last_file()?;
+
+        let new_file_number = drive.current_file_number()?;
+
+        if new_file_number < (current_file_number + 1) {
+            break; // no new content - stop
+        }
+    }
+
+    Ok(found_catalog)
+}
src/api2/types/file_restore.rs (new file, 15 lines)
@@ -0,0 +1,15 @@
+use serde::{Deserialize, Serialize};
+
+use proxmox::api::api;
+
+#[api()]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+/// General status information about a running VM file-restore daemon
+pub struct RestoreDaemonStatus {
+    /// VM uptime in seconds
+    pub uptime: i64,
+    /// time left until auto-shutdown, keep in mind that this is useless when 'keep-timeout' is
+    /// not set, as then the status call will have reset the timer before returning the value
+    pub timeout: i64,
+}
@@ -34,6 +34,9 @@ pub use userid::{PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA, PROXMOX_GRO
 mod tape;
 pub use tape::*;

+mod file_restore;
+pub use file_restore::*;
+
 // File names: may not contain slashes, may not start with "."
 pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
     if name.starts_with('.') {

@@ -99,6 +102,8 @@ const_regex!{
     pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";

     pub UUID_REGEX = r"^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}$";
+
+    pub DATASTORE_MAP_REGEX = concat!(r"(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!());
 }

 pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =

@@ -164,6 +169,9 @@ pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat =
 pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);

+pub const DATASTORE_MAP_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&DATASTORE_MAP_REGEX);
+
 pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.")
     .format(&PASSWORD_FORMAT)
     .min_length(1)

@@ -356,6 +364,25 @@ pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
     .max_length(32)
     .schema();

+pub const DATASTORE_MAP_SCHEMA: Schema = StringSchema::new("Datastore mapping.")
+    .format(&DATASTORE_MAP_FORMAT)
+    .min_length(3)
+    .max_length(65)
+    .type_text("(<source>=)?<target>")
+    .schema();
+
+pub const DATASTORE_MAP_ARRAY_SCHEMA: Schema = ArraySchema::new(
+    "Datastore mapping list.", &DATASTORE_MAP_SCHEMA)
+    .schema();
+
+pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new(
+    "A list of Datastore mappings (or single datastore), comma separated. \
+    For example 'a=b,e' maps the source datastore 'a' to target 'b and \
+    all other sources to the default 'e'. If no default is given, only the \
+    specified sources are mapped.")
+    .format(&ApiStringFormat::PropertyString(&DATASTORE_MAP_ARRAY_SCHEMA))
+    .schema();
+
 pub const MEDIA_SET_UUID_SCHEMA: Schema =
     StringSchema::new("MediaSet Uuid (We use the all-zero Uuid to reseve an empty media for a specific pool).")
     .format(&UUID_FORMAT)
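
DATASTORE_MAP_LIST_SCHEMA above describes strings such as 'a=b,e', where 'a=b' is an explicit source-to-target mapping and a bare 'e' acts as the default target. A hypothetical resolver for that format, shown only to illustrate the semantics (it is not the actual PBS implementation):

    fn map_store<'a>(mappings: &'a str, source: &str) -> Option<&'a str> {
        let mut default = None;
        for part in mappings.split(',') {
            match part.split_once('=') {
                Some((from, to)) if from == source => return Some(to), // explicit mapping wins
                Some(_) => {}                                          // mapping for another source
                None => default = Some(part),                          // bare target acts as default
            }
        }
        default
    }

    fn main() {
        assert_eq!(map_store("a=b,e", "a"), Some("b"));
        assert_eq!(map_store("a=b,e", "x"), Some("e")); // falls back to the default
        assert_eq!(map_store("a=b", "x"), None);        // no default: source stays unmapped
    }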
@@ -1327,19 +1354,22 @@ pub struct ArchiveEntry {
 }

 impl ArchiveEntry {
-    pub fn new(filepath: &[u8], entry_type: &DirEntryAttribute) -> Self {
+    pub fn new(filepath: &[u8], entry_type: Option<&DirEntryAttribute>) -> Self {
         Self {
             filepath: base64::encode(filepath),
             text: String::from_utf8_lossy(filepath.split(|x| *x == b'/').last().unwrap())
                 .to_string(),
-            entry_type: CatalogEntryType::from(entry_type).to_string(),
-            leaf: !matches!(entry_type, DirEntryAttribute::Directory { .. }),
+            entry_type: match entry_type {
+                Some(entry_type) => CatalogEntryType::from(entry_type).to_string(),
+                None => "v".to_owned(),
+            },
+            leaf: !matches!(entry_type, None | Some(DirEntryAttribute::Directory { .. })),
             size: match entry_type {
-                DirEntryAttribute::File { size, .. } => Some(*size),
+                Some(DirEntryAttribute::File { size, .. }) => Some(*size),
                 _ => None
             },
             mtime: match entry_type {
-                DirEntryAttribute::File { mtime, .. } => Some(*mtime),
+                Some(DirEntryAttribute::File { mtime, .. }) => Some(*mtime),
                 _ => None
             },
         }
@@ -21,8 +21,8 @@ pub const DRIVE_NAME_SCHEMA: Schema = StringSchema::new("Drive Identifier.")
     .max_length(32)
     .schema();

-pub const LINUX_DRIVE_PATH_SCHEMA: Schema = StringSchema::new(
-    "The path to a LINUX non-rewinding SCSI tape device (i.e. '/dev/nst0')")
+pub const LTO_DRIVE_PATH_SCHEMA: Schema = StringSchema::new(
+    "The path to a LTO SCSI-generic tape device (i.e. '/dev/sg0')")
     .schema();

 pub const CHANGER_DRIVENUM_SCHEMA: Schema = IntegerSchema::new(

@@ -57,7 +57,7 @@ pub struct VirtualTapeDrive {
         schema: DRIVE_NAME_SCHEMA,
     },
     path: {
-        schema: LINUX_DRIVE_PATH_SCHEMA,
+        schema: LTO_DRIVE_PATH_SCHEMA,
     },
     changer: {
         schema: CHANGER_NAME_SCHEMA,

@@ -71,8 +71,8 @@ pub struct VirtualTapeDrive {
 )]
 #[derive(Serialize,Deserialize)]
 #[serde(rename_all = "kebab-case")]
-/// Linux SCSI tape driver
-pub struct LinuxTapeDrive {
+/// Lto SCSI tape driver
+pub struct LtoTapeDrive {
     pub name: String,
     pub path: String,
     #[serde(skip_serializing_if="Option::is_none")]

@@ -84,7 +84,7 @@ pub struct LinuxTapeDrive {
 #[api(
     properties: {
         config: {
-            type: LinuxTapeDrive,
+            type: LtoTapeDrive,
         },
         info: {
             type: OptionalDeviceIdentification,

@@ -96,7 +96,7 @@ pub struct LinuxTapeDrive {
 /// Drive list entry
 pub struct DriveListEntry {
     #[serde(flatten)]
-    pub config: LinuxTapeDrive,
+    pub config: LtoTapeDrive,
     #[serde(flatten)]
     pub info: OptionalDeviceIdentification,
     /// the state of the drive if locked

@@ -119,6 +119,8 @@ pub struct MamAttribute {
 #[api()]
 #[derive(Serialize,Deserialize,Copy,Clone,Debug)]
 pub enum TapeDensity {
+    /// Unknown (no media loaded)
+    Unknown,
     /// LTO1
     LTO1,
     /// LTO2

@@ -144,6 +146,7 @@ impl TryFrom<u8> for TapeDensity {
     fn try_from(value: u8) -> Result<Self, Self::Error> {
         let density = match value {
+            0x00 => TapeDensity::Unknown,
             0x40 => TapeDensity::LTO1,
             0x42 => TapeDensity::LTO2,
             0x44 => TapeDensity::LTO3,
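
The new Unknown variant extends the same TryFrom<u8> pattern used for the SCSI density codes. A trimmed, self-contained sketch of that mapping (only three of the codes from the diff are shown, with simplified variant names):

    use std::convert::TryFrom;

    #[derive(Debug, PartialEq)]
    enum Density { Unknown, Lto1, Lto2 }

    impl TryFrom<u8> for Density {
        type Error = String;
        fn try_from(value: u8) -> Result<Self, Self::Error> {
            Ok(match value {
                0x00 => Density::Unknown, // no media loaded
                0x40 => Density::Lto1,
                0x42 => Density::Lto2,
                other => return Err(format!("unknown tape density code {:#04x}", other)),
            })
        }
    }

    fn main() {
        assert_eq!(Density::try_from(0x40), Ok(Density::Lto1));
        assert!(Density::try_from(0xff).is_err());
    }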
@@ -169,29 +172,37 @@ impl TryFrom<u8> for TapeDensity {
 )]
 #[derive(Serialize,Deserialize)]
 #[serde(rename_all = "kebab-case")]
-/// Drive/Media status for Linux SCSI drives.
+/// Drive/Media status for Lto SCSI drives.
 ///
 /// Media related data is optional - only set if there is a medium
 /// loaded.
-pub struct LinuxDriveAndMediaStatus {
+pub struct LtoDriveAndMediaStatus {
+    /// Vendor
+    pub vendor: String,
+    /// Product
+    pub product: String,
+    /// Revision
+    pub revision: String,
     /// Block size (0 is variable size)
     pub blocksize: u32,
+    /// Compression enabled
+    pub compression: bool,
+    /// Drive buffer mode
+    pub buffer_mode: u8,
     /// Tape density
+    pub density: TapeDensity,
+    /// Media is write protected
     #[serde(skip_serializing_if="Option::is_none")]
-    pub density: Option<TapeDensity>,
-    /// Status flags
-    pub status: String,
-    /// Linux Driver Options
-    pub options: String,
+    pub write_protect: Option<bool>,
     /// Tape Alert Flags
     #[serde(skip_serializing_if="Option::is_none")]
     pub alert_flags: Option<String>,
     /// Current file number
     #[serde(skip_serializing_if="Option::is_none")]
-    pub file_number: Option<u32>,
+    pub file_number: Option<u64>,
     /// Current block number
     #[serde(skip_serializing_if="Option::is_none")]
-    pub block_number: Option<u32>,
+    pub block_number: Option<u64>,
     /// Medium Manufacture Date (epoch)
     #[serde(skip_serializing_if="Option::is_none")]
     pub manufactured: Option<i64>,

@@ -212,3 +223,62 @@ pub struct LinuxDriveAndMediaStatus {
     #[serde(skip_serializing_if="Option::is_none")]
     pub medium_wearout: Option<f64>,
 }
+
+#[api()]
+/// Volume statistics from SCSI log page 17h
+#[derive(Default, Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+pub struct Lp17VolumeStatistics {
+    /// Volume mounts (thread count)
+    pub volume_mounts: u64,
+    /// Total data sets written
+    pub volume_datasets_written: u64,
+    /// Write retries
+    pub volume_recovered_write_data_errors: u64,
+    /// Total unrecovered write errors
+    pub volume_unrecovered_write_data_errors: u64,
+    /// Total suspended writes
+    pub volume_write_servo_errors: u64,
+    /// Total fatal suspended writes
+    pub volume_unrecovered_write_servo_errors: u64,
+    /// Total datasets read
+    pub volume_datasets_read: u64,
+    /// Total read retries
+    pub volume_recovered_read_errors: u64,
+    /// Total unrecovered read errors
+    pub volume_unrecovered_read_errors: u64,
+    /// Last mount unrecovered write errors
+    pub last_mount_unrecovered_write_errors: u64,
+    /// Last mount unrecovered read errors
+    pub last_mount_unrecovered_read_errors: u64,
+    /// Last mount bytes written
+    pub last_mount_bytes_written: u64,
+    /// Last mount bytes read
+    pub last_mount_bytes_read: u64,
+    /// Lifetime bytes written
+    pub lifetime_bytes_written: u64,
+    /// Lifetime bytes read
+    pub lifetime_bytes_read: u64,
+    /// Last load write compression ratio
+    pub last_load_write_compression_ratio: u64,
+    /// Last load read compression ratio
+    pub last_load_read_compression_ratio: u64,
+    /// Medium mount time
+    pub medium_mount_time: u64,
+    /// Medium ready time
+    pub medium_ready_time: u64,
+    /// Total native capacity
+    pub total_native_capacity: u64,
+    /// Total used native capacity
+    pub total_used_native_capacity: u64,
+    /// Write protect
+    pub write_protect: bool,
+    /// Volume is WORM
+    pub worm: bool,
+    /// Beginning of medium passes
+    pub beginning_of_medium_passes: u64,
+    /// Middle of medium passes
+    pub middle_of_tape_passes: u64,
+    /// Volume serial number
+    pub serial: String,
+}

@@ -144,6 +144,8 @@ pub struct MediaContentEntry {
     pub seq_nr: u64,
     /// Media Pool
     pub pool: String,
+    /// Datastore Name
+    pub store: String,
     /// Backup snapshot
     pub snapshot: String,
     /// Snapshot creation time (epoch)
src/auth.rs (24 lines changed)
@@ -14,6 +14,7 @@ use crate::api2::types::{Userid, UsernameRef, RealmRef};
 pub trait ProxmoxAuthenticator {
     fn authenticate_user(&self, username: &UsernameRef, password: &str) -> Result<(), Error>;
     fn store_password(&self, username: &UsernameRef, password: &str) -> Result<(), Error>;
+    fn remove_password(&self, username: &UsernameRef) -> Result<(), Error>;
 }

 pub struct PAM();

@@ -60,6 +61,11 @@ impl ProxmoxAuthenticator for PAM {

         Ok(())
     }
+
+    // do not remove password for pam users
+    fn remove_password(&self, _username: &UsernameRef) -> Result<(), Error> {
+        Ok(())
+    }
 }

 pub struct PBS();

@@ -132,6 +138,24 @@ impl ProxmoxAuthenticator for PBS {

         Ok(())
     }
+
+    fn remove_password(&self, username: &UsernameRef) -> Result<(), Error> {
+        let mut data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
+        if let Some(map) = data.as_object_mut() {
+            map.remove(username.as_str());
+        }
+
+        let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
+        let options = proxmox::tools::fs::CreateOptions::new()
+            .perm(mode)
+            .owner(nix::unistd::ROOT)
+            .group(nix::unistd::Gid::from_raw(0));
+
+        let data = serde_json::to_vec_pretty(&data)?;
+        proxmox::tools::fs::replace_file(SHADOW_CONFIG_FILENAME, &data, options)?;
+
+        Ok(())
+    }
 }

 /// Lookup the autenticator for the specified realm
@@ -3,17 +3,29 @@ use crate::tools;
 use anyhow::{bail, format_err, Error};
 use std::os::unix::io::RawFd;

-use std::path::{PathBuf, Path};
+use std::path::{Path, PathBuf};

 use proxmox::const_regex;

 use super::manifest::MANIFEST_BLOB_NAME;

-macro_rules! BACKUP_ID_RE { () => (r"[A-Za-z0-9_][A-Za-z0-9._\-]*") }
-macro_rules! BACKUP_TYPE_RE { () => (r"(?:host|vm|ct)") }
-macro_rules! BACKUP_TIME_RE { () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z") }
+macro_rules! BACKUP_ID_RE {
+    () => {
+        r"[A-Za-z0-9_][A-Za-z0-9._\-]*"
+    };
+}
+macro_rules! BACKUP_TYPE_RE {
+    () => {
+        r"(?:host|vm|ct)"
+    };
+}
+macro_rules! BACKUP_TIME_RE {
+    () => {
+        r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z"
+    };
+}

-const_regex!{
+const_regex! {
     BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";

     BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");
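
The multi-line macro style is purely cosmetic; either form expands to a string literal that `concat!` can splice into larger patterns at compile time. A tiny self-contained illustration of the technique (the composed pattern below is an example, not one of the regexes above):

    macro_rules! ID_RE {
        () => {
            r"[A-Za-z0-9_][A-Za-z0-9._\-]*"
        };
    }

    fn main() {
        // the nested macro call is expanded before concat! joins the literals
        const GROUP_PATH_RE: &str = concat!(r"^(?:host|vm|ct)/(", ID_RE!(), r")$");
        println!("{}", GROUP_PATH_RE); // pattern assembled at compile time
    }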
@@ -38,7 +50,6 @@ pub struct BackupGroup {
 }

 impl std::cmp::Ord for BackupGroup {
-
     fn cmp(&self, other: &Self) -> std::cmp::Ordering {
         let type_order = self.backup_type.cmp(&other.backup_type);
         if type_order != std::cmp::Ordering::Equal {

@@ -51,7 +62,7 @@ impl std::cmp::Ord for BackupGroup {
             (Ok(id_self), Ok(id_other)) => id_self.cmp(&id_other),
             (Ok(_), Err(_)) => std::cmp::Ordering::Less,
             (Err(_), Ok(_)) => std::cmp::Ordering::Greater,
             _ => self.backup_id.cmp(&other.backup_id),
         }
     }
 }

@@ -63,9 +74,11 @@ impl std::cmp::PartialOrd for BackupGroup {
 }

 impl BackupGroup {
-
     pub fn new<T: Into<String>, U: Into<String>>(backup_type: T, backup_id: U) -> Self {
-        Self { backup_type: backup_type.into(), backup_id: backup_id.into() }
+        Self {
+            backup_type: backup_type.into(),
+            backup_id: backup_id.into(),
+        }
     }

     pub fn backup_type(&self) -> &str {

@@ -76,8 +89,7 @@ impl BackupGroup {
         &self.backup_id
     }

     pub fn group_path(&self) -> PathBuf {
-
         let mut relative_path = PathBuf::new();

         relative_path.push(&self.backup_type);

@@ -88,60 +100,82 @@ impl BackupGroup {
     }

     pub fn list_backups(&self, base_path: &Path) -> Result<Vec<BackupInfo>, Error> {
-
         let mut list = vec![];

         let mut path = base_path.to_owned();
         path.push(self.group_path());

-        tools::scandir(libc::AT_FDCWD, &path, &BACKUP_DATE_REGEX, |l2_fd, backup_time, file_type| {
-            if file_type != nix::dir::Type::Directory { return Ok(()); }
-
-            let backup_dir = BackupDir::with_rfc3339(&self.backup_type, &self.backup_id, backup_time)?;
-            let files = list_backup_files(l2_fd, backup_time)?;
-
-            list.push(BackupInfo { backup_dir, files });
-
-            Ok(())
-        })?;
+        tools::scandir(
+            libc::AT_FDCWD,
+            &path,
+            &BACKUP_DATE_REGEX,
+            |l2_fd, backup_time, file_type| {
+                if file_type != nix::dir::Type::Directory {
+                    return Ok(());
+                }
+
+                let backup_dir =
+                    BackupDir::with_rfc3339(&self.backup_type, &self.backup_id, backup_time)?;
+                let files = list_backup_files(l2_fd, backup_time)?;
+
+                list.push(BackupInfo { backup_dir, files });
+
+                Ok(())
+            },
+        )?;
         Ok(list)
     }

     pub fn last_successful_backup(&self, base_path: &Path) -> Result<Option<i64>, Error> {
-
         let mut last = None;

         let mut path = base_path.to_owned();
         path.push(self.group_path());

-        tools::scandir(libc::AT_FDCWD, &path, &BACKUP_DATE_REGEX, |l2_fd, backup_time, file_type| {
-            if file_type != nix::dir::Type::Directory { return Ok(()); }
-
-            let mut manifest_path = PathBuf::from(backup_time);
-            manifest_path.push(MANIFEST_BLOB_NAME);
-
-            use nix::fcntl::{openat, OFlag};
-            match openat(l2_fd, &manifest_path, OFlag::O_RDONLY, nix::sys::stat::Mode::empty()) {
-                Ok(rawfd) => {
-                    /* manifest exists --> assume backup was successful */
-                    /* close else this leaks! */
-                    nix::unistd::close(rawfd)?;
-                },
-                Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => { return Ok(()); }
-                Err(err) => {
-                    bail!("last_successful_backup: unexpected error - {}", err);
-                }
-            }
-
-            let timestamp = proxmox::tools::time::parse_rfc3339(backup_time)?;
-            if let Some(last_timestamp) = last {
-                if timestamp > last_timestamp { last = Some(timestamp); }
-            } else {
-                last = Some(timestamp);
-            }
-
-            Ok(())
-        })?;
+        tools::scandir(
+            libc::AT_FDCWD,
+            &path,
+            &BACKUP_DATE_REGEX,
+            |l2_fd, backup_time, file_type| {
+                if file_type != nix::dir::Type::Directory {
+                    return Ok(());
+                }
+
+                let mut manifest_path = PathBuf::from(backup_time);
+                manifest_path.push(MANIFEST_BLOB_NAME);
+
+                use nix::fcntl::{openat, OFlag};
+                match openat(
+                    l2_fd,
+                    &manifest_path,
+                    OFlag::O_RDONLY,
+                    nix::sys::stat::Mode::empty(),
+                ) {
+                    Ok(rawfd) => {
+                        /* manifest exists --> assume backup was successful */
+                        /* close else this leaks! */
+                        nix::unistd::close(rawfd)?;
+                    }
+                    Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => {
+                        return Ok(());
+                    }
+                    Err(err) => {
+                        bail!("last_successful_backup: unexpected error - {}", err);
+                    }
+                }
+
+                let timestamp = proxmox::tools::time::parse_rfc3339(backup_time)?;
+                if let Some(last_timestamp) = last {
+                    if timestamp > last_timestamp {
+                        last = Some(timestamp);
+                    }
+                } else {
+                    last = Some(timestamp);
+                }
+
+                Ok(())
+            },
+        )?;

         Ok(last)
     }

@@ -162,7 +196,8 @@ impl std::str::FromStr for BackupGroup {
     ///
     /// This parses strings like `vm/100".
     fn from_str(path: &str) -> Result<Self, Self::Err> {
-        let cap = GROUP_PATH_REGEX.captures(path)
+        let cap = GROUP_PATH_REGEX
+            .captures(path)
             .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;

         Ok(Self {

@@ -182,11 +217,10 @@ pub struct BackupDir {
     /// Backup timestamp
     backup_time: i64,
     // backup_time as rfc3339
-    backup_time_string: String
+    backup_time_string: String,
 }

 impl BackupDir {
-
     pub fn new<T, U>(backup_type: T, backup_id: U, backup_time: i64) -> Result<Self, Error>
     where
         T: Into<String>,

@@ -196,21 +230,33 @@ impl BackupDir {
         BackupDir::with_group(group, backup_time)
     }

-    pub fn with_rfc3339<T,U,V>(backup_type: T, backup_id: U, backup_time_string: V) -> Result<Self, Error>
+    pub fn with_rfc3339<T, U, V>(
+        backup_type: T,
+        backup_id: U,
+        backup_time_string: V,
+    ) -> Result<Self, Error>
     where
         T: Into<String>,
         U: Into<String>,
         V: Into<String>,
     {
         let backup_time_string = backup_time_string.into();
         let backup_time = proxmox::tools::time::parse_rfc3339(&backup_time_string)?;
         let group = BackupGroup::new(backup_type.into(), backup_id.into());
-        Ok(Self { group, backup_time, backup_time_string })
+        Ok(Self {
+            group,
+            backup_time,
+            backup_time_string,
+        })
     }

     pub fn with_group(group: BackupGroup, backup_time: i64) -> Result<Self, Error> {
         let backup_time_string = Self::backup_time_to_string(backup_time)?;
-        Ok(Self { group, backup_time, backup_time_string })
+        Ok(Self {
+            group,
+            backup_time,
+            backup_time_string,
+        })
     }

     pub fn group(&self) -> &BackupGroup {

@@ -225,8 +271,7 @@ impl BackupDir {
         &self.backup_time_string
     }

     pub fn relative_path(&self) -> PathBuf {
-
         let mut relative_path = self.group.group_path();

         relative_path.push(self.backup_time_string.clone());

@@ -247,7 +292,8 @@ impl std::str::FromStr for BackupDir {
     ///
     /// This parses strings like `host/elsa/2020-06-15T05:18:33Z".
     fn from_str(path: &str) -> Result<Self, Self::Err> {
-        let cap = SNAPSHOT_PATH_REGEX.captures(path)
+        let cap = SNAPSHOT_PATH_REGEX
+            .captures(path)
             .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;

         BackupDir::with_rfc3339(

@@ -276,7 +322,6 @@ pub struct BackupInfo {
 }

 impl BackupInfo {
-
     pub fn new(base_path: &Path, backup_dir: BackupDir) -> Result<BackupInfo, Error> {
         let mut path = base_path.to_owned();
         path.push(backup_dir.relative_path());

@@ -287,19 +332,24 @@ impl BackupInfo {
     }

     /// Finds the latest backup inside a backup group
-    pub fn last_backup(base_path: &Path, group: &BackupGroup, only_finished: bool)
-        -> Result<Option<BackupInfo>, Error>
-    {
+    pub fn last_backup(
+        base_path: &Path,
+        group: &BackupGroup,
+        only_finished: bool,
+    ) -> Result<Option<BackupInfo>, Error> {
         let backups = group.list_backups(base_path)?;
-        Ok(backups.into_iter()
+        Ok(backups
+            .into_iter()
             .filter(|item| !only_finished || item.is_finished())
             .max_by_key(|item| item.backup_dir.backup_time()))
     }

     pub fn sort_list(list: &mut Vec<BackupInfo>, ascendending: bool) {
-        if ascendending { // oldest first
+        if ascendending {
+            // oldest first
             list.sort_unstable_by(|a, b| a.backup_dir.backup_time.cmp(&b.backup_dir.backup_time));
-        } else { // newest first
+        } else {
+            // newest first
             list.sort_unstable_by(|a, b| b.backup_dir.backup_time.cmp(&a.backup_dir.backup_time));
         }
     }

@@ -316,31 +366,52 @@ impl BackupInfo {
     pub fn list_backup_groups(base_path: &Path) -> Result<Vec<BackupGroup>, Error> {
         let mut list = Vec::new();

-        tools::scandir(libc::AT_FDCWD, base_path, &BACKUP_TYPE_REGEX, |l0_fd, backup_type, file_type| {
-            if file_type != nix::dir::Type::Directory { return Ok(()); }
-            tools::scandir(l0_fd, backup_type, &BACKUP_ID_REGEX, |_, backup_id, file_type| {
-                if file_type != nix::dir::Type::Directory { return Ok(()); }
-
-                list.push(BackupGroup::new(backup_type, backup_id));
-
-                Ok(())
-            })
-        })?;
+        tools::scandir(
+            libc::AT_FDCWD,
+            base_path,
+            &BACKUP_TYPE_REGEX,
+            |l0_fd, backup_type, file_type| {
+                if file_type != nix::dir::Type::Directory {
+                    return Ok(());
+                }
+                tools::scandir(
+                    l0_fd,
+                    backup_type,
+                    &BACKUP_ID_REGEX,
+                    |_, backup_id, file_type| {
+                        if file_type != nix::dir::Type::Directory {
+                            return Ok(());
+                        }
+
+                        list.push(BackupGroup::new(backup_type, backup_id));
+
+                        Ok(())
+                    },
+                )
+            },
+        )?;

         Ok(list)
     }

     pub fn is_finished(&self) -> bool {
         // backup is considered unfinished if there is no manifest
-        self.files.iter().any(|name| name == super::MANIFEST_BLOB_NAME)
+        self.files
+            .iter()
+            .any(|name| name == super::MANIFEST_BLOB_NAME)
     }
 }

-fn list_backup_files<P: ?Sized + nix::NixPath>(dirfd: RawFd, path: &P) -> Result<Vec<String>, Error> {
+fn list_backup_files<P: ?Sized + nix::NixPath>(
+    dirfd: RawFd,
+    path: &P,
+) -> Result<Vec<String>, Error> {
     let mut files = vec![];

     tools::scandir(dirfd, path, &BACKUP_FILE_REGEX, |_, filename, file_type| {
-        if file_type != nix::dir::Type::File { return Ok(()); }
+        if file_type != nix::dir::Type::File {
+            return Ok(());
+        }
         files.push(filename.to_owned());
         Ok(())
     })?;
@@ -153,6 +153,34 @@ impl DataStore {
         Ok(out)
     }

+    /// Fast index verification - only check if chunks exists
+    pub fn fast_index_verification(
+        &self,
+        index: &dyn IndexFile,
+        checked: &mut HashSet<[u8;32]>,
+    ) -> Result<(), Error> {
+
+        for pos in 0..index.index_count() {
+            let info = index.chunk_info(pos).unwrap();
+            if checked.contains(&info.digest) {
+                continue;
+            }
+
+            self.stat_chunk(&info.digest).
+                map_err(|err| {
+                    format_err!(
+                        "fast_index_verification error, stat_chunk {} failed - {}",
+                        proxmox::tools::digest_to_hex(&info.digest),
+                        err,
+                    )
+                })?;
+
+            checked.insert(info.digest);
+        }
+
+        Ok(())
+    }
+
     pub fn name(&self) -> &str {
         self.chunk_store.name()
     }
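
fast_index_verification never reads chunk data: stat_chunk only does a metadata lookup, and the shared HashSet keeps each digest from being stat'ed twice across indexes. A standalone sketch of the same idea; the `.chunks/<first 4 hex digits>/<digest>` layout below is an assumption for illustration, not a guaranteed detail of the datastore format:

    use std::collections::HashSet;
    use std::io;
    use std::path::Path;

    fn chunk_exists(base: &Path, digest_hex: &str, checked: &mut HashSet<String>) -> io::Result<bool> {
        if checked.contains(digest_hex) {
            return Ok(true); // already seen in this run - no syscall needed
        }
        // assumed layout for illustration: .chunks/<first 4 hex digits>/<digest>
        let path = base.join(".chunks").join(&digest_hex[..4]).join(digest_hex);
        match std::fs::metadata(&path) { // stat only - chunk data is never read
            Ok(_) => {
                checked.insert(digest_hex.to_string());
                Ok(true)
            }
            Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(false),
            Err(err) => Err(err),
        }
    }

    fn main() -> io::Result<()> {
        let mut checked = HashSet::new();
        let digest = "0123abcd".repeat(8); // stand-in 64-char digest
        println!("exists: {}", chunk_exists(Path::new("/datastore"), &digest, &mut checked)?);
        Ok(())
    }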
@@ -686,6 +714,11 @@ impl DataStore {
     }

+    pub fn stat_chunk(&self, digest: &[u8; 32]) -> Result<std::fs::Metadata, Error> {
+        let (chunk_path, _digest_str) = self.chunk_store.chunk_path(digest);
+        std::fs::metadata(chunk_path).map_err(Error::from)
+    }
+
     pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
         let (chunk_path, digest_str) = self.chunk_store.chunk_path(digest);

@@ -781,4 +814,3 @@ impl DataStore {
         self.verify_new
     }
 }
-
@@ -1,8 +1,8 @@
-use std::collections::HashSet;
-use std::sync::{Arc, Mutex};
-use std::sync::atomic::{Ordering, AtomicUsize};
-use std::time::Instant;
 use nix::dir::Dir;
+use std::collections::HashSet;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::{Arc, Mutex};
+use std::time::Instant;

 use anyhow::{bail, format_err, Error};

@@ -25,8 +25,8 @@ use crate::{
     server::UPID,
     task::TaskState,
     task_log,
-    tools::ParallelHandler,
     tools::fs::lock_dir_noblock_shared,
+    tools::ParallelHandler,
 };

 /// A VerifyWorker encapsulates a task worker, datastore and information about which chunks have

@@ -34,8 +34,8 @@ use crate::{
 pub struct VerifyWorker {
     worker: Arc<dyn TaskState + Send + Sync>,
     datastore: Arc<DataStore>,
-    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
-    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    verified_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
 }

 impl VerifyWorker {

@@ -45,15 +45,18 @@ impl VerifyWorker {
             worker,
             datastore,
             // start with 16k chunks == up to 64G data
-            verified_chunks: Arc::new(Mutex::new(HashSet::with_capacity(16*1024))),
+            verified_chunks: Arc::new(Mutex::new(HashSet::with_capacity(16 * 1024))),
             // start with 64 chunks since we assume there are few corrupt ones
             corrupt_chunks: Arc::new(Mutex::new(HashSet::with_capacity(64))),
         }
     }
 }

-fn verify_blob(datastore: Arc<DataStore>, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
+fn verify_blob(
+    datastore: Arc<DataStore>,
+    backup_dir: &BackupDir,
+    info: &FileInfo,
+) -> Result<(), Error> {
     let blob = datastore.load_blob(backup_dir, &info.filename)?;

     let raw_size = blob.raw_size();

@@ -88,7 +91,11 @@ fn rename_corrupted_chunk(
     let mut new_path = path.clone();
     loop {
         new_path.set_file_name(format!("{}.{}.bad", digest_str, counter));
-        if new_path.exists() && counter < 9 { counter += 1; } else { break; }
+        if new_path.exists() && counter < 9 {
+            counter += 1;
+        } else {
+            break;
+        }
     }

     match std::fs::rename(&path, &new_path) {

@@ -109,7 +116,6 @@ fn verify_index_chunks(
     index: Box<dyn IndexFile + Send>,
     crypt_mode: CryptMode,
 ) -> Result<(), Error> {
-
     let errors = Arc::new(AtomicUsize::new(0));

     let start_time = Instant::now();

@@ -124,8 +130,9 @@ fn verify_index_chunks(
     let errors2 = Arc::clone(&errors);

     let decoder_pool = ParallelHandler::new(
-        "verify chunk decoder", 4,
-        move |(chunk, digest, size): (DataBlob, [u8;32], u64)| {
+        "verify chunk decoder",
+        4,
+        move |(chunk, digest, size): (DataBlob, [u8; 32], u64)| {
             let chunk_crypt_mode = match chunk.crypt_mode() {
                 Err(err) => {
                     corrupt_chunks2.lock().unwrap().insert(digest);

@@ -159,23 +166,65 @@ fn verify_index_chunks(
     );

-    for pos in 0..index.index_count() {
+    let skip_chunk = |digest: &[u8; 32]| -> bool {
+        if verify_worker.verified_chunks.lock().unwrap().contains(digest) {
+            true
+        } else if verify_worker.corrupt_chunks.lock().unwrap().contains(digest) {
+            let digest_str = proxmox::tools::digest_to_hex(digest);
+            task_log!(verify_worker.worker, "chunk {} was marked as corrupt", digest_str);
+            errors.fetch_add(1, Ordering::SeqCst);
+            true
+        } else {
+            false
+        }
+    };
+
+    let index_count = index.index_count();
+    let mut chunk_list = Vec::with_capacity(index_count);
+
+    use std::os::unix::fs::MetadataExt;
+
+    for pos in 0..index_count {
+        if pos & 1023 == 0 {
+            verify_worker.worker.check_abort()?;
+            crate::tools::fail_on_shutdown()?;
+        }
+
+        let info = index.chunk_info(pos).unwrap();
+
+        if skip_chunk(&info.digest) {
+            continue; // already verified or marked corrupt
+        }
+
+        match verify_worker.datastore.stat_chunk(&info.digest) {
+            Err(err) => {
+                verify_worker.corrupt_chunks.lock().unwrap().insert(info.digest);
+                task_log!(verify_worker.worker, "can't verify chunk, stat failed - {}", err);
+                errors.fetch_add(1, Ordering::SeqCst);
+                rename_corrupted_chunk(
+                    verify_worker.datastore.clone(),
+                    &info.digest,
+                    &verify_worker.worker,
+                );
+            }
+            Ok(metadata) => {
+                chunk_list.push((pos, metadata.ino()));
+            }
+        }
+    }
+
+    // sorting by inode improves data locality, which makes it lots faster on spinners
+    chunk_list.sort_unstable_by(|(_, ino_a), (_, ino_b)| ino_a.cmp(&ino_b));
+
+    for (pos, _) in chunk_list {
         verify_worker.worker.check_abort()?;
         crate::tools::fail_on_shutdown()?;

         let info = index.chunk_info(pos).unwrap();
-        let size = info.size();

-        if verify_worker.verified_chunks.lock().unwrap().contains(&info.digest) {
-            continue; // already verified
-        }
-
-        if verify_worker.corrupt_chunks.lock().unwrap().contains(&info.digest) {
-            let digest_str = proxmox::tools::digest_to_hex(&info.digest);
-            task_log!(verify_worker.worker, "chunk {} was marked as corrupt", digest_str);
-            errors.fetch_add(1, Ordering::SeqCst);
-            continue;
-        }
+        // we must always recheck this here, the parallel worker below alter it!
+        if skip_chunk(&info.digest) {
+            continue; // already verified or marked corrupt
+        }

         match verify_worker.datastore.load_chunk(&info.digest) {
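
The hunk above first stats every chunk, records (position, inode) pairs, and sorts by inode so that the verify pass reads chunks in on-disk order rather than index order. The same trick in a minimal standalone program, applied to an ordinary directory:

    use std::os::unix::fs::MetadataExt;

    fn main() -> std::io::Result<()> {
        // collect (position, inode) pairs, just like the chunk_list above
        let mut entries: Vec<(usize, u64)> = Vec::new();
        for (pos, entry) in std::fs::read_dir(".")?.enumerate() {
            let entry = entry?;
            if entry.file_type()?.is_file() {
                entries.push((pos, entry.metadata()?.ino()));
            }
        }

        // sort by inode number so reads hit the disk mostly sequentially
        entries.sort_unstable_by(|(_, a), (_, b)| a.cmp(b));

        for (pos, ino) in entries {
            println!("process entry {} (inode {})", pos, ino);
        }
        Ok(())
    }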
@ -183,10 +232,14 @@ fn verify_index_chunks(
|
|||||||
verify_worker.corrupt_chunks.lock().unwrap().insert(info.digest);
|
verify_worker.corrupt_chunks.lock().unwrap().insert(info.digest);
|
||||||
task_log!(verify_worker.worker, "can't verify chunk, load failed - {}", err);
|
task_log!(verify_worker.worker, "can't verify chunk, load failed - {}", err);
|
||||||
errors.fetch_add(1, Ordering::SeqCst);
|
errors.fetch_add(1, Ordering::SeqCst);
|
||||||
rename_corrupted_chunk(verify_worker.datastore.clone(), &info.digest, &verify_worker.worker);
|
rename_corrupted_chunk(
|
||||||
continue;
|
verify_worker.datastore.clone(),
|
||||||
|
&info.digest,
|
||||||
|
&verify_worker.worker,
|
||||||
|
);
|
||||||
}
|
}
|
||||||
Ok(chunk) => {
|
Ok(chunk) => {
|
||||||
|
let size = info.size();
|
||||||
read_bytes += chunk.raw_size();
|
read_bytes += chunk.raw_size();
|
||||||
decoder_pool.send((chunk, info.digest, size))?;
|
decoder_pool.send((chunk, info.digest, size))?;
|
||||||
decoded_bytes += size;
|
decoded_bytes += size;
|
||||||
@ -198,11 +251,11 @@ fn verify_index_chunks(
|
|||||||
|
|
||||||
let elapsed = start_time.elapsed().as_secs_f64();
|
let elapsed = start_time.elapsed().as_secs_f64();
|
||||||
|
|
||||||
let read_bytes_mib = (read_bytes as f64)/(1024.0*1024.0);
|
let read_bytes_mib = (read_bytes as f64) / (1024.0 * 1024.0);
|
||||||
let decoded_bytes_mib = (decoded_bytes as f64)/(1024.0*1024.0);
|
let decoded_bytes_mib = (decoded_bytes as f64) / (1024.0 * 1024.0);
|
||||||
|
|
||||||
let read_speed = read_bytes_mib/elapsed;
|
let read_speed = read_bytes_mib / elapsed;
|
||||||
let decode_speed = decoded_bytes_mib/elapsed;
|
let decode_speed = decoded_bytes_mib / elapsed;
|
||||||
|
|
||||||
let error_count = errors.load(Ordering::SeqCst);
|
let error_count = errors.load(Ordering::SeqCst);
|
||||||
|
|
||||||
@ -229,7 +282,6 @@ fn verify_fixed_index(
|
|||||||
backup_dir: &BackupDir,
|
backup_dir: &BackupDir,
|
||||||
info: &FileInfo,
|
info: &FileInfo,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
let mut path = backup_dir.relative_path();
|
let mut path = backup_dir.relative_path();
|
||||||
path.push(&info.filename);
|
path.push(&info.filename);
|
||||||
|
|
||||||
@ -244,11 +296,7 @@ fn verify_fixed_index(
|
|||||||
bail!("wrong index checksum");
|
bail!("wrong index checksum");
|
||||||
}
|
}
|
||||||
|
|
||||||
verify_index_chunks(
|
verify_index_chunks(verify_worker, Box::new(index), info.chunk_crypt_mode())
|
||||||
verify_worker,
|
|
||||||
Box::new(index),
|
|
||||||
info.chunk_crypt_mode(),
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn verify_dynamic_index(
|
fn verify_dynamic_index(
|
||||||
@ -256,7 +304,6 @@ fn verify_dynamic_index(
|
|||||||
backup_dir: &BackupDir,
|
backup_dir: &BackupDir,
|
||||||
info: &FileInfo,
|
info: &FileInfo,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
let mut path = backup_dir.relative_path();
|
let mut path = backup_dir.relative_path();
|
||||||
path.push(&info.filename);
|
path.push(&info.filename);
|
||||||
|
|
||||||
@ -271,11 +318,7 @@ fn verify_dynamic_index(
|
|||||||
bail!("wrong index checksum");
|
bail!("wrong index checksum");
|
||||||
}
|
}
|
||||||
|
|
||||||
verify_index_chunks(
|
verify_index_chunks(verify_worker, Box::new(index), info.chunk_crypt_mode())
|
||||||
verify_worker,
|
|
||||||
Box::new(index),
|
|
||||||
info.chunk_crypt_mode(),
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Verify a single backup snapshot
|
/// Verify a single backup snapshot
|
||||||
@ -296,15 +339,12 @@ pub fn verify_backup_dir(
|
|||||||
let snap_lock = lock_dir_noblock_shared(
|
let snap_lock = lock_dir_noblock_shared(
|
||||||
&verify_worker.datastore.snapshot_path(&backup_dir),
|
&verify_worker.datastore.snapshot_path(&backup_dir),
|
||||||
"snapshot",
|
"snapshot",
|
||||||
"locked by another operation");
|
"locked by another operation",
|
||||||
|
);
|
||||||
match snap_lock {
|
match snap_lock {
|
||||||
Ok(snap_lock) => verify_backup_dir_with_lock(
|
Ok(snap_lock) => {
|
||||||
verify_worker,
|
verify_backup_dir_with_lock(verify_worker, backup_dir, upid, filter, snap_lock)
|
||||||
backup_dir,
|
}
|
||||||
upid,
|
|
||||||
filter,
|
|
||||||
snap_lock
|
|
||||||
),
|
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
task_log!(
|
task_log!(
|
||||||
verify_worker.worker,
|
verify_worker.worker,
|
||||||
@ -361,19 +401,11 @@ pub fn verify_backup_dir_with_lock(
|
|||||||
let result = proxmox::try_block!({
|
let result = proxmox::try_block!({
|
||||||
task_log!(verify_worker.worker, " check {}", info.filename);
|
task_log!(verify_worker.worker, " check {}", info.filename);
|
||||||
match archive_type(&info.filename)? {
|
match archive_type(&info.filename)? {
|
||||||
ArchiveType::FixedIndex =>
|
ArchiveType::FixedIndex => verify_fixed_index(verify_worker, &backup_dir, info),
|
||||||
verify_fixed_index(
|
ArchiveType::DynamicIndex => verify_dynamic_index(verify_worker, &backup_dir, info),
|
||||||
verify_worker,
|
ArchiveType::Blob => {
|
||||||
&backup_dir,
|
verify_blob(verify_worker.datastore.clone(), &backup_dir, info)
|
||||||
info,
|
}
|
||||||
),
|
|
||||||
ArchiveType::DynamicIndex =>
|
|
||||||
verify_dynamic_index(
|
|
||||||
verify_worker,
|
|
||||||
&backup_dir,
|
|
||||||
info,
|
|
||||||
),
|
|
||||||
ArchiveType::Blob => verify_blob(verify_worker.datastore.clone(), &backup_dir, info),
|
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
@ -392,7 +424,6 @@ pub fn verify_backup_dir_with_lock(
|
|||||||
error_count += 1;
|
error_count += 1;
|
||||||
verify_result = VerifyState::Failed;
|
verify_result = VerifyState::Failed;
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let verify_state = SnapshotVerifyState {
|
let verify_state = SnapshotVerifyState {
|
||||||
@ -400,9 +431,12 @@ pub fn verify_backup_dir_with_lock(
|
|||||||
upid,
|
upid,
|
||||||
};
|
};
|
||||||
let verify_state = serde_json::to_value(verify_state)?;
|
let verify_state = serde_json::to_value(verify_state)?;
|
||||||
verify_worker.datastore.update_manifest(&backup_dir, |manifest| {
|
verify_worker
|
||||||
manifest.unprotected["verify_state"] = verify_state;
|
.datastore
|
||||||
}).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
|
.update_manifest(&backup_dir, |manifest| {
|
||||||
|
manifest.unprotected["verify_state"] = verify_state;
|
||||||
|
})
|
||||||
|
.map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
|
||||||
|
|
||||||
Ok(error_count == 0)
|
Ok(error_count == 0)
|
||||||
}
|
}
|
||||||
@ -421,7 +455,6 @@ pub fn verify_backup_group(
|
|||||||
upid: &UPID,
|
upid: &UPID,
|
||||||
filter: Option<&dyn Fn(&BackupManifest) -> bool>,
|
filter: Option<&dyn Fn(&BackupManifest) -> bool>,
|
||||||
) -> Result<Vec<String>, Error> {
|
) -> Result<Vec<String>, Error> {
|
||||||
|
|
||||||
let mut errors = Vec::new();
|
let mut errors = Vec::new();
|
||||||
let mut list = match group.list_backups(&verify_worker.datastore.base_path()) {
|
let mut list = match group.list_backups(&verify_worker.datastore.base_path()) {
|
||||||
Ok(list) => list,
|
Ok(list) => list,
|
||||||
@ -438,26 +471,23 @@ pub fn verify_backup_group(
|
|||||||
};
|
};
|
||||||
|
|
||||||
let snapshot_count = list.len();
|
let snapshot_count = list.len();
|
||||||
task_log!(verify_worker.worker, "verify group {}:{} ({} snapshots)", verify_worker.datastore.name(), group, snapshot_count);
|
task_log!(
|
||||||
|
verify_worker.worker,
|
||||||
|
"verify group {}:{} ({} snapshots)",
|
||||||
|
verify_worker.datastore.name(),
|
||||||
|
group,
|
||||||
|
snapshot_count
|
||||||
|
);
|
||||||
|
|
||||||
progress.group_snapshots = snapshot_count as u64;
|
progress.group_snapshots = snapshot_count as u64;
|
||||||
|
|
||||||
BackupInfo::sort_list(&mut list, false); // newest first
|
BackupInfo::sort_list(&mut list, false); // newest first
|
||||||
for (pos, info) in list.into_iter().enumerate() {
|
for (pos, info) in list.into_iter().enumerate() {
|
||||||
if !verify_backup_dir(
|
if !verify_backup_dir(verify_worker, &info.backup_dir, upid.clone(), filter)? {
|
||||||
verify_worker,
|
|
||||||
&info.backup_dir,
|
|
||||||
upid.clone(),
|
|
||||||
filter,
|
|
||||||
)? {
|
|
||||||
errors.push(info.backup_dir.to_string());
|
errors.push(info.backup_dir.to_string());
|
||||||
}
|
}
|
||||||
progress.done_snapshots = pos as u64 + 1;
|
progress.done_snapshots = pos as u64 + 1;
|
||||||
task_log!(
|
task_log!(verify_worker.worker, "percentage done: {}", progress);
|
||||||
verify_worker.worker,
|
|
||||||
"percentage done: {}",
|
|
||||||
progress
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(errors)
|
Ok(errors)
|
||||||
@ -521,11 +551,7 @@ pub fn verify_all_backups(
|
|||||||
.filter(filter_by_owner)
|
.filter(filter_by_owner)
|
||||||
.collect::<Vec<BackupGroup>>(),
|
.collect::<Vec<BackupGroup>>(),
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
task_log!(
|
task_log!(worker, "unable to list backups: {}", err,);
|
||||||
worker,
|
|
||||||
"unable to list backups: {}",
|
|
||||||
err,
|
|
||||||
);
|
|
||||||
return Ok(errors);
|
return Ok(errors);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@ -542,13 +568,8 @@ pub fn verify_all_backups(
|
|||||||
progress.done_snapshots = 0;
|
progress.done_snapshots = 0;
|
||||||
progress.group_snapshots = 0;
|
progress.group_snapshots = 0;
|
||||||
|
|
||||||
let mut group_errors = verify_backup_group(
|
let mut group_errors =
|
||||||
verify_worker,
|
verify_backup_group(verify_worker, &group, &mut progress, upid, filter)?;
|
||||||
&group,
|
|
||||||
&mut progress,
|
|
||||||
upid,
|
|
||||||
filter,
|
|
||||||
)?;
|
|
||||||
errors.append(&mut group_errors);
|
errors.append(&mut group_errors);
|
||||||
}
|
}
|
||||||
|
|
||||||
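Note on the refactor above: the two separate membership checks collapse into one skip_chunk closure, which must be re-evaluated on every loop iteration because the parallel decoder workers mutate both sets concurrently. In isolation the closure reads roughly like this (a minimal sketch assembled from the hunks above; not a verbatim copy of verify.rs):

    // Fast path taken before a chunk is loaded from disk.
    // `verified_chunks` / `corrupt_chunks` are the shared Mutex<HashSet<_>>
    // fields of VerifyWorker, also updated by the decoder pool.
    let skip_chunk = |digest: &[u8; 32]| -> bool {
        if verify_worker.verified_chunks.lock().unwrap().contains(digest) {
            true // verified earlier in this task (or a previous one)
        } else if verify_worker.corrupt_chunks.lock().unwrap().contains(digest) {
            let digest_str = proxmox::tools::digest_to_hex(digest);
            task_log!(verify_worker.worker, "chunk {} was marked as corrupt", digest_str);
            errors.fetch_add(1, Ordering::SeqCst);
            true // known corrupt: count the error again, skip the load
        } else {
            false
        }
    };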
src/bin/pmt.rs (427 changed lines)
@@ -1,18 +1,18 @@
 /// Control magnetic tape drive operation
 ///
-/// This is a Rust implementation, meant to replace the 'mt' command
-/// line tool.
+/// This is a Rust implementation, using the Proxmox userspace tape
+/// driver. This is meant as replacement fot the 'mt' command line
+/// tool.
 ///
 /// Features:
 ///
 /// - written in Rust
+/// - use Proxmox userspace driver (using SG_IO)
 /// - optional json output format
 /// - support tape alert flags
 /// - support volume statistics
 /// - read cartridge memory

-use std::collections::HashMap;

 use anyhow::{bail, Error};
 use serde_json::Value;

@@ -36,6 +36,12 @@ pub const FILE_MARK_COUNT_SCHEMA: Schema =
         .maximum(i32::MAX as isize)
         .schema();

+pub const FILE_MARK_POSITION_SCHEMA: Schema =
+    IntegerSchema::new("File mark position (0 is BOT).")
+        .minimum(0)
+        .maximum(i32::MAX as isize)
+        .schema();
+
 pub const RECORD_COUNT_SCHEMA: Schema =
     IntegerSchema::new("Record count.")
         .minimum(1)
@@ -43,7 +49,7 @@ pub const RECORD_COUNT_SCHEMA: Schema =
         .schema();

 pub const DRIVE_OPTION_SCHEMA: Schema = StringSchema::new(
-    "Linux Tape Driver Option, either numeric value or option name.")
+    "Lto Tape Driver Option, either numeric value or option name.")
     .schema();

 pub const DRIVE_OPTION_LIST_SCHEMA: Schema =
@@ -57,103 +63,60 @@ use proxmox_backup::{
         drive::complete_drive_name,
     },
     api2::types::{
-        LINUX_DRIVE_PATH_SCHEMA,
+        LTO_DRIVE_PATH_SCHEMA,
         DRIVE_NAME_SCHEMA,
-        LinuxTapeDrive,
+        LtoTapeDrive,
     },
     tape::{
         complete_drive_path,
-        linux_tape_device_list,
+        lto_tape_device_list,
         drive::{
-            linux_mtio::{MTCmd, SetDrvBufferOptions},
             TapeDriver,
-            LinuxTapeHandle,
-            open_linux_tape_device,
+            LtoTapeHandle,
+            open_lto_tape_device,
         },
     },
 };

-lazy_static::lazy_static!{
-
-    static ref DRIVE_OPTIONS: HashMap<String, SetDrvBufferOptions> = {
-        let mut map = HashMap::new();
-
-        for i in 0..31 {
-            let bit: i32 = 1 << i;
-            let flag = SetDrvBufferOptions::from_bits_truncate(bit);
-            if flag.bits() == 0 { continue; }
-            let name = format!("{:?}", flag)
-                .to_lowercase()
-                .replace("_", "-");
-
-            map.insert(name, flag);
-        }
-        map
-    };
-
-}
-
-fn parse_drive_options(options: Vec<String>) -> Result<SetDrvBufferOptions, Error> {
-
-    let mut value = SetDrvBufferOptions::empty();
-
-    for option in options.iter() {
-        if let Ok::<i32,_>(v) = option.parse() {
-            value |= SetDrvBufferOptions::from_bits_truncate(v);
-        } else if let Some(v) = DRIVE_OPTIONS.get(option) {
-            value |= *v;
-        } else {
-            let option = option.to_lowercase().replace("_", "-");
-            if let Some(v) = DRIVE_OPTIONS.get(&option) {
-                value |= *v;
-            } else {
-                bail!("unknown drive option {}", option);
-            }
-        }
-    }
-
-    Ok(value)
-}
-
-fn get_tape_handle(param: &Value) -> Result<LinuxTapeHandle, Error> {
+fn get_tape_handle(param: &Value) -> Result<LtoTapeHandle, Error> {

     if let Some(name) = param["drive"].as_str() {
         let (config, _digest) = config::drive::config()?;
-        let drive: LinuxTapeDrive = config.lookup("linux", &name)?;
+        let drive: LtoTapeDrive = config.lookup("lto", &name)?;
         eprintln!("using device {}", drive.path);
-        return Ok(LinuxTapeHandle::new(open_linux_tape_device(&drive.path)?))
+        return LtoTapeHandle::new(open_lto_tape_device(&drive.path)?);
     }

     if let Some(device) = param["device"].as_str() {
         eprintln!("using device {}", device);
-        return Ok(LinuxTapeHandle::new(open_linux_tape_device(&device)?))
+        return LtoTapeHandle::new(open_lto_tape_device(&device)?);
     }

     if let Ok(name) = std::env::var("PROXMOX_TAPE_DRIVE") {
         let (config, _digest) = config::drive::config()?;
-        let drive: LinuxTapeDrive = config.lookup("linux", &name)?;
+        let drive: LtoTapeDrive = config.lookup("lto", &name)?;
         eprintln!("using device {}", drive.path);
-        return Ok(LinuxTapeHandle::new(open_linux_tape_device(&drive.path)?))
+        return LtoTapeHandle::new(open_lto_tape_device(&drive.path)?);
     }

     if let Ok(device) = std::env::var("TAPE") {
         eprintln!("using device {}", device);
-        return Ok(LinuxTapeHandle::new(open_linux_tape_device(&device)?))
+        return LtoTapeHandle::new(open_lto_tape_device(&device)?);
     }

     let (config, _digest) = config::drive::config()?;

     let mut drive_names = Vec::new();
     for (name, (section_type, _)) in config.sections.iter() {
-        if section_type != "linux" { continue; }
+        if section_type != "lto" { continue; }
         drive_names.push(name);
     }

     if drive_names.len() == 1 {
         let name = drive_names[0];
-        let drive: LinuxTapeDrive = config.lookup("linux", &name)?;
+        let drive: LtoTapeDrive = config.lookup("lto", &name)?;
         eprintln!("using device {}", drive.path);
-        return Ok(LinuxTapeHandle::new(open_linux_tape_device(&drive.path)?))
+        return LtoTapeHandle::new(open_lto_tape_device(&drive.path)?);
     }

     bail!("no drive/device specified");
@@ -167,26 +130,22 @@ fn get_tape_handle(param: &Value) -> Result<LinuxTapeHandle, Error> {
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
         count: {
-            schema: FILE_MARK_COUNT_SCHEMA,
+            schema: FILE_MARK_POSITION_SCHEMA,
         },
     },
 },
 )]
-/// Position the tape at the beginning of the count file.
-///
-/// Positioning is done by first rewinding the tape and then spacing
-/// forward over count file marks.
-fn asf(count: i32, param: Value) -> Result<(), Error> {
+/// Position the tape at the beginning of the count file (after
+/// filemark count)
+fn asf(count: u64, param: Value) -> Result<(), Error> {

     let mut handle = get_tape_handle(&param)?;

-    handle.rewind()?;
-
-    handle.forward_space_count_files(count)?;
+    handle.locate_file(count)?;

     Ok(())
 }
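The asf rewrite is the one behavioral change in this group: the old code emulated absolute positioning with two sequential tape motions, while the LTO handle can issue a single absolute seek. A sketch of the equivalence (method names exactly as used in the hunk; illustrative only):

    // Old: rewind to BOT, then space forward over `count` filemarks.
    handle.rewind()?;
    handle.forward_space_count_files(count)?;

    // New: one absolute seek; position 0 is BOT, per FILE_MARK_POSITION_SCHEMA.
    handle.locate_file(count)?;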
@@ -200,7 +159,7 @@ fn asf(count: i32, param: Value) -> Result<(), Error> {
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
         count: {
@@ -212,7 +171,7 @@ fn asf(count: i32, param: Value) -> Result<(), Error> {
 /// Backward space count files (position before file mark).
 ///
 /// The tape is positioned on the last block of the previous file.
-fn bsf(count: i32, param: Value) -> Result<(), Error> {
+fn bsf(count: usize, param: Value) -> Result<(), Error> {

     let mut handle = get_tape_handle(&param)?;

@@ -230,7 +189,7 @@ fn bsf(count: i32, param: Value) -> Result<(), Error> {
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
         count: {
@@ -243,11 +202,12 @@ fn bsf(count: i32, param: Value) -> Result<(), Error> {
 ///
 /// This leaves the tape positioned at the first block of the file
 /// that is count - 1 files before the current file.
-fn bsfm(count: i32, param: Value) -> Result<(), Error> {
+fn bsfm(count: usize, param: Value) -> Result<(), Error> {

     let mut handle = get_tape_handle(&param)?;

-    handle.mtop(MTCmd::MTBSFM, count, "bsfm")?;
+    handle.backward_space_count_files(count)?;
+    handle.forward_space_count_files(1)?;

     Ok(())
 }
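bsfm has no direct equivalent in the userspace driver, so it is composed from the two spacing primitives that do exist. Schematically (based on the two calls in the hunk and the mt semantics quoted in the doc comment):

    // Layout: ... <file n-1> FM <file n> FM | current position ...
    // BSFM must end up *after* a filemark, on the first block of a file:
    handle.backward_space_count_files(count)?; // now just *before* the filemark
    handle.forward_space_count_files(1)?;      // step over it: first block of the file
    // (fsfm further below mirrors this: forward `count`, then one file back.)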
@@ -261,7 +221,7 @@ fn bsfm(count: i32, param: Value) -> Result<(), Error> {
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
         count: {
@@ -271,11 +231,11 @@ fn bsfm(count: i32, param: Value) -> Result<(), Error> {
     },
 )]
 /// Backward space records.
-fn bsr(count: i32, param: Value) -> Result<(), Error> {
+fn bsr(count: usize, param: Value) -> Result<(), Error> {

     let mut handle = get_tape_handle(&param)?;

-    handle.mtop(MTCmd::MTBSR, count, "backward space records")?;
+    handle.backward_space_count_records(count)?;

     Ok(())
 }
@@ -289,7 +249,7 @@ fn bsr(count: i32, param: Value) -> Result<(), Error> {
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
         "output-format": {
@@ -340,7 +300,7 @@ fn cartridge_memory(param: Value) -> Result<(), Error> {
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
         "output-format": {
@@ -389,7 +349,7 @@ fn tape_alert_flags(param: Value) -> Result<(), Error> {
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
     },
@@ -413,7 +373,7 @@ fn eject(param: Value) -> Result<(), Error> {
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
     },
@@ -423,7 +383,7 @@ fn eject(param: Value) -> Result<(), Error> {
 fn eod(param: Value) -> Result<(), Error> {

     let mut handle = get_tape_handle(&param)?;
-    handle.move_to_eom()?;
+    handle.move_to_eom(false)?;

     Ok(())
 }
@@ -437,7 +397,7 @@ fn eod(param: Value) -> Result<(), Error> {
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
         fast: {
@@ -449,7 +409,7 @@ fn eod(param: Value) -> Result<(), Error> {
         },
     },
 )]
-/// Erase media
+/// Erase media (from current position)
 fn erase(fast: Option<bool>, param: Value) -> Result<(), Error> {

     let mut handle = get_tape_handle(&param)?;
@@ -466,7 +426,36 @@ fn erase(fast: Option<bool>, param: Value) -> Result<(), Error> {
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
+            optional: true,
+        },
+        fast: {
+            description: "Use fast erase.",
+            type: bool,
+            optional: true,
+            default: true,
+        },
+    },
+},
+)]
+/// Format media, single partition
+fn format(fast: Option<bool>, param: Value) -> Result<(), Error> {
+
+    let mut handle = get_tape_handle(&param)?;
+    handle.format_media(fast.unwrap_or(true))?;
+
+    Ok(())
+}
+
+#[api(
+    input: {
+        properties: {
+            drive: {
+                schema: DRIVE_NAME_SCHEMA,
+                optional: true,
+            },
+            device: {
+                schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
         count: {
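Here erase keeps its in-place meaning while the new format command maps onto the drive's format operation. A hedged sketch of the handler's effective behavior (only format_media's call shape and its schema default are visible in the hunk; what a full versus fast format does physically is drive-specific):

    let mut handle = get_tape_handle(&param)?;
    // `fast` defaults to true per the schema above; a full-media format
    // (fast = false) can presumably take much longer on large cartridges.
    handle.format_media(fast.unwrap_or(true))?;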
@@ -478,7 +467,7 @@ fn erase(fast: Option<bool>, param: Value) -> Result<(), Error> {
 /// Forward space count files (position after file mark).
 ///
 /// The tape is positioned on the first block of the next file.
-fn fsf(count: i32, param: Value) -> Result<(), Error> {
+fn fsf(count: usize, param: Value) -> Result<(), Error> {

     let mut handle = get_tape_handle(&param)?;

@@ -495,7 +484,7 @@ fn fsf(count: i32, param: Value) -> Result<(), Error> {
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
         count: {
@@ -508,11 +497,12 @@ fn fsf(count: i32, param: Value) -> Result<(), Error> {
 ///
 /// This leaves the tape positioned at the last block of the file that
 /// is count - 1 files past the current file.
-fn fsfm(count: i32, param: Value) -> Result<(), Error> {
+fn fsfm(count: usize, param: Value) -> Result<(), Error> {

     let mut handle = get_tape_handle(&param)?;

-    handle.mtop(MTCmd::MTFSFM, count, "fsfm")?;
+    handle.forward_space_count_files(count)?;
+    handle.backward_space_count_files(1)?;

     Ok(())
 }
@@ -526,7 +516,7 @@ fn fsfm(count: i32, param: Value) -> Result<(), Error> {
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
         count: {
@@ -536,11 +526,11 @@ fn fsfm(count: i32, param: Value) -> Result<(), Error> {
     },
 )]
 /// Forward space records.
-fn fsr(count: i32, param: Value) -> Result<(), Error> {
+fn fsr(count: usize, param: Value) -> Result<(), Error> {

     let mut handle = get_tape_handle(&param)?;

-    handle.mtop(MTCmd::MTFSR, count, "forward space records")?;
+    handle.forward_space_count_records(count)?;

     Ok(())
 }
@@ -554,7 +544,7 @@ fn fsr(count: i32, param: Value) -> Result<(), Error> {
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
     },
@@ -564,7 +554,7 @@ fn fsr(count: i32, param: Value) -> Result<(), Error> {
 fn load(param: Value) -> Result<(), Error> {

     let mut handle = get_tape_handle(&param)?;
-    handle.mtload()?;
+    handle.load()?;

     Ok(())
 }
@@ -578,7 +568,7 @@ fn load(param: Value) -> Result<(), Error> {
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
     },
@@ -589,7 +579,7 @@ fn lock(param: Value) -> Result<(), Error> {

     let mut handle = get_tape_handle(&param)?;

-    handle.mtop(MTCmd::MTLOCK, 1, "lock tape drive door")?;
+    handle.lock()?;

     Ok(())
 }
@@ -603,7 +593,7 @@ fn lock(param: Value) -> Result<(), Error> {
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
     },
@@ -634,7 +624,7 @@ fn scan(param: Value) -> Result<(), Error> {

     let output_format = get_output_format(&param);

-    let list = linux_tape_device_list();
+    let list = lto_tape_device_list();

     if output_format == "json-pretty" {
         println!("{}", serde_json::to_string_pretty(&list)?);
@@ -657,7 +647,6 @@ fn scan(param: Value) -> Result<(), Error> {
     Ok(())
 }

-
 #[api(
     input: {
         properties: {
@@ -666,36 +655,7 @@ fn scan(param: Value) -> Result<(), Error> {
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
-            optional: true,
-        },
-        size: {
-            description: "Block size in bytes.",
-            minimum: 0,
-        },
-    },
-},
-)]
-/// Set the block size of the drive
-fn setblk(size: i32, param: Value) -> Result<(), Error> {
-
-    let mut handle = get_tape_handle(&param)?;
-
-    handle.mtop(MTCmd::MTSETBLK, size, "set block size")?;
-
-    Ok(())
-}
-
-
-#[api(
-    input: {
-        properties: {
-            drive: {
-                schema: DRIVE_NAME_SCHEMA,
-                optional: true,
-            },
-            device: {
-                schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
         "output-format": {
@@ -737,122 +697,6 @@ fn status(param: Value) -> Result<(), Error> {
 }


-#[api(
-    input: {
-        properties: {
-            drive: {
-                schema: DRIVE_NAME_SCHEMA,
-                optional: true,
-            },
-            device: {
-                schema: LINUX_DRIVE_PATH_SCHEMA,
-                optional: true,
-            },
-            options: {
-                schema: DRIVE_OPTION_LIST_SCHEMA,
-                optional: true,
-            },
-            defaults: {
-                description: "Set default options (buffer-writes async-writes read-ahead can-bsr).",
-                type: bool,
-                optional: true,
-            },
-        },
-    },
-)]
-/// Set device driver options (root only)
-fn st_options(
-    options: Option<Vec<String>>,
-    defaults: Option<bool>,
-    param: Value) -> Result<(), Error> {
-
-    let handle = get_tape_handle(&param)?;
-
-    let options = match defaults {
-        Some(true) => {
-            if options.is_some() {
-                bail!("option --defaults conflicts with specified options");
-            }
-            let mut list = Vec::new();
-            list.push(String::from("buffer-writes"));
-            list.push(String::from("async-writes"));
-            list.push(String::from("read-ahead"));
-            list.push(String::from("can-bsr"));
-            list
-        }
-        Some(false) | None => {
-            options.unwrap_or_else(|| Vec::new())
-        }
-    };
-
-    let value = parse_drive_options(options)?;
-
-    handle.set_drive_buffer_options(value)?;
-
-    Ok(())
-}
-
-
-#[api(
-    input: {
-        properties: {
-            drive: {
-                schema: DRIVE_NAME_SCHEMA,
-                optional: true,
-            },
-            device: {
-                schema: LINUX_DRIVE_PATH_SCHEMA,
-                optional: true,
-            },
-            options: {
-                schema: DRIVE_OPTION_LIST_SCHEMA,
-            },
-        },
-    },
-)]
-/// Set selected device driver options bits (root only)
-fn st_set_options(options: Vec<String>, param: Value) -> Result<(), Error> {
-
-    let handle = get_tape_handle(&param)?;
-
-    let value = parse_drive_options(options)?;
-
-    handle.drive_buffer_set_options(value)?;
-
-    Ok(())
-}
-
-
-#[api(
-    input: {
-        properties: {
-            drive: {
-                schema: DRIVE_NAME_SCHEMA,
-                optional: true,
-            },
-            device: {
-                schema: LINUX_DRIVE_PATH_SCHEMA,
-                optional: true,
-            },
-            options: {
-                schema: DRIVE_OPTION_LIST_SCHEMA,
-            },
-        },
-    },
-)]
-/// Clear selected device driver options bits (root only)
-fn st_clear_options(options: Vec<String>, param: Value) -> Result<(), Error> {
-
-    let handle = get_tape_handle(&param)?;
-
-    let value = parse_drive_options(options)?;
-
-    handle.drive_buffer_clear_options(value)?;
-
-    Ok(())
-}
-
-
 #[api(
     input: {
         properties: {
@@ -861,7 +705,7 @@ fn st_clear_options(options: Vec<String>, param: Value) -> Result<(), Error> {
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
     },
@@ -872,7 +716,7 @@ fn unlock(param: Value) -> Result<(), Error> {

     let mut handle = get_tape_handle(&param)?;

-    handle.mtop(MTCmd::MTUNLOCK, 1, "unlock tape drive door")?;
+    handle.unlock()?;

     Ok(())
 }
@@ -886,7 +730,7 @@ fn unlock(param: Value) -> Result<(), Error> {
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
         "output-format": {
@@ -935,7 +779,7 @@ fn volume_statistics(param: Value) -> Result<(), Error> {
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
         count: {
@@ -946,10 +790,68 @@ fn volume_statistics(param: Value) -> Result<(), Error> {
     },
 )]
 /// Write count (default 1) EOF marks at current position.
-fn weof(count: Option<i32>, param: Value) -> Result<(), Error> {
+fn weof(count: Option<usize>, param: Value) -> Result<(), Error> {

+    let count = count.unwrap_or(1);
+
     let mut handle = get_tape_handle(&param)?;
-    handle.mtop(MTCmd::MTWEOF, count.unwrap_or(1), "write EOF mark")?;
+
+    handle.write_filemarks(count)?;
+
+    Ok(())
+}
+
+#[api(
+    input: {
+        properties: {
+            drive: {
+                schema: DRIVE_NAME_SCHEMA,
+                optional: true,
+            },
+            device: {
+                schema: LTO_DRIVE_PATH_SCHEMA,
+                optional: true,
+            },
+            compression: {
+                description: "Enable/disable compression.",
+                type: bool,
+                optional: true,
+            },
+            blocksize: {
+                description: "Set tape drive block_length (0 is variable length).",
+                type: u32,
+                minimum: 0,
+                maximum: 0x80_00_00,
+                optional: true,
+            },
+            buffer_mode: {
+                description: "Use drive buffer.",
+                type: bool,
+                optional: true,
+            },
+            defaults: {
+                description: "Set default options",
+                type: bool,
+                optional: true,
+            },
+        },
+    },
+)]
+/// Set varios drive options
+fn options(
+    compression: Option<bool>,
+    blocksize: Option<u32>,
+    buffer_mode: Option<bool>,
+    defaults: Option<bool>,
+    param: Value,
+) -> Result<(), Error> {
+
+    let mut handle = get_tape_handle(&param)?;
+
+    if let Some(true) = defaults {
+        handle.set_default_options()?;
+    }
+
+    handle.set_drive_options(compression, blocksize, buffer_mode)?;
+
     Ok(())
 }
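The single options command replaces the three st* bit-mask commands; every parameter is optional, so unset flags leave the drive's current state untouched. A sketch of the mapping (signature exactly as in the hunk; per-flag meanings inferred from the schema descriptions):

    // None = leave as-is; Some(v) = change just that setting, e.g.
    // `pmt options --compression true` toggles only compression.
    handle.set_drive_options(
        compression, // Option<bool>: data compression on/off
        blocksize,   // Option<u32>:  0 selects variable block length
        buffer_mode, // Option<bool>: drive write buffering
    )?;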
@@ -967,7 +869,6 @@ fn main() -> Result<(), Error> {
         CliCommand::new(method)
             .completion_cb("drive", complete_drive_name)
             .completion_cb("device", complete_drive_path)
-            .completion_cb("options", complete_option_name)
     };

     let cmd_def = CliCommandMap::new()
@@ -980,18 +881,16 @@ fn main() -> Result<(), Error> {
         .insert("eject", std_cmd(&API_METHOD_EJECT))
         .insert("eod", std_cmd(&API_METHOD_EOD))
         .insert("erase", std_cmd(&API_METHOD_ERASE))
+        .insert("format", std_cmd(&API_METHOD_FORMAT))
         .insert("fsf", std_cmd(&API_METHOD_FSF).arg_param(&["count"]))
         .insert("fsfm", std_cmd(&API_METHOD_FSFM).arg_param(&["count"]))
         .insert("fsr", std_cmd(&API_METHOD_FSR).arg_param(&["count"]))
         .insert("load", std_cmd(&API_METHOD_LOAD))
         .insert("lock", std_cmd(&API_METHOD_LOCK))
+        .insert("options", std_cmd(&API_METHOD_OPTIONS))
         .insert("rewind", std_cmd(&API_METHOD_REWIND))
         .insert("scan", CliCommand::new(&API_METHOD_SCAN))
-        .insert("setblk", CliCommand::new(&API_METHOD_SETBLK).arg_param(&["size"]))
         .insert("status", std_cmd(&API_METHOD_STATUS))
-        .insert("stoptions", std_cmd(&API_METHOD_ST_OPTIONS).arg_param(&["options"]))
-        .insert("stsetoptions", std_cmd(&API_METHOD_ST_SET_OPTIONS).arg_param(&["options"]))
-        .insert("stclearoptions", std_cmd(&API_METHOD_ST_CLEAR_OPTIONS).arg_param(&["options"]))
         .insert("tape-alert-flags", std_cmd(&API_METHOD_TAPE_ALERT_FLAGS))
         .insert("unlock", std_cmd(&API_METHOD_UNLOCK))
         .insert("volume-statistics", std_cmd(&API_METHOD_VOLUME_STATISTICS))
@@ -1005,11 +904,3 @@ fn main() -> Result<(), Error> {

     Ok(())
 }
-
-// Completion helpers
-pub fn complete_option_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
-    DRIVE_OPTIONS
-        .keys()
-        .map(String::from)
-        .collect()
-}
@@ -33,7 +33,7 @@ use proxmox_backup::{
         SCSI_CHANGER_PATH_SCHEMA,
         CHANGER_NAME_SCHEMA,
         ScsiTapeChanger,
-        LinuxTapeDrive,
+        LtoTapeDrive,
     },
     tape::{
         linux_tape_changer_list,
@@ -67,7 +67,7 @@ fn get_changer_handle(param: &Value) -> Result<File, Error> {

     if let Ok(name) = std::env::var("PROXMOX_TAPE_DRIVE") {
         let (config, _digest) = config::drive::config()?;
-        let drive: LinuxTapeDrive = config.lookup("linux", &name)?;
+        let drive: LtoTapeDrive = config.lookup("lto", &name)?;
         if let Some(changer) = drive.changer {
             let changer_config: ScsiTapeChanger = config.lookup("changer", &changer)?;
             eprintln!("using device {}", changer_config.path);
@@ -6,8 +6,11 @@ use proxmox::api::RpcEnvironmentType;

 //use proxmox_backup::tools;
 //use proxmox_backup::api_schema::config::*;
-use proxmox_backup::server::rest::*;
-use proxmox_backup::server;
+use proxmox_backup::server::{
+    self,
+    auth::default_api_auth,
+    rest::*,
+};
 use proxmox_backup::tools::daemon;
 use proxmox_backup::auth_helpers::*;
 use proxmox_backup::config;
@@ -53,7 +56,11 @@ async fn run() -> Result<(), Error> {
     let _ = csrf_secret(); // load with lazy_static

     let mut config = server::ApiConfig::new(
-        buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PRIVILEGED)?;
+        buildcfg::JS_DIR,
+        &proxmox_backup::api2::ROUTER,
+        RpcEnvironmentType::PRIVILEGED,
+        default_api_auth(),
+    )?;

     let mut commando_sock = server::CommandoSocket::new(server::our_ctrl_sock());
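ApiConfig::new now takes the authenticator as an explicit argument instead of hard-wiring it, so different daemons can share the constructor while supplying different backends. A minimal sketch of the pattern (the exact parameter type is not visible in this hunk and is an assumption):

    // default_api_auth() supplies the stock ticket/token checker; a test
    // harness or embedded server could inject its own implementation.
    let config = server::ApiConfig::new(
        buildcfg::JS_DIR,               // static content root
        &proxmox_backup::api2::ROUTER,  // API route table
        RpcEnvironmentType::PRIVILEGED, // this daemon runs privileged
        default_api_auth(),             // injected auth backend
    )?;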
@@ -1,7 +1,5 @@
 use std::collections::HashSet;
-use std::convert::TryFrom;
 use std::io::{self, Read, Write, Seek, SeekFrom};
-use std::os::unix::io::{FromRawFd, RawFd};
 use std::path::{Path, PathBuf};
 use std::pin::Pin;
 use std::sync::{Arc, Mutex};
@@ -19,7 +17,7 @@ use pathpatterns::{MatchEntry, MatchType, PatternFlag};
 use proxmox::{
     tools::{
         time::{strftime_local, epoch_i64},
-        fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size},
+        fs::{file_get_json, replace_file, CreateOptions, image_size},
     },
     api::{
         api,
@@ -32,7 +30,11 @@ use proxmox::{
 };
 use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

-use proxmox_backup::tools;
+use proxmox_backup::tools::{
+    self,
+    StdChannelWriter,
+    TokioWriterAdapter,
+};
 use proxmox_backup::api2::types::*;
 use proxmox_backup::api2::version;
 use proxmox_backup::client::*;
@@ -67,8 +69,18 @@ use proxmox_backup::backup::{
 mod proxmox_backup_client;
 use proxmox_backup_client::*;

-mod proxmox_client_tools;
-use proxmox_client_tools::*;
+pub mod proxmox_client_tools;
+use proxmox_client_tools::{
+    complete_archive_name, complete_auth_id, complete_backup_group, complete_backup_snapshot,
+    complete_backup_source, complete_chunk_size, complete_group_or_snapshot,
+    complete_img_archive_name, complete_pxar_archive_name, complete_repository, connect,
+    extract_repository_from_value,
+    key_source::{
+        crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
+        KEYFILE_SCHEMA, MASTER_PUBKEY_FD_SCHEMA, MASTER_PUBKEY_FILE_SCHEMA,
+    },
+    CHUNK_SIZE_SCHEMA, REPO_URL_SCHEMA,
+};

 fn record_repository(repo: &BackupRepository) {

@@ -162,7 +174,7 @@ async fn backup_directory<P: AsRef<Path>>(
     dir_path: P,
     archive_name: &str,
     chunk_size: Option<usize>,
-    catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
+    catalog: Arc<Mutex<CatalogWriter<TokioWriterAdapter<StdChannelWriter>>>>,
     pxar_create_options: proxmox_backup::pxar::PxarCreateOptions,
     upload_options: UploadOptions,
 ) -> Result<BackupStats, Error> {
@@ -460,7 +472,7 @@ async fn start_garbage_collection(param: Value) -> Result<Value, Error> {
 }

 struct CatalogUploadResult {
-    catalog_writer: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
+    catalog_writer: Arc<Mutex<CatalogWriter<TokioWriterAdapter<StdChannelWriter>>>>,
     result: tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>,
 }

@@ -473,7 +485,7 @@ fn spawn_catalog_upload(
     let catalog_chunk_size = 512*1024;
     let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

-    let catalog_writer = Arc::new(Mutex::new(CatalogWriter::new(crate::tools::StdChannelWriter::new(catalog_tx))?));
+    let catalog_writer = Arc::new(Mutex::new(CatalogWriter::new(TokioWriterAdapter::new(StdChannelWriter::new(catalog_tx)))?));

     let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();
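The catalog writer gains a TokioWriterAdapter layer: CatalogWriter performs blocking std::io::Write calls, and the adapter keeps those off the async executor (the exact mechanism is not shown in this diff; the layering below is read directly from the changed line):

    // innermost -> outermost:
    //   StdChannelWriter        blocking Write that forwards buffers to catalog_tx
    //   TokioWriterAdapter<W>   bridges the blocking writer into async code
    //   CatalogWriter<W>        serializes catalog entries into any Write
    let catalog_writer = Arc::new(Mutex::new(CatalogWriter::new(
        TokioWriterAdapter::new(StdChannelWriter::new(catalog_tx)),
    )?));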
@ -499,437 +511,6 @@ fn spawn_catalog_upload(
|
|||||||
Ok(CatalogUploadResult { catalog_writer, result: catalog_result_rx })
|
Ok(CatalogUploadResult { catalog_writer, result: catalog_result_rx })
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
|
||||||
enum KeySource {
|
|
||||||
DefaultKey,
|
|
||||||
Fd,
|
|
||||||
Path(String),
|
|
||||||
}
|
|
||||||
|
|
||||||
fn format_key_source(source: &KeySource, key_type: &str) -> String {
|
|
||||||
match source {
|
|
||||||
KeySource::DefaultKey => format!("Using default {} key..", key_type),
|
|
||||||
KeySource::Fd => format!("Using {} key from file descriptor..", key_type),
|
|
||||||
KeySource::Path(path) => format!("Using {} key from '{}'..", key_type, path),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
|
||||||
struct KeyWithSource {
|
|
||||||
pub source: KeySource,
|
|
||||||
pub key: Vec<u8>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl KeyWithSource {
|
|
||||||
pub fn from_fd(key: Vec<u8>) -> Self {
|
|
||||||
Self {
|
|
||||||
source: KeySource::Fd,
|
|
||||||
key,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn from_default(key: Vec<u8>) -> Self {
|
|
||||||
Self {
|
|
||||||
source: KeySource::DefaultKey,
|
|
||||||
key,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn from_path(path: String, key: Vec<u8>) -> Self {
|
|
||||||
Self {
|
|
||||||
source: KeySource::Path(path),
|
|
||||||
key,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Eq, PartialEq)]
|
|
||||||
struct CryptoParams {
|
|
||||||
mode: CryptMode,
|
|
||||||
enc_key: Option<KeyWithSource>,
|
|
||||||
// FIXME switch to openssl::rsa::rsa<openssl::pkey::Public> once that is Eq?
|
|
||||||
master_pubkey: Option<KeyWithSource>,
|
|
||||||
}
|
|
||||||
|
|
||||||
fn crypto_parameters(param: &Value) -> Result<CryptoParams, Error> {
|
|
||||||
let keyfile = match param.get("keyfile") {
|
|
||||||
Some(Value::String(keyfile)) => Some(keyfile),
|
|
||||||
Some(_) => bail!("bad --keyfile parameter type"),
|
|
||||||
None => None,
|
|
||||||
};
|
|
||||||
|
|
||||||
let key_fd = match param.get("keyfd") {
|
|
||||||
Some(Value::Number(key_fd)) => Some(
|
|
||||||
RawFd::try_from(key_fd
|
|
||||||
.as_i64()
|
|
||||||
.ok_or_else(|| format_err!("bad key fd: {:?}", key_fd))?
|
|
||||||
)
|
|
||||||
.map_err(|err| format_err!("bad key fd: {:?}: {}", key_fd, err))?
|
|
||||||
),
|
|
||||||
Some(_) => bail!("bad --keyfd parameter type"),
|
|
||||||
None => None,
|
|
||||||
};
|
|
||||||
|
|
||||||
let master_pubkey_file = match param.get("master-pubkey-file") {
|
|
||||||
Some(Value::String(keyfile)) => Some(keyfile),
|
|
||||||
Some(_) => bail!("bad --master-pubkey-file parameter type"),
|
|
||||||
None => None,
|
|
||||||
};
|
|
||||||
|
|
||||||
let master_pubkey_fd = match param.get("master-pubkey-fd") {
|
|
||||||
Some(Value::Number(key_fd)) => Some(
|
|
||||||
RawFd::try_from(key_fd
|
|
||||||
.as_i64()
|
|
||||||
.ok_or_else(|| format_err!("bad master public key fd: {:?}", key_fd))?
|
|
||||||
)
|
|
||||||
.map_err(|err| format_err!("bad public master key fd: {:?}: {}", key_fd, err))?
|
|
||||||
),
|
|
||||||
Some(_) => bail!("bad --master-pubkey-fd parameter type"),
|
|
||||||
None => None,
|
|
||||||
};
|
|
||||||
|
|
||||||
let mode: Option<CryptMode> = match param.get("crypt-mode") {
|
|
||||||
Some(mode) => Some(serde_json::from_value(mode.clone())?),
|
|
||||||
None => None,
|
|
||||||
};
|
|
||||||
|
|
||||||
let key = match (keyfile, key_fd) {
|
|
||||||
(None, None) => None,
|
|
||||||
(Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
|
|
||||||
(Some(keyfile), None) => Some(KeyWithSource::from_path(
|
|
||||||
keyfile.clone(),
|
|
||||||
file_get_contents(keyfile)?,
|
|
||||||
)),
|
|
||||||
(None, Some(fd)) => {
|
|
||||||
let input = unsafe { std::fs::File::from_raw_fd(fd) };
|
|
||||||
let mut data = Vec::new();
|
|
||||||
let _len: usize = { input }.read_to_end(&mut data).map_err(|err| {
|
|
||||||
format_err!("error reading encryption key from fd {}: {}", fd, err)
|
|
||||||
})?;
|
|
||||||
Some(KeyWithSource::from_fd(data))
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let master_pubkey = match (master_pubkey_file, master_pubkey_fd) {
|
|
||||||
(None, None) => None,
|
|
||||||
(Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
|
|
||||||
(Some(keyfile), None) => Some(KeyWithSource::from_path(
|
|
||||||
keyfile.clone(),
|
|
||||||
file_get_contents(keyfile)?,
|
|
||||||
)),
|
|
||||||
(None, Some(fd)) => {
|
|
||||||
let input = unsafe { std::fs::File::from_raw_fd(fd) };
|
|
||||||
let mut data = Vec::new();
|
|
||||||
let _len: usize = { input }
|
|
||||||
.read_to_end(&mut data)
|
|
||||||
.map_err(|err| format_err!("error reading master key from fd {}: {}", fd, err))?;
|
|
||||||
Some(KeyWithSource::from_fd(data))
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let res = match mode {
|
|
||||||
// no crypt mode, enable encryption if keys are available
|
|
||||||
None => match (key, master_pubkey) {
|
|
||||||
// only default keys if available
|
|
||||||
(None, None) => match key::read_optional_default_encryption_key()? {
|
|
||||||
None => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },
|
|
||||||
enc_key => {
|
|
||||||
let master_pubkey = key::read_optional_default_master_pubkey()?;
|
|
||||||
CryptoParams {
|
|
||||||
mode: CryptMode::Encrypt,
|
|
||||||
enc_key,
|
|
||||||
master_pubkey,
|
|
||||||
}
|
|
||||||
},
|
|
||||||
},
|
|
||||||
|
|
||||||
// explicit master key, default enc key needed
|
|
||||||
(None, master_pubkey) => match key::read_optional_default_encryption_key()? {
|
|
||||||
None => bail!("--master-pubkey-file/--master-pubkey-fd specified, but no key available"),
|
|
||||||
enc_key => {
|
|
||||||
CryptoParams {
|
|
||||||
mode: CryptMode::Encrypt,
|
|
||||||
enc_key,
|
|
||||||
master_pubkey,
|
|
||||||
}
|
|
||||||
},
|
|
||||||
},
|
|
||||||
|
|
||||||
// explicit keyfile, maybe default master key
|
|
||||||
(enc_key, None) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey: key::read_optional_default_master_pubkey()? },
|
|
||||||
|
|
||||||
// explicit keyfile and master key
|
|
||||||
(enc_key, master_pubkey) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey },
|
|
||||||
},
|
|
||||||
|
|
||||||
// explicitly disabled encryption
|
|
||||||
Some(CryptMode::None) => match (key, master_pubkey) {
|
|
||||||
// no keys => OK, no encryption
|
|
||||||
(None, None) => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },
|
|
||||||
|
|
||||||
// --keyfile and --crypt-mode=none
|
|
||||||
(Some(_), _) => bail!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive"),
|
|
||||||
|
|
||||||
// --master-pubkey-file and --crypt-mode=none
|
|
||||||
(_, Some(_)) => bail!("--master-pubkey-file/--master-pubkey-fd and --crypt-mode=none are mutually exclusive"),
|
|
||||||
},
|
|
||||||
|
|
||||||
// explicitly enabled encryption
|
|
||||||
Some(mode) => match (key, master_pubkey) {
|
|
||||||
// no key, maybe master key
|
|
||||||
(None, master_pubkey) => match key::read_optional_default_encryption_key()? {
|
|
||||||
None => bail!("--crypt-mode without --keyfile and no default key file available"),
|
|
||||||
enc_key => {
|
|
||||||
eprintln!("Encrypting with default encryption key!");
|
|
||||||
let master_pubkey = match master_pubkey {
|
|
||||||
None => key::read_optional_default_master_pubkey()?,
|
|
||||||
master_pubkey => master_pubkey,
|
|
||||||
};
|
|
||||||
|
|
||||||
CryptoParams {
|
|
||||||
mode,
|
|
||||||
enc_key,
|
|
||||||
master_pubkey,
|
|
||||||
}
|
|
||||||
},
|
|
||||||
},
|
|
||||||
|
|
||||||
// --keyfile and --crypt-mode other than none
|
|
||||||
(enc_key, master_pubkey) => {
|
|
||||||
let master_pubkey = match master_pubkey {
|
|
||||||
None => key::read_optional_default_master_pubkey()?,
|
|
||||||
master_pubkey => master_pubkey,
|
|
||||||
};
|
|
||||||
|
|
||||||
CryptoParams { mode, enc_key, master_pubkey }
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(res)
|
|
||||||
}
|
|
||||||
|
|
||||||
-#[test]
-// WARNING: there must only be one test for crypto_parameters as the default key handling is not
-// safe w.r.t. concurrency
-fn test_crypto_parameters_handling() -> Result<(), Error> {
-    let some_key = vec![1;1];
-    let default_key = vec![2;1];
-
-    let some_master_key = vec![3;1];
-    let default_master_key = vec![4;1];
-
-    let keypath = "./target/testout/keyfile.test";
-    let master_keypath = "./target/testout/masterkeyfile.test";
-    let invalid_keypath = "./target/testout/invalid_keyfile.test";
-
-    let no_key_res = CryptoParams {
-        enc_key: None,
-        master_pubkey: None,
-        mode: CryptMode::None,
-    };
-    let some_key_res = CryptoParams {
-        enc_key: Some(KeyWithSource::from_path(
-            keypath.to_string(),
-            some_key.clone(),
-        )),
-        master_pubkey: None,
-        mode: CryptMode::Encrypt,
-    };
-    let some_key_some_master_res = CryptoParams {
-        enc_key: Some(KeyWithSource::from_path(
-            keypath.to_string(),
-            some_key.clone(),
-        )),
-        master_pubkey: Some(KeyWithSource::from_path(
-            master_keypath.to_string(),
-            some_master_key.clone(),
-        )),
-        mode: CryptMode::Encrypt,
-    };
-    let some_key_default_master_res = CryptoParams {
-        enc_key: Some(KeyWithSource::from_path(
-            keypath.to_string(),
-            some_key.clone(),
-        )),
-        master_pubkey: Some(KeyWithSource::from_default(default_master_key.clone())),
-        mode: CryptMode::Encrypt,
-    };
-
-    let some_key_sign_res = CryptoParams {
-        enc_key: Some(KeyWithSource::from_path(
-            keypath.to_string(),
-            some_key.clone(),
-        )),
-        master_pubkey: None,
-        mode: CryptMode::SignOnly,
-    };
-    let default_key_res = CryptoParams {
-        enc_key: Some(KeyWithSource::from_default(default_key.clone())),
-        master_pubkey: None,
-        mode: CryptMode::Encrypt,
-    };
-    let default_key_sign_res = CryptoParams {
-        enc_key: Some(KeyWithSource::from_default(default_key.clone())),
-        master_pubkey: None,
-        mode: CryptMode::SignOnly,
-    };
-
-    replace_file(&keypath, &some_key, CreateOptions::default())?;
-    replace_file(&master_keypath, &some_master_key, CreateOptions::default())?;
-
-    // no params, no default key == no key
-    let res = crypto_parameters(&json!({}));
-    assert_eq!(res.unwrap(), no_key_res);
-
-    // keyfile param == key from keyfile
-    let res = crypto_parameters(&json!({"keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_res);
-
-    // crypt mode none == no key
-    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
-    assert_eq!(res.unwrap(), no_key_res);
-
-    // crypt mode encrypt/sign-only, no keyfile, no default key == Error
-    assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
-    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
-
-    // crypt mode none with explicit key == Error
-    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
-
-    // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
-    let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_sign_res);
-    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_res);
-
-    // invalid keyfile parameter always errors
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
-
-    // now set a default key
-    unsafe { key::set_test_encryption_key(Ok(Some(default_key.clone()))); }
-
-    // and repeat
-
-    // no params but default key == default key
-    let res = crypto_parameters(&json!({}));
-    assert_eq!(res.unwrap(), default_key_res);
-
-    // keyfile param == key from keyfile
-    let res = crypto_parameters(&json!({"keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_res);
-
-    // crypt mode none == no key
-    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
-    assert_eq!(res.unwrap(), no_key_res);
-
-    // crypt mode encrypt/sign-only, no keyfile, default key == default key with correct mode
-    let res = crypto_parameters(&json!({"crypt-mode": "sign-only"}));
-    assert_eq!(res.unwrap(), default_key_sign_res);
-    let res = crypto_parameters(&json!({"crypt-mode": "encrypt"}));
-    assert_eq!(res.unwrap(), default_key_res);
-
-    // crypt mode none with explicit key == Error
-    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
-
-    // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
-    let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_sign_res);
-    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_res);
-
-    // invalid keyfile parameter always errors
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
-
-    // now make default key retrieval error
-    unsafe { key::set_test_encryption_key(Err(format_err!("test error"))); }
-
-    // and repeat
-
-    // no params, default key retrieval errors == Error
-    assert!(crypto_parameters(&json!({})).is_err());
-
-    // keyfile param == key from keyfile
-    let res = crypto_parameters(&json!({"keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_res);
-
-    // crypt mode none == no key
-    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
-    assert_eq!(res.unwrap(), no_key_res);
-
-    // crypt mode encrypt/sign-only, no keyfile, default key error == Error
-    assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
-    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
-
-    // crypt mode none with explicit key == Error
-    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
-
-    // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
-    let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_sign_res);
-    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_res);
-
-    // invalid keyfile parameter always errors
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
-
-    // now remove default key again
-    unsafe { key::set_test_encryption_key(Ok(None)); }
-    // set a default master key
-    unsafe { key::set_test_default_master_pubkey(Ok(Some(default_master_key.clone()))); }
-
-    // and use an explicit master key
-    assert!(crypto_parameters(&json!({"master-pubkey-file": master_keypath})).is_err());
-    // just a default == no key
-    let res = crypto_parameters(&json!({}));
-    assert_eq!(res.unwrap(), no_key_res);
-
-    // keyfile param == key from keyfile
-    let res = crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": master_keypath}));
-    assert_eq!(res.unwrap(), some_key_some_master_res);
-    // same with fallback to default master key
-    let res = crypto_parameters(&json!({"keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_default_master_res);
-
-    // crypt mode none == error
-    assert!(crypto_parameters(&json!({"crypt-mode": "none", "master-pubkey-file": master_keypath})).is_err());
-    // with just default master key == no key
-    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
-    assert_eq!(res.unwrap(), no_key_res);
-
-    // crypt mode encrypt without enc key == error
-    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt", "master-pubkey-file": master_keypath})).is_err());
-    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
-
-    // crypt mode none with explicit key == Error
-    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath, "master-pubkey-file": master_keypath})).is_err());
-    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
-
-    // crypt mode encrypt with keyfile == key from keyfile with correct mode
-    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath, "master-pubkey-file": master_keypath}));
-    assert_eq!(res.unwrap(), some_key_some_master_res);
-    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_default_master_res);
-
-    // invalid master keyfile parameter always errors when a key is passed, even with a valid
-    // default master key
-    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath, "crypt-mode": "none"})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
-
-    Ok(())
-}
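The removed test doubles as a specification for crypto_parameters(): an explicit keyfile beats the default key, crypt-mode=none forbids explicit keys, and a master pubkey only matters when encrypting. A minimal sketch of a call (not part of the diff; the JSON values mirror the test fixtures, inside test-style code returning Result<(), Error>):

    let params = crypto_parameters(&json!({
        "crypt-mode": "encrypt",
        "keyfile": "./target/testout/keyfile.test",
    }))?;
    assert_eq!(params.mode, CryptMode::Encrypt);
    assert!(params.enc_key.is_some());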
#[api(
    input: {
        properties: {
@@ -1160,7 +741,7 @@ async fn create_backup(
    );

    let (key, created, fingerprint) =
-        decrypt_key(&key_with_source.key, &key::get_encryption_key_password)?;
+        decrypt_key(&key_with_source.key, &get_encryption_key_password)?;
    println!("Encryption key fingerprint: {}", fingerprint);

    let crypt_config = CryptConfig::new(key)?;
@@ -1510,7 +1091,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
        None => None,
        Some(ref key) => {
            let (key, _, _) =
-                decrypt_key(&key.key, &key::get_encryption_key_password).map_err(|err| {
+                decrypt_key(&key.key, &get_encryption_key_password).map_err(|err| {
                    eprintln!("{}", format_key_source(&key.source, "encryption"));
                    err
                })?;
@@ -14,6 +14,7 @@ use proxmox::api::RpcEnvironmentType;
use proxmox_backup::{
    backup::DataStore,
    server::{
+       auth::default_api_auth,
        WorkerTask,
        ApiConfig,
        rest::*,
@@ -40,6 +41,7 @@ use proxmox_backup::tools::{
    disks::{
        DiskManage,
        zfs_pool_stats,
+       get_pool_from_dataset,
    },
    logrotate::LogRotate,
    socket::{
@@ -84,12 +86,11 @@ async fn run() -> Result<(), Error> {
    let _ = csrf_secret(); // load with lazy_static

    let mut config = ApiConfig::new(
-       buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;
-
-   // Enable experimental tape UI if tape.cfg exists
-   if Path::new("/etc/proxmox-backup/tape.cfg").exists() {
-       config.enable_tape_ui = true;
-   }
+       buildcfg::JS_DIR,
+       &proxmox_backup::api2::ROUTER,
+       RpcEnvironmentType::PUBLIC,
+       default_api_auth(),
+   )?;

    config.add_alias("novnc", "/usr/share/novnc-pve");
    config.add_alias("extjs", "/usr/share/javascript/extjs");
@@ -865,8 +866,9 @@ fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str
    let mut device_stat = None;
    match fs_type.as_str() {
        "zfs" => {
-           if let Some(pool) = source {
-               match zfs_pool_stats(&pool) {
+           if let Some(source) = source {
+               let pool = get_pool_from_dataset(&source).unwrap_or(&source);
+               match zfs_pool_stats(pool) {
                    Ok(stat) => device_stat = stat,
                    Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
                }
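The new get_pool_from_dataset() helper only appears here through its call site. A hedged sketch of the contract that call site implies (the real signature and module in the tree may differ):

    // Assumed behavior, inferred from `get_pool_from_dataset(&source).unwrap_or(&source)`:
    // "rpool/data/vm-100-disk-0" maps to Some("rpool"); a bare pool name contains
    // no '/', yields None, and the caller falls back to the full source string.
    fn get_pool_from_dataset(dataset: &str) -> Option<&str> {
        dataset.find('/').map(|pos| &dataset[..pos])
    }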
src/bin/proxmox-file-restore.rs (new file, 477 lines)
@@ -0,0 +1,477 @@
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};

use proxmox::api::{
    api,
    cli::{
        default_table_format_options, format_and_print_result_full, get_output_format,
        run_cli_command, CliCommand, CliCommandMap, CliEnvironment, ColumnConfig, OUTPUT_FORMAT,
    },
};
use pxar::accessor::aio::Accessor;
use pxar::decoder::aio::Decoder;

use proxmox_backup::api2::{helpers, types::ArchiveEntry};
use proxmox_backup::backup::{
    decrypt_key, BackupDir, BufferedDynamicReader, CatalogReader, CryptConfig, CryptMode,
    DirEntryAttribute, IndexFile, LocalDynamicReadAt, CATALOG_NAME,
};
use proxmox_backup::client::{BackupReader, RemoteChunkReader};
use proxmox_backup::pxar::{create_zip, extract_sub_dir, extract_sub_dir_seq};
use proxmox_backup::tools;

// use "pub" so rust doesn't complain about "unused" functions in the module
pub mod proxmox_client_tools;
use proxmox_client_tools::{
    complete_group_or_snapshot, complete_repository, connect, extract_repository_from_value,
    key_source::{
        crypto_parameters_keep_fd, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
        KEYFILE_SCHEMA,
    },
    REPO_URL_SCHEMA,
};

mod proxmox_file_restore;
use proxmox_file_restore::*;

enum ExtractPath {
    ListArchives,
    Pxar(String, Vec<u8>),
    VM(String, Vec<u8>),
}

fn parse_path(path: String, base64: bool) -> Result<ExtractPath, Error> {
    let mut bytes = if base64 {
        base64::decode(path)?
    } else {
        path.into_bytes()
    };

    if bytes == b"/" {
        return Ok(ExtractPath::ListArchives);
    }

    while !bytes.is_empty() && bytes[0] == b'/' {
        bytes.remove(0);
    }

    let (file, path) = {
        let slash_pos = bytes.iter().position(|c| *c == b'/').unwrap_or(bytes.len());
        let path = bytes.split_off(slash_pos);
        let file = String::from_utf8(bytes)?;
        (file, path)
    };

    if file.ends_with(".pxar.didx") {
        Ok(ExtractPath::Pxar(file, path))
    } else if file.ends_with(".img.fidx") {
        Ok(ExtractPath::VM(file, path))
    } else {
        bail!("'{}' is not supported for file-restore", file);
    }
}
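To illustrate how parse_path() splits client input (the archive name is a hypothetical example; assume a surrounding function returning Result<(), Error>):

    // "/" lists all archives in the snapshot:
    assert!(matches!(parse_path("/".to_string(), false)?, ExtractPath::ListArchives));
    // otherwise the first '/' separates the archive name from the path inside it:
    match parse_path("/root.pxar.didx/etc/hosts".to_string(), false)? {
        ExtractPath::Pxar(file, path) => {
            assert_eq!(file, "root.pxar.didx");
            assert_eq!(path, b"/etc/hosts");
        }
        _ => unreachable!(),
    }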
fn keyfile_path(param: &Value) -> Option<String> {
    if let Some(Value::String(keyfile)) = param.get("keyfile") {
        return Some(keyfile.to_owned());
    }

    if let Some(Value::Number(keyfd)) = param.get("keyfd") {
        return Some(format!("/dev/fd/{}", keyfd));
    }

    None
}
#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            "path": {
                description: "Path to restore. Directories will be restored as .zip files.",
                type: String,
            },
            "base64": {
                type: Boolean,
                description: "If set, 'path' will be interpreted as base64 encoded.",
                optional: true,
                default: false,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
            "crypt-mode": {
                type: CryptMode,
                optional: true,
            },
            "driver": {
                type: BlockDriverType,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    },
    returns: {
        description: "A list of elements under the given path",
        type: Array,
        items: {
            type: ArchiveEntry,
        }
    }
)]
/// List a directory from a backup snapshot.
async fn list(
    snapshot: String,
    path: String,
    base64: bool,
    param: Value,
) -> Result<(), Error> {
    let repo = extract_repository_from_value(&param)?;
    let snapshot: BackupDir = snapshot.parse()?;
    let path = parse_path(path, base64)?;

    let keyfile = keyfile_path(&param);
    let crypto = crypto_parameters_keep_fd(&param)?;
    let crypt_config = match crypto.enc_key {
        None => None,
        Some(ref key) => {
            let (key, _, _) =
                decrypt_key(&key.key, &get_encryption_key_password).map_err(|err| {
                    eprintln!("{}", format_key_source(&key.source, "encryption"));
                    err
                })?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = connect(&repo)?;
    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &snapshot.group().backup_type(),
        &snapshot.group().backup_id(),
        snapshot.backup_time(),
        true,
    )
    .await?;

    let (manifest, _) = client.download_manifest().await?;
    manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;

    let result = match path {
        ExtractPath::ListArchives => {
            let mut entries = vec![];
            for file in manifest.files() {
                if !file.filename.ends_with(".pxar.didx") && !file.filename.ends_with(".img.fidx") {
                    continue;
                }
                let path = format!("/{}", file.filename);
                let attr = if file.filename.ends_with(".pxar.didx") {
                    // a pxar file is a file archive, so its root is also a directory root
                    Some(&DirEntryAttribute::Directory { start: 0 })
                } else {
                    None
                };
                entries.push(ArchiveEntry::new(path.as_bytes(), attr));
            }

            Ok(entries)
        }
        ExtractPath::Pxar(file, mut path) => {
            let index = client
                .download_dynamic_index(&manifest, CATALOG_NAME)
                .await?;
            let most_used = index.find_most_used_chunks(8);
            let file_info = manifest.lookup_file_info(&CATALOG_NAME)?;
            let chunk_reader = RemoteChunkReader::new(
                client.clone(),
                crypt_config,
                file_info.chunk_crypt_mode(),
                most_used,
            );
            let reader = BufferedDynamicReader::new(index, chunk_reader);
            let mut catalog_reader = CatalogReader::new(reader);

            let mut fullpath = file.into_bytes();
            fullpath.append(&mut path);

            helpers::list_dir_content(&mut catalog_reader, &fullpath)
        }
        ExtractPath::VM(file, path) => {
            let details = SnapRestoreDetails {
                manifest,
                repo,
                snapshot,
                keyfile,
            };
            let driver: Option<BlockDriverType> = match param.get("driver") {
                Some(drv) => Some(serde_json::from_value(drv.clone())?),
                None => None,
            };
            data_list(driver, details, file, path).await
        }
    }?;

    let options = default_table_format_options()
        .sortby("type", false)
        .sortby("text", false)
        .column(ColumnConfig::new("type"))
        .column(ColumnConfig::new("text").header("name"))
        .column(ColumnConfig::new("mtime").header("last modified"))
        .column(ColumnConfig::new("size"));

    let output_format = get_output_format(&param);
    format_and_print_result_full(
        &mut json!(result),
        &API_METHOD_LIST.returns,
        &output_format,
        &options,
    );

    Ok(())
}
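In practice this surfaces as, for example (repository and snapshot values are hypothetical), `proxmox-file-restore list host/myvm/2021-03-24T10:00:00Z / --repository backup@pbs@localhost:store` to enumerate the restorable archives, with a path such as /root.pxar.didx/etc descending into one of them.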
#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            "path": {
                description: "Path to restore. Directories will be restored as .zip files if extracted to stdout.",
                type: String,
            },
            "base64": {
                type: Boolean,
                description: "If set, 'path' will be interpreted as base64 encoded.",
                optional: true,
                default: false,
            },
            target: {
                type: String,
                optional: true,
                description: "Target directory path. Use '-' to write to standard output.",
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
            "crypt-mode": {
                type: CryptMode,
                optional: true,
            },
            verbose: {
                type: Boolean,
                description: "Print verbose information",
                optional: true,
                default: false,
            },
            "driver": {
                type: BlockDriverType,
                optional: true,
            },
        }
    }
)]
/// Restore files from a backup snapshot.
async fn extract(
    snapshot: String,
    path: String,
    base64: bool,
    target: Option<String>,
    verbose: bool,
    param: Value,
) -> Result<(), Error> {
    let repo = extract_repository_from_value(&param)?;
    let snapshot: BackupDir = snapshot.parse()?;
    let orig_path = path;
    let path = parse_path(orig_path.clone(), base64)?;

    let target = match target {
        Some(target) if target == "-" => None,
        Some(target) => Some(PathBuf::from(target)),
        None => Some(std::env::current_dir()?),
    };

    let keyfile = keyfile_path(&param);
    let crypto = crypto_parameters_keep_fd(&param)?;
    let crypt_config = match crypto.enc_key {
        None => None,
        Some(ref key) => {
            let (key, _, _) =
                decrypt_key(&key.key, &get_encryption_key_password).map_err(|err| {
                    eprintln!("{}", format_key_source(&key.source, "encryption"));
                    err
                })?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = connect(&repo)?;
    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &snapshot.group().backup_type(),
        &snapshot.group().backup_id(),
        snapshot.backup_time(),
        true,
    )
    .await?;
    let (manifest, _) = client.download_manifest().await?;

    match path {
        ExtractPath::Pxar(archive_name, path) => {
            let file_info = manifest.lookup_file_info(&archive_name)?;
            let index = client
                .download_dynamic_index(&manifest, &archive_name)
                .await?;
            let most_used = index.find_most_used_chunks(8);
            let chunk_reader = RemoteChunkReader::new(
                client.clone(),
                crypt_config,
                file_info.chunk_crypt_mode(),
                most_used,
            );
            let reader = BufferedDynamicReader::new(index, chunk_reader);

            let archive_size = reader.archive_size();
            let reader = LocalDynamicReadAt::new(reader);
            let decoder = Accessor::new(reader, archive_size).await?;
            extract_to_target(decoder, &path, target, verbose).await?;
        }
        ExtractPath::VM(file, path) => {
            let details = SnapRestoreDetails {
                manifest,
                repo,
                snapshot,
                keyfile,
            };
            let driver: Option<BlockDriverType> = match param.get("driver") {
                Some(drv) => Some(serde_json::from_value(drv.clone())?),
                None => None,
            };

            if let Some(mut target) = target {
                let reader = data_extract(driver, details, file, path.clone(), true).await?;
                let decoder = Decoder::from_tokio(reader).await?;
                extract_sub_dir_seq(&target, decoder, verbose).await?;

                // we extracted a .pxarexclude-cli file auto-generated by the VM when encoding the
                // archive; this file is of no use for the user, so try to remove it
                target.push(".pxarexclude-cli");
                std::fs::remove_file(target).map_err(|e| {
                    format_err!("unable to remove temporary .pxarexclude-cli file - {}", e)
                })?;
            } else {
                let mut reader = data_extract(driver, details, file, path.clone(), false).await?;
                tokio::io::copy(&mut reader, &mut tokio::io::stdout()).await?;
            }
        }
        _ => {
            bail!("cannot extract '{}'", orig_path);
        }
    }

    Ok(())
}
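Correspondingly (again with hypothetical values), `proxmox-file-restore extract host/myvm/2021-03-24T10:00:00Z /root.pxar.didx/etc/hosts --target .` restores into the current directory, while `--target -` streams to stdout; per the schema above, a directory extracted to stdout arrives as a .zip archive.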
async fn extract_to_target<T>(
    decoder: Accessor<T>,
    path: &[u8],
    target: Option<PathBuf>,
    verbose: bool,
) -> Result<(), Error>
where
    T: pxar::accessor::ReadAt + Clone + Send + Sync + Unpin + 'static,
{
    let path = if path.is_empty() { b"/" } else { path };

    let root = decoder.open_root().await?;
    let file = root
        .lookup(OsStr::from_bytes(path))
        .await?
        .ok_or_else(|| format_err!("error opening '{:?}'", path))?;

    if let Some(target) = target {
        extract_sub_dir(target, decoder, OsStr::from_bytes(path), verbose).await?;
    } else {
        match file.kind() {
            pxar::EntryKind::File { .. } => {
                tokio::io::copy(&mut file.contents().await?, &mut tokio::io::stdout()).await?;
            }
            _ => {
                create_zip(
                    tokio::io::stdout(),
                    decoder,
                    OsStr::from_bytes(path),
                    verbose,
                )
                .await?;
            }
        }
    }

    Ok(())
}

fn main() {
    let list_cmd_def = CliCommand::new(&API_METHOD_LIST)
        .arg_param(&["snapshot", "path"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot);

    let restore_cmd_def = CliCommand::new(&API_METHOD_EXTRACT)
        .arg_param(&["snapshot", "path", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("target", tools::complete_file_name);

    let status_cmd_def = CliCommand::new(&API_METHOD_STATUS);
    let stop_cmd_def = CliCommand::new(&API_METHOD_STOP)
        .arg_param(&["name"])
        .completion_cb("name", complete_block_driver_ids);

    let cmd_def = CliCommandMap::new()
        .insert("list", list_cmd_def)
        .insert("extract", restore_cmd_def)
        .insert("status", status_cmd_def)
        .insert("stop", stop_cmd_def);

    let rpcenv = CliEnvironment::new();
    run_cli_command(
        cmd_def,
        rpcenv,
        Some(|future| proxmox_backup::tools::runtime::main(future)),
    );
}
src/bin/proxmox-restore-daemon.rs (new file, 124 lines)
@@ -0,0 +1,124 @@
//! Daemon binary to run inside a micro-VM for secure single file restore of disk images
use anyhow::{bail, format_err, Error};
use log::error;
use lazy_static::lazy_static;

use std::os::unix::{
    io::{FromRawFd, RawFd},
    net,
};
use std::path::Path;
use std::sync::{Arc, Mutex};

use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;

use proxmox::api::RpcEnvironmentType;
use proxmox_backup::client::DEFAULT_VSOCK_PORT;
use proxmox_backup::server::{rest::*, ApiConfig};

mod proxmox_restore_daemon;
use proxmox_restore_daemon::*;

/// Maximum amount of pending requests. If saturated, virtio-vsock returns ETIMEDOUT immediately.
/// We should never have more than a few requests in queue, so use a low number.
pub const MAX_PENDING: usize = 32;

/// Will be present in base initramfs
pub const VM_DETECT_FILE: &str = "/restore-vm-marker";

lazy_static! {
    /// The current disks state. Use for accessing data on the attached snapshots.
    pub static ref DISK_STATE: Arc<Mutex<DiskState>> = {
        Arc::new(Mutex::new(DiskState::scan().unwrap()))
    };
}

/// This is expected to be run by 'proxmox-file-restore' within a mini-VM
fn main() -> Result<(), Error> {
    if !Path::new(VM_DETECT_FILE).exists() {
        bail!(concat!(
            "This binary is not supposed to be run manually. ",
            "Please use 'proxmox-file-restore' instead."
        ));
    }

    // don't have a real syslog (and no persistence), so use env_logger to print to a log file (via
    // stdout to a serial terminal attached by QEMU)
    env_logger::from_env(env_logger::Env::default().default_filter_or("info"))
        .write_style(env_logger::WriteStyle::Never)
        .init();

    // scan all attached disks now, before starting the API
    // this will panic and stop the VM if anything goes wrong
    {
        let _disk_state = DISK_STATE.lock().unwrap();
    }

    proxmox_backup::tools::runtime::main(run())
}

async fn run() -> Result<(), Error> {
    watchdog_init();

    let auth_config = Arc::new(
        auth::ticket_auth().map_err(|err| format_err!("reading ticket file failed: {}", err))?,
    );
    let config = ApiConfig::new("", &ROUTER, RpcEnvironmentType::PUBLIC, auth_config)?;
    let rest_server = RestServer::new(config);

    let vsock_fd = get_vsock_fd()?;
    let connections = accept_vsock_connections(vsock_fd);
    let receiver_stream = ReceiverStream::new(connections);
    let acceptor = hyper::server::accept::from_stream(receiver_stream);

    hyper::Server::builder(acceptor).serve(rest_server).await?;

    bail!("hyper server exited");
}

fn accept_vsock_connections(
    vsock_fd: RawFd,
) -> mpsc::Receiver<Result<tokio::net::UnixStream, Error>> {
    use nix::sys::socket::*;
    let (sender, receiver) = mpsc::channel(MAX_PENDING);

    tokio::spawn(async move {
        loop {
            let stream: Result<tokio::net::UnixStream, Error> = tokio::task::block_in_place(|| {
                // we need to accept manually, as UnixListener aborts if socket type != AF_UNIX ...
                let client_fd = accept(vsock_fd)?;
                let stream = unsafe { net::UnixStream::from_raw_fd(client_fd) };
                stream.set_nonblocking(true)?;
                tokio::net::UnixStream::from_std(stream).map_err(|err| err.into())
            });

            match stream {
                Ok(stream) => {
                    if sender.send(Ok(stream)).await.is_err() {
                        error!("connection accept channel was closed");
                    }
                }
                Err(err) => {
                    error!("error accepting vsock connection: {}", err);
                }
            }
        }
    });

    receiver
}

fn get_vsock_fd() -> Result<RawFd, Error> {
    use nix::sys::socket::*;
    let sock_fd = socket(
        AddressFamily::Vsock,
        SockType::Stream,
        SockFlag::empty(),
        None,
    )?;
    let sock_addr = VsockAddr::new(libc::VMADDR_CID_ANY, DEFAULT_VSOCK_PORT as u32);
    bind(sock_fd, &SockAddr::Vsock(sock_addr))?;
    listen(sock_fd, MAX_PENDING)?;
    Ok(sock_fd)
}
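For context, the host side reaches this daemon over the same nix socket API, connecting to the guest's CID instead of binding to VMADDR_CID_ANY. A minimal sketch (the CID value is whatever the hypervisor assigned; illustrative only, not part of the diff):

    use nix::sys::socket::{connect, socket, AddressFamily, SockAddr, SockFlag, SockType, VsockAddr};
    use std::os::unix::io::RawFd;

    // Sketch: open a stream to a restore daemon listening on `port` in the VM with `cid`.
    fn connect_vsock(cid: u32, port: u32) -> Result<RawFd, anyhow::Error> {
        let fd = socket(AddressFamily::Vsock, SockType::Stream, SockFlag::empty(), None)?;
        connect(fd, &SockAddr::Vsock(VsockAddr::new(cid, port)))?;
        Ok(fd)
    }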
@@ -27,10 +27,13 @@ use proxmox_backup::{
    api2::{
        self,
        types::{
+           Authid,
            DATASTORE_SCHEMA,
+           DATASTORE_MAP_LIST_SCHEMA,
            DRIVE_NAME_SCHEMA,
            MEDIA_LABEL_SCHEMA,
            MEDIA_POOL_NAME_SCHEMA,
+           Userid,
        },
    },
    config::{
@@ -40,6 +43,7 @@ use proxmox_backup::{
        media_pool::complete_pool_name,
    },
    tape::{
+       BlockReadError,
        drive::{
            open_drive,
            lock_tape_device,
@@ -112,8 +116,8 @@ pub fn extract_drive_name(
        },
    },
)]
-/// Erase media
-async fn erase_media(mut param: Value) -> Result<(), Error> {
+/// Format media
+async fn format_media(mut param: Value) -> Result<(), Error> {

    let output_format = get_output_format(&param);

@@ -123,7 +127,7 @@ async fn erase_media(mut param: Value) -> Result<(), Error> {

    let mut client = connect_to_localhost()?;

-   let path = format!("api2/json/tape/drive/{}/erase-media", drive);
+   let path = format!("api2/json/tape/drive/{}/format-media", drive);
    let result = client.post(&path, Some(param)).await?;

    view_task_result(&mut client, result, &output_format).await?;
@@ -548,7 +552,7 @@ fn move_to_eom(mut param: Value) -> Result<(), Error> {

    let mut drive = open_drive(&config, &drive)?;

-   drive.move_to_eom()?;
+   drive.move_to_eom(false)?;

    Ok(())
}
@@ -584,12 +588,19 @@ fn debug_scan(mut param: Value) -> Result<(), Error> {
    loop {
        let file_number = drive.current_file_number()?;

-       match drive.read_next_file()? {
-           None => {
-               println!("EOD");
+       match drive.read_next_file() {
+           Err(BlockReadError::EndOfFile) => {
+               println!("filemark number {}", file_number);
                continue;
-           },
-           Some(mut reader) => {
+           }
+           Err(BlockReadError::EndOfStream) => {
+               println!("got EOT");
+               return Ok(());
+           }
+           Err(BlockReadError::Error(err)) => {
+               return Err(err.into());
+           }
+           Ok(mut reader) => {
                println!("got file number {}", file_number);

                let header: Result<MediaContentHeader, _> = unsafe { reader.read_le_value() };
@@ -611,8 +622,15 @@ fn debug_scan(mut param: Value) -> Result<(), Error> {
                    println!("unable to read content header - {}", err);
                }
            }
-           let bytes = reader.skip_to_end()?;
+           let bytes = reader.skip_data()?;
            println!("skipped {}", HumanByte::from(bytes));
+           if let Ok(true) = reader.has_end_marker() {
+               if reader.is_incomplete()? {
+                   println!("WARNING: file is incomplete");
+               }
+           } else {
+               println!("WARNING: file without end marker");
+           }
        }
    }
}
@@ -738,8 +756,9 @@ async fn status(mut param: Value) -> Result<(), Error> {
    let options = default_table_format_options()
        .column(ColumnConfig::new("blocksize"))
        .column(ColumnConfig::new("density"))
-       .column(ColumnConfig::new("status"))
-       .column(ColumnConfig::new("options"))
+       .column(ColumnConfig::new("compression"))
+       .column(ColumnConfig::new("buffer-mode"))
+       .column(ColumnConfig::new("write-protect"))
        .column(ColumnConfig::new("alert-flags"))
        .column(ColumnConfig::new("file-number"))
        .column(ColumnConfig::new("block-number"))
@@ -781,8 +800,8 @@ async fn clean_drive(mut param: Value) -> Result<(), Error> {

    let mut client = connect_to_localhost()?;

-   let path = format!("api2/json/tape/drive/{}/clean-drive", drive);
-   let result = client.post(&path, Some(param)).await?;
+   let path = format!("api2/json/tape/drive/{}/clean", drive);
+   let result = client.put(&path, Some(param)).await?;

    view_task_result(&mut client, result, &output_format).await?;

@@ -853,7 +872,7 @@ async fn backup(mut param: Value) -> Result<(), Error> {
    input: {
        properties: {
            store: {
-               schema: DATASTORE_SCHEMA,
+               schema: DATASTORE_MAP_LIST_SCHEMA,
            },
            drive: {
                schema: DRIVE_NAME_SCHEMA,
@@ -863,6 +882,14 @@ async fn backup(mut param: Value) -> Result<(), Error> {
                description: "Media set UUID.",
                type: String,
            },
+           "notify-user": {
+               type: Userid,
+               optional: true,
+           },
+           owner: {
+               type: Authid,
+               optional: true,
+           },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
@@ -900,6 +927,11 @@ async fn restore(mut param: Value) -> Result<(), Error> {
                type: bool,
                optional: true,
            },
+           scan: {
+               description: "Re-read the whole tape to reconstruct the catalog instead of restoring saved versions.",
+               type: bool,
+               optional: true,
+           },
            verbose: {
                description: "Verbose mode - log all found chunks.",
                type: bool,
@@ -976,8 +1008,8 @@ fn main() {
            .completion_cb("drive", complete_drive_name)
        )
        .insert(
-           "erase",
-           CliCommand::new(&API_METHOD_ERASE_MEDIA)
+           "format",
+           CliCommand::new(&API_METHOD_FORMAT_MEDIA)
                .completion_cb("drive", complete_drive_name)
        )
        .insert(
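User-visible effect of the rename in the command map above: what used to be invoked as `proxmox-tape erase` is now `proxmox-tape format`, backed by the renamed format-media API endpoint; the old spelling is simply gone.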
@@ -34,6 +34,8 @@ use crate::{
    connect,
};

+use crate::proxmox_client_tools::key_source::get_encryption_key_password;
+
#[api()]
#[derive(Copy, Clone, Serialize)]
/// Speed test result
@@ -152,7 +154,7 @@ pub async fn benchmark(
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
-           let (key, _, _) = load_and_decrypt_key(&path, &crate::key::get_encryption_key_password)?;
+           let (key, _, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
@@ -17,7 +17,6 @@ use crate::{
    extract_repository_from_value,
    format_key_source,
    record_repository,
-   key::get_encryption_key_password,
    decrypt_key,
    api_datastore_latest_snapshot,
    complete_repository,
@@ -38,6 +37,8 @@ use crate::{
    Shell,
};

+use crate::proxmox_client_tools::key_source::get_encryption_key_password;
+
#[api(
    input: {
        properties: {
|
@ -20,114 +20,10 @@ use proxmox_backup::{
|
|||||||
tools::paperkey::{generate_paper_key, PaperkeyFormat},
|
tools::paperkey::{generate_paper_key, PaperkeyFormat},
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::KeyWithSource;
|
use crate::proxmox_client_tools::key_source::{
|
||||||
|
find_default_encryption_key, find_default_master_pubkey, get_encryption_key_password,
|
||||||
pub const DEFAULT_ENCRYPTION_KEY_FILE_NAME: &str = "encryption-key.json";
|
place_default_encryption_key, place_default_master_pubkey,
|
||||||
pub const DEFAULT_MASTER_PUBKEY_FILE_NAME: &str = "master-public.pem";
|
};
|
||||||
|
|
||||||
pub fn find_default_master_pubkey() -> Result<Option<PathBuf>, Error> {
|
|
||||||
super::find_xdg_file(
|
|
||||||
DEFAULT_MASTER_PUBKEY_FILE_NAME,
|
|
||||||
"default master public key file",
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn place_default_master_pubkey() -> Result<PathBuf, Error> {
|
|
||||||
super::place_xdg_file(
|
|
||||||
DEFAULT_MASTER_PUBKEY_FILE_NAME,
|
|
||||||
"default master public key file",
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn find_default_encryption_key() -> Result<Option<PathBuf>, Error> {
|
|
||||||
super::find_xdg_file(
|
|
||||||
DEFAULT_ENCRYPTION_KEY_FILE_NAME,
|
|
||||||
"default encryption key file",
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn place_default_encryption_key() -> Result<PathBuf, Error> {
|
|
||||||
super::place_xdg_file(
|
|
||||||
DEFAULT_ENCRYPTION_KEY_FILE_NAME,
|
|
||||||
"default encryption key file",
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(not(test))]
|
|
||||||
pub(crate) fn read_optional_default_encryption_key() -> Result<Option<KeyWithSource>, Error> {
|
|
||||||
find_default_encryption_key()?
|
|
||||||
.map(|path| file_get_contents(path).map(KeyWithSource::from_default))
|
|
||||||
.transpose()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(not(test))]
|
|
||||||
pub(crate) fn read_optional_default_master_pubkey() -> Result<Option<KeyWithSource>, Error> {
|
|
||||||
find_default_master_pubkey()?
|
|
||||||
.map(|path| file_get_contents(path).map(KeyWithSource::from_default))
|
|
||||||
.transpose()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
static mut TEST_DEFAULT_ENCRYPTION_KEY: Result<Option<Vec<u8>>, Error> = Ok(None);
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
pub(crate) fn read_optional_default_encryption_key() -> Result<Option<KeyWithSource>, Error> {
|
|
||||||
// not safe when multiple concurrent test cases end up here!
|
|
||||||
unsafe {
|
|
||||||
match &TEST_DEFAULT_ENCRYPTION_KEY {
|
|
||||||
Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))),
|
|
||||||
Ok(None) => Ok(None),
|
|
||||||
Err(_) => bail!("test error"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
// not safe when multiple concurrent test cases end up here!
|
|
||||||
pub(crate) unsafe fn set_test_encryption_key(value: Result<Option<Vec<u8>>, Error>) {
|
|
||||||
TEST_DEFAULT_ENCRYPTION_KEY = value;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
static mut TEST_DEFAULT_MASTER_PUBKEY: Result<Option<Vec<u8>>, Error> = Ok(None);
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
pub(crate) fn read_optional_default_master_pubkey() -> Result<Option<KeyWithSource>, Error> {
|
|
||||||
// not safe when multiple concurrent test cases end up here!
|
|
||||||
unsafe {
|
|
||||||
match &TEST_DEFAULT_MASTER_PUBKEY {
|
|
||||||
Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))),
|
|
||||||
Ok(None) => Ok(None),
|
|
||||||
Err(_) => bail!("test error"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
// not safe when multiple concurrent test cases end up here!
|
|
||||||
pub(crate) unsafe fn set_test_default_master_pubkey(value: Result<Option<Vec<u8>>, Error>) {
|
|
||||||
TEST_DEFAULT_MASTER_PUBKEY = value;
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
|
|
||||||
// fixme: implement other input methods
|
|
||||||
|
|
||||||
use std::env::VarError::*;
|
|
||||||
match std::env::var("PBS_ENCRYPTION_PASSWORD") {
|
|
||||||
Ok(p) => return Ok(p.as_bytes().to_vec()),
|
|
||||||
Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
|
|
||||||
Err(NotPresent) => {
|
|
||||||
// Try another method
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we're on a TTY, query the user for a password
|
|
||||||
if tty::stdin_isatty() {
|
|
||||||
return Ok(tty::read_password("Encryption Key Password: ")?);
|
|
||||||
}
|
|
||||||
|
|
||||||
bail!("no password input mechanism available");
|
|
||||||
}
|
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
input: {
|
input: {
|
||||||
|
@@ -1,5 +1,3 @@
-use anyhow::{Context, Error};
-
mod benchmark;
pub use benchmark::*;
mod mount;
@@ -13,29 +11,3 @@ pub use snapshot::*;

pub mod key;
-
-pub fn base_directories() -> Result<xdg::BaseDirectories, Error> {
-    xdg::BaseDirectories::with_prefix("proxmox-backup").map_err(Error::from)
-}
-
-/// Convenience helper for better error messages:
-pub fn find_xdg_file(
-    file_name: impl AsRef<std::path::Path>,
-    description: &'static str,
-) -> Result<Option<std::path::PathBuf>, Error> {
-    let file_name = file_name.as_ref();
-    base_directories()
-        .map(|base| base.find_config_file(file_name))
-        .with_context(|| format!("error searching for {}", description))
-}
-
-pub fn place_xdg_file(
-    file_name: impl AsRef<std::path::Path>,
-    description: &'static str,
-) -> Result<std::path::PathBuf, Error> {
-    let file_name = file_name.as_ref();
-    base_directories()
-        .and_then(|base| {
-            base.place_config_file(file_name).map_err(Error::from)
-        })
-        .with_context(|| format!("failed to place {} in xdg home", description))
-}
@@ -43,6 +43,8 @@ use crate::{
    BufferedDynamicReadAt,
};

+use crate::proxmox_client_tools::key_source::get_encryption_key_password;
+
#[sortable]
const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&mount),
@@ -182,7 +184,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
        None => None,
        Some(path) => {
            println!("Encryption key file: '{:?}'", path);
-           let (key, _, fingerprint) = load_and_decrypt_key(&path, &crate::key::get_encryption_key_password)?;
+           let (key, _, fingerprint) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            println!("Encryption key fingerprint: '{}'", fingerprint);
            Some(Arc::new(CryptConfig::new(key)?))
        }
@@ -35,6 +35,8 @@ use crate::{
    record_repository,
};

+use crate::proxmox_client_tools::key_source::get_encryption_key_password;
+
#[api(
    input: {
        properties: {
@@ -239,7 +241,7 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
    let crypt_config = match crypto.enc_key {
        None => None,
        Some(key) => {
-           let (key, _created, _) = decrypt_key(&key.key, &crate::key::get_encryption_key_password)?;
+           let (key, _created, _) = decrypt_key(&key.key, &get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
src/bin/proxmox_client_tools/key_source.rs (new file, 585 lines)
@@ -0,0 +1,585 @@
use std::convert::TryFrom;
use std::path::PathBuf;
use std::os::unix::io::{FromRawFd, RawFd};
use std::io::Read;

use anyhow::{bail, format_err, Error};
use serde_json::Value;

use proxmox::api::schema::*;
use proxmox::sys::linux::tty;
use proxmox::tools::fs::file_get_contents;

use proxmox_backup::backup::CryptMode;

pub const DEFAULT_ENCRYPTION_KEY_FILE_NAME: &str = "encryption-key.json";
pub const DEFAULT_MASTER_PUBKEY_FILE_NAME: &str = "master-public.pem";

pub const KEYFILE_SCHEMA: Schema =
    StringSchema::new("Path to encryption key. All data will be encrypted using this key.")
        .schema();

pub const KEYFD_SCHEMA: Schema =
    IntegerSchema::new("Pass an encryption key via an already opened file descriptor.")
        .minimum(0)
        .schema();

pub const MASTER_PUBKEY_FILE_SCHEMA: Schema = StringSchema::new(
    "Path to master public key. The encryption key used for a backup will be encrypted using this key and appended to the backup.")
    .schema();

pub const MASTER_PUBKEY_FD_SCHEMA: Schema =
    IntegerSchema::new("Pass a master public key via an already opened file descriptor.")
        .minimum(0)
        .schema();

#[derive(Clone, Debug, Eq, PartialEq)]
pub enum KeySource {
    DefaultKey,
    Fd,
    Path(String),
}

pub fn format_key_source(source: &KeySource, key_type: &str) -> String {
    match source {
        KeySource::DefaultKey => format!("Using default {} key..", key_type),
        KeySource::Fd => format!("Using {} key from file descriptor..", key_type),
        KeySource::Path(path) => format!("Using {} key from '{}'..", key_type, path),
    }
}
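For example (the path is illustrative):

    assert_eq!(
        format_key_source(&KeySource::Path("/tmp/key.json".to_string()), "encryption"),
        "Using encryption key from '/tmp/key.json'.."
    );
    assert_eq!(
        format_key_source(&KeySource::DefaultKey, "master"),
        "Using default master key.."
    );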
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct KeyWithSource {
    pub source: KeySource,
    pub key: Vec<u8>,
}

impl KeyWithSource {
    pub fn from_fd(key: Vec<u8>) -> Self {
        Self {
            source: KeySource::Fd,
            key,
        }
    }

    pub fn from_default(key: Vec<u8>) -> Self {
        Self {
            source: KeySource::DefaultKey,
            key,
        }
    }

    pub fn from_path(path: String, key: Vec<u8>) -> Self {
        Self {
            source: KeySource::Path(path),
            key,
        }
    }
}
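The constructors only tag raw key bytes with their origin; format_key_source() above turns that tag into user-facing output. For instance:

    let key = KeyWithSource::from_path("/tmp/key.json".to_string(), vec![1, 2, 3]);
    assert_eq!(key.source, KeySource::Path("/tmp/key.json".to_string()));
    assert_eq!(key.key, vec![1, 2, 3]);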
#[derive(Debug, Eq, PartialEq)]
pub struct CryptoParams {
    pub mode: CryptMode,
    pub enc_key: Option<KeyWithSource>,
    // FIXME switch to openssl::rsa::Rsa<openssl::pkey::Public> once that is Eq?
    pub master_pubkey: Option<KeyWithSource>,
}

pub fn crypto_parameters(param: &Value) -> Result<CryptoParams, Error> {
    do_crypto_parameters(param, false)
}

pub fn crypto_parameters_keep_fd(param: &Value) -> Result<CryptoParams, Error> {
    do_crypto_parameters(param, true)
}
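The _keep_fd variant exists for callers like proxmox-file-restore above, which may need to read the key from the same descriptor again later. A sketch of feeding a key through the keyfd parameter (path illustrative; assumes a surrounding function returning Result<_, Error>):

    use std::os::unix::io::IntoRawFd;

    let file = std::fs::File::open("/tmp/key.json")?;
    let fd = file.into_raw_fd();
    // do_crypto_parameters() reads the key from the fd; in the keep-fd case it
    // leaks the File handle and rewinds the descriptor for later re-reading.
    let params = crypto_parameters_keep_fd(&serde_json::json!({ "keyfd": fd }))?;
    assert!(params.enc_key.is_some());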
|
fn do_crypto_parameters(param: &Value, keep_keyfd_open: bool) -> Result<CryptoParams, Error> {
    let keyfile = match param.get("keyfile") {
        Some(Value::String(keyfile)) => Some(keyfile),
        Some(_) => bail!("bad --keyfile parameter type"),
        None => None,
    };

    let key_fd = match param.get("keyfd") {
        Some(Value::Number(key_fd)) => Some(
            RawFd::try_from(key_fd
                .as_i64()
                .ok_or_else(|| format_err!("bad key fd: {:?}", key_fd))?
            )
            .map_err(|err| format_err!("bad key fd: {:?}: {}", key_fd, err))?
        ),
        Some(_) => bail!("bad --keyfd parameter type"),
        None => None,
    };

    let master_pubkey_file = match param.get("master-pubkey-file") {
        Some(Value::String(keyfile)) => Some(keyfile),
        Some(_) => bail!("bad --master-pubkey-file parameter type"),
        None => None,
    };

    let master_pubkey_fd = match param.get("master-pubkey-fd") {
        Some(Value::Number(key_fd)) => Some(
            RawFd::try_from(key_fd
                .as_i64()
                .ok_or_else(|| format_err!("bad master public key fd: {:?}", key_fd))?
            )
            .map_err(|err| format_err!("bad master public key fd: {:?}: {}", key_fd, err))?
        ),
        Some(_) => bail!("bad --master-pubkey-fd parameter type"),
        None => None,
    };

    let mode: Option<CryptMode> = match param.get("crypt-mode") {
        Some(mode) => Some(serde_json::from_value(mode.clone())?),
        None => None,
    };

    let key = match (keyfile, key_fd) {
        (None, None) => None,
        (Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
        (Some(keyfile), None) => Some(KeyWithSource::from_path(
            keyfile.clone(),
            file_get_contents(keyfile)?,
        )),
        (None, Some(fd)) => {
            let mut input = unsafe { std::fs::File::from_raw_fd(fd) };
            let mut data = Vec::new();
            let _len: usize = input.read_to_end(&mut data).map_err(|err| {
                format_err!("error reading encryption key from fd {}: {}", fd, err)
            })?;
            if keep_keyfd_open {
                // don't close fd if requested, and try to reset the seek position
                std::mem::forget(input);
                unsafe { libc::lseek(fd, 0, libc::SEEK_SET); }
            }
            Some(KeyWithSource::from_fd(data))
        }
    };

    let master_pubkey = match (master_pubkey_file, master_pubkey_fd) {
        (None, None) => None,
        (Some(_), Some(_)) => bail!("--master-pubkey-file and --master-pubkey-fd are mutually exclusive"),
        (Some(keyfile), None) => Some(KeyWithSource::from_path(
            keyfile.clone(),
            file_get_contents(keyfile)?,
        )),
        (None, Some(fd)) => {
            let input = unsafe { std::fs::File::from_raw_fd(fd) };
            let mut data = Vec::new();
            let _len: usize = { input }
                .read_to_end(&mut data)
                .map_err(|err| format_err!("error reading master key from fd {}: {}", fd, err))?;
            Some(KeyWithSource::from_fd(data))
        }
    };

    let res = match mode {
        // no crypt mode, enable encryption if keys are available
        None => match (key, master_pubkey) {
            // only default keys if available
            (None, None) => match read_optional_default_encryption_key()? {
                None => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },
                enc_key => {
                    let master_pubkey = read_optional_default_master_pubkey()?;
                    CryptoParams {
                        mode: CryptMode::Encrypt,
                        enc_key,
                        master_pubkey,
                    }
                },
            },

            // explicit master key, default enc key needed
            (None, master_pubkey) => match read_optional_default_encryption_key()? {
                None => bail!("--master-pubkey-file/--master-pubkey-fd specified, but no encryption key available"),
                enc_key => {
                    CryptoParams {
                        mode: CryptMode::Encrypt,
                        enc_key,
                        master_pubkey,
                    }
                },
            },

            // explicit keyfile, maybe default master key
            (enc_key, None) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey: read_optional_default_master_pubkey()? },

            // explicit keyfile and master key
            (enc_key, master_pubkey) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey },
        },

        // explicitly disabled encryption
        Some(CryptMode::None) => match (key, master_pubkey) {
            // no keys => OK, no encryption
            (None, None) => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },

            // --keyfile and --crypt-mode=none
            (Some(_), _) => bail!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive"),

            // --master-pubkey-file and --crypt-mode=none
            (_, Some(_)) => bail!("--master-pubkey-file/--master-pubkey-fd and --crypt-mode=none are mutually exclusive"),
        },

        // explicitly enabled encryption
        Some(mode) => match (key, master_pubkey) {
            // no key, maybe master key
            (None, master_pubkey) => match read_optional_default_encryption_key()? {
                None => bail!("--crypt-mode without --keyfile and no default key file available"),
                enc_key => {
                    eprintln!("Encrypting with default encryption key!");
                    let master_pubkey = match master_pubkey {
                        None => read_optional_default_master_pubkey()?,
                        master_pubkey => master_pubkey,
                    };

                    CryptoParams {
                        mode,
                        enc_key,
                        master_pubkey,
                    }
                },
            },

            // --keyfile and --crypt-mode other than none
            (enc_key, master_pubkey) => {
                let master_pubkey = match master_pubkey {
                    None => read_optional_default_master_pubkey()?,
                    master_pubkey => master_pubkey,
                };

                CryptoParams { mode, enc_key, master_pubkey }
            },
        },
    };

    Ok(res)
}
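Spelled out, the nested match above implements this decision table (a reading of the code, kept here as comments for reference):

// crypt-mode          explicit key     default key   => result
// (unset)             none             none          => CryptMode::None, no keys
// (unset)             none             present       => Encrypt with default key (+ default master pubkey)
// (unset)             present          -             => Encrypt with given key (+ explicit or default master pubkey)
// none                any key/master   -             => error (mutually exclusive)
// encrypt/sign-only   none             none          => error
// encrypt/sign-only   none             present       => use default key, warn on stderr
// encrypt/sign-only   present          -             => use given key (+ explicit or default master pubkey)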
pub fn find_default_master_pubkey() -> Result<Option<PathBuf>, Error> {
    super::find_xdg_file(
        DEFAULT_MASTER_PUBKEY_FILE_NAME,
        "default master public key file",
    )
}

pub fn place_default_master_pubkey() -> Result<PathBuf, Error> {
    super::place_xdg_file(
        DEFAULT_MASTER_PUBKEY_FILE_NAME,
        "default master public key file",
    )
}

pub fn find_default_encryption_key() -> Result<Option<PathBuf>, Error> {
    super::find_xdg_file(
        DEFAULT_ENCRYPTION_KEY_FILE_NAME,
        "default encryption key file",
    )
}

pub fn place_default_encryption_key() -> Result<PathBuf, Error> {
    super::place_xdg_file(
        DEFAULT_ENCRYPTION_KEY_FILE_NAME,
        "default encryption key file",
    )
}
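The four wrappers only differ in file name; per the xdg crate's semantics, find_xdg_file returns Ok(None) when nothing exists, while place_xdg_file creates missing parent directories and returns a writable path. A hedged sketch (key contents are made up):

fn example() -> Result<(), anyhow::Error> {
    // Sketch only, not part of the patch: create a default master pubkey if absent.
    if find_default_master_pubkey()?.is_none() {
        let path = place_default_master_pubkey()?;
        std::fs::write(&path, b"...pem data...")?; // hypothetical contents
    }
    Ok(())
}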
#[cfg(not(test))]
pub(crate) fn read_optional_default_encryption_key() -> Result<Option<KeyWithSource>, Error> {
    find_default_encryption_key()?
        .map(|path| file_get_contents(path).map(KeyWithSource::from_default))
        .transpose()
}

#[cfg(not(test))]
pub(crate) fn read_optional_default_master_pubkey() -> Result<Option<KeyWithSource>, Error> {
    find_default_master_pubkey()?
        .map(|path| file_get_contents(path).map(KeyWithSource::from_default))
        .transpose()
}

#[cfg(test)]
static mut TEST_DEFAULT_ENCRYPTION_KEY: Result<Option<Vec<u8>>, Error> = Ok(None);

#[cfg(test)]
pub(crate) fn read_optional_default_encryption_key() -> Result<Option<KeyWithSource>, Error> {
    // not safe when multiple concurrent test cases end up here!
    unsafe {
        match &TEST_DEFAULT_ENCRYPTION_KEY {
            Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))),
            Ok(None) => Ok(None),
            Err(_) => bail!("test error"),
        }
    }
}

#[cfg(test)]
// not safe when multiple concurrent test cases end up here!
pub(crate) unsafe fn set_test_encryption_key(value: Result<Option<Vec<u8>>, Error>) {
    TEST_DEFAULT_ENCRYPTION_KEY = value;
}

#[cfg(test)]
static mut TEST_DEFAULT_MASTER_PUBKEY: Result<Option<Vec<u8>>, Error> = Ok(None);

#[cfg(test)]
pub(crate) fn read_optional_default_master_pubkey() -> Result<Option<KeyWithSource>, Error> {
    // not safe when multiple concurrent test cases end up here!
    unsafe {
        match &TEST_DEFAULT_MASTER_PUBKEY {
            Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))),
            Ok(None) => Ok(None),
            Err(_) => bail!("test error"),
        }
    }
}

#[cfg(test)]
// not safe when multiple concurrent test cases end up here!
pub(crate) unsafe fn set_test_default_master_pubkey(value: Result<Option<Vec<u8>>, Error>) {
    TEST_DEFAULT_MASTER_PUBKEY = value;
}
pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
    // fixme: implement other input methods

    use std::env::VarError::*;
    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
        Ok(p) => return Ok(p.as_bytes().to_vec()),
        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
        Err(NotPresent) => {
            // Try another method
        }
    }

    // If we're on a TTY, query the user for a password
    if tty::stdin_isatty() {
        return Ok(tty::read_password("Encryption Key Password: ")?);
    }

    bail!("no password input mechanism available");
}
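For non-interactive callers the environment variable is the only input method implemented so far; a hedged sketch (the secret value is made up):

fn example() -> Result<(), anyhow::Error> {
    // Sketch only, not part of the patch: scripts export the variable beforehand.
    std::env::set_var("PBS_ENCRYPTION_PASSWORD", "hunter2"); // hypothetical secret
    let pw = get_encryption_key_password()?;
    assert_eq!(pw, b"hunter2".to_vec());
    Ok(())
}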
#[test]
// WARNING: there must only be one test for crypto_parameters as the default key handling is not
// safe w.r.t. concurrency
fn test_crypto_parameters_handling() -> Result<(), Error> {
    use serde_json::json;
    use proxmox::tools::fs::{replace_file, CreateOptions};

    let some_key = vec![1; 1];
    let default_key = vec![2; 1];

    let some_master_key = vec![3; 1];
    let default_master_key = vec![4; 1];

    let keypath = "./target/testout/keyfile.test";
    let master_keypath = "./target/testout/masterkeyfile.test";
    let invalid_keypath = "./target/testout/invalid_keyfile.test";

    let no_key_res = CryptoParams {
        enc_key: None,
        master_pubkey: None,
        mode: CryptMode::None,
    };
    let some_key_res = CryptoParams {
        enc_key: Some(KeyWithSource::from_path(
            keypath.to_string(),
            some_key.clone(),
        )),
        master_pubkey: None,
        mode: CryptMode::Encrypt,
    };
    let some_key_some_master_res = CryptoParams {
        enc_key: Some(KeyWithSource::from_path(
            keypath.to_string(),
            some_key.clone(),
        )),
        master_pubkey: Some(KeyWithSource::from_path(
            master_keypath.to_string(),
            some_master_key.clone(),
        )),
        mode: CryptMode::Encrypt,
    };
    let some_key_default_master_res = CryptoParams {
        enc_key: Some(KeyWithSource::from_path(
            keypath.to_string(),
            some_key.clone(),
        )),
        master_pubkey: Some(KeyWithSource::from_default(default_master_key.clone())),
        mode: CryptMode::Encrypt,
    };

    let some_key_sign_res = CryptoParams {
        enc_key: Some(KeyWithSource::from_path(
            keypath.to_string(),
            some_key.clone(),
        )),
        master_pubkey: None,
        mode: CryptMode::SignOnly,
    };
    let default_key_res = CryptoParams {
        enc_key: Some(KeyWithSource::from_default(default_key.clone())),
        master_pubkey: None,
        mode: CryptMode::Encrypt,
    };
    let default_key_sign_res = CryptoParams {
        enc_key: Some(KeyWithSource::from_default(default_key.clone())),
        master_pubkey: None,
        mode: CryptMode::SignOnly,
    };

    replace_file(&keypath, &some_key, CreateOptions::default())?;
    replace_file(&master_keypath, &some_master_key, CreateOptions::default())?;

    // no params, no default key == no key
    let res = crypto_parameters(&json!({}));
    assert_eq!(res.unwrap(), no_key_res);

    // keyfile param == key from keyfile
    let res = crypto_parameters(&json!({"keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_res);

    // crypt mode none == no key
    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
    assert_eq!(res.unwrap(), no_key_res);

    // crypt mode encrypt/sign-only, no keyfile, no default key == Error
    assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());

    // crypt mode none with explicit key == Error
    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());

    // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
    let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_sign_res);
    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_res);

    // invalid keyfile parameter always errors
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());

    // now set a default key
    unsafe { set_test_encryption_key(Ok(Some(default_key.clone()))); }

    // and repeat

    // no params but default key == default key
    let res = crypto_parameters(&json!({}));
    assert_eq!(res.unwrap(), default_key_res);

    // keyfile param == key from keyfile
    let res = crypto_parameters(&json!({"keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_res);

    // crypt mode none == no key
    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
    assert_eq!(res.unwrap(), no_key_res);

    // crypt mode encrypt/sign-only, no keyfile, default key == default key with correct mode
    let res = crypto_parameters(&json!({"crypt-mode": "sign-only"}));
    assert_eq!(res.unwrap(), default_key_sign_res);
    let res = crypto_parameters(&json!({"crypt-mode": "encrypt"}));
    assert_eq!(res.unwrap(), default_key_res);

    // crypt mode none with explicit key == Error
    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());

    // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
    let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_sign_res);
    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_res);

    // invalid keyfile parameter always errors
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());

    // now make default key retrieval error
    unsafe { set_test_encryption_key(Err(format_err!("test error"))); }

    // and repeat

    // no params, default key retrieval errors == Error
    assert!(crypto_parameters(&json!({})).is_err());

    // keyfile param == key from keyfile
    let res = crypto_parameters(&json!({"keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_res);

    // crypt mode none == no key
    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
    assert_eq!(res.unwrap(), no_key_res);

    // crypt mode encrypt/sign-only, no keyfile, default key error == Error
    assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());

    // crypt mode none with explicit key == Error
    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());

    // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
    let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_sign_res);
    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_res);

    // invalid keyfile parameter always errors
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());

    // now remove default key again
    unsafe { set_test_encryption_key(Ok(None)); }
    // set a default master key
    unsafe { set_test_default_master_pubkey(Ok(Some(default_master_key.clone()))); }

    // and use an explicit master key
    assert!(crypto_parameters(&json!({"master-pubkey-file": master_keypath})).is_err());
    // just a default == no key
    let res = crypto_parameters(&json!({}));
    assert_eq!(res.unwrap(), no_key_res);

    // keyfile param == key from keyfile
    let res = crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": master_keypath}));
    assert_eq!(res.unwrap(), some_key_some_master_res);
    // same with fallback to default master key
    let res = crypto_parameters(&json!({"keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_default_master_res);

    // crypt mode none == error
    assert!(crypto_parameters(&json!({"crypt-mode": "none", "master-pubkey-file": master_keypath})).is_err());
    // with just default master key == no key
    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
    assert_eq!(res.unwrap(), no_key_res);

    // crypt mode encrypt without enc key == error
    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt", "master-pubkey-file": master_keypath})).is_err());
    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());

    // crypt mode none with explicit key == Error
    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath, "master-pubkey-file": master_keypath})).is_err());
    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());

    // crypt mode encrypt with keyfile == key from keyfile with correct mode
    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath, "master-pubkey-file": master_keypath}));
    assert_eq!(res.unwrap(), some_key_some_master_res);
    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_default_master_res);

    // invalid master keyfile parameter always errors when a key is passed, even with a valid
    // default master key
    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath, "crypt-mode": "none"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath, "crypt-mode": "encrypt"})).is_err());

    Ok(())
}
@@ -1,8 +1,7 @@
 //! Shared tools useful for common CLI clients.
 
 use std::collections::HashMap;
 
-use anyhow::{bail, format_err, Error};
+use anyhow::{bail, format_err, Context, Error};
 use serde_json::{json, Value};
 use xdg::BaseDirectories;
 
@@ -14,9 +13,12 @@ use proxmox::{
 use proxmox_backup::api2::access::user::UserWithTokens;
 use proxmox_backup::api2::types::*;
 use proxmox_backup::backup::BackupDir;
+use proxmox_backup::buildcfg;
 use proxmox_backup::client::*;
 use proxmox_backup::tools;
 
+pub mod key_source;
+
 const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
 const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
 
@@ -25,24 +27,6 @@ pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
     .max_length(256)
     .schema();
 
-pub const KEYFILE_SCHEMA: Schema =
-    StringSchema::new("Path to encryption key. All data will be encrypted using this key.")
-        .schema();
-
-pub const KEYFD_SCHEMA: Schema =
-    IntegerSchema::new("Pass an encryption key via an already opened file descriptor.")
-        .minimum(0)
-        .schema();
-
-pub const MASTER_PUBKEY_FILE_SCHEMA: Schema = StringSchema::new(
-    "Path to master public key. The encryption key used for a backup will be encrypted using this key and appended to the backup.")
-    .schema();
-
-pub const MASTER_PUBKEY_FD_SCHEMA: Schema =
-    IntegerSchema::new("Pass a master public key via an already opened file descriptor.")
-        .minimum(0)
-        .schema();
-
 pub const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
     .minimum(64)
     .maximum(4096)
@@ -364,3 +348,40 @@ pub fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec
 
     result
 }
+
+pub fn base_directories() -> Result<xdg::BaseDirectories, Error> {
+    xdg::BaseDirectories::with_prefix("proxmox-backup").map_err(Error::from)
+}
+
+/// Convenience helper for better error messages:
+pub fn find_xdg_file(
+    file_name: impl AsRef<std::path::Path>,
+    description: &'static str,
+) -> Result<Option<std::path::PathBuf>, Error> {
+    let file_name = file_name.as_ref();
+    base_directories()
+        .map(|base| base.find_config_file(file_name))
+        .with_context(|| format!("error searching for {}", description))
+}
+
+pub fn place_xdg_file(
+    file_name: impl AsRef<std::path::Path>,
+    description: &'static str,
+) -> Result<std::path::PathBuf, Error> {
+    let file_name = file_name.as_ref();
+    base_directories()
+        .and_then(|base| base.place_config_file(file_name).map_err(Error::from))
+        .with_context(|| format!("failed to place {} in xdg home", description))
+}
+
+/// Returns a runtime dir owned by the current user.
+/// Note that XDG_RUNTIME_DIR is not always available, especially for non-login users like
+/// "www-data", so we use a custom one in /run/proxmox-backup/<uid> instead.
+pub fn get_user_run_dir() -> Result<std::path::PathBuf, Error> {
+    let uid = nix::unistd::Uid::current();
+    let mut path: std::path::PathBuf = buildcfg::PROXMOX_BACKUP_RUN_DIR.into();
+    path.push(uid.to_string());
+    tools::create_run_dir()?;
+    std::fs::create_dir_all(&path)?;
+    Ok(path)
+}
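A hedged usage sketch of the new helpers (the file name is made up; the prefix is the "proxmox-backup" XDG prefix set in base_directories above):

fn example() -> Result<(), anyhow::Error> {
    // Sketch only, not part of the patch: look up a config file under the XDG config home.
    if let Some(path) = find_xdg_file("encryption-key.json", "example key file")? {
        println!("found: {}", path.display());
    }
    Ok(())
}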
src/bin/proxmox_file_restore/block_driver.rs (new file, +207)
@@ -0,0 +1,207 @@
//! Abstraction layer over different methods of accessing a block backup
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};

use std::collections::HashMap;
use std::future::Future;
use std::hash::BuildHasher;
use std::pin::Pin;

use proxmox_backup::backup::{BackupDir, BackupManifest};
use proxmox_backup::api2::types::ArchiveEntry;
use proxmox_backup::client::BackupRepository;

use proxmox::api::{api, cli::*};

use super::block_driver_qemu::QemuBlockDriver;

/// Contains details about a snapshot that is to be accessed by block file restore
pub struct SnapRestoreDetails {
    pub repo: BackupRepository,
    pub snapshot: BackupDir,
    pub manifest: BackupManifest,
    pub keyfile: Option<String>,
}

/// Return value of a BlockRestoreDriver.status() call, 'id' must be valid for .stop(id)
pub struct DriverStatus {
    pub id: String,
    pub data: Value,
}

pub type Async<R> = Pin<Box<dyn Future<Output = R> + Send>>;

/// An abstract implementation for retrieving data out of a block file backup
pub trait BlockRestoreDriver {
    /// List ArchiveEntrys for the given image file and path
    fn data_list(
        &self,
        details: SnapRestoreDetails,
        img_file: String,
        path: Vec<u8>,
    ) -> Async<Result<Vec<ArchiveEntry>, Error>>;

    /// pxar=true:
    /// Attempt to create a pxar archive of the given file path and return a reader instance for it
    /// pxar=false:
    /// Attempt to read the file or folder at the given path and return the file content or a zip
    /// file as a stream
    fn data_extract(
        &self,
        details: SnapRestoreDetails,
        img_file: String,
        path: Vec<u8>,
        pxar: bool,
    ) -> Async<Result<Box<dyn tokio::io::AsyncRead + Unpin + Send>, Error>>;

    /// Return status of all running/mapped images, result value is (id, extra data), where id must
    /// match with the ones returned from list()
    fn status(&self) -> Async<Result<Vec<DriverStatus>, Error>>;
    /// Stop/Close a running restore method
    fn stop(&self, id: String) -> Async<Result<(), Error>>;
    /// Returned ids must be prefixed with driver type so that they cannot collide between drivers,
    /// the returned values must be passable to stop()
    fn list(&self) -> Vec<String>;
}
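The Async<R> alias above is a boxed, Send future; implementors (like the QEMU driver later in this patch) produce it by writing an async block and calling .boxed() from futures::FutureExt. A minimal hedged sketch, not part of the file, with DummyDriver as a hypothetical stand-in:

use futures::FutureExt;

struct DummyDriver; // hypothetical

impl DummyDriver {
    // Same shape as BlockRestoreDriver::status() above: an async block, boxed.
    fn status(&self) -> Async<Result<Vec<DriverStatus>, Error>> {
        async move { Ok(Vec::new()) }.boxed()
    }
}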
#[api()]
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, Copy)]
pub enum BlockDriverType {
    /// Uses a small QEMU/KVM virtual machine to map images securely. Requires PVE-patched QEMU.
    Qemu,
}

impl BlockDriverType {
    fn resolve(&self) -> impl BlockRestoreDriver {
        match self {
            BlockDriverType::Qemu => QemuBlockDriver {},
        }
    }
}

const DEFAULT_DRIVER: BlockDriverType = BlockDriverType::Qemu;
const ALL_DRIVERS: &[BlockDriverType] = &[BlockDriverType::Qemu];

pub async fn data_list(
    driver: Option<BlockDriverType>,
    details: SnapRestoreDetails,
    img_file: String,
    path: Vec<u8>,
) -> Result<Vec<ArchiveEntry>, Error> {
    let driver = driver.unwrap_or(DEFAULT_DRIVER).resolve();
    driver.data_list(details, img_file, path).await
}

pub async fn data_extract(
    driver: Option<BlockDriverType>,
    details: SnapRestoreDetails,
    img_file: String,
    path: Vec<u8>,
    pxar: bool,
) -> Result<Box<dyn tokio::io::AsyncRead + Send + Unpin>, Error> {
    let driver = driver.unwrap_or(DEFAULT_DRIVER).resolve();
    driver.data_extract(details, img_file, path, pxar).await
}

#[api(
    input: {
        properties: {
            "driver": {
                type: BlockDriverType,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        },
    },
)]
/// Retrieve status information about currently running/mapped restore images
pub async fn status(driver: Option<BlockDriverType>, param: Value) -> Result<(), Error> {
    let output_format = get_output_format(&param);
    let text = output_format == "text";

    let mut ret = json!({});

    for dt in ALL_DRIVERS {
        if driver.is_some() && &driver.unwrap() != dt {
            continue;
        }

        let drv_name = format!("{:?}", dt);
        let drv = dt.resolve();
        match drv.status().await {
            Ok(data) if data.is_empty() => {
                if text {
                    println!("{}: no mappings", drv_name);
                } else {
                    ret[drv_name] = json!({});
                }
            }
            Ok(data) => {
                if text {
                    println!("{}:", &drv_name);
                }

                ret[&drv_name]["ids"] = json!({});
                for status in data {
                    if text {
                        println!("{} \t({})", status.id, status.data);
                    } else {
                        ret[&drv_name]["ids"][status.id] = status.data;
                    }
                }
            }
            Err(err) => {
                if text {
                    eprintln!("error getting status from driver '{}' - {}", drv_name, err);
                } else {
                    ret[drv_name] = json!({ "error": format!("{}", err) });
                }
            }
        }
    }

    if !text {
        format_and_print_result(&ret, &output_format);
    }

    Ok(())
}

#[api(
    input: {
        properties: {
            "name": {
                type: String,
                description: "The name of the VM to stop.",
            },
        },
    },
)]
/// Immediately stop/unmap a given image. Not typically necessary, as VMs will stop themselves
/// after a timer anyway.
pub async fn stop(name: String) -> Result<(), Error> {
    for drv in ALL_DRIVERS.iter().map(BlockDriverType::resolve) {
        if drv.list().contains(&name) {
            return drv.stop(name).await;
        }
    }

    bail!("no mapping with name '{}' found", name);
}

/// Autocompletion handler for block mappings
pub fn complete_block_driver_ids<S: BuildHasher>(
    _arg: &str,
    _param: &HashMap<String, String, S>,
) -> Vec<String> {
    ALL_DRIVERS
        .iter()
        .map(BlockDriverType::resolve)
        .map(|d| d.list())
        .flatten()
        .collect()
}
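For orientation, a hedged sketch of consuming the stream returned by data_extract (the archive name and output path are hypothetical; tokio::io::copy drives the returned AsyncRead):

async fn example(details: SnapRestoreDetails) -> Result<(), anyhow::Error> {
    // Extract a single file from a disk image archive as a raw stream.
    let mut reader = data_extract(
        None, // use DEFAULT_DRIVER (QEMU)
        details,
        "drive-scsi0.img.fidx".to_string(), // hypothetical archive name
        b"/etc/hostname".to_vec(),
        false, // raw file/zip stream, not pxar
    )
    .await?;
    let mut out = tokio::fs::File::create("/tmp/hostname").await?; // hypothetical path
    tokio::io::copy(&mut reader, &mut out).await?;
    Ok(())
}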
src/bin/proxmox_file_restore/block_driver_qemu.rs (new file, +330)
@@ -0,0 +1,330 @@
//! Block file access via a small QEMU restore VM using the PBS block driver in QEMU
use anyhow::{bail, Error};
use futures::FutureExt;
use serde::{Deserialize, Serialize};
use serde_json::json;

use std::collections::HashMap;
use std::fs::{File, OpenOptions};
use std::io::{prelude::*, SeekFrom};

use proxmox::tools::fs::lock_file;
use proxmox_backup::api2::types::ArchiveEntry;
use proxmox_backup::backup::BackupDir;
use proxmox_backup::client::*;
use proxmox_backup::tools;

use super::block_driver::*;
use crate::proxmox_client_tools::get_user_run_dir;

const RESTORE_VM_MAP: &str = "restore-vm-map.json";

pub struct QemuBlockDriver {}

#[derive(Clone, Hash, Serialize, Deserialize)]
struct VMState {
    pid: i32,
    cid: i32,
    ticket: String,
}

struct VMStateMap {
    map: HashMap<String, VMState>,
    file: File,
}

impl VMStateMap {
    fn open_file_raw(write: bool) -> Result<File, Error> {
        use std::os::unix::fs::OpenOptionsExt;
        let mut path = get_user_run_dir()?;
        path.push(RESTORE_VM_MAP);
        OpenOptions::new()
            .read(true)
            .write(write)
            .create(write)
            .mode(0o600)
            .open(path)
            .map_err(Error::from)
    }

    /// Acquire a lock on the state map and retrieve a deserialized version
    fn load() -> Result<Self, Error> {
        let mut file = Self::open_file_raw(true)?;
        lock_file(&mut file, true, Some(std::time::Duration::from_secs(5)))?;
        let map = serde_json::from_reader(&file).unwrap_or_default();
        Ok(Self { map, file })
    }

    /// Load a read-only copy of the current VM map. Only use for informational purposes, like
    /// shell auto-completion, for anything requiring consistency use load() !
    fn load_read_only() -> Result<HashMap<String, VMState>, Error> {
        let file = Self::open_file_raw(false)?;
        Ok(serde_json::from_reader(&file).unwrap_or_default())
    }

    /// Write back a potentially modified state map, consuming the held lock
    fn write(mut self) -> Result<(), Error> {
        self.file.seek(SeekFrom::Start(0))?;
        self.file.set_len(0)?;
        serde_json::to_writer(self.file, &self.map)?;

        // drop ourselves including file lock
        Ok(())
    }

    /// Return the map, but drop the lock immediately
    fn read_only(self) -> HashMap<String, VMState> {
        self.map
    }
}
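The intended calling pattern for the state map, mirroring ensure_running() below: load() takes the file lock, mutations happen on .map, and write() persists and releases. A hedged sketch (the retain filter is made up):

fn example() -> Result<(), anyhow::Error> {
    let mut state = VMStateMap::load()?;     // takes the file lock
    state.map.retain(|_, s| s.pid > 0);      // mutate under the lock (hypothetical filter)
    state.write()?;                          // persist and release the lock
    Ok(())
}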
fn make_name(repo: &BackupRepository, snap: &BackupDir) -> String {
    let full = format!("qemu_{}/{}", repo, snap);
    tools::systemd::escape_unit(&full, false)
}

/// remove non-responsive VMs from given map, returns 'true' if map was modified
async fn cleanup_map(map: &mut HashMap<String, VMState>) -> bool {
    let mut to_remove = Vec::new();
    for (name, state) in map.iter() {
        let client = VsockClient::new(state.cid, DEFAULT_VSOCK_PORT, Some(state.ticket.clone()));
        let res = client
            .get("api2/json/status", Some(json!({"keep-timeout": true})))
            .await;
        if res.is_err() {
            // VM is not reachable, remove from map and inform user
            to_remove.push(name.clone());
            eprintln!(
                "VM '{}' (pid: {}, cid: {}) was not reachable, removing from map",
                name, state.pid, state.cid
            );
        }
    }

    for tr in &to_remove {
        map.remove(tr);
    }

    !to_remove.is_empty()
}

fn new_ticket() -> String {
    proxmox::tools::Uuid::generate().to_string()
}

async fn ensure_running(details: &SnapRestoreDetails) -> Result<VsockClient, Error> {
    let name = make_name(&details.repo, &details.snapshot);
    let mut state = VMStateMap::load()?;

    cleanup_map(&mut state.map).await;

    let new_cid;
    let vms = match state.map.get(&name) {
        Some(vm) => {
            let client = VsockClient::new(vm.cid, DEFAULT_VSOCK_PORT, Some(vm.ticket.clone()));
            let res = client.get("api2/json/status", None).await;
            match res {
                Ok(_) => {
                    // VM is running and we just reset its timeout, nothing to do
                    return Ok(client);
                }
                Err(err) => {
                    eprintln!("stale VM detected, restarting ({})", err);
                    // VM is dead, restart
                    let vms = start_vm(vm.cid, details).await?;
                    new_cid = vms.cid;
                    state.map.insert(name, vms.clone());
                    vms
                }
            }
        }
        None => {
            let mut cid = state
                .map
                .iter()
                .map(|v| v.1.cid)
                .max()
                .unwrap_or(0)
                .wrapping_add(1);

            // offset cid by user id, to avoid unnecessary retries
            let running_uid = nix::unistd::Uid::current();
            cid = cid.wrapping_add(running_uid.as_raw() as i32);

            // some low CIDs have special meaning, start at 10 to avoid them
            cid = cid.max(10);

            let vms = start_vm(cid, details).await?;
            new_cid = vms.cid;
            state.map.insert(name, vms.clone());
            vms
        }
    };

    state.write()?;
    Ok(VsockClient::new(
        new_cid,
        DEFAULT_VSOCK_PORT,
        Some(vms.ticket.clone()),
    ))
}

async fn start_vm(cid_request: i32, details: &SnapRestoreDetails) -> Result<VMState, Error> {
    let ticket = new_ticket();
    let files = details
        .manifest
        .files()
        .iter()
        .map(|file| file.filename.clone())
        .filter(|name| name.ends_with(".img.fidx"));
    let (pid, cid) =
        super::qemu_helper::start_vm((cid_request.abs() & 0xFFFF) as u16, details, files, &ticket)
            .await?;
    Ok(VMState { pid, cid, ticket })
}

impl BlockRestoreDriver for QemuBlockDriver {
    fn data_list(
        &self,
        details: SnapRestoreDetails,
        img_file: String,
        mut path: Vec<u8>,
    ) -> Async<Result<Vec<ArchiveEntry>, Error>> {
        async move {
            let client = ensure_running(&details).await?;
            if !path.is_empty() && path[0] != b'/' {
                path.insert(0, b'/');
            }
            let path = base64::encode(img_file.bytes().chain(path).collect::<Vec<u8>>());
            let mut result = client
                .get("api2/json/list", Some(json!({ "path": path })))
                .await?;
            serde_json::from_value(result["data"].take()).map_err(|err| err.into())
        }
        .boxed()
    }

    fn data_extract(
        &self,
        details: SnapRestoreDetails,
        img_file: String,
        mut path: Vec<u8>,
        pxar: bool,
    ) -> Async<Result<Box<dyn tokio::io::AsyncRead + Unpin + Send>, Error>> {
        async move {
            let client = ensure_running(&details).await?;
            if !path.is_empty() && path[0] != b'/' {
                path.insert(0, b'/');
            }
            let path = base64::encode(img_file.bytes().chain(path).collect::<Vec<u8>>());
            let (mut tx, rx) = tokio::io::duplex(1024 * 4096);
            tokio::spawn(async move {
                if let Err(err) = client
                    .download(
                        "api2/json/extract",
                        Some(json!({ "path": path, "pxar": pxar })),
                        &mut tx,
                    )
                    .await
                {
                    eprintln!("reading file extraction stream failed - {}", err);
                }
            });

            Ok(Box::new(rx) as Box<dyn tokio::io::AsyncRead + Unpin + Send>)
        }
        .boxed()
    }

    fn status(&self) -> Async<Result<Vec<DriverStatus>, Error>> {
        async move {
            let mut state_map = VMStateMap::load()?;
            let modified = cleanup_map(&mut state_map.map).await;
            let map = if modified {
                let m = state_map.map.clone();
                state_map.write()?;
                m
            } else {
                state_map.read_only()
            };
            let mut result = Vec::new();

            for (n, s) in map.iter() {
                let client = VsockClient::new(s.cid, DEFAULT_VSOCK_PORT, Some(s.ticket.clone()));
                let resp = client
                    .get("api2/json/status", Some(json!({"keep-timeout": true})))
                    .await;
                let name = tools::systemd::unescape_unit(n)
                    .unwrap_or_else(|_| "<invalid name>".to_owned());
                let mut extra = json!({"pid": s.pid, "cid": s.cid});

                match resp {
                    Ok(status) => match status["data"].as_object() {
                        Some(map) => {
                            for (k, v) in map.iter() {
                                extra[k] = v.clone();
                            }
                        }
                        None => {
                            let err = format!(
                                "invalid JSON received from /status call: {}",
                                status.to_string()
                            );
                            extra["error"] = json!(err);
                        }
                    },
                    Err(err) => {
                        let err = format!("error during /status API call: {}", err);
                        extra["error"] = json!(err);
                    }
                }

                result.push(DriverStatus {
                    id: name,
                    data: extra,
                });
            }

            Ok(result)
        }
        .boxed()
    }

    fn stop(&self, id: String) -> Async<Result<(), Error>> {
        async move {
            let name = tools::systemd::escape_unit(&id, false);
            let mut map = VMStateMap::load()?;
            let map_mod = cleanup_map(&mut map.map).await;
            match map.map.get(&name) {
                Some(state) => {
                    let client =
                        VsockClient::new(state.cid, DEFAULT_VSOCK_PORT, Some(state.ticket.clone()));
                    // ignore errors, this either fails because:
                    // * the VM is unreachable/dead, in which case we don't want it in the map
                    // * the call was successful and the connection reset when the VM stopped
                    let _ = client.get("api2/json/stop", None).await;
                    map.map.remove(&name);
                    map.write()?;
                }
                None => {
                    if map_mod {
                        map.write()?;
                    }
                    bail!("VM with name '{}' not found", name);
                }
            }
            Ok(())
        }
        .boxed()
    }

    fn list(&self) -> Vec<String> {
        match VMStateMap::load_read_only() {
            Ok(state) => state
                .iter()
                .filter_map(|(name, _)| tools::systemd::unescape_unit(&name).ok())
                .collect(),
            Err(_) => Vec::new(),
        }
    }
}
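Map keys are systemd-unit-escaped strings, which is why list() unescapes and stop() re-escapes; a hedged round-trip sketch using the same helpers (the name is made up):

fn example() {
    let full = "qemu_backup:store/vm/123/2021-04-01T00:00:00Z"; // hypothetical repo/snapshot
    let escaped = tools::systemd::escape_unit(full, false);
    // unescape_unit should invert escape_unit for names built this way
    assert_eq!(tools::systemd::unescape_unit(&escaped).unwrap(), full);
}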
src/bin/proxmox_file_restore/mod.rs (new file, +6)
@@ -0,0 +1,6 @@
//! Block device drivers and tools for single file restore
pub mod block_driver;
pub use block_driver::*;

mod qemu_helper;
mod block_driver_qemu;
src/bin/proxmox_file_restore/qemu_helper.rs (new file, +276)
@@ -0,0 +1,276 @@
//! Helper to start a QEMU VM for single file restore.
use std::fs::{File, OpenOptions};
use std::io::prelude::*;
use std::os::unix::io::{AsRawFd, FromRawFd};
use std::path::PathBuf;
use std::time::Duration;

use anyhow::{bail, format_err, Error};
use tokio::time;

use nix::sys::signal::{kill, Signal};
use nix::unistd::Pid;

use proxmox::tools::{
    fd::Fd,
    fs::{create_path, file_read_string, make_tmp_file, CreateOptions},
};

use proxmox_backup::backup::backup_user;
use proxmox_backup::client::{VsockClient, DEFAULT_VSOCK_PORT};
use proxmox_backup::{buildcfg, tools};

use super::SnapRestoreDetails;

const PBS_VM_NAME: &str = "pbs-restore-vm";
const MAX_CID_TRIES: u64 = 32;

fn create_restore_log_dir() -> Result<String, Error> {
    let logpath = format!("{}/file-restore", buildcfg::PROXMOX_BACKUP_LOG_DIR);

    proxmox::try_block!({
        let backup_user = backup_user()?;
        let opts = CreateOptions::new()
            .owner(backup_user.uid)
            .group(backup_user.gid);

        let opts_root = CreateOptions::new()
            .owner(nix::unistd::ROOT)
            .group(nix::unistd::Gid::from_raw(0));

        create_path(buildcfg::PROXMOX_BACKUP_LOG_DIR, None, Some(opts))?;
        create_path(&logpath, None, Some(opts_root))?;
        Ok(())
    })
    .map_err(|err: Error| format_err!("unable to create file-restore log dir - {}", err))?;

    Ok(logpath)
}

fn validate_img_existance() -> Result<(), Error> {
    let kernel = PathBuf::from(buildcfg::PROXMOX_BACKUP_KERNEL_FN);
    let initramfs = PathBuf::from(buildcfg::PROXMOX_BACKUP_INITRAMFS_FN);
    if !kernel.exists() || !initramfs.exists() {
        bail!("cannot run file-restore VM: package 'proxmox-file-restore' is not (correctly) installed");
    }
    Ok(())
}

fn try_kill_vm(pid: i32) -> Result<(), Error> {
    let pid = Pid::from_raw(pid);
    if let Ok(()) = kill(pid, None) {
        // process is running (and we could kill it), check if it is actually ours
        // (if it errors assume we raced with the process's death and ignore it)
        if let Ok(cmdline) = file_read_string(format!("/proc/{}/cmdline", pid)) {
            if cmdline.split('\0').any(|a| a == PBS_VM_NAME) {
                // yes, it's ours, kill it brutally with SIGKILL, no reason to take
                // any chances - in this state it's most likely broken anyway
                if let Err(err) = kill(pid, Signal::SIGKILL) {
                    bail!(
                        "reaping broken VM (pid {}) with SIGKILL failed: {}",
                        pid,
                        err
                    );
                }
            }
        }
    }

    Ok(())
}

async fn create_temp_initramfs(ticket: &str) -> Result<(Fd, String), Error> {
    use std::ffi::CString;
    use tokio::fs::File;

    let (tmp_fd, tmp_path) =
        make_tmp_file("/tmp/file-restore-qemu.initramfs.tmp", CreateOptions::new())?;
    nix::unistd::unlink(&tmp_path)?;
    tools::fd_change_cloexec(tmp_fd.0, false)?;

    let mut f = File::from_std(unsafe { std::fs::File::from_raw_fd(tmp_fd.0) });
    let mut base = File::open(buildcfg::PROXMOX_BACKUP_INITRAMFS_FN).await?;

    tokio::io::copy(&mut base, &mut f).await?;

    let name = CString::new("ticket").unwrap();
    tools::cpio::append_file(
        &mut f,
        ticket.as_bytes(),
        &name,
        0,
        (libc::S_IFREG | 0o400) as u16,
        0,
        0,
        0,
        ticket.len() as u32,
    )
    .await?;
    tools::cpio::append_trailer(&mut f).await?;

    // forget the tokio file, we close the file descriptor via the returned Fd
    std::mem::forget(f);

    let path = format!("/dev/fd/{}", &tmp_fd.0);
    Ok((tmp_fd, path))
}
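The trick above is that the temp file is unlinked immediately, so the initramfs only exists as an open file descriptor, addressable through /dev/fd/<n> for as long as the returned Fd stays alive. A hedged standalone sketch of the same idiom, not part of this file (paths hypothetical):

fn example() -> Result<(), anyhow::Error> {
    use std::io::{Seek, SeekFrom, Write};
    use std::os::unix::io::{AsRawFd, FromRawFd};

    let (fd, tmp_path) = proxmox::tools::fs::make_tmp_file(
        "/tmp/example.tmp", // hypothetical template path
        proxmox::tools::fs::CreateOptions::new(),
    )?;
    nix::unistd::unlink(&tmp_path)?; // anonymous from now on
    let mut f = unsafe { std::fs::File::from_raw_fd(fd.0) };
    std::mem::forget(fd); // fd ownership moved into `f`
    f.write_all(b"payload")?;
    f.seek(SeekFrom::Start(0))?;
    let path = format!("/dev/fd/{}", f.as_raw_fd());
    // `path` stays usable for as long as `f` is open
    let _ = path;
    Ok(())
}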
pub async fn start_vm(
    // u16 so we can do wrapping_add without going too high
    mut cid: u16,
    details: &SnapRestoreDetails,
    files: impl Iterator<Item = String>,
    ticket: &str,
) -> Result<(i32, i32), Error> {
    validate_img_existance()?;

    if let Err(_) = std::env::var("PBS_PASSWORD") {
        bail!("environment variable PBS_PASSWORD has to be set for QEMU VM restore");
    }

    let pid;
    let (pid_fd, pid_path) = make_tmp_file("/tmp/file-restore-qemu.pid.tmp", CreateOptions::new())?;
    nix::unistd::unlink(&pid_path)?;
    tools::fd_change_cloexec(pid_fd.0, false)?;

    let (_ramfs_pid, ramfs_path) = create_temp_initramfs(ticket).await?;

    let logpath = create_restore_log_dir()?;
    let logfile = &format!("{}/qemu.log", logpath);
    let mut logrotate = tools::logrotate::LogRotate::new(logfile, false)
        .ok_or_else(|| format_err!("could not get QEMU log file names"))?;

    if let Err(err) = logrotate.do_rotate(CreateOptions::default(), Some(16)) {
        eprintln!("warning: logrotate for QEMU log file failed - {}", err);
    }

    let mut logfd = OpenOptions::new()
        .append(true)
        .create_new(true)
        .open(logfile)?;
    tools::fd_change_cloexec(logfd.as_raw_fd(), false)?;

    // preface log file with start timestamp so one can see how long QEMU took to start
    writeln!(logfd, "[{}] PBS file restore VM log", {
        let now = proxmox::tools::time::epoch_i64();
        proxmox::tools::time::epoch_to_rfc3339(now)?
    })?;

    let base_args = [
        "-chardev",
        &format!(
            "file,id=log,path=/dev/null,logfile=/dev/fd/{},logappend=on",
            logfd.as_raw_fd()
        ),
        "-serial",
        "chardev:log",
        "-vnc",
        "none",
        "-enable-kvm",
        "-m",
        "512",
        "-kernel",
        buildcfg::PROXMOX_BACKUP_KERNEL_FN,
        "-initrd",
        &ramfs_path,
        "-append",
        "quiet",
        "-daemonize",
        "-pidfile",
        &format!("/dev/fd/{}", pid_fd.as_raw_fd()),
        "-name",
        PBS_VM_NAME,
    ];

    // Generate drive arguments for all fidx files in backup snapshot
    let mut drives = Vec::new();
    let mut id = 0;
    for file in files {
        if !file.ends_with(".img.fidx") {
            continue;
        }
        drives.push("-drive".to_owned());
        let keyfile = if let Some(ref keyfile) = details.keyfile {
            format!(",,keyfile={}", keyfile)
        } else {
            "".to_owned()
        };
        drives.push(format!(
            "file=pbs:repository={},,snapshot={},,archive={}{},read-only=on,if=none,id=drive{}",
            details.repo, details.snapshot, file, keyfile, id
        ));
        drives.push("-device".to_owned());
        // drive serial is used by VM to map .fidx files to /dev paths
        drives.push(format!("virtio-blk-pci,drive=drive{},serial={}", id, file));
        id += 1;
    }

    // Try starting QEMU in a loop to retry if we fail because of a bad 'cid' value
    let mut attempts = 0;
    loop {
        let mut qemu_cmd = std::process::Command::new("qemu-system-x86_64");
        qemu_cmd.args(base_args.iter());
        qemu_cmd.args(&drives);
        qemu_cmd.arg("-device");
        qemu_cmd.arg(format!(
            "vhost-vsock-pci,guest-cid={},disable-legacy=on",
            cid
        ));

        qemu_cmd.stdout(std::process::Stdio::null());
        qemu_cmd.stderr(std::process::Stdio::piped());

        let res = tokio::task::block_in_place(|| qemu_cmd.spawn()?.wait_with_output())?;

        if res.status.success() {
            // at this point QEMU is already daemonized and running, so if anything fails we
            // technically leave behind a zombie-VM... this shouldn't matter, as it will stop
            // itself soon enough (timer), and the following operations are unlikely to fail
            let mut pid_file = unsafe { File::from_raw_fd(pid_fd.as_raw_fd()) };
            std::mem::forget(pid_fd); // FD ownership is now in pid_fd/File
            let mut pidstr = String::new();
            pid_file.read_to_string(&mut pidstr)?;
            pid = pidstr.trim_end().parse().map_err(|err| {
                format_err!("cannot parse PID returned by QEMU ('{}'): {}", &pidstr, err)
            })?;
            break;
        } else {
            let out = String::from_utf8_lossy(&res.stderr);
            if out.contains("unable to set guest cid: Address already in use") {
                attempts += 1;
                if attempts >= MAX_CID_TRIES {
                    bail!("CID '{}' in use, but max attempts reached, aborting", cid);
                }
                // CID in use, try next higher one
                eprintln!("CID '{}' in use by other VM, attempting next one", cid);
                // skip special-meaning low values
                cid = cid.wrapping_add(1).max(10);
            } else {
                eprint!("{}", out);
                bail!("Starting VM failed. See output above for more information.");
            }
        }
    }

    // QEMU has started successfully, now wait for virtio socket to become ready
    let pid_t = Pid::from_raw(pid);
    for _ in 0..60 {
        let client = VsockClient::new(cid as i32, DEFAULT_VSOCK_PORT, Some(ticket.to_owned()));
        if let Ok(Ok(_)) =
            time::timeout(Duration::from_secs(2), client.get("api2/json/status", None)).await
        {
            return Ok((pid, cid as i32));
        }
        if kill(pid_t, None).is_err() {
            // QEMU exited
            bail!("VM exited before connection could be established");
        }
        time::sleep(Duration::from_millis(200)).await;
    }

    // start failed
    if let Err(err) = try_kill_vm(pid) {
        eprintln!("killing failed VM failed: {}", err);
    }
    bail!("starting VM timed out");
}
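The retry loop bumps the guest CID with wrapping_add and clamps it to a floor of 10, since very low vsock CIDs carry special meanings ("some low CIDs have special meaning" per the comments above); a hedged sketch of the probe sequence at the wrap-around edge:

fn example() {
    let mut cid: u16 = u16::MAX; // worst case: wraps on the next attempt
    for _ in 0..3 {
        cid = cid.wrapping_add(1).max(10); // same step as the retry loop above
        println!("trying guest-cid {}", cid);
    }
    // prints 10, 11, 12 — the wrap lands on 0, which the floor lifts to 10
}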
370
src/bin/proxmox_restore_daemon/api.rs
Normal file
@ -0,0 +1,370 @@
|
|||||||
|
//! File-restore API running inside the restore VM
use anyhow::{bail, Error};
use futures::FutureExt;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use log::error;
use pathpatterns::{MatchEntry, MatchPattern, MatchType, Pattern};
use serde_json::Value;

use std::ffi::OsStr;
use std::fs;
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};

use proxmox::api::{
    api, schema::*, ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router, RpcEnvironment,
    SubdirMap,
};
use proxmox::{identity, list_subdirs_api_method, sortable};

use proxmox_backup::api2::types::*;
use proxmox_backup::backup::DirEntryAttribute;
use proxmox_backup::pxar::{create_archive, Flags, PxarCreateOptions, ENCODER_MAX_ENTRIES};
use proxmox_backup::tools::{self, fs::read_subdir, zip::zip_directory};

use pxar::encoder::aio::TokioWriter;

use super::{disk::ResolveResult, watchdog_remaining, watchdog_ping};

// NOTE: All API endpoints must have Permission::Superuser, as the configs for authentication do
// not exist within the restore VM. Safety is guaranteed by checking a ticket via a custom ApiAuth.

const SUBDIRS: SubdirMap = &[
    ("extract", &Router::new().get(&API_METHOD_EXTRACT)),
    ("list", &Router::new().get(&API_METHOD_LIST)),
    ("status", &Router::new().get(&API_METHOD_STATUS)),
    ("stop", &Router::new().get(&API_METHOD_STOP)),
];

pub const ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SUBDIRS))
    .subdirs(SUBDIRS);

fn read_uptime() -> Result<f32, Error> {
    let uptime = fs::read_to_string("/proc/uptime")?;
    // unwrap the Option, if /proc/uptime is empty we have bigger problems
    Ok(uptime.split_ascii_whitespace().next().unwrap().parse()?)
}

#[api(
    input: {
        properties: {
            "keep-timeout": {
                type: bool,
                description: "If true, do not reset the watchdog timer on this API call.",
                default: false,
                optional: true,
            },
        },
    },
    access: {
        description: "Permissions are handled outside restore VM. This call can be made without a ticket, but keep-timeout is always assumed 'true' then.",
        permission: &Permission::World,
    },
    returns: {
        type: RestoreDaemonStatus,
    }
)]
/// General status information
fn status(rpcenv: &mut dyn RpcEnvironment, keep_timeout: bool) -> Result<RestoreDaemonStatus, Error> {
    if !keep_timeout && rpcenv.get_auth_id().is_some() {
        watchdog_ping();
    }
    Ok(RestoreDaemonStatus {
        uptime: read_uptime()? as i64,
        timeout: watchdog_remaining(),
    })
}

#[api(
    access: {
        description: "Permissions are handled outside restore VM.",
        permission: &Permission::Superuser,
    },
)]
/// Stop the restore VM immediately, this will never return if successful
fn stop() {
    use nix::sys::reboot;
    println!("/stop called, shutting down");
    let err = reboot::reboot(reboot::RebootMode::RB_POWER_OFF).unwrap_err();
    println!("'reboot' syscall failed: {}", err);
    std::process::exit(1);
}

fn get_dir_entry(path: &Path) -> Result<DirEntryAttribute, Error> {
    use nix::sys::stat;

    let stat = stat::stat(path)?;
    Ok(match stat.st_mode & libc::S_IFMT {
        libc::S_IFREG => DirEntryAttribute::File {
            size: stat.st_size as u64,
            mtime: stat.st_mtime,
        },
        libc::S_IFDIR => DirEntryAttribute::Directory { start: 0 },
        _ => bail!("unsupported file type: {}", stat.st_mode),
    })
}

#[api(
    input: {
        properties: {
            "path": {
                type: String,
                description: "base64-encoded path to list files and directories under",
            },
        },
    },
    access: {
        description: "Permissions are handled outside restore VM.",
        permission: &Permission::Superuser,
    },
)]
/// List file details for given file or a list of files and directories under the given path if it
/// points to a directory.
fn list(
    path: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    watchdog_ping();

    let mut res = Vec::new();

    let param_path = base64::decode(path)?;
    let mut path = param_path.clone();
    if let Some(b'/') = path.last() {
        path.pop();
    }
    let path_str = OsStr::from_bytes(&path[..]);
    let param_path_buf = Path::new(path_str);

    let mut disk_state = crate::DISK_STATE.lock().unwrap();
    let query_result = disk_state.resolve(&param_path_buf)?;

    match query_result {
        ResolveResult::Path(vm_path) => {
            let root_entry = get_dir_entry(&vm_path)?;
            match root_entry {
                DirEntryAttribute::File { .. } => {
                    // list on file, return details
                    res.push(ArchiveEntry::new(&param_path, Some(&root_entry)));
                }
                DirEntryAttribute::Directory { .. } => {
                    // list on directory, return all contained files/dirs
                    for f in read_subdir(libc::AT_FDCWD, &vm_path)? {
                        if let Ok(f) = f {
                            let name = f.file_name().to_bytes();
                            let path = &Path::new(OsStr::from_bytes(name));
                            if path.components().count() == 1 {
                                // ignore '.' and '..'
                                match path.components().next().unwrap() {
                                    std::path::Component::CurDir
                                    | std::path::Component::ParentDir => continue,
                                    _ => {}
                                }
                            }

                            let mut full_vm_path = PathBuf::new();
                            full_vm_path.push(&vm_path);
                            full_vm_path.push(path);
                            let mut full_path = PathBuf::new();
                            full_path.push(param_path_buf);
                            full_path.push(path);

                            let entry = get_dir_entry(&full_vm_path);
                            if let Ok(entry) = entry {
                                res.push(ArchiveEntry::new(
                                    full_path.as_os_str().as_bytes(),
                                    Some(&entry),
                                ));
                            }
                        }
                    }
                }
                _ => unreachable!(),
            }
        }
        ResolveResult::BucketTypes(types) => {
            for t in types {
                let mut t_path = path.clone();
                t_path.push(b'/');
                t_path.extend(t.as_bytes());
                res.push(ArchiveEntry::new(
                    &t_path[..],
                    None,
                ));
            }
        }
        ResolveResult::BucketComponents(comps) => {
            for c in comps {
                let mut c_path = path.clone();
                c_path.push(b'/');
                c_path.extend(c.as_bytes());
                res.push(ArchiveEntry::new(
                    &c_path[..],
                    // this marks the beginning of a filesystem, i.e. '/', so this is a Directory
                    Some(&DirEntryAttribute::Directory { start: 0 }),
                ));
            }
        }
    }

    Ok(res)
}

#[sortable]
pub const API_METHOD_EXTRACT: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&extract),
    &ObjectSchema::new(
        "Extract a file or directory from the VM as a pxar archive.",
        &sorted!([
            (
                "path",
                false,
                &StringSchema::new("base64-encoded path to list files and directories under")
                    .schema()
            ),
            (
                "pxar",
                true,
                &BooleanSchema::new(concat!(
                    "if true, return a pxar archive, otherwise either the ",
                    "file content or the directory as a zip file"
                ))
                .default(true)
                .schema()
            )
        ]),
    ),
)
.access(None, &Permission::Superuser);

fn extract(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    watchdog_ping();
    async move {
        let path = tools::required_string_param(&param, "path")?;
        let mut path = base64::decode(path)?;
        if let Some(b'/') = path.last() {
            path.pop();
        }
        let path = Path::new(OsStr::from_bytes(&path[..]));

        let pxar = param["pxar"].as_bool().unwrap_or(true);

        let query_result = {
            let mut disk_state = crate::DISK_STATE.lock().unwrap();
            disk_state.resolve(&path)?
        };

        let vm_path = match query_result {
            ResolveResult::Path(vm_path) => vm_path,
            _ => bail!("invalid path, cannot restore meta-directory: {:?}", path),
        };

        // check here so we can return a real error message, failing in the async task will stop
        // the transfer, but not return a useful message
        if !vm_path.exists() {
            bail!("file or directory {:?} does not exist", path);
        }

        let (mut writer, reader) = tokio::io::duplex(1024 * 64);

        if pxar {
            tokio::spawn(async move {
                let result = async move {
                    // pxar always expects a directory as its root, so to accommodate files as
                    // well we encode the parent dir with a filter only matching the target instead
                    let mut patterns = vec![MatchEntry::new(
                        MatchPattern::Pattern(Pattern::path(b"*").unwrap()),
                        MatchType::Exclude,
                    )];

                    let name = match vm_path.file_name() {
                        Some(name) => name,
                        None => bail!("no file name found for path: {:?}", vm_path),
                    };

                    if vm_path.is_dir() {
                        let mut pat = name.as_bytes().to_vec();
                        patterns.push(MatchEntry::new(
                            MatchPattern::Pattern(Pattern::path(pat.clone())?),
                            MatchType::Include,
                        ));
                        pat.extend(b"/**/*".iter());
                        patterns.push(MatchEntry::new(
                            MatchPattern::Pattern(Pattern::path(pat)?),
                            MatchType::Include,
                        ));
                    } else {
                        patterns.push(MatchEntry::new(
                            MatchPattern::Literal(name.as_bytes().to_vec()),
                            MatchType::Include,
                        ));
                    }

                    let dir_path = vm_path.parent().unwrap_or_else(|| Path::new("/"));
                    let dir = nix::dir::Dir::open(
                        dir_path,
                        nix::fcntl::OFlag::O_NOFOLLOW,
                        nix::sys::stat::Mode::empty(),
                    )?;

                    let options = PxarCreateOptions {
                        entries_max: ENCODER_MAX_ENTRIES,
                        device_set: None,
                        patterns,
                        verbose: false,
                        skip_lost_and_found: false,
                    };

                    let pxar_writer = TokioWriter::new(writer);
                    create_archive(dir, pxar_writer, Flags::DEFAULT, |_| Ok(()), None, options)
                        .await
                }
                .await;
                if let Err(err) = result {
                    error!("pxar streaming task failed - {}", err);
                }
            });
        } else {
            tokio::spawn(async move {
                let result = async move {
                    if vm_path.is_dir() {
                        zip_directory(&mut writer, &vm_path).await?;
                        Ok(())
                    } else if vm_path.is_file() {
                        let mut file = tokio::fs::OpenOptions::new()
                            .read(true)
                            .open(vm_path)
                            .await?;
                        tokio::io::copy(&mut file, &mut writer).await?;
                        Ok(())
                    } else {
                        bail!("invalid entry type for path: {:?}", vm_path);
                    }
                }
                .await;
                if let Err(err) = result {
                    error!("file or dir streaming task failed - {}", err);
                }
            });
        }

        let stream = tokio_util::io::ReaderStream::new(reader);

        let body = Body::wrap_stream(stream);
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}
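A hypothetical caller sketch for the endpoints above (not part of the patch): the "path" parameter is base64 so arbitrary, possibly non-UTF-8, byte paths survive the JSON layer. `client` stands in for the VsockClient used on the host side earlier in this series.

// assumed names: `client` (host-side VsockClient) plus the base64
// and serde_json crates this patch already depends on
let param = serde_json::json!({
    "path": base64::encode(b"/drive-scsi0.img.fidx/part/1/etc"),
});
let entries = client.get("api2/json/list", Some(param)).await?;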
src/bin/proxmox_restore_daemon/auth.rs (new file, 45 lines)
@@ -0,0 +1,45 @@
//! Authentication via a static ticket file
use anyhow::{bail, format_err, Error};

use std::fs::File;
use std::io::prelude::*;

use proxmox_backup::api2::types::Authid;
use proxmox_backup::config::cached_user_info::CachedUserInfo;
use proxmox_backup::server::auth::{ApiAuth, AuthError};

const TICKET_FILE: &str = "/ticket";

pub struct StaticAuth {
    ticket: String,
}

impl ApiAuth for StaticAuth {
    fn check_auth(
        &self,
        headers: &http::HeaderMap,
        _method: &hyper::Method,
        _user_info: &CachedUserInfo,
    ) -> Result<Authid, AuthError> {
        match headers.get(hyper::header::AUTHORIZATION) {
            Some(header) if header.to_str().unwrap_or("") == &self.ticket => {
                Ok(Authid::root_auth_id().to_owned())
            }
            _ => {
                return Err(AuthError::Generic(format_err!(
                    "invalid file restore ticket provided"
                )));
            }
        }
    }
}

pub fn ticket_auth() -> Result<StaticAuth, Error> {
    let mut ticket_file = File::open(TICKET_FILE)?;
    let mut ticket = String::new();
    let len = ticket_file.read_to_string(&mut ticket)?;
    if len == 0 {
        bail!("invalid ticket: cannot be empty");
    }
    Ok(StaticAuth { ticket })
}
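For reference, the client side has to mirror this exactly: check_auth() compares the Authorization header verbatim against the file contents, so the restore client must send the raw ticket string rather than any derived token. A sketch under that assumption (not part of the patch):

// hypothetical request construction on the host side
let req = http::Request::builder()
    .uri("/api2/json/status")
    // the entire header value is the ticket, no "Bearer"-style prefix
    .header(http::header::AUTHORIZATION, ticket.as_str())
    .body(())?;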
src/bin/proxmox_restore_daemon/disk.rs (new file, 329 lines)
@@ -0,0 +1,329 @@
//! Low-level disk (image) access functions for file restore VMs.
use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
use log::{info, warn};

use std::collections::HashMap;
use std::fs::{create_dir_all, File};
use std::io::{BufRead, BufReader};
use std::path::{Component, Path, PathBuf};

use proxmox::const_regex;
use proxmox::tools::fs;
use proxmox_backup::api2::types::BLOCKDEVICE_NAME_REGEX;

const_regex! {
    VIRTIO_PART_REGEX = r"^vd[a-z]+(\d+)$";
}

lazy_static! {
    static ref FS_OPT_MAP: HashMap<&'static str, &'static str> = {
        let mut m = HashMap::new();

        // otherwise ext complains about mounting read-only
        m.insert("ext2", "noload");
        m.insert("ext3", "noload");
        m.insert("ext4", "noload");

        // ufs2 is used as default since FreeBSD 5.0 released in 2003, so let's assume that
        // whatever the user is trying to restore is not using anything older...
        m.insert("ufs", "ufstype=ufs2");

        m
    };
}

pub enum ResolveResult {
    Path(PathBuf),
    BucketTypes(Vec<&'static str>),
    BucketComponents(Vec<String>),
}

struct PartitionBucketData {
    dev_node: String,
    number: i32,
    mountpoint: Option<PathBuf>,
}

/// A "Bucket" represents a mapping found on a disk, e.g. a partition, a zfs dataset or an LV. A
/// uniquely identifying path to a file then consists of four components:
/// "/disk/bucket/component/path"
/// where
///   disk: fidx file name
///   bucket: bucket type
///   component: identifier of the specific bucket
///   path: relative path of the file on the filesystem indicated by the other parts, may contain
///         more subdirectories
/// e.g.: "/drive-scsi0/part/0/etc/passwd"
enum Bucket {
    Partition(PartitionBucketData),
}

impl Bucket {
    fn filter_mut<'a, A: AsRef<str>, B: AsRef<str>>(
        haystack: &'a mut Vec<Bucket>,
        ty: A,
        comp: B,
    ) -> Option<&'a mut Bucket> {
        let ty = ty.as_ref();
        let comp = comp.as_ref();
        haystack.iter_mut().find(|b| match b {
            Bucket::Partition(data) => ty == "part" && comp.parse::<i32>().unwrap() == data.number,
        })
    }

    fn type_string(&self) -> &'static str {
        match self {
            Bucket::Partition(_) => "part",
        }
    }

    fn component_string(&self) -> String {
        match self {
            Bucket::Partition(data) => data.number.to_string(),
        }
    }
}

/// Functions related to the local filesystem. This mostly exists so we can use 'supported_fs' in
/// try_mount while a Bucket is still mutably borrowed from DiskState.
struct Filesystems {
    supported_fs: Vec<String>,
}

impl Filesystems {
    fn scan() -> Result<Self, Error> {
        // detect kernel supported filesystems
        let mut supported_fs = Vec::new();
        for f in BufReader::new(File::open("/proc/filesystems")?)
            .lines()
            .filter_map(Result::ok)
        {
            // ZFS is treated specially, don't attempt to do a regular mount with it
            let f = f.trim();
            if !f.starts_with("nodev") && f != "zfs" {
                supported_fs.push(f.to_owned());
            }
        }

        Ok(Self { supported_fs })
    }

    fn ensure_mounted(&self, bucket: &mut Bucket) -> Result<PathBuf, Error> {
        match bucket {
            Bucket::Partition(data) => {
                // regular data partition à la "/dev/vdxN"
                if let Some(mp) = &data.mountpoint {
                    return Ok(mp.clone());
                }

                let mp = format!("/mnt{}/", data.dev_node);
                self.try_mount(&data.dev_node, &mp)?;
                let mp = PathBuf::from(mp);
                data.mountpoint = Some(mp.clone());
                Ok(mp)
            }
        }
    }

    fn try_mount(&self, source: &str, target: &str) -> Result<(), Error> {
        use nix::mount::*;

        create_dir_all(target)?;

        // try all supported fs until one works - this is the way Busybox's 'mount' does it too:
        // https://git.busybox.net/busybox/tree/util-linux/mount.c?id=808d93c0eca49e0b22056e23d965f0d967433fbb#n2152
        // note that ZFS is intentionally left out (see scan())
        let flags =
            MsFlags::MS_RDONLY | MsFlags::MS_NOEXEC | MsFlags::MS_NOSUID | MsFlags::MS_NODEV;
        for fs in &self.supported_fs {
            let fs: &str = fs.as_ref();
            let opts = FS_OPT_MAP.get(fs).copied();
            match mount(Some(source), target, Some(fs), flags, opts) {
                Ok(()) => {
                    info!("mounting '{}' succeeded, fstype: '{}'", source, fs);
                    return Ok(());
                }
                Err(err) => {
                    warn!("mount error on '{}' ({}) - {}", source, fs, err);
                }
            }
        }

        bail!("all mounts failed or no supported file system")
    }
}

pub struct DiskState {
    filesystems: Filesystems,
    disk_map: HashMap<String, Vec<Bucket>>,
}

impl DiskState {
    /// Scan all disks for supported buckets.
    pub fn scan() -> Result<Self, Error> {
        // create mapping for virtio drives and .fidx files (via serial description)
        // note: disks::DiskManager relies on udev, which we don't have
        let mut disk_map = HashMap::new();
        for entry in proxmox_backup::tools::fs::scan_subdir(
            libc::AT_FDCWD,
            "/sys/block",
            &BLOCKDEVICE_NAME_REGEX,
        )?
        .filter_map(Result::ok)
        {
            let name = unsafe { entry.file_name_utf8_unchecked() };
            if !name.starts_with("vd") {
                continue;
            }

            let sys_path: &str = &format!("/sys/block/{}", name);

            let serial = fs::file_read_string(&format!("{}/serial", sys_path));
            let fidx = match serial {
                Ok(serial) => serial,
                Err(err) => {
                    warn!("disk '{}': could not read serial file - {}", name, err);
                    continue;
                }
            };

            let mut parts = Vec::new();
            for entry in proxmox_backup::tools::fs::scan_subdir(
                libc::AT_FDCWD,
                sys_path,
                &VIRTIO_PART_REGEX,
            )?
            .filter_map(Result::ok)
            {
                let part_name = unsafe { entry.file_name_utf8_unchecked() };
                let devnode = format!("/dev/{}", part_name);
                let part_path = format!("/sys/block/{}/{}", name, part_name);

                // create partition device node for further use
                let dev_num_str = fs::file_read_firstline(&format!("{}/dev", part_path))?;
                let (major, minor) = dev_num_str.split_at(dev_num_str.find(':').unwrap());
                Self::mknod_blk(&devnode, major.parse()?, minor[1..].trim_end().parse()?)?;

                let number = fs::file_read_firstline(&format!("{}/partition", part_path))?
                    .trim()
                    .parse::<i32>()?;

                info!(
                    "drive '{}' ('{}'): found partition '{}' ({})",
                    name, fidx, devnode, number
                );

                let bucket = Bucket::Partition(PartitionBucketData {
                    dev_node: devnode,
                    mountpoint: None,
                    number,
                });

                parts.push(bucket);
            }

            disk_map.insert(fidx.to_owned(), parts);
        }

        Ok(Self {
            filesystems: Filesystems::scan()?,
            disk_map,
        })
    }

    /// Given a path like "/drive-scsi0.img.fidx/part/0/etc/passwd", this will mount the first
    /// partition of 'drive-scsi0' on-demand (i.e. if not already mounted) and return a path
    /// pointing to the requested file locally, e.g. "/mnt/vda1/etc/passwd", which can be used to
    /// read the file. Given a partial path, i.e. only "/drive-scsi0.img.fidx" or
    /// "/drive-scsi0.img.fidx/part", it will return a list of available bucket types or bucket
    /// components respectively.
    pub fn resolve(&mut self, path: &Path) -> Result<ResolveResult, Error> {
        let mut cmp = path.components().peekable();
        match cmp.peek() {
            Some(Component::RootDir) | Some(Component::CurDir) => {
                cmp.next();
            }
            None => bail!("empty path cannot be resolved to file location"),
            _ => {}
        }

        let req_fidx = match cmp.next() {
            Some(Component::Normal(x)) => x.to_string_lossy(),
            _ => bail!("no or invalid image in path"),
        };

        let buckets = match self.disk_map.get_mut(req_fidx.as_ref()) {
            Some(x) => x,
            None => bail!("given image '{}' not found", req_fidx),
        };

        let bucket_type = match cmp.next() {
            Some(Component::Normal(x)) => x.to_string_lossy(),
            Some(c) => bail!("invalid bucket in path: {:?}", c),
            None => {
                // list bucket types available
                let mut types = buckets
                    .iter()
                    .map(|b| b.type_string())
                    .collect::<Vec<&'static str>>();
                // dedup requires duplicates to be consecutive, which is the case - see scan()
                types.dedup();
                return Ok(ResolveResult::BucketTypes(types));
            }
        };

        let component = match cmp.next() {
            Some(Component::Normal(x)) => x.to_string_lossy(),
            Some(c) => bail!("invalid bucket component in path: {:?}", c),
            None => {
                // list bucket components available
                let comps = buckets
                    .iter()
                    .filter(|b| b.type_string() == bucket_type)
                    .map(Bucket::component_string)
                    .collect();
                return Ok(ResolveResult::BucketComponents(comps));
            }
        };

        let mut bucket = match Bucket::filter_mut(buckets, &bucket_type, &component) {
            Some(bucket) => bucket,
            None => bail!(
                "bucket/component path not found: {}/{}/{}",
                req_fidx,
                bucket_type,
                component
            ),
        };

        // bucket found, check mount
        let mountpoint = self
            .filesystems
            .ensure_mounted(&mut bucket)
            .map_err(|err| {
                format_err!(
                    "mounting '{}/{}/{}' failed: {}",
                    req_fidx,
                    bucket_type,
                    component,
                    err
                )
            })?;

        let mut local_path = PathBuf::new();
        local_path.push(mountpoint);
        for rem in cmp {
            local_path.push(rem);
        }

        Ok(ResolveResult::Path(local_path))
    }

    fn mknod_blk(path: &str, maj: u64, min: u64) -> Result<(), Error> {
        use nix::sys::stat;
        let dev = stat::makedev(maj, min);
        stat::mknod(path, stat::SFlag::S_IFBLK, stat::Mode::S_IRWXU, dev)?;
        Ok(())
    }
}
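To make the resolve() contract concrete, a usage sketch (assumed, mirroring the doc comment's own examples; note the actual mountpoint built by ensure_mounted() is "/mnt" plus the full device node):

let mut state = DiskState::scan()?;
// image only -> ResolveResult::BucketTypes(vec!["part"])
let _ = state.resolve(Path::new("/drive-scsi0.img.fidx"))?;
// image + bucket type -> ResolveResult::BucketComponents(["1", ...])
let _ = state.resolve(Path::new("/drive-scsi0.img.fidx/part"))?;
// full path -> mounts /dev/vda1 on demand and returns
// ResolveResult::Path("/mnt/dev/vda1/etc/passwd")
let _ = state.resolve(Path::new("/drive-scsi0.img.fidx/part/1/etc/passwd"))?;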
src/bin/proxmox_restore_daemon/mod.rs (new file, 11 lines)
@@ -0,0 +1,11 @@
//! File restore VM related functionality
mod api;
pub use api::*;

pub mod auth;

mod watchdog;
pub use watchdog::*;

mod disk;
pub use disk::*;
src/bin/proxmox_restore_daemon/watchdog.rs (new file, 41 lines)
@@ -0,0 +1,41 @@
//! Tokio-based watchdog that shuts down the VM if not pinged for TIMEOUT
use std::sync::atomic::{AtomicI64, Ordering};

use proxmox::tools::time::epoch_i64;

const TIMEOUT: i64 = 600; // seconds
static TRIGGERED: AtomicI64 = AtomicI64::new(0);

fn handle_expired() -> ! {
    use nix::sys::reboot;
    println!("watchdog expired, shutting down");
    let err = reboot::reboot(reboot::RebootMode::RB_POWER_OFF).unwrap_err();
    println!("'reboot' syscall failed: {}", err);
    std::process::exit(1);
}

async fn watchdog_loop() {
    use tokio::time::{sleep, Duration};
    loop {
        let remaining = watchdog_remaining();
        if remaining <= 0 {
            handle_expired();
        }
        sleep(Duration::from_secs(remaining as u64)).await;
    }
}

/// Initialize watchdog
pub fn watchdog_init() {
    watchdog_ping();
    tokio::spawn(watchdog_loop());
}

/// Trigger watchdog keepalive
pub fn watchdog_ping() {
    TRIGGERED.fetch_max(epoch_i64(), Ordering::AcqRel);
}

/// Returns the remaining time before watchdog expiry in seconds
pub fn watchdog_remaining() -> i64 {
    TIMEOUT - (epoch_i64() - TRIGGERED.load(Ordering::Acquire))
}
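Wiring sketch (assumed, based on the API handlers earlier in this patch): the daemon arms the watchdog once at startup, and every ticket-authenticated request calls watchdog_ping(), pushing expiry out to TIMEOUT seconds from now. fetch_max means concurrent pings can never move the deadline backwards.

// hypothetical daemon setup, not part of the patch
fn setup() {
    watchdog_init(); // TRIGGERED = now, expiry loop spawned
    // ... start the API server; each authenticated handler
    //     calls watchdog_ping() ...
}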
@@ -21,7 +21,7 @@ use proxmox_backup::{
     config::drive::{
         complete_drive_name,
         complete_changer_name,
-        complete_linux_drive_name,
+        complete_lto_drive_name,
     },
 };

@@ -33,13 +33,13 @@ pub fn drive_commands() -> CommandLineInterface {
         .insert("config",
             CliCommand::new(&API_METHOD_GET_CONFIG)
                 .arg_param(&["name"])
-                .completion_cb("name", complete_linux_drive_name)
+                .completion_cb("name", complete_lto_drive_name)
         )
         .insert(
             "remove",
             CliCommand::new(&api2::config::drive::API_METHOD_DELETE_DRIVE)
                 .arg_param(&["name"])
-                .completion_cb("name", complete_linux_drive_name)
+                .completion_cb("name", complete_lto_drive_name)
         )
         .insert(
             "create",
@@ -53,7 +53,7 @@ pub fn drive_commands() -> CommandLineInterface {
             "update",
             CliCommand::new(&api2::config::drive::API_METHOD_UPDATE_DRIVE)
                 .arg_param(&["name"])
-                .completion_cb("name", complete_linux_drive_name)
+                .completion_cb("name", complete_lto_drive_name)
                 .completion_cb("path", complete_drive_path)
                 .completion_cb("changer", complete_changer_name)
         )
@@ -177,12 +177,14 @@ fn list_content(
     let options = default_table_format_options()
         .sortby("media-set-uuid", false)
         .sortby("seq-nr", false)
+        .sortby("store", false)
         .sortby("snapshot", false)
         .sortby("backup-time", false)
         .column(ColumnConfig::new("label-text"))
         .column(ColumnConfig::new("pool"))
         .column(ColumnConfig::new("media-set-name"))
         .column(ColumnConfig::new("seq-nr"))
+        .column(ColumnConfig::new("store"))
         .column(ColumnConfig::new("snapshot"))
         .column(ColumnConfig::new("media-set-uuid"))
         ;
@@ -130,22 +130,22 @@ fn extract_archive(
 ) -> Result<(), Error> {
     let mut feature_flags = Flags::DEFAULT;
     if no_xattrs {
-        feature_flags ^= Flags::WITH_XATTRS;
+        feature_flags.remove(Flags::WITH_XATTRS);
     }
     if no_fcaps {
-        feature_flags ^= Flags::WITH_FCAPS;
+        feature_flags.remove(Flags::WITH_FCAPS);
     }
     if no_acls {
-        feature_flags ^= Flags::WITH_ACL;
+        feature_flags.remove(Flags::WITH_ACL);
     }
     if no_device_nodes {
-        feature_flags ^= Flags::WITH_DEVICE_NODES;
+        feature_flags.remove(Flags::WITH_DEVICE_NODES);
     }
     if no_fifos {
-        feature_flags ^= Flags::WITH_FIFOS;
+        feature_flags.remove(Flags::WITH_FIFOS);
     }
     if no_sockets {
-        feature_flags ^= Flags::WITH_SOCKETS;
+        feature_flags.remove(Flags::WITH_SOCKETS);
     }

     let pattern = pattern.unwrap_or_else(Vec::new);
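The `^=` to `remove()` change in the hunk above (and the identical one that follows for create_archive) tightens the logic: XOR toggles a bit, so it only clears a flag that is currently set. Starting from Flags::DEFAULT that happens to work, but bitflags' `remove()` clears unconditionally and states the intent. A minimal sketch of the difference:

// `Flags` stands in for the bitflags-generated type used above
let mut f = Flags::WITH_XATTRS;
f ^= Flags::WITH_XATTRS;      // cleared
f ^= Flags::WITH_XATTRS;      // toggled back ON - the failure mode
f.remove(Flags::WITH_XATTRS); // OFF regardless of previous state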
@@ -353,22 +353,22 @@ async fn create_archive(
     let writer = std::io::BufWriter::with_capacity(1024 * 1024, file);
     let mut feature_flags = Flags::DEFAULT;
     if no_xattrs {
-        feature_flags ^= Flags::WITH_XATTRS;
+        feature_flags.remove(Flags::WITH_XATTRS);
     }
     if no_fcaps {
-        feature_flags ^= Flags::WITH_FCAPS;
+        feature_flags.remove(Flags::WITH_FCAPS);
     }
     if no_acls {
-        feature_flags ^= Flags::WITH_ACL;
+        feature_flags.remove(Flags::WITH_ACL);
     }
     if no_device_nodes {
-        feature_flags ^= Flags::WITH_DEVICE_NODES;
+        feature_flags.remove(Flags::WITH_DEVICE_NODES);
     }
     if no_fifos {
-        feature_flags ^= Flags::WITH_FIFOS;
+        feature_flags.remove(Flags::WITH_FIFOS);
     }
     if no_sockets {
-        feature_flags ^= Flags::WITH_SOCKETS;
+        feature_flags.remove(Flags::WITH_SOCKETS);
     }

     let writer = pxar::encoder::sync::StandardWriter::new(writer);
@@ -1,7 +1,5 @@
-/// Tape command implemented using scsi-generic raw commands
-///
-/// SCSI-generic command needs root privileges, so this binary need
-/// to be setuid root.
+/// Helper to run tape commands as root. Currently only required
+/// to read and set the encryption key.
 ///
 /// This command can use STDIN as tape device handle.

@@ -24,41 +22,41 @@ use proxmox_backup::{
     config,
     backup::Fingerprint,
     api2::types::{
-        LINUX_DRIVE_PATH_SCHEMA,
+        LTO_DRIVE_PATH_SCHEMA,
         DRIVE_NAME_SCHEMA,
         TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
         MEDIA_SET_UUID_SCHEMA,
-        LinuxTapeDrive,
+        LtoTapeDrive,
     },
     tape::{
         drive::{
             TapeDriver,
-            LinuxTapeHandle,
-            open_linux_tape_device,
-            check_tape_is_linux_tape_device,
+            LtoTapeHandle,
+            open_lto_tape_device,
+            check_tape_is_lto_tape_device,
         },
     },
 };

-fn get_tape_handle(param: &Value) -> Result<LinuxTapeHandle, Error> {
+fn get_tape_handle(param: &Value) -> Result<LtoTapeHandle, Error> {

     let handle = if let Some(name) = param["drive"].as_str() {
         let (config, _digest) = config::drive::config()?;
-        let drive: LinuxTapeDrive = config.lookup("linux", &name)?;
+        let drive: LtoTapeDrive = config.lookup("lto", &name)?;
         eprintln!("using device {}", drive.path);
         drive.open()?
     } else if let Some(device) = param["device"].as_str() {
         eprintln!("using device {}", device);
-        LinuxTapeHandle::new(open_linux_tape_device(&device)?)
+        LtoTapeHandle::new(open_lto_tape_device(&device)?)?
     } else if let Some(true) = param["stdin"].as_bool() {
         eprintln!("using stdin");
         let fd = std::io::stdin().as_raw_fd();
         let file = unsafe { File::from_raw_fd(fd) };
-        check_tape_is_linux_tape_device(&file)?;
-        LinuxTapeHandle::new(file)
+        check_tape_is_lto_tape_device(&file)?;
+        LtoTapeHandle::new(file)?
     } else if let Ok(name) = std::env::var("PROXMOX_TAPE_DRIVE") {
         let (config, _digest) = config::drive::config()?;
-        let drive: LinuxTapeDrive = config.lookup("linux", &name)?;
+        let drive: LtoTapeDrive = config.lookup("lto", &name)?;
         eprintln!("using device {}", drive.path);
         drive.open()?
     } else {
@@ -66,13 +64,13 @@ fn get_tape_handle(param: &Value) -> Result<LtoTapeHandle, Error> {

         let mut drive_names = Vec::new();
         for (name, (section_type, _)) in config.sections.iter() {
-            if section_type != "linux" { continue; }
+            if section_type != "lto" { continue; }
             drive_names.push(name);
         }

         if drive_names.len() == 1 {
             let name = drive_names[0];
-            let drive: LinuxTapeDrive = config.lookup("linux", &name)?;
+            let drive: LtoTapeDrive = config.lookup("lto", &name)?;
             eprintln!("using device {}", drive.path);
             drive.open()?
         } else {
@@ -83,111 +81,6 @@ fn get_tape_handle(param: &Value) -> Result<LtoTapeHandle, Error> {
     Ok(handle)
 }

-#[api(
-    input: {
-        properties: {
-            drive: {
-                schema: DRIVE_NAME_SCHEMA,
-                optional: true,
-            },
-            device: {
-                schema: LINUX_DRIVE_PATH_SCHEMA,
-                optional: true,
-            },
-            stdin: {
-                description: "Use standard input as device handle.",
-                type: bool,
-                optional: true,
-            },
-        },
-    },
-)]
-/// Tape/Media Status
-fn status(
-    param: Value,
-) -> Result<(), Error> {
-
-    let result = proxmox::try_block!({
-        let mut handle = get_tape_handle(&param)?;
-        handle.get_drive_and_media_status()
-    }).map_err(|err: Error| err.to_string());
-
-    println!("{}", serde_json::to_string_pretty(&result)?);
-
-    Ok(())
-}
-
-#[api(
-    input: {
-        properties: {
-            drive: {
-                schema: DRIVE_NAME_SCHEMA,
-                optional: true,
-            },
-            device: {
-                schema: LINUX_DRIVE_PATH_SCHEMA,
-                optional: true,
-            },
-            stdin: {
-                description: "Use standard input as device handle.",
-                type: bool,
-                optional: true,
-            },
-        },
-    },
-)]
-/// Read Cartridge Memory (Medium auxiliary memory attributes)
-fn cartridge_memory(
-    param: Value,
-) -> Result<(), Error> {
-
-    let result = proxmox::try_block!({
-        let mut handle = get_tape_handle(&param)?;
-
-        handle.cartridge_memory()
-    }).map_err(|err| err.to_string());
-
-    println!("{}", serde_json::to_string_pretty(&result)?);
-
-    Ok(())
-}
-
-#[api(
-    input: {
-        properties: {
-            drive: {
-                schema: DRIVE_NAME_SCHEMA,
-                optional: true,
-            },
-            device: {
-                schema: LINUX_DRIVE_PATH_SCHEMA,
-                optional: true,
-            },
-            stdin: {
-                description: "Use standard input as device handle.",
-                type: bool,
-                optional: true,
-            },
-        },
-    },
-)]
-/// Read Tape Alert Flags
-fn tape_alert_flags(
-    param: Value,
-) -> Result<(), Error> {
-
-    let result = proxmox::try_block!({
-        let mut handle = get_tape_handle(&param)?;
-
-        let flags = handle.tape_alert_flags()?;
-        Ok(flags.bits())
-    }).map_err(|err: Error| err.to_string());
-
-    println!("{}", serde_json::to_string_pretty(&result)?);
-
-    Ok(())
-}
-
 #[api(
     input: {
         properties: {
@@ -204,7 +97,7 @@ fn tape_alert_flags(
             optional: true,
         },
         device: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
         stdin: {
@@ -245,40 +138,6 @@ fn set_encryption(
     Ok(())
 }

-#[api(
-    input: {
-        properties: {
-            drive: {
-                schema: DRIVE_NAME_SCHEMA,
-                optional: true,
-            },
-            device: {
-                schema: LINUX_DRIVE_PATH_SCHEMA,
-                optional: true,
-            },
-            stdin: {
-                description: "Use standard input as device handle.",
-                type: bool,
-                optional: true,
-            },
-        },
-    },
-)]
-/// Read volume statistics
-fn volume_statistics(
-    param: Value,
-) -> Result<(), Error> {
-
-    let result = proxmox::try_block!({
-        let mut handle = get_tape_handle(&param)?;
-        handle.volume_statistics()
-    }).map_err(|err: Error| err.to_string());
-
-    println!("{}", serde_json::to_string_pretty(&result)?);
-
-    Ok(())
-}
-
 fn main() -> Result<(), Error> {

     // check if we are user root or backup
@@ -300,22 +159,6 @@ fn main() -> Result<(), Error> {
     }

     let cmd_def = CliCommandMap::new()
-        .insert(
-            "status",
-            CliCommand::new(&API_METHOD_STATUS)
-        )
-        .insert(
-            "cartridge-memory",
-            CliCommand::new(&API_METHOD_CARTRIDGE_MEMORY)
-        )
-        .insert(
-            "tape-alert-flags",
-            CliCommand::new(&API_METHOD_TAPE_ALERT_FLAGS)
-        )
-        .insert(
-            "volume-statistics",
-            CliCommand::new(&API_METHOD_VOLUME_STATISTICS)
-        )
         .insert(
             "encryption",
             CliCommand::new(&API_METHOD_SET_ENCRYPTION)
@@ -10,6 +10,14 @@ macro_rules! PROXMOX_BACKUP_RUN_DIR_M { () => ("/run/proxmox-backup") }
 #[macro_export]
 macro_rules! PROXMOX_BACKUP_LOG_DIR_M { () => ("/var/log/proxmox-backup") }

+#[macro_export]
+macro_rules! PROXMOX_BACKUP_CACHE_DIR_M { () => ("/var/cache/proxmox-backup") }
+
+#[macro_export]
+macro_rules! PROXMOX_BACKUP_FILE_RESTORE_BIN_DIR_M {
+    () => ("/usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore")
+}
+
 /// namespaced directory for in-memory (tmpfs) run state
 pub const PROXMOX_BACKUP_RUN_DIR: &str = PROXMOX_BACKUP_RUN_DIR_M!();
@@ -30,6 +38,15 @@ pub const PROXMOX_BACKUP_PROXY_PID_FN: &str = concat!(PROXMOX_BACKUP_RUN_DIR_M!(
 /// the PID filename for the privileged api daemon
 pub const PROXMOX_BACKUP_API_PID_FN: &str = concat!(PROXMOX_BACKUP_RUN_DIR_M!(), "/api.pid");

+/// filename of the cached initramfs to use for booting single file restore VMs, this file is
+/// automatically created by APT hooks
+pub const PROXMOX_BACKUP_INITRAMFS_FN: &str =
+    concat!(PROXMOX_BACKUP_CACHE_DIR_M!(), "/file-restore-initramfs.img");
+
+/// filename of the kernel to use for booting single file restore VMs
+pub const PROXMOX_BACKUP_KERNEL_FN: &str =
+    concat!(PROXMOX_BACKUP_FILE_RESTORE_BIN_DIR_M!(), "/bzImage");
+
 /// Prepend configuration directory to a file name
 ///
 /// This is a simple way to get the full path for configuration files.
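(For reference, the two new constants expand to "/var/cache/proxmox-backup/file-restore-initramfs.img" and "/usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore/bzImage" respectively, per the macros introduced in the previous hunk.)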
@@ -1,12 +1,12 @@
 use std::collections::HashSet;
 use std::os::unix::fs::OpenOptionsExt;
-use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex};

 use anyhow::{bail, format_err, Error};
-use futures::*;
-use futures::stream::Stream;
 use futures::future::AbortHandle;
+use futures::stream::Stream;
+use futures::*;
 use serde_json::{json, Value};
 use tokio::io::AsyncReadExt;
 use tokio::sync::{mpsc, oneshot};
@@ -14,11 +14,11 @@ use tokio_stream::wrappers::ReceiverStream;

 use proxmox::tools::digest_to_hex;

-use super::merge_known_chunks::{MergedChunkInfo, MergeKnownChunks};
+use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo};
 use crate::backup::*;
 use crate::tools::format::HumanByte;

-use super::{HttpClient, H2Client};
+use super::{H2Client, HttpClient};

 pub struct BackupWriter {
     h2: H2Client,
@@ -28,7 +28,6 @@ pub struct BackupWriter {
 }

 impl Drop for BackupWriter {
-
     fn drop(&mut self) {
         self.abort.abort();
     }
@@ -48,13 +47,32 @@ pub struct UploadOptions {
     pub fixed_size: Option<u64>,
 }

+struct UploadStats {
+    chunk_count: usize,
+    chunk_reused: usize,
+    size: usize,
+    size_reused: usize,
+    size_compressed: usize,
+    duration: std::time::Duration,
+    csum: [u8; 32],
+}
+
 type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>;
 type UploadResultReceiver = oneshot::Receiver<Result<(), Error>>;

 impl BackupWriter {
-    fn new(h2: H2Client, abort: AbortHandle, crypt_config: Option<Arc<CryptConfig>>, verbose: bool) -> Arc<Self> {
-        Arc::new(Self { h2, abort, crypt_config, verbose })
+    fn new(
+        h2: H2Client,
+        abort: AbortHandle,
+        crypt_config: Option<Arc<CryptConfig>>,
+        verbose: bool,
+    ) -> Arc<Self> {
+        Arc::new(Self {
+            h2,
+            abort,
+            crypt_config,
+            verbose,
+        })
     }

     // FIXME: extract into (flattened) parameter struct?
@@ -67,9 +85,8 @@ impl BackupWriter {
         backup_id: &str,
         backup_time: i64,
         debug: bool,
-        benchmark: bool
+        benchmark: bool,
     ) -> Result<Arc<BackupWriter>, Error> {

         let param = json!({
             "backup-type": backup_type,
             "backup-id": backup_id,
@@ -80,34 +97,30 @@ impl BackupWriter {
         });

         let req = HttpClient::request_builder(
-            client.server(), client.port(), "GET", "/api2/json/backup", Some(param)).unwrap();
+            client.server(),
+            client.port(),
+            "GET",
+            "/api2/json/backup",
+            Some(param),
+        )
+        .unwrap();

-        let (h2, abort) = client.start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!())).await?;
+        let (h2, abort) = client
+            .start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
+            .await?;

         Ok(BackupWriter::new(h2, abort, crypt_config, debug))
     }

-    pub async fn get(
-        &self,
-        path: &str,
-        param: Option<Value>,
-    ) -> Result<Value, Error> {
+    pub async fn get(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
         self.h2.get(path, param).await
     }

-    pub async fn put(
-        &self,
-        path: &str,
-        param: Option<Value>,
-    ) -> Result<Value, Error> {
+    pub async fn put(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
         self.h2.put(path, param).await
     }

-    pub async fn post(
-        &self,
-        path: &str,
-        param: Option<Value>,
-    ) -> Result<Value, Error> {
+    pub async fn post(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
         self.h2.post(path, param).await
     }
@@ -118,7 +131,9 @@ impl BackupWriter {
         content_type: &str,
         data: Vec<u8>,
     ) -> Result<Value, Error> {
-        self.h2.upload("POST", path, param, content_type, data).await
+        self.h2
+            .upload("POST", path, param, content_type, data)
+            .await
     }

     pub async fn send_upload_request(
@@ -129,9 +144,13 @@ impl BackupWriter {
         content_type: &str,
         data: Vec<u8>,
     ) -> Result<h2::client::ResponseFuture, Error> {
-        let request = H2Client::request_builder("localhost", method, path, param, Some(content_type)).unwrap();
-        let response_future = self.h2.send_request(request, Some(bytes::Bytes::from(data.clone()))).await?;
+        let request =
+            H2Client::request_builder("localhost", method, path, param, Some(content_type))
+                .unwrap();
+        let response_future = self
+            .h2
+            .send_request(request, Some(bytes::Bytes::from(data.clone())))
+            .await?;
         Ok(response_future)
     }
@@ -163,7 +182,7 @@ impl BackupWriter {
         &self,
         mut reader: R,
         file_name: &str,
     ) -> Result<BackupStats, Error> {
         let mut raw_data = Vec::new();
         // fixme: avoid loading into memory
         reader.read_to_end(&mut raw_data)?;
@@ -171,7 +190,16 @@ impl BackupWriter {
         let csum = openssl::sha::sha256(&raw_data);
         let param = json!({"encoded-size": raw_data.len(), "file-name": file_name });
         let size = raw_data.len() as u64;
-        let _value = self.h2.upload("POST", "blob", Some(param), "application/octet-stream", raw_data).await?;
+        let _value = self
+            .h2
+            .upload(
+                "POST",
+                "blob",
+                Some(param),
+                "application/octet-stream",
+                raw_data,
+            )
+            .await?;
         Ok(BackupStats { size, csum })
     }
@@ -182,9 +210,11 @@ impl BackupWriter {
         options: UploadOptions,
     ) -> Result<BackupStats, Error> {
         let blob = match (options.encrypt, &self.crypt_config) {
             (false, _) => DataBlob::encode(&data, None, options.compress)?,
             (true, None) => bail!("requested encryption without a crypt config"),
-            (true, Some(crypt_config)) => DataBlob::encode(&data, Some(crypt_config), options.compress)?,
+            (true, Some(crypt_config)) => {
+                DataBlob::encode(&data, Some(crypt_config), options.compress)?
+            }
         };

         let raw_data = blob.into_inner();
@@ -192,7 +222,16 @@ impl BackupWriter {

         let csum = openssl::sha::sha256(&raw_data);
         let param = json!({"encoded-size": size, "file-name": file_name });
-        let _value = self.h2.upload("POST", "blob", Some(param), "application/octet-stream", raw_data).await?;
+        let _value = self
+            .h2
+            .upload(
+                "POST",
+                "blob",
+                Some(param),
+                "application/octet-stream",
+                raw_data,
+            )
+            .await?;
         Ok(BackupStats { size, csum })
     }
@@ -202,7 +241,6 @@ impl BackupWriter {
         file_name: &str,
         options: UploadOptions,
     ) -> Result<BackupStats, Error> {
-
         let src_path = src_path.as_ref();

         let mut file = tokio::fs::File::open(src_path)
@@ -215,7 +253,8 @@ impl BackupWriter {
             .await
             .map_err(|err| format_err!("unable to read file {:?} - {}", src_path, err))?;

-        self.upload_blob_from_data(contents, file_name, options).await
+        self.upload_blob_from_data(contents, file_name, options)
+            .await
     }

     pub async fn upload_stream(
```diff
@@ -245,72 +284,118 @@ impl BackupWriter {
             // try, but ignore errors
             match archive_type(archive_name) {
                 Ok(ArchiveType::FixedIndex) => {
-                    let _ = self.download_previous_fixed_index(archive_name, &manifest, known_chunks.clone()).await;
+                    let _ = self
+                        .download_previous_fixed_index(
+                            archive_name,
+                            &manifest,
+                            known_chunks.clone(),
+                        )
+                        .await;
                 }
                 Ok(ArchiveType::DynamicIndex) => {
-                    let _ = self.download_previous_dynamic_index(archive_name, &manifest, known_chunks.clone()).await;
+                    let _ = self
+                        .download_previous_dynamic_index(
+                            archive_name,
+                            &manifest,
+                            known_chunks.clone(),
+                        )
+                        .await;
                 }
                 _ => { /* do nothing */ }
             }
         }

-        let wid = self.h2.post(&index_path, Some(param)).await?.as_u64().unwrap();
+        let wid = self
+            .h2
+            .post(&index_path, Some(param))
+            .await?
+            .as_u64()
+            .unwrap();

-        let (chunk_count, chunk_reused, size, size_reused, duration, csum) =
-            Self::upload_chunk_info_stream(
-                self.h2.clone(),
-                wid,
-                stream,
-                &prefix,
-                known_chunks.clone(),
-                if options.encrypt { self.crypt_config.clone() } else { None },
-                options.compress,
-                self.verbose,
-            )
-            .await?;
+        let upload_stats = Self::upload_chunk_info_stream(
+            self.h2.clone(),
+            wid,
+            stream,
+            &prefix,
+            known_chunks.clone(),
+            if options.encrypt {
+                self.crypt_config.clone()
+            } else {
+                None
+            },
+            options.compress,
+            self.verbose,
+        )
+        .await?;

-        let uploaded = size - size_reused;
-        let vsize_h: HumanByte = size.into();
+        let size_dirty = upload_stats.size - upload_stats.size_reused;
+        let size: HumanByte = upload_stats.size.into();
         let archive = if self.verbose {
             archive_name.to_string()
         } else {
             crate::tools::format::strip_server_file_extension(archive_name)
         };
         if archive_name != CATALOG_NAME {
-            let speed: HumanByte = ((uploaded * 1_000_000) / (duration.as_micros() as usize)).into();
-            let uploaded: HumanByte = uploaded.into();
-            println!("{}: had to upload {} of {} in {:.2}s, average speed {}/s).", archive, uploaded, vsize_h, duration.as_secs_f64(), speed);
+            let speed: HumanByte =
+                ((size_dirty * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into();
+            let size_dirty: HumanByte = size_dirty.into();
+            let size_compressed: HumanByte = upload_stats.size_compressed.into();
+            println!(
+                "{}: had to backup {} of {} (compressed {}) in {:.2}s",
+                archive,
+                size_dirty,
+                size,
+                size_compressed,
+                upload_stats.duration.as_secs_f64()
+            );
+            println!("{}: average backup speed: {}/s", archive, speed);
         } else {
-            println!("Uploaded backup catalog ({})", vsize_h);
+            println!("Uploaded backup catalog ({})", size);
         }

-        if size_reused > 0 && size > 1024*1024 {
-            let reused_percent = size_reused as f64 * 100. / size as f64;
-            let reused: HumanByte = size_reused.into();
-            println!("{}: backup was done incrementally, reused {} ({:.1}%)", archive, reused, reused_percent);
+        if upload_stats.size_reused > 0 && upload_stats.size > 1024 * 1024 {
+            let reused_percent = upload_stats.size_reused as f64 * 100. / upload_stats.size as f64;
+            let reused: HumanByte = upload_stats.size_reused.into();
+            println!(
+                "{}: backup was done incrementally, reused {} ({:.1}%)",
+                archive, reused, reused_percent
+            );
         }
-        if self.verbose && chunk_count > 0 {
-            println!("{}: Reused {} from {} chunks.", archive, chunk_reused, chunk_count);
-            println!("{}: Average chunk size was {}.", archive, HumanByte::from(size/chunk_count));
-            println!("{}: Average time per request: {} microseconds.", archive, (duration.as_micros())/(chunk_count as u128));
+        if self.verbose && upload_stats.chunk_count > 0 {
+            println!(
+                "{}: Reused {} from {} chunks.",
+                archive, upload_stats.chunk_reused, upload_stats.chunk_count
+            );
+            println!(
+                "{}: Average chunk size was {}.",
+                archive,
+                HumanByte::from(upload_stats.size / upload_stats.chunk_count)
+            );
+            println!(
+                "{}: Average time per request: {} microseconds.",
+                archive,
+                (upload_stats.duration.as_micros()) / (upload_stats.chunk_count as u128)
+            );
         }

         let param = json!({
             "wid": wid ,
-            "chunk-count": chunk_count,
-            "size": size,
-            "csum": proxmox::tools::digest_to_hex(&csum),
+            "chunk-count": upload_stats.chunk_count,
+            "size": upload_stats.size,
+            "csum": proxmox::tools::digest_to_hex(&upload_stats.csum),
         });
         let _value = self.h2.post(&close_path, Some(param)).await?;
         Ok(BackupStats {
-            size: size as u64,
-            csum,
+            size: upload_stats.size as u64,
+            csum: upload_stats.csum,
         })
     }

-    fn response_queue(verbose: bool) -> (
+    fn response_queue(
+        verbose: bool,
+    ) -> (
         mpsc::Sender<h2::client::ResponseFuture>,
-        oneshot::Receiver<Result<(), Error>>
+        oneshot::Receiver<Result<(), Error>>,
     ) {
         let (verify_queue_tx, verify_queue_rx) = mpsc::channel(100);
         let (verify_result_tx, verify_result_rx) = oneshot::channel();
```
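The hunk above replaces the six-element result tuple of `upload_chunk_info_stream` with a named `UploadStats` value, which is what the `upload_stats.*` field accesses rely on. The struct definition itself is not part of this excerpt; judging from the fields used here and the counters set up later in the stream, a plausible sketch is:

```rust
// Sketch only - the real definition lives elsewhere in the module; field
// types are inferred from the atomic counters and the csum/duration values
// visible in this diff.
struct UploadStats {
    chunk_count: usize,
    chunk_reused: usize,
    size: usize,
    size_reused: usize,
    size_compressed: usize,
    duration: std::time::Duration,
    csum: [u8; 32],
}
```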
```diff
@@ -336,12 +421,16 @@ impl BackupWriter {
                     response
                         .map_err(Error::from)
                         .and_then(H2Client::h2api_response)
-                        .map_ok(move |result| if verbose { println!("RESPONSE: {:?}", result) })
+                        .map_ok(move |result| {
+                            if verbose {
+                                println!("RESPONSE: {:?}", result)
+                            }
+                        })
                         .map_err(|err| format_err!("pipelined request failed: {}", err))
                 })
                 .map(|result| {
                     let _ignore_closed_channel = verify_result_tx.send(result);
-                })
+                }),
         );

         (verify_queue_tx, verify_result_rx)
```
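`response_queue` hands back a bounded `mpsc` sender for pipelined HTTP/2 response futures plus a `oneshot` receiver that reports one final verification result. A minimal sketch of that channel pairing, assuming tokio's channel types and anyhow's `Error` (the name `make_queue` is illustrative, not from the codebase):

```rust
use anyhow::Error;
use tokio::sync::{mpsc, oneshot};

// Callers push work through the mpsc sender; a spawned worker drains the
// queue and reports exactly one overall result through the oneshot channel.
fn make_queue<T: Send + 'static>() -> (mpsc::Sender<T>, oneshot::Receiver<Result<(), Error>>) {
    let (queue_tx, mut queue_rx) = mpsc::channel::<T>(100);
    let (result_tx, result_rx) = oneshot::channel();
    tokio::spawn(async move {
        // a real worker would await and verify each queued response here
        while let Some(_item) = queue_rx.recv().await {}
        let _ = result_tx.send(Ok(()));
    });
    (queue_tx, result_rx)
}
```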
```diff
@@ -418,9 +507,8 @@ impl BackupWriter {
         &self,
         archive_name: &str,
         manifest: &BackupManifest,
-        known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+        known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
     ) -> Result<FixedIndexReader, Error> {
-
         let mut tmpfile = std::fs::OpenOptions::new()
             .write(true)
             .read(true)
@@ -428,10 +516,13 @@ impl BackupWriter {
             .open("/tmp")?;

         let param = json!({ "archive-name": archive_name });
-        self.h2.download("previous", Some(param), &mut tmpfile).await?;
+        self.h2
+            .download("previous", Some(param), &mut tmpfile)
+            .await?;

-        let index = FixedIndexReader::new(tmpfile)
-            .map_err(|err| format_err!("unable to read fixed index '{}' - {}", archive_name, err))?;
+        let index = FixedIndexReader::new(tmpfile).map_err(|err| {
+            format_err!("unable to read fixed index '{}' - {}", archive_name, err)
+        })?;
         // Note: do not use values stored in index (not trusted) - instead, computed them again
         let (csum, size) = index.compute_csum();
         manifest.verify_file(archive_name, &csum, size)?;
@@ -443,7 +534,11 @@ impl BackupWriter {
         }

         if self.verbose {
-            println!("{}: known chunks list length is {}", archive_name, index.index_count());
+            println!(
+                "{}: known chunks list length is {}",
+                archive_name,
+                index.index_count()
+            );
         }

         Ok(index)
@@ -453,9 +548,8 @@ impl BackupWriter {
         &self,
         archive_name: &str,
         manifest: &BackupManifest,
-        known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+        known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
     ) -> Result<DynamicIndexReader, Error> {
-
         let mut tmpfile = std::fs::OpenOptions::new()
             .write(true)
             .read(true)
@@ -463,10 +557,13 @@ impl BackupWriter {
             .open("/tmp")?;

         let param = json!({ "archive-name": archive_name });
-        self.h2.download("previous", Some(param), &mut tmpfile).await?;
+        self.h2
+            .download("previous", Some(param), &mut tmpfile)
+            .await?;

-        let index = DynamicIndexReader::new(tmpfile)
-            .map_err(|err| format_err!("unable to read dynmamic index '{}' - {}", archive_name, err))?;
+        let index = DynamicIndexReader::new(tmpfile).map_err(|err| {
+            format_err!("unable to read dynmamic index '{}' - {}", archive_name, err)
+        })?;
         // Note: do not use values stored in index (not trusted) - instead, computed them again
         let (csum, size) = index.compute_csum();
         manifest.verify_file(archive_name, &csum, size)?;
@@ -478,7 +575,11 @@ impl BackupWriter {
         }

         if self.verbose {
-            println!("{}: known chunks list length is {}", archive_name, index.index_count());
+            println!(
+                "{}: known chunks list length is {}",
+                archive_name,
+                index.index_count()
+            );
         }

         Ok(index)
```
```diff
@@ -487,23 +588,29 @@ impl BackupWriter {
     /// Retrieve backup time of last backup
     pub async fn previous_backup_time(&self) -> Result<Option<i64>, Error> {
         let data = self.h2.get("previous_backup_time", None).await?;
-        serde_json::from_value(data)
-            .map_err(|err| format_err!("Failed to parse backup time value returned by server - {}", err))
+        serde_json::from_value(data).map_err(|err| {
+            format_err!(
+                "Failed to parse backup time value returned by server - {}",
+                err
+            )
+        })
     }

     /// Download backup manifest (index.json) of last backup
     pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {

         let mut raw_data = Vec::with_capacity(64 * 1024);

         let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
-        self.h2.download("previous", Some(param), &mut raw_data).await?;
+        self.h2
+            .download("previous", Some(param), &mut raw_data)
+            .await?;

         let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
         // no expected digest available
         let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref), None)?;

-        let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
+        let manifest =
+            BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;

         Ok(manifest)
     }
```
```diff
@@ -517,12 +624,11 @@ impl BackupWriter {
         wid: u64,
         stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
         prefix: &str,
-        known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+        known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
         crypt_config: Option<Arc<CryptConfig>>,
         compress: bool,
         verbose: bool,
-    ) -> impl Future<Output = Result<(usize, usize, usize, usize, std::time::Duration, [u8; 32]), Error>> {
-
+    ) -> impl Future<Output = Result<UploadStats, Error>> {
         let total_chunks = Arc::new(AtomicUsize::new(0));
         let total_chunks2 = total_chunks.clone();
         let known_chunk_count = Arc::new(AtomicUsize::new(0));
@@ -530,6 +636,8 @@ impl BackupWriter {

         let stream_len = Arc::new(AtomicUsize::new(0));
         let stream_len2 = stream_len.clone();
+        let compressed_stream_len = Arc::new(AtomicU64::new(0));
+        let compressed_stream_len2 = compressed_stream_len.clone();
         let reused_len = Arc::new(AtomicUsize::new(0));
         let reused_len2 = reused_len.clone();

```
```diff
@@ -547,14 +655,12 @@ impl BackupWriter {

         stream
             .and_then(move |data| {

                 let chunk_len = data.len();

                 total_chunks.fetch_add(1, Ordering::SeqCst);
                 let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;

-                let mut chunk_builder = DataChunkBuilder::new(data.as_ref())
-                    .compress(compress);
-
+                let mut chunk_builder = DataChunkBuilder::new(data.as_ref()).compress(compress);
                 if let Some(ref crypt_config) = crypt_config {
                     chunk_builder = chunk_builder.crypt_config(crypt_config);
@@ -568,7 +674,9 @@ impl BackupWriter {

                 let chunk_end = offset + chunk_len as u64;

-                if !is_fixed_chunk_size { csum.update(&chunk_end.to_le_bytes()); }
+                if !is_fixed_chunk_size {
+                    csum.update(&chunk_end.to_le_bytes());
+                }
                 csum.update(digest);

                 let chunk_is_known = known_chunks.contains(digest);
```
```diff
@@ -577,16 +685,17 @@ impl BackupWriter {
                     reused_len.fetch_add(chunk_len, Ordering::SeqCst);
                     future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
                 } else {
+                    let compressed_stream_len2 = compressed_stream_len.clone();
                     known_chunks.insert(*digest);
-                    future::ready(chunk_builder
-                        .build()
-                        .map(move |(chunk, digest)| MergedChunkInfo::New(ChunkInfo {
+                    future::ready(chunk_builder.build().map(move |(chunk, digest)| {
+                        compressed_stream_len2.fetch_add(chunk.raw_size(), Ordering::SeqCst);
+                        MergedChunkInfo::New(ChunkInfo {
                             chunk,
                             digest,
                             chunk_len: chunk_len as u64,
                             offset,
-                        }))
-                    )
+                        })
+                    }))
                 }
             })
             .merge_known_chunks()
```
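The new `compressed_stream_len` accounting follows the same pattern as the other counters in this function: an `Arc`-wrapped atomic is cloned into the per-chunk closure, so the stream pipeline can tally compressed bytes without a lock. A self-contained sketch of just that pattern:

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

fn main() {
    // shared accumulator; one clone is moved into the per-chunk closure
    let compressed_stream_len = Arc::new(AtomicU64::new(0));
    let counter = Arc::clone(&compressed_stream_len);
    let record_chunk = move |raw_size: u64| {
        counter.fetch_add(raw_size, Ordering::SeqCst);
    };

    record_chunk(4096);
    record_chunk(2048);
    assert_eq!(compressed_stream_len.load(Ordering::SeqCst), 6144);
}
```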
```diff
@@ -614,20 +723,28 @@ impl BackupWriter {
                     });

                     let ct = "application/octet-stream";
-                    let request = H2Client::request_builder("localhost", "POST", &upload_chunk_path, Some(param), Some(ct)).unwrap();
+                    let request = H2Client::request_builder(
+                        "localhost",
+                        "POST",
+                        &upload_chunk_path,
+                        Some(param),
+                        Some(ct),
+                    )
+                    .unwrap();
                     let upload_data = Some(bytes::Bytes::from(chunk_data));

                     let new_info = MergedChunkInfo::Known(vec![(offset, digest)]);

-                    future::Either::Left(h2
-                        .send_request(request, upload_data)
-                        .and_then(move |response| async move {
+                    future::Either::Left(h2.send_request(request, upload_data).and_then(
+                        move |response| async move {
                             upload_queue
                                 .send((new_info, Some(response)))
                                 .await
-                                .map_err(|err| format_err!("failed to send to upload queue: {}", err))
-                        })
-                    )
+                                .map_err(|err| {
+                                    format_err!("failed to send to upload queue: {}", err)
+                                })
+                        },
+                    ))
                 } else {
                     future::Either::Right(async move {
                         upload_queue
```
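`future::Either` appears here because the two branches (actually sending the chunk vs. only queueing the known-chunk info) produce different concrete future types, while the surrounding combinator chain needs a single type. A minimal sketch of the idea, with hypothetical names:

```rust
use futures::future::{self, Either};

// stand-in for the real request/response round trip
async fn expensive_upload() -> u64 {
    1
}

// Either implements Future when both variants are futures with the same
// Output, so one `if` can select between two different future types.
async fn upload_or_skip(upload_needed: bool) -> u64 {
    let fut = if upload_needed {
        Either::Left(expensive_upload())
    } else {
        Either::Right(future::ready(0u64))
    };
    fut.await
}
```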
```diff
@@ -637,31 +754,37 @@ impl BackupWriter {
                 })
             }
         })
-        .then(move |result| async move {
-            upload_result.await?.and(result)
-        }.boxed())
+        .then(move |result| async move { upload_result.await?.and(result) }.boxed())
         .and_then(move |_| {
             let duration = start_time.elapsed();
-            let total_chunks = total_chunks2.load(Ordering::SeqCst);
-            let known_chunk_count = known_chunk_count2.load(Ordering::SeqCst);
-            let stream_len = stream_len2.load(Ordering::SeqCst);
-            let reused_len = reused_len2.load(Ordering::SeqCst);
+            let chunk_count = total_chunks2.load(Ordering::SeqCst);
+            let chunk_reused = known_chunk_count2.load(Ordering::SeqCst);
+            let size = stream_len2.load(Ordering::SeqCst);
+            let size_reused = reused_len2.load(Ordering::SeqCst);
+            let size_compressed = compressed_stream_len2.load(Ordering::SeqCst) as usize;

             let mut guard = index_csum_2.lock().unwrap();
             let csum = guard.take().unwrap().finish();

-            futures::future::ok((total_chunks, known_chunk_count, stream_len, reused_len, duration, csum))
+            futures::future::ok(UploadStats {
+                chunk_count,
+                chunk_reused,
+                size,
+                size_reused,
+                size_compressed,
+                duration,
+                csum,
+            })
         })
     }

     /// Upload speed test - prints result to stderr
     pub async fn upload_speedtest(&self, verbose: bool) -> Result<f64, Error> {
-
         let mut data = vec![];
         // generate pseudo random byte sequence
-        for i in 0..1024*1024 {
+        for i in 0..1024 * 1024 {
             for j in 0..4 {
-                let byte = ((i >> (j<<3))&0xff) as u8;
+                let byte = ((i >> (j << 3)) & 0xff) as u8;
                 data.push(byte);
             }
         }
```
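The nested loop above serializes each counter value `i` as its four little-endian bytes (`j << 3` yields shifts of 0, 8, 16 and 24 bits), producing a 4 MiB deterministic test payload. An equivalent formulation, shown only to make the byte order explicit:

```rust
fn pseudo_random_data() -> Vec<u8> {
    let mut data = Vec::with_capacity(4 * 1024 * 1024);
    for i in 0u32..(1024 * 1024) {
        // same bytes as the (i >> (j << 3)) & 0xff loop: little-endian order
        data.extend_from_slice(&i.to_le_bytes());
    }
    data
}
```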
```diff
@@ -680,9 +803,15 @@ impl BackupWriter {
                 break;
             }

-            if verbose { eprintln!("send test data ({} bytes)", data.len()); }
-            let request = H2Client::request_builder("localhost", "POST", "speedtest", None, None).unwrap();
-            let request_future = self.h2.send_request(request, Some(bytes::Bytes::from(data.clone()))).await?;
+            if verbose {
+                eprintln!("send test data ({} bytes)", data.len());
+            }
+            let request =
+                H2Client::request_builder("localhost", "POST", "speedtest", None, None).unwrap();
+            let request_future = self
+                .h2
+                .send_request(request, Some(bytes::Bytes::from(data.clone())))
+                .await?;

             upload_queue.send(request_future).await?;
         }
@@ -691,9 +820,16 @@ impl BackupWriter {

         let _ = upload_result.await?;

-        eprintln!("Uploaded {} chunks in {} seconds.", repeat, start_time.elapsed().as_secs());
-        let speed = ((item_len*(repeat as usize)) as f64)/start_time.elapsed().as_secs_f64();
-        eprintln!("Time per request: {} microseconds.", (start_time.elapsed().as_micros())/(repeat as u128));
+        eprintln!(
+            "Uploaded {} chunks in {} seconds.",
+            repeat,
+            start_time.elapsed().as_secs()
+        );
+        let speed = ((item_len * (repeat as usize)) as f64) / start_time.elapsed().as_secs_f64();
+        eprintln!(
+            "Time per request: {} microseconds.",
+            (start_time.elapsed().as_micros()) / (repeat as u128)
+        );

         Ok(speed)
     }
```
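The speed figure is plain bytes-per-second arithmetic: total bytes sent (`item_len * repeat`) divided by elapsed wall time. A worked example under assumed numbers (the 4 MiB payload size matches the test data generated above; the repeat count and timing are made up for illustration):

```rust
fn main() {
    let item_len: usize = 4 * 1024 * 1024; // one test payload, as generated above
    let repeat: u128 = 10; // payloads sent (assumed)
    let elapsed_secs = 2.0_f64; // assumed wall time
    let speed = ((item_len * (repeat as usize)) as f64) / elapsed_secs;
    assert_eq!(speed, 20.0 * 1024.0 * 1024.0); // 20 MiB/s, in bytes per second
}
```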
The remaining hunks cover the PxarBackupStream side of the change:

```diff
@@ -13,6 +13,10 @@ use nix::fcntl::OFlag;
 use nix::sys::stat::Mode;

 use crate::backup::CatalogWriter;
+use crate::tools::{
+    StdChannelWriter,
+    TokioWriterAdapter,
+};

 /// Stream implementation to encode and upload .pxar archives.
 ///
@@ -45,10 +49,10 @@ impl PxarBackupStream {
         let error = Arc::new(Mutex::new(None));
         let error2 = Arc::clone(&error);
         let handler = async move {
-            let writer = std::io::BufWriter::with_capacity(
+            let writer = TokioWriterAdapter::new(std::io::BufWriter::with_capacity(
                 buffer_size,
-                crate::tools::StdChannelWriter::new(tx),
-            );
+                StdChannelWriter::new(tx),
+            ));

             let verbose = options.verbose;
```
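`TokioWriterAdapter` is imported from `crate::tools` but not defined in this diff. From the usage (wrapping a blocking `BufWriter` that is driven from an async task), a hypothetical sketch of such an adapter could look like the following; treat the whole block as an assumption rather than the project's actual code:

```rust
use std::io::Write;

// Hypothetical adapter: forwards blocking Write calls through
// tokio::task::block_in_place so they don't stall async worker threads.
// (block_in_place requires a multi-threaded tokio runtime.)
struct TokioWriterAdapter<W: Write>(W);

impl<W: Write> TokioWriterAdapter<W> {
    fn new(writer: W) -> Self {
        Self(writer)
    }
}

impl<W: Write> Write for TokioWriterAdapter<W> {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        tokio::task::block_in_place(|| self.0.write(buf))
    }

    fn flush(&mut self) -> std::io::Result<()> {
        tokio::task::block_in_place(|| self.0.flush())
    }
}
```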