Compare commits

60 commits (SHA1):

d16ed66c88
3ec6e249b3
dfa517ad6c
8b2ad84a25
3dacedce71
512d50a455
b53f637914
152a926149
7f388acea8
b2bfb46835
24406ebc0c
1f24d9114c
859fe9c1fb
2107a5aebc
3638341aa4
067fe514e6
8c6e5ce23c
0351f23ba4
c1ff544eff
69e5d71961
48e22a8900
a7a5f56daa
05389a0109
b65390ebc9
3bad3e6e52
24be37e3f6
1008a69a13
521a0acb2e
3b66040de6
af3a0ae7b1
4e36f78438
f28d9088ed
56b814e378
0c136efe30
cdead6cd12
c950826e46
f91d58e157
1ff840ffad
7443a6e092
3a9988638b
96ee857752
887018bb79
9696f5193b
e13c4f66bb
8a25809573
d87b193b0b
ea5289e869
1f6a4f587a
705b2293ec
d2c7ef09ba
27f86f997e
fc93d38076
a5a85d41ff
08cb2038bd
6f711c1737
42ec9f577f
9de69cdb1a
bd260569d3
36cb4b30ef
4e717240bf
Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.5.0"
+version = "0.8.2"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
@@ -38,7 +38,7 @@ pam-sys = "0.5"
 percent-encoding = "2.1"
 pin-utils = "0.1.0"
 pathpatterns = "0.1.1"
-proxmox = { version = "0.1.41", features = [ "sortable-macro", "api-macro" ] }
+proxmox = { version = "0.1.42", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro" ] }
 proxmox-fuse = "0.1.0"
Makefile (12 changes)

@@ -37,11 +37,15 @@ CARGO ?= cargo
 COMPILED_BINS := \
     $(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN))
 
+export DEB_VERSION DEB_VERSION_UPSTREAM
+
 SERVER_DEB=${PACKAGE}-server_${DEB_VERSION}_${ARCH}.deb
+SERVER_DBG_DEB=${PACKAGE}-server-dbgsym_${DEB_VERSION}_${ARCH}.deb
 CLIENT_DEB=${PACKAGE}-client_${DEB_VERSION}_${ARCH}.deb
+CLIENT_DBG_DEB=${PACKAGE}-client-dbgsym_${DEB_VERSION}_${ARCH}.deb
 DOC_DEB=${PACKAGE}-docs_${DEB_VERSION}_all.deb
 
-DEBS=${SERVER_DEB} ${CLIENT_DEB}
+DEBS=${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB}
 
 DSC = rust-${PACKAGE}_${DEB_VERSION}.dsc
 
@@ -56,7 +60,7 @@ $(SUBDIRS):
 test:
     #cargo test test_broadcast_future
     #cargo test $(CARGO_BUILD_ARGS)
-    $(CARGO) test $(tests) $(CARGO_BUILD_ARGS)
+    #$(CARGO) test $(tests) $(CARGO_BUILD_ARGS)
 
 doc:
     $(CARGO) doc --no-deps $(CARGO_BUILD_ARGS)
@@ -140,5 +144,5 @@ install: $(COMPILED_BINS)
 upload: ${SERVER_DEB} ${CLIENT_DEB} ${DOC_DEB}
     # check if working directory is clean
     git diff --exit-code --stat && git diff --exit-code --stat --staged
-    tar cf - ${SERVER_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
-    tar cf - ${CLIENT_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve" --dist buster
+    tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
+    tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve" --dist buster
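The two `*_DBG_DEB` artifacts are the debug-symbol (`dbgsym`) packages that debhelper produces; the upload target now ships them alongside the regular debs. As a hedged illustration of where the newly exported version variables typically come from in Debian packaging (the actual definitions live elsewhere in the repository's build files, so this is a sketch only):

    # illustrative only: derive the exported fields from debian/changelog
    $ dpkg-parsechangelog -l debian/changelog -S Version          # -> DEB_VERSION
    0.8.2-1
    $ dpkg-parsechangelog -l debian/changelog -S Version | cut -d- -f1   # -> DEB_VERSION_UPSTREAM
    0.8.2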
64
debian/changelog
vendored
64
debian/changelog
vendored
@ -1,3 +1,67 @@
|
|||||||
|
rust-proxmox-backup (0.8.2-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* buildsys: also upload debug packages
|
||||||
|
|
||||||
|
* src/backup/manifest.rs: rename into_string -> to_string
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 09 Jul 2020 11:58:51 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (0.8.1-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* remove authhenticated data blobs (not needed)
|
||||||
|
|
||||||
|
* add signature to manifest
|
||||||
|
|
||||||
|
* improve docs
|
||||||
|
|
||||||
|
* client: introduce --keyfd parameter
|
||||||
|
|
||||||
|
* ui improvements
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 09 Jul 2020 10:01:25 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (0.8.0-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* implement get_runtime_with_builder
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 07 Jul 2020 10:15:26 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (0.7.0-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* implement clone for RemoteChunkReader
|
||||||
|
|
||||||
|
* improve docs
|
||||||
|
|
||||||
|
* client: add --encryption boolen parameter
|
||||||
|
|
||||||
|
* client: use default encryption key if it is available
|
||||||
|
|
||||||
|
* d/rules: do not compress .pdf files
|
||||||
|
|
||||||
|
* ui: various fixes
|
||||||
|
|
||||||
|
* add beta text with link to bugtracker
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 07 Jul 2020 07:40:05 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (0.6.0-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* make ReadChunk not require mutable self.
|
||||||
|
|
||||||
|
* ui: increase timeout for snapshot listing
|
||||||
|
|
||||||
|
* ui: consistently spell Datastore without space between words
|
||||||
|
|
||||||
|
* ui: disk create: sync and improve 'add-datastore' checkbox label
|
||||||
|
|
||||||
|
* proxmox-backup-client: add benchmark command
|
||||||
|
|
||||||
|
* pxar: fixup 'vanished-file' logic a bit
|
||||||
|
|
||||||
|
* ui: add verify button
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Fri, 03 Jul 2020 09:45:52 +0200
|
||||||
|
|
||||||
rust-proxmox-backup (0.5.0-1) unstable; urgency=medium
|
rust-proxmox-backup (0.5.0-1) unstable; urgency=medium
|
||||||
|
|
||||||
* partially revert commit 1f82f9b7b5d231da22a541432d5617cb303c0000
|
* partially revert commit 1f82f9b7b5d231da22a541432d5617cb303c0000
|
||||||
|
debian/control.in (vendored, 3 changes)

@@ -3,11 +3,14 @@ Architecture: any
 Depends: fonts-font-awesome,
          libjs-extjs (>= 6.0.1),
          libzstd1 (>= 1.3.8),
+         lvm2,
          proxmox-backup-docs,
          proxmox-mini-journalreader,
          proxmox-widget-toolkit (>= 2.2-4),
+         smartmontools,
          ${misc:Depends},
          ${shlibs:Depends},
+Recommends: zfsutils-linux,
 Description: Proxmox Backup Server daemon with tools and GUI
  This package contains the Proxmox Backup Server daemons and related
  tools. This includes a web-based graphical user interface.
debian/lintian-overrides (vendored, new file, 1 line)

@@ -0,0 +1 @@
+proxmox-backup-server: package-installs-apt-sources etc/apt/sources.list.d/pbstest-beta.list
debian/proxmox-backup-docs.link (vendored, new file, 1 line)

@@ -0,0 +1 @@
+/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/docs/proxmox-backup.pdf
debian/proxmox-backup-server.install (vendored, 1 change)

@@ -1,6 +1,7 @@
 etc/proxmox-backup-proxy.service /lib/systemd/system/
 etc/proxmox-backup.service /lib/systemd/system/
 etc/proxmox-backup-banner.service /lib/systemd/system/
+etc/pbstest-beta.list /etc/apt/sources.list.d/
 usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-api
 usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
 usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
debian/rules (vendored, 3 changes)

@@ -45,3 +45,6 @@ override_dh_installsystemd:
 # TODO: remove once available (Debian 11 ?)
 override_dh_dwz:
     dh_dwz --no-dwz-multifile
+
+override_dh_compress:
+    dh_compress -X.pdf
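The new `override_dh_compress` keeps the PDF manual uncompressed, so the `debian/proxmox-backup-docs.link` symlink added above stays valid. A hedged way to verify this after a build (the package file name follows the DOC_DEB pattern from the Makefile and is an assumed example):

    # illustrative check: the manual should appear as .pdf, not .pdf.gz
    $ dpkg -c proxmox-backup-docs_0.8.2-1_all.deb | grep '\.pdf'
    ./usr/share/doc/proxmox-backup/proxmox-backup.pdf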
docs/Makefile

@@ -1,11 +1,5 @@
 include ../defines.mk
 
-ifeq ($(BUILD_MODE), release)
-COMPILEDIR := ../target/release
-else
-COMPILEDIR := ../target/debug
-endif
-
 GENERATED_SYNOPSIS := \
     proxmox-backup-client/synopsis.rst \
     proxmox-backup-client/catalog-shell-synopsis.rst \
@@ -26,6 +20,15 @@ SPHINXOPTS =
 SPHINXBUILD = sphinx-build
 BUILDDIR = output
 
+ifeq ($(BUILD_MODE), release)
+COMPILEDIR := ../target/release
+SPHINXOPTS += -t release
+else
+COMPILEDIR := ../target/debug
+SPHINXOPTS += -t devbuild
+endif
+
+
 # Sphinx internal variables.
 ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(SPHINXOPTS) .
 
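The `-t release` / `-t devbuild` flags define Sphinx tags, which `.. only::` directives (used below in docs/index.rst) test to include or exclude content. A minimal sketch of the resulting invocations, assuming the variables shown above:

    # BUILD_MODE=release: content gated behind the devbuild tag is excluded
    $ sphinx-build -t release -d output/doctrees . output/html
    # debug build: enables '.. only:: html and devbuild' sections such as todos.rst
    $ sphinx-build -t devbuild -d output/doctrees . output/html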
docs/administration-guide.rst

@@ -1,9 +1,8 @@
-Administration Guide
-====================
+Backup Management
+=================
 
-The administration guide.
-
-.. todo:: either add a bit more explanation or remove the previous sentence
+.. The administration guide.
+.. todo:: either add a bit more explanation or remove the previous sentence
 
 Terminology
 -----------
@@ -13,16 +12,16 @@ Backup Content
 
 When doing deduplication, there are different strategies to get
 optimal results in terms of performance and/or deduplication rates.
-Depending on the type of data, one can split data into *fixed* or *variable*
+Depending on the type of data, it can be split into *fixed* or *variable*
 sized chunks.
 
-Fixed sized chunking needs almost no CPU performance, and is used to
+Fixed sized chunking requires minimal CPU power, and is used to
 backup virtual machine images.
 
 Variable sized chunking needs more CPU power, but is essential to get
 good deduplication rates for file archives.
 
-The backup server supports both strategies.
+The Proxmox Backup Server supports both strategies.
 
 
 File Archives: ``<name>.pxar``
@@ -31,7 +30,7 @@ File Archives: ``<name>.pxar``
 .. see https://moinakg.wordpress.com/2013/06/22/high-performance-content-defined-chunking/
 
 A file archive stores a full directory tree. Content is stored using
-the :ref:`pxar-format`, split into variable sized chunks. The format
+the :ref:`pxar-format`, split into variable-sized chunks. The format
 is optimized to achieve good deduplication rates.
 
 
@@ -39,7 +38,7 @@ Image Archives: ``<name>.img``
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 This is used for virtual machine images and other large binary
-data. Content is split into fixed sized chunks.
+data. Content is split into fixed-sized chunks.
 
 
 Binary Data (BLOBs)
@@ -56,7 +55,7 @@ Catalog File: ``catalog.pcat1``
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 The catalog file is an index for file archives. It contains
-the list of files and is used to speed-up search operations.
+the list of files and is used to speed up search operations.
 
 
 The Manifest: ``index.json``
@@ -74,12 +73,12 @@ The backup server groups backups by *type*, where *type* is one of:
 
 ``vm``
     This type is used for :term:`virtual machine`\ s. Typically
-    contains the virtual machine's configuration and an image archive
+    consists of the virtual machine's configuration file and an image archive
     for each disk.
 
 ``ct``
-    This type is used for :term:`container`\ s. Contains the container's
-    configuration and a single file archive for the container content.
+    This type is used for :term:`container`\ s. Consists of the container's
+    configuration and a single file archive for the filesystem content.
 
 ``host``
     This type is used for backups created from within the backed up machine.
@@ -90,7 +89,7 @@ The backup server groups backups by *type*, where *type* is one of:
 Backup ID
 ~~~~~~~~~
 
-An unique ID. Usually the virtual machine or container ID. ``host``
+A unique ID. Usually the virtual machine or container ID. ``host``
 type backups normally use the hostname.
 
 
@@ -122,6 +121,13 @@ uniquely identifies a specific backup within a datastore.
 As you can see, the time format is RFC3399_ with Coordinated
 Universal Time (UTC_, identified by the trailing *Z*).
 
+Backup Server Management
+------------------------
+
+The command line tool to configure and manage the backup server is called
+:command:`proxmox-backup-manager`.
+
+
 :term:`DataStore`
 ~~~~~~~~~~~~~~~~~
@@ -134,20 +140,13 @@ Datastores are identified by a simple *ID*. You can configure it
 when setting up the backup server.
 
 
-Backup Server Management
-------------------------
-
-The command line tool to configure and manage the backup server is called
-:command:`proxmox-backup-manager`.
-
-
 Datastore Configuration
 ~~~~~~~~~~~~~~~~~~~~~~~
 
-A :term:`datastore` is a place to store backups. You can configure
-multiple datastores. At least one datastore needs to be
-configured. The datastore is identified by a simple `name` and points
-to a directory.
+You can configure multiple datastores. Minimum one datastore needs to be
+configured. The datastore is identified by a simple `name` and points to a
+directory on the filesystem.
 
 The following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
 
@@ -179,17 +178,58 @@ Finally, it is possible to remove the datastore configuration:
 File Layout
 ^^^^^^^^^^^
 
-.. todo:: Add datastore file layout example
+After creating a datastore, the following default layout will appear:
+
+.. code-block:: console
+
+  # ls -arilh /backup/disk1/store1
+  276493 -rw-r--r-- 1 backup backup 0 Jul 8 12:35 .lock
+  276490 drwxr-x--- 1 backup backup 1064960 Jul 8 12:35 .chunks
+
+`.lock` is an empty file used for process locking.
+
+The `.chunks` directory contains folders, starting from `0000` and taking hexadecimal values until `ffff`. These
+directories will store the chunked data after a backup operation has been executed.
+
+.. code-block:: console
+
+  # ls -arilh /backup/disk1/store1/.chunks
+  545824 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 ffff
+  545823 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffe
+  415621 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffd
+  415620 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffc
+  353187 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffb
+  344995 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffa
+  144079 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fff9
+  144078 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fff8
+  144077 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fff7
+  ...
+  403180 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 000c
+  403179 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 000b
+  403177 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 000a
+  402530 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0009
+  402513 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0008
+  402509 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0007
+  276509 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0006
+  276508 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0005
+  276507 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0004
+  276501 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0003
+  276499 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0002
+  276498 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0001
+  276494 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0000
+  276489 drwxr-xr-x 3 backup backup 4.0K Jul 8 12:35 ..
+  276490 drwxr-x--- 1 backup backup 1.1M Jul 8 12:35 .
+
+
 
 User Management
 ~~~~~~~~~~~~~~~
 
-Proxmox Backup support several authentication realms, and you need to
+Proxmox Backup Server supports several authentication realms, and you need to
 choose the realm when you add a new user. Possible realms are:
 
 :pam: Linux PAM standard authentication. Use this if you want to
-    authenticate as Linux system user (Users needs to exist on the
+    authenticate as Linux system user (Users need to exist on the
     system).
 
 :pbs: Proxmox Backup Server realm. This type stores hashed passwords in
@@ -216,8 +256,8 @@ normally want to add other users with less privileges:
 
    # proxmox-backup-manager user create john@pbs --email john@example.com
 
-The create command lets you specify many option like ``--email`` or
-``--password``, but you can update or change any of them using the
+The create command lets you specify many options like ``--email`` or
+``--password``. You can update or change any of them using the
 update command later:
 
 .. code-block:: console
@@ -225,11 +265,10 @@ update command later:
 
    # proxmox-backup-manager user update john@pbs --firstname John --lastname Smith
    # proxmox-backup-manager user update john@pbs --comment "An example user."
 
-
 .. todo:: Mention how to set password without passing plaintext password as cli argument.
 
 
-The resulting use list looks like this:
+The resulting user list looks like this:
 
 .. code-block:: console
@@ -242,16 +281,16 @@ The resulting use list looks like this:
 │ root@pam │ 1 │ │ │ │ │ Superuser │
 └──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘
 
-Newly created users do not have an permissions. Please read the next
+Newly created users do not have any permissions. Please read the next
 section to learn how to set access permissions.
 
-If you want to disable an user account, you can do that by setting ``--enable`` to ``0``
+If you want to disable a user account, you can do that by setting ``--enable`` to ``0``
 
 .. code-block:: console
 
    # proxmox-backup-manager user update john@pbs --enable 0
 
-Or completely remove the users with:
+Or completely remove the user with:
 
 .. code-block:: console
@@ -261,20 +300,20 @@ Or completely remove the users with:
 Access Control
 ~~~~~~~~~~~~~~
 
-Users do not have any permission by default. Instead you need to
-specify what is allowed and what not. You can do this by assigning
+By default new users do not have any permission. Instead you need to
+specify what is allowed and what is not. You can do this by assigning
 roles to users on specific objects like datastores or remotes. The
 following roles exist:
 
+**NoAccess**
+  Disable Access - nothing is allowed.
+
 **Admin**
   The Administrator can do anything.
 
 **Audit**
   An Auditor can view things, but is not allowed to change settings.
 
-**NoAccess**
-  Disable Access - nothing is allowed.
-
 **DatastoreAdmin**
   Can do anything on datastores.
 
@@ -301,7 +340,6 @@ following roles exist:
   Is allowed to read data from a remote.
 
 
-
 Backup Client usage
 -------------------
 
@@ -316,8 +354,8 @@ on the backup server.
 
     [[username@]server:]datastore
 
-The default value for ``username`` ist ``root``. If no server is specified, the
-default is the local host (``localhost``).
+The default value for ``username`` ist ``root``. If no server is specified,
+the default is the local host (``localhost``).
 
 You can pass the repository with the ``--repository`` command
 line option, or by setting the ``PBS_REPOSITORY`` environment
@@ -381,7 +419,7 @@ This section explains how to create a backup from within the machine. This can
 be a physical host, a virtual machine, or a container. Such backups may contain file
 and image archives. There are no restrictions in this case.
 
-.. note:: If you want to backup virtual machines or containers on Proxmov VE, see :ref:`pve-integration`.
+.. note:: If you want to backup virtual machines or containers on Proxmox VE, see :ref:`pve-integration`.
 
 For the following example you need to have a backup server set up, working
 credentials and need to know the repository name.
@@ -896,7 +934,3 @@ After that you should be able to see storage status with:
 .. include:: command-line-tools.rst
 
 .. include:: services.rst
-
-.. include host system admin at the end
-
-.. include:: sysadmin.rst
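As a usage illustration of the repository format documented above (server name and datastore are hypothetical examples):

    # [[username@]server:]datastore -- username defaults to root, server to localhost
    $ export PBS_REPOSITORY=john@pbs.example.com:store1
    $ proxmox-backup-client login
    $ proxmox-backup-client list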
docs/conf.py (13 changes)

@@ -17,7 +17,7 @@
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 #
-# import os
+import os
 # import sys
 # sys.path.insert(0, os.path.abspath('.'))
 
@@ -45,8 +45,11 @@ PygmentsBridge.latex_formatter = CustomLatexFormatter
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
 
 extensions = ["sphinx.ext.graphviz", "sphinx.ext.todo"]
 
+todo_link_only = True
+
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
@@ -76,9 +79,11 @@ author = 'Proxmox Support Team'
 # built documents.
 #
 # The short X.Y version.
-version = '0.2'
+vstr = lambda s: '<devbuild>' if s is None else str(s)
+
+version = vstr(os.getenv('DEB_VERSION_UPSTREAM'))
 # The full version, including alpha/beta/rc tags.
-release = '0.2-1'
+release = vstr(os.getenv('DEB_VERSION'))
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -107,7 +112,7 @@ exclude_patterns = [
     'pxar/man1.rst',
     'epilog.rst',
     'pbs-copyright.rst',
-    'sysadmin.rst',
+    'local-zfs.rst'
     'package-repositories.rst',
 ]
 
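A quick, illustrative way to check what the new version logic resolves to, with and without the variables that the top-level Makefile now exports:

    $ python3 -c "import os; vstr = lambda s: '<devbuild>' if s is None else str(s); print(vstr(os.getenv('DEB_VERSION_UPSTREAM')))"
    <devbuild>
    $ DEB_VERSION_UPSTREAM=0.8.2 python3 -c "import os; vstr = lambda s: '<devbuild>' if s is None else str(s); print(vstr(os.getenv('DEB_VERSION_UPSTREAM')))"
    0.8.2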
docs/epilog.rst

@@ -11,8 +11,10 @@
 .. _Container: https://en.wikipedia.org/wiki/Container_(virtualization)
 .. _Zstandard: https://en.wikipedia.org/wiki/Zstandard
 .. _Proxmox: https://www.proxmox.com
+.. _Proxmox Community Forum: https://forum.proxmox.com
 .. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
 .. _Proxmox Backup: https://www.proxmox.com/proxmox-backup
+.. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
 .. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
 .. _Rust: https://www.rust-lang.org/
 .. _SHA-256: https://en.wikipedia.org/wiki/SHA-2
docs/index.rst

@@ -1,18 +1,15 @@
 .. Proxmox Backup documentation master file
 
-Welcome to Proxmox Backup's documentation!
-==========================================
+Welcome to the Proxmox Backup documentation!
+============================================
 
-Copyright (C) 2019 Proxmox Server Solutions GmbH
+Copyright (C) 2019-2020 Proxmox Server Solutions GmbH
 
-Permission is granted to copy, distribute and/or modify this document
-under the terms of the GNU Free Documentation License, Version 1.3 or
-any later version published by the Free Software Foundation; with no
-Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. A
-copy of the license is included in the section entitled "GNU Free
-Documentation License".
+Permission is granted to copy, distribute and/or modify this document under the
+terms of the GNU Free Documentation License, Version 1.3 or any later version
+published by the Free Software Foundation; with no Invariant Sections, no
+Front-Cover Texts, and no Back-Cover Texts. A copy of the license is included
+in the section entitled "GNU Free Documentation License".
 
-.. todolist::
-
 
 .. toctree::
@@ -22,6 +19,7 @@ Documentation License".
    introduction.rst
    installation.rst
    administration-guide.rst
+   sysadmin.rst
 
 .. raw:: latex
 
@@ -37,5 +35,14 @@ Documentation License".
    glossary.rst
    GFDL.rst
 
+.. only:: html and devbuild
+
+   .. toctree::
+      :maxdepth: 2
+      :caption: Developer Appendix
+
+      todos.rst
+
+
 * :ref:`genindex`
 
docs/introduction.rst

@@ -1,101 +1,102 @@
 Introduction
 ============
 
-This documentation is written in :term:`reStructuredText` and formatted with :term:`Sphinx`.
-
-
-What is Proxmox Backup
-----------------------
-
-Proxmox Backup is an enterprise class client-server backup software,
-specially optimized for the `Proxmox Virtual Environment`_ to backup
-:term:`virtual machine`\ s and :term:`container`\ s. It is also
-possible to backup physical hosts.
+What is Proxmox Backup Server
+-----------------------------
+
+Proxmox Backup Server is an enterprise-class client-server backup software that
+backups :term:`virtual machine`\ s, :term:`container`\ s, and physical hosts.
+It is specially optimized for the `Proxmox Virtual Environment`_ platform and
+allows you to backup your data securely, even between remote sites, providing
+easy management with a web-based user interface.
 
-It supports deduplication, compression and authenticated encryption
-(AE_). Using :term:`Rust` as implementation language guarantees high
+Proxmox Backup Server supports deduplication, compression, and authenticated
+encryption (AE_). Using :term:`Rust` as implementation language guarantees high
 performance, low resource usage, and a safe, high quality code base.
 
-Encryption is done at the client side. This makes backups to not fully
-trusted targets possible.
+It features strong encryption done on the client side. Thus, it's possible to
+backup data to not fully trusted targets.
 
 
 Architecture
 ------------
 
-Proxmox Backup uses a `Client-server model`_. The server is
-responsible to store the backup data and provides an API to create
-backups and restore data. It is possible to manage disks and
-other server side resources using this API.
+Proxmox Backup Server uses a `client-server model`_. The server stores the
+backup data and provides an API to create backups and restore data. With the
+API it's also possible to manage disks and other server side resources.
 
-A backup client uses this API to access the backed up data,
-i.e. ``proxmox-backup-client`` is a command line tool to create
-backups and restore data. We deliver an integrated client for
-QEMU_ with `Proxmox Virtual Environment`_.
+The backup client uses this API to access the backed up data. With the command
+line tool ``proxmox-backup-client`` you can create backups and restore data.
+For QEMU_ with `Proxmox Virtual Environment`_ we deliver an integrated client.
 
-A single backup is allowed to contain several archives. For example,
-when you backup a :term:`virtual machine`, each disk is stored as a
-separate archive inside that backup. The VM configuration also gets an
-extra file. This way, it is easy to access and restore important parts
-of the backup without having to scan the whole backup.
+A single backup is allowed to contain several archives. For example, when you
+backup a :term:`virtual machine`, each disk is stored as a separate archive
+inside that backup. The VM configuration itself is stored as an extra file.
+This way, it is easy to access and restore only important parts of the backup
+without the need to scan the whole backup.
 
 
 Main Features
 -------------
 
-:Proxmox VE: The `Proxmox Virtual Environment`_ is fully
-  supported. You can backup :term:`virtual machine`\ s and
+:Support for Proxmox VE: The `Proxmox Virtual Environment`_ is fully
+  supported and you can easily backup :term:`virtual machine`\ s and
   :term:`container`\ s.
 
-:GUI: We provide a graphical, web based user interface.
+:Performance: The whole software stack is written in :term:`Rust`,
+  to provide high speed and memory efficiency.
 
-:Deduplication: Incremental backups produce large amounts of duplicate
-  data. The deduplication layer removes that redundancy and makes
-  incremental backups small and space efficient.
+:Deduplication: Periodic backups produce large amounts of duplicate
+  data. The deduplication layer avoids redundancy and minimizes the used
+  storage space.
 
-:Data Integrity: The built in `SHA-256`_ checksum algorithm assures the
+:Incremental backups: Changes between backups are typically low. Reading and
+  sending only the delta reduces storage and network impact of backups.
+
+:Data Integrity: The built-in `SHA-256`_ checksum algorithm assures the
   accuracy and consistency of your backups.
 
 :Remote Sync: It is possible to efficiently synchronize data to remote
   sites. Only deltas containing new data are transferred.
 
-:Performance: The whole software stack is written in :term:`Rust`,
-  to provide high speed and memory efficiency.
-
-:Compression: Ultra fast Zstandard_ compression is able to compress
+:Compression: The ultra fast Zstandard_ compression is able to compress
   several gigabytes of data per second.
 
-:Encryption: Backups can be encrypted client-side using AES-256 in
-  GCM_ mode. This authenticated encryption mode (AE_) provides very
-  high performance on modern hardware.
+:Encryption: Backups can be encrypted on the client-side using AES-256 in
+  Galois/Counter Mode (GCM_) mode. This authenticated encryption (AE_) mode
+  provides very high performance on modern hardware.
 
-:Open Source: No secrets. You have access to all the source code.
+:Web interface: Manage the Proxmox Backup Server with the integrated web-based
+  user interface.
 
-:Support: Commercial support options are available from `Proxmox`_.
+:Open Source: No secrets. Proxmox Backup Server is free and open-source
+  software. The source code is licensed under AGPL, v3.
+
+:Support: Enterprise support will be available from `Proxmox`_ once the beta
+  phase is over.
 
 
-Why Backup?
------------
+Reasons for Data Backup?
+------------------------
 
-The primary purpose of a backup is to protect against data loss. Data
-loss can be caused by faulty hardware, but also by human error.
+The main purpose of a backup is to protect against data loss. Data loss can be
+caused by faulty hardware but also by human error.
 
-A common mistake is to delete a file or folder which is still
-required. Virtualization can amplify this problem. It is now
-easy to delete a whole virtual machine by pressing a single button.
+A common mistake is to accidentally delete a file or folder which is still
+required. Virtualization can even amplify this problem; it easily happens that
+a whole virtual machine is deleted by just pressing a single button.
 
-Backups can serve as a toolkit for administrators to temporarily
-store data. For example, it is common practice to perform full backups
-before installing major software updates. If something goes wrong, you
-can restore the previous state.
+For administrators, backups can serve as a useful toolkit for temporarily
+storing data. For example, it is common practice to perform full backups before
+installing major software updates. If something goes wrong, you can easily
+restore the previous state.
 
-Another reason for backups are legal requirements. Some data must be
-kept in a safe place for several years by law, so that it can be accessed if
-required.
+Another reason for backups are legal requirements. Some data, especially
+business records, must be kept in a safe place for several years by law, so
+that they can be accessed if required.
 
-Data loss can be very costly as it can severely restrict your
-business. Therefore, make sure that you perform a backup regularly
-and run restore tests.
+In general, data loss is very costly as it can severely damage your business.
+Therefore, ensure that you perform regular backups and run restore tests.
 
 
 Software Stack
@@ -104,17 +105,43 @@ Software Stack
 .. todo:: Eplain why we use Rust (and Flutter)
 
 
+Getting Help
+------------
+
+Community Support Forum
+~~~~~~~~~~~~~~~~~~~~~~~
+
+We always encourage our users to discuss and share their knowledge using the
+`Proxmox Community Forum`_. The forum is moderated by the Proxmox support team.
+The large user base is spread out all over the world. Needless to say that such
+a large forum is a great place to get information.
+
+Mailing Lists
+~~~~~~~~~~~~~
+
+Proxmox Backup Server is fully open-source and contributions are welcome! Here
+is the primary communication channel for developers:
+
+:Mailing list for developers: `PBS Development List`_
+
+Bug Tracker
+~~~~~~~~~~~
+
+Proxmox runs a public bug tracker at `<https://bugzilla.proxmox.com>`_. If an
+issue appears, file your report there. An issue can be a bug as well as a
+request for a new feature or enhancement. The bug tracker helps to keep track
+of the issue and will send a notification once it has been solved.
+
 License
 -------
 
-Copyright (C) 2019 Proxmox Server Solutions GmbH
+Copyright (C) 2019-2020 Proxmox Server Solutions GmbH
 
 This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>
 
-Proxmox Backup is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as
-published by the Free Software Foundation, either version 3 of the
-License, or (at your option) any later version.
+Proxmox Backup Server is free and open source software: you can use it,
+redistribute it, and/or modify it under the terms of the GNU Affero General
+Public License as published by the Free Software Foundation, either version 3
+of the License, or (at your option) any later version.
 
 This program is distributed in the hope that it will be useful, but
 ``WITHOUT ANY WARRANTY``; without even the implied warranty of
401
docs/local-zfs.rst
Normal file
401
docs/local-zfs.rst
Normal file
@ -0,0 +1,401 @@
|
|||||||
|
ZFS on Linux
|
||||||
|
------------
|
||||||
|
|
||||||
|
ZFS is a combined file system and logical volume manager designed by
|
||||||
|
Sun Microsystems. There is no need to manually compile ZFS modules - all
|
||||||
|
packages are included.
|
||||||
|
|
||||||
|
By using ZFS, it's possible to achieve maximum enterprise features with
|
||||||
|
low budget hardware, but also high performance systems by leveraging
|
||||||
|
SSD caching or even SSD only setups. ZFS can replace cost intense
|
||||||
|
hardware raid cards by moderate CPU and memory load combined with easy
|
||||||
|
management.
|
||||||
|
|
||||||
|
General ZFS advantages
|
||||||
|
|
||||||
|
* Easy configuration and management with GUI and CLI.
|
||||||
|
* Reliable
|
||||||
|
* Protection against data corruption
|
||||||
|
* Data compression on file system level
|
||||||
|
* Snapshots
|
||||||
|
* Copy-on-write clone
|
||||||
|
* Various raid levels: RAID0, RAID1, RAID10, RAIDZ-1, RAIDZ-2 and RAIDZ-3
|
||||||
|
* Can use SSD for cache
|
||||||
|
* Self healing
|
||||||
|
* Continuous integrity checking
|
||||||
|
* Designed for high storage capacities
|
||||||
|
* Protection against data corruption
|
||||||
|
* Asynchronous replication over network
|
||||||
|
* Open Source
|
||||||
|
* Encryption
|
||||||
|
|
||||||
|
Hardware
|
||||||
|
~~~~~~~~~
|
||||||
|
|
||||||
|
ZFS depends heavily on memory, so you need at least 8GB to start. In
|
||||||
|
practice, use as much you can get for your hardware/budget. To prevent
|
||||||
|
data corruption, we recommend the use of high quality ECC RAM.
|
||||||
|
|
||||||
|
If you use a dedicated cache and/or log disk, you should use an
|
||||||
|
enterprise class SSD (e.g. Intel SSD DC S3700 Series). This can
|
||||||
|
increase the overall performance significantly.
|
||||||
|
|
||||||
|
IMPORTANT: Do not use ZFS on top of hardware controller which has its
|
||||||
|
own cache management. ZFS needs to directly communicate with disks. An
|
||||||
|
HBA adapter is the way to go, or something like LSI controller flashed
|
||||||
|
in ``IT`` mode.
|
||||||
|
|
||||||
|
|
||||||
|
ZFS Administration
|
||||||
|
~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
This section gives you some usage examples for common tasks. ZFS
|
||||||
|
itself is really powerful and provides many options. The main commands
|
||||||
|
to manage ZFS are `zfs` and `zpool`. Both commands come with great
|
||||||
|
manual pages, which can be read with:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# man zpool
|
||||||
|
# man zfs
|
||||||
|
|
||||||
|
Create a new zpool
|
||||||
|
^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
To create a new pool, at least one disk is needed. The `ashift` should
|
||||||
|
have the same sector-size (2 power of `ashift`) or larger as the
|
||||||
|
underlying disk.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# zpool create -f -o ashift=12 <pool> <device>
|
||||||
|
|
||||||
|
Create a new pool with RAID-0
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Minimum 1 disk
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# zpool create -f -o ashift=12 <pool> <device1> <device2>
|
||||||
|
|
||||||
|
Create a new pool with RAID-1
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Minimum 2 disks
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# zpool create -f -o ashift=12 <pool> mirror <device1> <device2>
|
||||||
|
|
||||||
|
Create a new pool with RAID-10
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Minimum 4 disks
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# zpool create -f -o ashift=12 <pool> mirror <device1> <device2> mirror <device3> <device4>
|
||||||
|
|
||||||
|
Create a new pool with RAIDZ-1
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Minimum 3 disks
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# zpool create -f -o ashift=12 <pool> raidz1 <device1> <device2> <device3>
|
||||||
|
|
||||||
|
Create a new pool with RAIDZ-2
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Minimum 4 disks
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# zpool create -f -o ashift=12 <pool> raidz2 <device1> <device2> <device3> <device4>
|
||||||
|
|
||||||
|
Create a new pool with cache (L2ARC)
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
It is possible to use a dedicated cache drive partition to increase
|
||||||
|
the performance (use SSD).
|
||||||
|
|
||||||
|
As `<device>` it is possible to use more devices, like it's shown in
|
||||||
|
"Create a new pool with RAID*".
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# zpool create -f -o ashift=12 <pool> <device> cache <cache_device>
|
||||||
|
|
||||||
|
Create a new pool with log (ZIL)
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
It is possible to use a dedicated cache drive partition to increase
|
||||||
|
the performance (SSD).
|
||||||
|
|
||||||
|
As `<device>` it is possible to use more devices, like it's shown in
|
||||||
|
"Create a new pool with RAID*".
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# zpool create -f -o ashift=12 <pool> <device> log <log_device>
|
||||||
|
|
||||||
|
Add cache and log to an existing pool
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
If you have a pool without cache and log. First partition the SSD in
|
||||||
|
2 partition with `parted` or `gdisk`
|
||||||
|
|
||||||
|
.. important:: Always use GPT partition tables.
|
||||||
|
|
||||||
|
The maximum size of a log device should be about half the size of
|
||||||
|
physical memory, so this is usually quite small. The rest of the SSD
|
||||||
|
can be used as cache.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# zpool add -f <pool> log <device-part1> cache <device-part2>
|
||||||
|
|
||||||
|
|
||||||
|
Changing a failed device
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# zpool replace -f <pool> <old device> <new device>
|
||||||
|
|
||||||
|
|
||||||
|
Changing a failed bootable device
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Depending on how Proxmox Backup was installed it is either using `grub` or `systemd-boot`
|
||||||
|
as bootloader.
|
||||||
|
|
||||||
|
The first steps of copying the partition table, reissuing GUIDs and replacing
|
||||||
|
the ZFS partition are the same. To make the system bootable from the new disk,
|
||||||
|
different steps are needed which depend on the bootloader in use.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# sgdisk <healthy bootable device> -R <new device>
|
||||||
|
# sgdisk -G <new device>
|
||||||
|
# zpool replace -f <pool> <old zfs partition> <new zfs partition>
|
||||||
|
|
||||||
|
.. NOTE:: Use the `zpool status -v` command to monitor how far the resilvering process of the new disk has progressed.
|
||||||
|
|
||||||
|
With `systemd-boot`:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# pve-efiboot-tool format <new disk's ESP>
|
||||||
|
# pve-efiboot-tool init <new disk's ESP>
|
||||||
|
|
||||||
|
.. NOTE:: `ESP` stands for EFI System Partition, which is setup as partition #2 on
|
||||||
|
bootable disks setup by the {pve} installer since version 5.4. For details, see
|
||||||
|
xref:sysboot_systemd_boot_setup[Setting up a new partition for use as synced ESP].
|
||||||
|
|
||||||
|
With `grub`:
|
||||||
|
|
||||||
|
Usually `grub.cfg` is located in `/boot/grub/grub.cfg`
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# grub-install <new disk>
|
||||||
|
# grub-mkconfig -o /path/to/grub.cfg
|
||||||
|
|
||||||
|
|
||||||
|
Activate E-Mail Notification
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
ZFS comes with an event daemon, which monitors events generated by the
|
||||||
|
ZFS kernel module. The daemon can also send emails on ZFS events like
|
||||||
|
pool errors. Newer ZFS packages ship the daemon in a separate package,
|
||||||
|
and you can install it using `apt-get`:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# apt-get install zfs-zed
|
||||||
|
|
||||||
|
To activate the daemon it is necessary to edit `/etc/zfs/zed.d/zed.rc` with your
|
||||||
|
favourite editor, and uncomment the `ZED_EMAIL_ADDR` setting:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
ZED_EMAIL_ADDR="root"
|
||||||
|
|
||||||
|
Please note Proxmox Backup forwards mails to `root` to the email address
|
||||||
|
configured for the root user.
|
||||||
|
|
||||||
|
IMPORTANT: The only setting that is required is `ZED_EMAIL_ADDR`. All
|
||||||
|
other settings are optional.
|
||||||
|
|
||||||
|
Limit ZFS Memory Usage
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
It is good to use at most 50 percent (which is the default) of the
|
||||||
|
system memory for ZFS ARC to prevent performance shortage of the
|
||||||
|
host. Use your preferred editor to change the configuration in
|
||||||
|
`/etc/modprobe.d/zfs.conf` and insert:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
options zfs zfs_arc_max=8589934592
|
||||||
|
|
||||||
|
This example setting limits the usage to 8GB.
|
||||||
|
|
||||||
|
.. IMPORTANT:: If your root file system is ZFS you must update your initramfs every time this value changes:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# update-initramfs -u
|
||||||
|
|
||||||
|
|
||||||
|
SWAP on ZFS
|
||||||
|
^^^^^^^^^^^
|
||||||
|
|
||||||
|
Swap-space created on a zvol may generate some troubles, like blocking the
|
||||||
|
server or generating a high IO load, often seen when starting a Backup
|
||||||
|
to an external Storage.
|
||||||
|
|
||||||
|
We strongly recommend to use enough memory, so that you normally do not
|
||||||
|
run into low memory situations. Should you need or want to add swap, it is
|
||||||
|
preferred to create a partition on a physical disk and use it as swapdevice.
|
||||||
|
You can leave some space free for this purpose in the advanced options of the
|
||||||
|
installer. Additionally, you can lower the `swappiness` value.
|
||||||
|
A good value for servers is 10:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# sysctl -w vm.swappiness=10
|
||||||
|
|
||||||
|
To make the swappiness persistent, open `/etc/sysctl.conf` with
|
||||||
|
an editor of your choice and add the following line:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
vm.swappiness = 10
|
||||||
|
|
||||||
|
.. table:: Linux kernel `swappiness` parameter values
|
||||||
|
:widths:auto
|
||||||
|
|
||||||
|
==================== ===============================================================
|
||||||
|
Value Strategy
|
||||||
|
==================== ===============================================================
|
||||||
|
vm.swappiness = 0 The kernel will swap only to avoid an 'out of memory' condition
|
||||||
|
vm.swappiness = 1 Minimum amount of swapping without disabling it entirely.
|
||||||
|
vm.swappiness = 10 Sometimes recommended to improve performance when sufficient memory exists in a system.
|
||||||
|
vm.swappiness = 60 The default value.
|
||||||
|
vm.swappiness = 100 The kernel will swap aggressively.
|
||||||
|
==================== ===============================================================

ZFS Compression
^^^^^^^^^^^^^^^

To activate compression:

.. code-block:: console

  # zpool set compression=lz4 <pool>

We recommend using the `lz4` algorithm, since it adds very little CPU overhead.
Other algorithms, such as `lzjb` and `gzip-N` (where `N` is an integer from `1`
(fastest) to `9` (best compression ratio)), are also available. Depending on
the algorithm and how compressible the data is, having compression enabled can
even increase I/O performance.

You can disable compression at any time with:

.. code-block:: console

  # zfs set compression=off <dataset>

Only new blocks will be affected by this change.
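To judge how effective compression is on data that has already been written,
you can query the read-only `compressratio` property:

.. code-block:: console

  # zfs get compressratio <pool>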

ZFS Special Device
^^^^^^^^^^^^^^^^^^

Since version 0.8.0, ZFS supports `special` devices. A `special` device in a
pool is used to store metadata, deduplication tables, and optionally small
file blocks.

A `special` device can improve the speed of a pool consisting of slow spinning
hard disks with a lot of metadata changes. For example, workloads that involve
creating, updating or deleting a large number of files will benefit from the
presence of a `special` device. ZFS datasets can also be configured to store
whole small files on the `special` device, which can further improve the
performance. Use fast SSDs for the `special` device.

.. IMPORTANT:: The redundancy of the `special` device should match the one of the
   pool, since the `special` device is a point of failure for the whole pool.

.. WARNING:: Adding a `special` device to a pool cannot be undone!

Create a pool with `special` device and RAID-1:

.. code-block:: console

  # zpool create -f -o ashift=12 <pool> mirror <device1> <device2> special mirror <device3> <device4>

Adding a `special` device to an existing pool with RAID-1:

.. code-block:: console

  # zpool add <pool> special mirror <device1> <device2>

ZFS datasets expose the `special_small_blocks=<size>` property. `size` can be
`0` to disable storing small file blocks on the `special` device, or a power of
two in the range between `512B` and `128K`. After setting the property, new
file blocks smaller than `size` will be allocated on the `special` device.

.. IMPORTANT:: If the value for `special_small_blocks` is greater than or equal to
   the `recordsize` (default `128K`) of the dataset, *all* data will be written to
   the `special` device, so be careful!

Setting the `special_small_blocks` property on a pool will change the default
value of that property for all child ZFS datasets (for example, all containers
in the pool will opt in for small file blocks).

Opt in for all files smaller than 4K blocks pool-wide:

.. code-block:: console

  # zfs set special_small_blocks=4K <pool>

Opt in for small file blocks for a single dataset:

.. code-block:: console

  # zfs set special_small_blocks=4K <pool>/<filesystem>

Opt out from small file blocks for a single dataset:

.. code-block:: console

  # zfs set special_small_blocks=0 <pool>/<filesystem>
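To review the value in effect for a pool and all of its datasets, including
inherited settings, the property can be queried recursively:

.. code-block:: console

  # zfs get -r special_small_blocks <pool>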

Troubleshooting
^^^^^^^^^^^^^^^

Corrupted cachefile

In case of a corrupted ZFS cachefile, some volumes may not be mounted during
boot and will have to be mounted manually later.

For each pool, run:

.. code-block:: console

  # zpool set cachefile=/etc/zfs/zpool.cache POOLNAME

then update the `initramfs` by running:

.. code-block:: console

  # update-initramfs -u -k all

and finally reboot your node.

The background is that the ZFS cachefile can sometimes get corrupted, and
`zfs-import-cache.service` doesn't import pools that aren't present in the
cachefile.

Another workaround for this problem is enabling `zfs-import-scan.service`,
which searches for and imports pools via device scanning (usually slower).
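Assuming a standard systemd setup (as on Proxmox Backup), the scan-based
import service can be enabled like any other unit:

.. code-block:: console

  # systemctl enable zfs-import-scan.service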

@ -3,100 +3,110 @@
 Debian Package Repositories
 ---------------------------
 
-All Debian based systems use APT_ as package
-management tool. The list of repositories is defined in
-``/etc/apt/sources.list`` and ``.list`` files found in the
-``/etc/apt/sources.d/`` directory. Updates can be installed directly with
-the ``apt`` command line tool, or via the GUI.
+All Debian based systems use APT_ as package management tool. The list of
+repositories is defined in ``/etc/apt/sources.list`` and ``.list`` files found
+in the ``/etc/apt/sources.d/`` directory. Updates can be installed directly
+with the ``apt`` command line tool, or via the GUI.
 
-APT_ ``sources.list`` files list one package repository per line, with
-the most preferred source listed first. Empty lines are ignored and a
-``#`` character anywhere on a line marks the remainder of that line as a
-comment. The information available from the configured sources is
-acquired by ``apt update``.
+APT_ ``sources.list`` files list one package repository per line, with the most
+preferred source listed first. Empty lines are ignored and a ``#`` character
+anywhere on a line marks the remainder of that line as a comment. The
+information available from the configured sources is acquired by ``apt
+update``.
 
 .. code-block:: sources.list
   :caption: File: ``/etc/apt/sources.list``
 
   deb http://ftp.debian.org/debian buster main contrib
   deb http://ftp.debian.org/debian buster-updates main contrib
 
   # security updates
   deb http://security.debian.org/debian-security buster/updates main contrib
 
 .. FIXME for 7.0: change security update suite to bullseye-security
 
-In addition, Proxmox provides three different package repositories for
-the backup server binaries.
+In addition, you need a package repository from Proxmox to get the backup
+server updates.
+
+During the Proxmox Backup beta phase, only one repository (pbstest) will be
+available. Once released, an Enterprise repository for production use and a
+no-subscription repository will be provided.
 
+.. comment
 `Proxmox Backup`_ Enterprise Repository
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-This is the default, stable, and recommended repository. It is available for
+This will be the default, stable, and recommended repository. It is available for
 all `Proxmox Backup`_ subscription users. It contains the most stable packages,
 and is suitable for production use. The ``pbs-enterprise`` repository is
 enabled by default:
 
+.. note:: During the Proxmox Backup beta phase only one repository (pbstest)
+   will be available.
+
 .. code-block:: sources.list
   :caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
 
   deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
 
 To never miss important security fixes, the superuser (``root@pam`` user) is
 notified via email about new packages as soon as they are available. The
 change-log and details of each package can be viewed in the GUI (if available).
 
 Please note that you need a valid subscription key to access this
 repository. More information regarding subscription levels and pricing can be
 found at https://www.proxmox.com/en/proxmox-backup/pricing.
 
 .. note:: You can disable this repository by commenting out the above
   line using a `#` (at the start of the line). This prevents error
   messages if you do not have a subscription key. Please configure the
   ``pbs-no-subscription`` repository in that case.
 
 
 `Proxmox Backup`_ No-Subscription Repository
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 As the name suggests, you do not need a subscription key to access
 this repository. It can be used for testing and non-production
 use. It is not recommended to use it on production servers, because these
 packages are not always heavily tested and validated.
 
 We recommend to configure this repository in ``/etc/apt/sources.list``.
 
 .. code-block:: sources.list
   :caption: File: ``/etc/apt/sources.list``
 
   deb http://ftp.debian.org/debian buster main contrib
   deb http://ftp.debian.org/debian buster-updates main contrib
 
   # PBS pbs-no-subscription repository provided by proxmox.com,
   # NOT recommended for production use
-  deb http://download.proxmox.com/debian/bps buster pbs-no-subscription
+  deb http://download.proxmox.com/debian/pbs buster pbs-no-subscription
 
   # security updates
   deb http://security.debian.org/debian-security buster/updates main contrib
 
 
-`Proxmox Backup`_ Test Repository
+`Proxmox Backup`_ Beta Repository
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Finally, there is a repository called ``pbstest``. This one contains the
-latest packages and is heavily used by developers to test new
-features.
+During the public beta, there is a repository called ``pbstest``. This one
+contains the latest packages and is heavily used by developers to test new
+features.
 
-.. warning:: the ``pbstest`` repository should (as the name implies)
+.. .. warning:: the ``pbstest`` repository should (as the name implies)
   only be used to test new features or bug fixes.
 
-You can configure this using ``/etc/apt/sources.list`` by
-adding the following line:
+You can configure this using ``/etc/apt/sources.list`` by adding the following
+line:
 
 .. code-block:: sources.list
   :caption: sources.list entry for ``pbstest``
 
-  deb http://download.proxmox.com/debian/bps buster pbstest
+  deb http://download.proxmox.com/debian/pbs buster pbstest
+
+If you installed Proxmox Backup Server from the official beta ISO, you should
+have this repository already configured in
+``/etc/apt/sources.list.d/pbstest-beta.list``
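After any change to the repository files, refresh the package index before
installing pending updates (the standard APT workflow):

.. code-block:: console

  # apt update
  # apt full-upgrade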
@ -1,5 +1,5 @@
 Host System Administration
---------------------------
+==========================
 
 `Proxmox Backup`_ is based on the famous Debian_ Linux
 distribution. That means that you have access to the whole world of
@ -23,8 +23,4 @@ either explain things which are different on `Proxmox Backup`_, or
 tasks which are commonly used on `Proxmox Backup`_. For other topics,
 please refer to the standard Debian documentation.
 
-ZFS
-~~~
-
-.. todo:: Add local ZFS admin guide (local.zfs.adoc)
+.. include:: local-zfs.rst

docs/todos.rst (new file)
@ -0,0 +1,6 @@
+Documentation Todo List
+=======================
+
+This is an auto-generated list of the todo references in the documentation.
+
+.. todolist::

@ -7,7 +7,7 @@ DYNAMIC_UNITS := \
 proxmox-backup.service \
 proxmox-backup-proxy.service
 
-all: $(UNITS) $(DYNAMIC_UNITS)
+all: $(UNITS) $(DYNAMIC_UNITS) pbstest-beta.list
 
 clean:
 	rm -f $(DYNAMIC_UNITS)

etc/pbstest-beta.list (new file)
@ -0,0 +1 @@
+deb http://download.proxmox.com/debian/pbs buster pbstest
@ -46,20 +46,20 @@ fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> R
 
 fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
 
-    let (manifest, index_size) = store.load_manifest(backup_dir)?;
+    let (manifest, manifest_crypt_mode, index_size) = store.load_manifest(backup_dir)?;
 
     let mut result = Vec::new();
     for item in manifest.files() {
         result.push(BackupContent {
             filename: item.filename.clone(),
-            encrypted: item.encrypted,
+            crypt_mode: Some(item.crypt_mode),
             size: Some(item.size),
         });
     }
 
     result.push(BackupContent {
         filename: MANIFEST_BLOB_NAME.to_string(),
-        encrypted: Some(false),
+        crypt_mode: Some(manifest_crypt_mode),
         size: Some(index_size),
     });
 
@ -79,7 +79,11 @@ fn get_all_snapshot_files(
 
     for file in &info.files {
         if file_set.contains(file) { continue; }
-        files.push(BackupContent { filename: file.to_string(), size: None, encrypted: None });
+        files.push(BackupContent {
+            filename: file.to_string(),
+            size: None,
+            crypt_mode: None,
+        });
     }
 
     Ok(files)
@ -350,7 +354,15 @@ pub fn list_snapshots (
             },
             Err(err) => {
                 eprintln!("error during snapshot file listing: '{}'", err);
-                info.files.iter().map(|x| BackupContent { filename: x.to_string(), size: None, encrypted: None }).collect()
+                info
+                    .files
+                    .iter()
+                    .map(|x| BackupContent {
+                        filename: x.to_string(),
+                        size: None,
+                        crypt_mode: None,
+                    })
+                    .collect()
             },
         };
 
@ -902,7 +914,7 @@ fn download_file_decoded(
 
     let files = read_backup_index(&datastore, &backup_dir)?;
     for file in files {
-        if file.filename == file_name && file.encrypted == Some(true) {
+        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
             bail!("cannot decode '{}' - is encrypted", file_name);
         }
     }
@ -26,10 +26,10 @@ pub mod zfs;
             schema: NODE_SCHEMA,
         },
         skipsmart: {
             description: "Skip smart checks.",
             type: bool,
             optional: true,
             default: false,
         },
         "usage-type": {
             type: DiskUsageType,
@ -5,6 +5,8 @@ use proxmox::api::{api, schema::*};
 use proxmox::const_regex;
 use proxmox::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};
 
+use crate::backup::CryptMode;
+
 // File names: may not contain slashes, may not start with "."
 pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
     if name.starts_with('.') {
@ -496,6 +498,10 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new(
         "filename": {
             schema: BACKUP_ARCHIVE_NAME_SCHEMA,
         },
+        "crypt-mode": {
+            type: CryptMode,
+            optional: true,
+        },
     },
 )]
 #[derive(Serialize, Deserialize)]
@ -503,9 +509,9 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new(
 /// Basic information about archive files inside a backup snapshot.
 pub struct BackupContent {
     pub filename: String,
-    /// Info if file is encrypted (or empty if we do not have that info)
+    /// Info if file is encrypted, signed, or neither.
     #[serde(skip_serializing_if="Option::is_none")]
-    pub encrypted: Option<bool>,
+    pub crypt_mode: Option<CryptMode>,
     /// Archive size (from backup manifest).
     #[serde(skip_serializing_if="Option::is_none")]
     pub size: Option<u64>,
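A small sketch (with local stand-in types, not the crate's) of what the
``Option`` plus ``skip_serializing_if`` combination means for API output: when
the crypt mode is unknown, the field is omitted from the JSON entirely rather
than emitted as ``null``:

.. code-block:: rust

   use serde::Serialize;

   #[derive(Serialize)]
   struct BackupContent {
       filename: String,
       #[serde(skip_serializing_if = "Option::is_none")]
       crypt_mode: Option<String>, // stand-in for Option<CryptMode>
   }

   fn main() {
       let entry = BackupContent {
           filename: "catalog.pcat1.didx".into(),
           crypt_mode: None,
       };
       // No "crypt_mode" key at all when the value is None.
       assert_eq!(
           serde_json::to_string(&entry).unwrap(),
           r#"{"filename":"catalog.pcat1.didx"}"#
       );
   }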
@ -36,7 +36,7 @@ impl<S: AsyncReadChunk, I: IndexFile> AsyncIndexReader<S, I> {
         Self {
             store: Some(store),
             index,
-            read_buffer: Vec::with_capacity(1024*1024),
+            read_buffer: Vec::with_capacity(1024 * 1024),
             current_chunk_idx: 0,
             current_chunk_digest: [0u8; 32],
             state: AsyncIndexReaderState::NoData,
@ -44,9 +44,10 @@ impl<S: AsyncReadChunk, I: IndexFile> AsyncIndexReader<S, I> {
     }
 }
 
-impl<S, I> AsyncRead for AsyncIndexReader<S, I> where
-S: AsyncReadChunk + Unpin + Sync + 'static,
-I: IndexFile + Unpin
+impl<S, I> AsyncRead for AsyncIndexReader<S, I>
+where
+    S: AsyncReadChunk + Unpin + Sync + 'static,
+    I: IndexFile + Unpin,
 {
     fn poll_read(
         self: Pin<&mut Self>,
@ -57,7 +58,7 @@ I: IndexFile + Unpin
         loop {
             match &mut this.state {
                 AsyncIndexReaderState::NoData => {
                     if this.current_chunk_idx >= this.index.index_count() {
                         return Poll::Ready(Ok(0));
                     }
@ -67,7 +68,7 @@ I: IndexFile + Unpin
                         .ok_or(io_format_err!("could not get digest"))?
                         .clone();
 
                     if digest == this.current_chunk_digest {
                         this.state = AsyncIndexReaderState::HaveData(0);
                         continue;
                     }
@ -78,7 +79,7 @@ I: IndexFile + Unpin
                         Some(store) => store,
                         None => {
                             return Poll::Ready(Err(io_format_err!("could not find store")));
-                        },
+                        }
                     };
 
                     let future = async move {
@ -88,7 +89,7 @@ I: IndexFile + Unpin
                     };
 
                     this.state = AsyncIndexReaderState::WaitForData(future.boxed());
-                },
+                }
                 AsyncIndexReaderState::WaitForData(ref mut future) => {
                     match ready!(future.as_mut().poll(cx)) {
                         Ok((store, mut chunk_data)) => {
@ -96,12 +97,12 @@ I: IndexFile + Unpin
                             this.read_buffer.append(&mut chunk_data);
                             this.state = AsyncIndexReaderState::HaveData(0);
                             this.store = Some(store);
-                        },
+                        }
                         Err(err) => {
                             return Poll::Ready(Err(io_err_other(err)));
-                        },
+                        }
                     };
-                },
+                }
                 AsyncIndexReaderState::HaveData(offset) => {
                     let offset = *offset;
                     let len = this.read_buffer.len();
@ -111,7 +112,7 @@ I: IndexFile + Unpin
                         buf.len()
                     };
 
-                    buf[0..n].copy_from_slice(&this.read_buffer[offset..offset+n]);
+                    buf[0..n].copy_from_slice(&this.read_buffer[offset..(offset + n)]);
                     if offset + n == len {
                         this.state = AsyncIndexReaderState::NoData;
                         this.current_chunk_idx += 1;
@ -120,7 +121,7 @@ I: IndexFile + Unpin
                     }
 
                     return Poll::Ready(Ok(n));
-                },
+                }
             }
         }
     }
 }
@ -6,12 +6,30 @@
 //! See the Wikipedia article for [Authenticated
 //! encryption](https://en.wikipedia.org/wiki/Authenticated_encryption)
 //! for a short introduction.
-use anyhow::{bail, Error};
-use openssl::pkcs5::pbkdf2_hmac;
-use openssl::hash::MessageDigest;
-use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};
+
 use std::io::Write;
 
+use anyhow::{bail, Error};
 use chrono::{Local, TimeZone, DateTime};
+use openssl::hash::MessageDigest;
+use openssl::pkcs5::pbkdf2_hmac;
+use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};
+use serde::{Deserialize, Serialize};
+
+use proxmox::api::api;
+
+#[api(default: "encrypt")]
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
+#[serde(rename_all = "kebab-case")]
+/// Defines whether data is encrypted (using an AEAD cipher), only signed, or neither.
+pub enum CryptMode {
+    /// Don't encrypt.
+    None,
+    /// Encrypt.
+    Encrypt,
+    /// Only sign.
+    SignOnly,
+}
 
 /// Encryption Configuration with secret key
 ///
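A minimal standalone sketch of the new ``CryptMode`` type (without the
``#[api]`` macro, so purely illustrative) showing what the kebab-case rename
means once a value crosses the API or manifest boundary:

.. code-block:: rust

   use serde::{Deserialize, Serialize};

   // Standalone re-declaration for illustration; the real type also
   // carries the #[api] attribute from the proxmox crate.
   #[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
   #[serde(rename_all = "kebab-case")]
   enum CryptMode {
       None,
       Encrypt,
       SignOnly,
   }

   fn main() -> Result<(), serde_json::Error> {
       // kebab-case renaming turns SignOnly into "sign-only" in JSON.
       assert_eq!(serde_json::to_string(&CryptMode::SignOnly)?, "\"sign-only\"");
       // And parses back from the wire form.
       let mode: CryptMode = serde_json::from_str("\"encrypt\"")?;
       assert_eq!(mode, CryptMode::Encrypt);
       Ok(())
   }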
@ -26,7 +44,6 @@ pub struct CryptConfig {
     id_pkey: openssl::pkey::PKey<openssl::pkey::Private>,
     // The private key used by the cipher.
     enc_key: [u8; 32],
-
 }
 
 impl CryptConfig {
@ -63,10 +80,9 @@ impl CryptConfig {
     /// chunk digest values do not clash with values computed for
     /// other secret keys.
     pub fn compute_digest(&self, data: &[u8]) -> [u8; 32] {
-        // FIXME: use HMAC-SHA256 instead??
         let mut hasher = openssl::sha::Sha256::new();
-        hasher.update(&self.id_key);
         hasher.update(data);
+        hasher.update(&self.id_key); // at the end, to avoid length extension attacks
         hasher.finish()
     }
 
@ -203,7 +219,7 @@ impl CryptConfig {
         created: DateTime<Local>,
     ) -> Result<Vec<u8>, Error> {
 
         let modified = Local.timestamp(Local::now().timestamp(), 0);
         let key_config = super::KeyConfig { kdf: None, created, modified, data: self.enc_key.to_vec() };
         let data = serde_json::to_string(&key_config)?.as_bytes().to_vec();
 
@ -3,10 +3,10 @@ use std::convert::TryInto;
 
 use proxmox::tools::io::{ReadExt, WriteExt};
 
-const MAX_BLOB_SIZE: usize = 128*1024*1024;
-
 use super::file_formats::*;
-use super::CryptConfig;
+use super::{CryptConfig, CryptMode};
+
+const MAX_BLOB_SIZE: usize = 128*1024*1024;
 
 /// Encoded data chunk with digest and positional information
 pub struct ChunkInfo {
@ -166,6 +166,19 @@ impl DataBlob {
         Ok(blob)
     }
 
+    /// Get the encryption mode for this blob.
+    pub fn crypt_mode(&self) -> Result<CryptMode, Error> {
+        let magic = self.magic();
+
+        Ok(if magic == &UNCOMPRESSED_BLOB_MAGIC_1_0 || magic == &COMPRESSED_BLOB_MAGIC_1_0 {
+            CryptMode::None
+        } else if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
+            CryptMode::Encrypt
+        } else {
+            bail!("Invalid blob magic number.");
+        })
+    }
+
     /// Decode blob data
     pub fn decode(&self, config: Option<&CryptConfig>) -> Result<Vec<u8>, Error> {
 
@ -194,75 +207,11 @@ impl DataBlob {
             } else {
                 bail!("unable to decrypt blob - missing CryptConfig");
             }
-        } else if magic == &AUTH_COMPR_BLOB_MAGIC_1_0 || magic == &AUTHENTICATED_BLOB_MAGIC_1_0 {
-            let header_len = std::mem::size_of::<AuthenticatedDataBlobHeader>();
-            let head = unsafe {
-                (&self.raw_data[..header_len]).read_le_value::<AuthenticatedDataBlobHeader>()?
-            };
-
-            let data_start = std::mem::size_of::<AuthenticatedDataBlobHeader>();
-
-            // Note: only verify if we have a crypt config
-            if let Some(config) = config {
-                let signature = config.compute_auth_tag(&self.raw_data[data_start..]);
-                if signature != head.tag {
-                    bail!("verifying blob signature failed");
-                }
-            }
-
-            if magic == &AUTH_COMPR_BLOB_MAGIC_1_0 {
-                let data = zstd::block::decompress(&self.raw_data[data_start..], 16*1024*1024)?;
-                Ok(data)
-            } else {
-                Ok(self.raw_data[data_start..].to_vec())
-            }
         } else {
             bail!("Invalid blob magic number.");
         }
     }
 
-    /// Create a signed DataBlob, optionally compressed
-    pub fn create_signed(
-        data: &[u8],
-        config: &CryptConfig,
-        compress: bool,
-    ) -> Result<Self, Error> {
-
-        if data.len() > MAX_BLOB_SIZE {
-            bail!("data blob too large ({} bytes).", data.len());
-        }
-
-        let compr_data;
-        let (_compress, data, magic) = if compress {
-            compr_data = zstd::block::compress(data, 1)?;
-            // Note: We only use compression if result is shorter
-            if compr_data.len() < data.len() {
-                (true, &compr_data[..], AUTH_COMPR_BLOB_MAGIC_1_0)
-            } else {
-                (false, data, AUTHENTICATED_BLOB_MAGIC_1_0)
-            }
-        } else {
-            (false, data, AUTHENTICATED_BLOB_MAGIC_1_0)
-        };
-
-        let header_len = std::mem::size_of::<AuthenticatedDataBlobHeader>();
-        let mut raw_data = Vec::with_capacity(data.len() + header_len);
-
-        let head = AuthenticatedDataBlobHeader {
-            head: DataBlobHeader { magic, crc: [0; 4] },
-            tag: config.compute_auth_tag(data),
-        };
-        unsafe {
-            raw_data.write_le_value(head)?;
-        }
-        raw_data.extend_from_slice(data);
-
-        let mut blob = DataBlob { raw_data };
-        blob.set_crc(blob.compute_crc());
-
-        Ok(blob)
-    }
-
     /// Load blob from ``reader``
     pub fn load(reader: &mut dyn std::io::Read) -> Result<Self, Error> {
 
@ -294,14 +243,6 @@ impl DataBlob {
 
             let blob = DataBlob { raw_data: data };
 
             Ok(blob)
-        } else if magic == AUTH_COMPR_BLOB_MAGIC_1_0 || magic == AUTHENTICATED_BLOB_MAGIC_1_0 {
-            if data.len() < std::mem::size_of::<AuthenticatedDataBlobHeader>() {
-                bail!("authenticated blob too small ({} bytes).", data.len());
-            }
-
-            let blob = DataBlob { raw_data: data };
-
-            Ok(blob)
         } else {
             bail!("unable to parse raw blob - wrong magic");
         }
@ -376,7 +317,7 @@ impl <'a, 'b> DataChunkBuilder<'a, 'b> {
 
     /// Set encryption Configuration
     ///
-    /// If set, chunks are encrypted.
+    /// If set, chunks are encrypted
     pub fn crypt_config(mut self, value: &'b CryptConfig) -> Self {
         if self.digest_computed {
             panic!("unable to set crypt_config after compute_digest().");
@ -415,12 +356,7 @@ impl <'a, 'b> DataChunkBuilder<'a, 'b> {
             self.compute_digest();
         }
 
-        let chunk = DataBlob::encode(
-            self.orig_data,
-            self.config,
-            self.compress,
-        )?;
-
+        let chunk = DataBlob::encode(self.orig_data, self.config, self.compress)?;
         Ok((chunk, self.digest))
     }
 
@ -8,8 +8,6 @@ use super::*;
 enum BlobReaderState<R: Read> {
     Uncompressed { expected_crc: u32, csum_reader: ChecksumReader<R> },
     Compressed { expected_crc: u32, decompr: zstd::stream::read::Decoder<BufReader<ChecksumReader<R>>> },
-    Signed { expected_crc: u32, expected_hmac: [u8; 32], csum_reader: ChecksumReader<R> },
-    SignedCompressed { expected_crc: u32, expected_hmac: [u8; 32], decompr: zstd::stream::read::Decoder<BufReader<ChecksumReader<R>>> },
     Encrypted { expected_crc: u32, decrypt_reader: CryptReader<BufReader<ChecksumReader<R>>> },
     EncryptedCompressed { expected_crc: u32, decompr: zstd::stream::read::Decoder<BufReader<CryptReader<BufReader<ChecksumReader<R>>>>> },
 }
@ -41,22 +39,6 @@ impl <R: Read> DataBlobReader<R> {
                 let decompr = zstd::stream::read::Decoder::new(csum_reader)?;
                 Ok(Self { state: BlobReaderState::Compressed { expected_crc, decompr }})
             }
-            AUTHENTICATED_BLOB_MAGIC_1_0 => {
-                let expected_crc = u32::from_le_bytes(head.crc);
-                let mut expected_hmac = [0u8; 32];
-                reader.read_exact(&mut expected_hmac)?;
-                let csum_reader = ChecksumReader::new(reader, config);
-                Ok(Self { state: BlobReaderState::Signed { expected_crc, expected_hmac, csum_reader }})
-            }
-            AUTH_COMPR_BLOB_MAGIC_1_0 => {
-                let expected_crc = u32::from_le_bytes(head.crc);
-                let mut expected_hmac = [0u8; 32];
-                reader.read_exact(&mut expected_hmac)?;
-                let csum_reader = ChecksumReader::new(reader, config);
-
-                let decompr = zstd::stream::read::Decoder::new(csum_reader)?;
-                Ok(Self { state: BlobReaderState::SignedCompressed { expected_crc, expected_hmac, decompr }})
-            }
             ENCRYPTED_BLOB_MAGIC_1_0 => {
                 let expected_crc = u32::from_le_bytes(head.crc);
                 let mut iv = [0u8; 16];
@ -99,31 +81,6 @@ impl <R: Read> DataBlobReader<R> {
                 }
                 Ok(reader)
             }
-            BlobReaderState::Signed { csum_reader, expected_crc, expected_hmac } => {
-                let (reader, crc, hmac) = csum_reader.finish()?;
-                if crc != expected_crc {
-                    bail!("blob crc check failed");
-                }
-                if let Some(hmac) = hmac {
-                    if hmac != expected_hmac {
-                        bail!("blob signature check failed");
-                    }
-                }
-                Ok(reader)
-            }
-            BlobReaderState::SignedCompressed { expected_crc, expected_hmac, decompr } => {
-                let csum_reader = decompr.finish().into_inner();
-                let (reader, crc, hmac) = csum_reader.finish()?;
-                if crc != expected_crc {
-                    bail!("blob crc check failed");
-                }
-                if let Some(hmac) = hmac {
-                    if hmac != expected_hmac {
-                        bail!("blob signature check failed");
-                    }
-                }
-                Ok(reader)
-            }
             BlobReaderState::Encrypted { expected_crc, decrypt_reader } => {
                 let csum_reader = decrypt_reader.finish()?.into_inner();
                 let (reader, crc, _) = csum_reader.finish()?;
@ -155,12 +112,6 @@ impl <R: Read> Read for DataBlobReader<R> {
             BlobReaderState::Compressed { decompr, .. } => {
                 decompr.read(buf)
             }
-            BlobReaderState::Signed { csum_reader, .. } => {
-                csum_reader.read(buf)
-            }
-            BlobReaderState::SignedCompressed { decompr, .. } => {
-                decompr.read(buf)
-            }
             BlobReaderState::Encrypted { decrypt_reader, .. } => {
                 decrypt_reader.read(buf)
             }
@ -8,8 +8,6 @@ use super::*;
 enum BlobWriterState<W: Write> {
     Uncompressed { csum_writer: ChecksumWriter<W> },
     Compressed { compr: zstd::stream::write::Encoder<ChecksumWriter<W>> },
-    Signed { csum_writer: ChecksumWriter<W> },
-    SignedCompressed { compr: zstd::stream::write::Encoder<ChecksumWriter<W>> },
     Encrypted { crypt_writer: CryptWriter<ChecksumWriter<W>> },
     EncryptedCompressed { compr: zstd::stream::write::Encoder<CryptWriter<ChecksumWriter<W>>> },
 }
@ -42,33 +40,6 @@ impl <W: Write + Seek> DataBlobWriter<W> {
         Ok(Self { state: BlobWriterState::Compressed { compr }})
     }
 
-    pub fn new_signed(mut writer: W, config: Arc<CryptConfig>) -> Result<Self, Error> {
-        writer.seek(SeekFrom::Start(0))?;
-        let head = AuthenticatedDataBlobHeader {
-            head: DataBlobHeader { magic: AUTHENTICATED_BLOB_MAGIC_1_0, crc: [0; 4] },
-            tag: [0u8; 32],
-        };
-        unsafe {
-            writer.write_le_value(head)?;
-        }
-        let csum_writer = ChecksumWriter::new(writer, Some(config));
-        Ok(Self { state: BlobWriterState::Signed { csum_writer }})
-    }
-
-    pub fn new_signed_compressed(mut writer: W, config: Arc<CryptConfig>) -> Result<Self, Error> {
-        writer.seek(SeekFrom::Start(0))?;
-        let head = AuthenticatedDataBlobHeader {
-            head: DataBlobHeader { magic: AUTH_COMPR_BLOB_MAGIC_1_0, crc: [0; 4] },
-            tag: [0u8; 32],
-        };
-        unsafe {
-            writer.write_le_value(head)?;
-        }
-        let csum_writer = ChecksumWriter::new(writer, Some(config));
-        let compr = zstd::stream::write::Encoder::new(csum_writer, 1)?;
-        Ok(Self { state: BlobWriterState::SignedCompressed { compr }})
-    }
-
     pub fn new_encrypted(mut writer: W, config: Arc<CryptConfig>) -> Result<Self, Error> {
         writer.seek(SeekFrom::Start(0))?;
         let head = EncryptedDataBlobHeader {
@ -129,37 +100,6 @@ impl <W: Write + Seek> DataBlobWriter<W> {
 
                 Ok(writer)
             }
-            BlobWriterState::Signed { csum_writer } => {
-                let (mut writer, crc, tag) = csum_writer.finish()?;
-
-                let head = AuthenticatedDataBlobHeader {
-                    head: DataBlobHeader { magic: AUTHENTICATED_BLOB_MAGIC_1_0, crc: crc.to_le_bytes() },
-                    tag: tag.unwrap(),
-                };
-
-                writer.seek(SeekFrom::Start(0))?;
-                unsafe {
-                    writer.write_le_value(head)?;
-                }
-
-                Ok(writer)
-            }
-            BlobWriterState::SignedCompressed { compr } => {
-                let csum_writer = compr.finish()?;
-                let (mut writer, crc, tag) = csum_writer.finish()?;
-
-                let head = AuthenticatedDataBlobHeader {
-                    head: DataBlobHeader { magic: AUTH_COMPR_BLOB_MAGIC_1_0, crc: crc.to_le_bytes() },
-                    tag: tag.unwrap(),
-                };
-
-                writer.seek(SeekFrom::Start(0))?;
-                unsafe {
-                    writer.write_le_value(head)?;
-                }
-
-                Ok(writer)
-            }
             BlobWriterState::Encrypted { crypt_writer } => {
                 let (csum_writer, iv, tag) = crypt_writer.finish()?;
                 let (mut writer, crc, _) = csum_writer.finish()?;
@ -203,12 +143,6 @@ impl <W: Write + Seek> Write for DataBlobWriter<W> {
             BlobWriterState::Compressed { ref mut compr } => {
                 compr.write(buf)
             }
-            BlobWriterState::Signed { ref mut csum_writer } => {
-                csum_writer.write(buf)
-            }
-            BlobWriterState::SignedCompressed { ref mut compr } => {
-                compr.write(buf)
-            }
             BlobWriterState::Encrypted { ref mut crypt_writer } => {
                 crypt_writer.write(buf)
             }
@ -226,13 +160,7 @@ impl <W: Write + Seek> Write for DataBlobWriter<W> {
             BlobWriterState::Compressed { ref mut compr } => {
                 compr.flush()
             }
-            BlobWriterState::Signed { ref mut csum_writer } => {
-                csum_writer.flush()
-            }
-            BlobWriterState::SignedCompressed { ref mut compr } => {
-                compr.flush()
-            }
             BlobWriterState::Encrypted { ref mut crypt_writer } => {
                 crypt_writer.flush()
             }
             BlobWriterState::EncryptedCompressed { ref mut compr } => {
@ -15,6 +15,7 @@ use super::fixed_index::{FixedIndexReader, FixedIndexWriter};
 use super::manifest::{MANIFEST_BLOB_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
 use super::index::*;
 use super::{DataBlob, ArchiveType, archive_type};
+use crate::backup::CryptMode;
 use crate::config::datastore;
 use crate::server::WorkerTask;
 use crate::tools;
@ -494,9 +495,13 @@ impl DataStore {
         Ok((blob, raw_size))
     }
 
-    pub fn load_manifest(&self, backup_dir: &BackupDir) -> Result<(BackupManifest, u64), Error> {
+    pub fn load_manifest(
+        &self,
+        backup_dir: &BackupDir,
+    ) -> Result<(BackupManifest, CryptMode, u64), Error> {
         let (blob, raw_size) = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
+        let crypt_mode = blob.crypt_mode()?;
         let manifest = BackupManifest::try_from(blob)?;
-        Ok((manifest, raw_size))
+        Ok((manifest, crypt_mode, raw_size))
     }
 }
@ -17,12 +17,6 @@ pub const ENCRYPTED_BLOB_MAGIC_1_0: [u8; 8] = [123, 103, 133, 190, 34, 45, 76, 2
 // openssl::sha::sha256(b"Proxmox Backup zstd compressed encrypted blob v1.0")[0..8]
 pub const ENCR_COMPR_BLOB_MAGIC_1_0: [u8; 8] = [230, 89, 27, 191, 11, 191, 216, 11];
 
-// openssl::sha::sha256(b"Proxmox Backup authenticated blob v1.0")[0..8]
-pub const AUTHENTICATED_BLOB_MAGIC_1_0: [u8; 8] = [31, 135, 238, 226, 145, 206, 5, 2];
-
-// openssl::sha::sha256(b"Proxmox Backup zstd compressed authenticated blob v1.0")[0..8]
-pub const AUTH_COMPR_BLOB_MAGIC_1_0: [u8; 8] = [126, 166, 15, 190, 145, 31, 169, 96];
-
 // openssl::sha::sha256(b"Proxmox Backup fixed sized chunk index v1.0")[0..8]
 pub const FIXED_SIZED_CHUNK_INDEX_1_0: [u8; 8] = [47, 127, 65, 237, 145, 253, 15, 205];
 
@ -50,19 +44,6 @@ pub struct DataBlobHeader {
     pub crc: [u8; 4],
 }
 
-/// Authenticated data blob binary storage format
-///
-/// The ``DataBlobHeader`` for authenticated blobs additionally contains
-/// a 16 byte HMAC tag, followed by the data:
-///
-/// (MAGIC || CRC32 || TAG || Data).
-#[derive(Endian)]
-#[repr(C,packed)]
-pub struct AuthenticatedDataBlobHeader {
-    pub head: DataBlobHeader,
-    pub tag: [u8; 32],
-}
-
 /// Encrypted data blob binary storage format
 ///
 /// The ``DataBlobHeader`` for encrypted blobs additionally contains
@ -87,8 +68,6 @@ pub fn header_size(magic: &[u8; 8]) -> usize {
         &COMPRESSED_BLOB_MAGIC_1_0 => std::mem::size_of::<DataBlobHeader>(),
         &ENCRYPTED_BLOB_MAGIC_1_0 => std::mem::size_of::<EncryptedDataBlobHeader>(),
         &ENCR_COMPR_BLOB_MAGIC_1_0 => std::mem::size_of::<EncryptedDataBlobHeader>(),
-        &AUTHENTICATED_BLOB_MAGIC_1_0 => std::mem::size_of::<AuthenticatedDataBlobHeader>(),
-        &AUTH_COMPR_BLOB_MAGIC_1_0 => std::mem::size_of::<AuthenticatedDataBlobHeader>(),
         _ => panic!("unknown blob magic"),
     }
 }
@ -1,4 +1,4 @@
-use anyhow::{bail, format_err, Error};
+use anyhow::{bail, format_err, Context, Error};
 
 use serde::{Deserialize, Serialize};
 use chrono::{Local, TimeZone, DateTime};
@ -146,12 +146,26 @@ pub fn encrypt_key_with_passphrase(
     })
 }
 
-pub fn load_and_decrypt_key(path: &std::path::Path, passphrase: &dyn Fn() -> Result<Vec<u8>, Error>) -> Result<([u8;32], DateTime<Local>), Error> {
+pub fn load_and_decrypt_key(
+    path: &std::path::Path,
+    passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
+) -> Result<([u8;32], DateTime<Local>), Error> {
+    do_load_and_decrypt_key(path, passphrase)
+        .with_context(|| format!("failed to load decryption key from {:?}", path))
+}
 
-    let raw = file_get_contents(&path)?;
-    let data = String::from_utf8(raw)?;
+fn do_load_and_decrypt_key(
+    path: &std::path::Path,
+    passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
+) -> Result<([u8;32], DateTime<Local>), Error> {
+    decrypt_key(&file_get_contents(&path)?, passphrase)
+}
 
-    let key_config: KeyConfig = serde_json::from_str(&data)?;
+pub fn decrypt_key(
+    mut keydata: &[u8],
+    passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
+) -> Result<([u8;32], DateTime<Local>), Error> {
+    let key_config: KeyConfig = serde_json::from_reader(&mut keydata)?;
 
     let raw_data = key_config.data;
     let created = key_config.created;
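The split above follows a common ``anyhow`` pattern: keep the fallible helper
plain, and attach context (here, the key file path) once, in the public
wrapper. A minimal, self-contained sketch of the same idea (names are
illustrative):

.. code-block:: rust

   use anyhow::{Context, Result};
   use std::path::Path;

   fn do_load(path: &Path) -> Result<Vec<u8>> {
       // Any I/O error bubbles up without further decoration.
       Ok(std::fs::read(path)?)
   }

   pub fn load(path: &Path) -> Result<Vec<u8>> {
       // with_context() wraps whatever do_load() returned, so errors
       // read: failed to load key from "...": <underlying cause>
       do_load(path).with_context(|| format!("failed to load key from {:?}", path))
   }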
@ -3,22 +3,56 @@ use std::convert::TryFrom;
 use std::path::Path;
 
 use serde_json::{json, Value};
+use ::serde::{Deserialize, Serialize};
 
-use crate::backup::BackupDir;
+use crate::backup::{BackupDir, CryptMode, CryptConfig};
 
 pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
 pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";
 
+mod hex_csum {
+    use serde::{self, Deserialize, Serializer, Deserializer};
+
+    pub fn serialize<S>(
+        csum: &[u8; 32],
+        serializer: S,
+    ) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        let s = proxmox::tools::digest_to_hex(csum);
+        serializer.serialize_str(&s)
+    }
+
+    pub fn deserialize<'de, D>(
+        deserializer: D,
+    ) -> Result<[u8; 32], D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let s = String::deserialize(deserializer)?;
+        proxmox::tools::hex_to_digest(&s).map_err(serde::de::Error::custom)
+    }
+}
+
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all="kebab-case")]
 pub struct FileInfo {
     pub filename: String,
-    pub encrypted: Option<bool>,
+    pub crypt_mode: CryptMode,
     pub size: u64,
+    #[serde(with = "hex_csum")]
     pub csum: [u8; 32],
 }
 
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all="kebab-case")]
 pub struct BackupManifest {
-    snapshot: BackupDir,
+    backup_type: String,
+    backup_id: String,
+    backup_time: i64,
     files: Vec<FileInfo>,
+    pub unprotected: Value,
 }
 
 #[derive(PartialEq)]
@ -46,12 +80,18 @@ pub fn archive_type<P: AsRef<Path>>(
 impl BackupManifest {
 
     pub fn new(snapshot: BackupDir) -> Self {
-        Self { files: Vec::new(), snapshot }
+        Self {
+            backup_type: snapshot.group().backup_type().into(),
+            backup_id: snapshot.group().backup_id().into(),
+            backup_time: snapshot.backup_time().timestamp(),
+            files: Vec::new(),
+            unprotected: json!({}),
+        }
     }
 
-    pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32], encrypted: Option<bool>) -> Result<(), Error> {
+    pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32], crypt_mode: CryptMode) -> Result<(), Error> {
         let _archive_type = archive_type(&filename)?; // check type
-        self.files.push(FileInfo { filename, size, csum, encrypted });
+        self.files.push(FileInfo { filename, size, csum, crypt_mode });
         Ok(())
     }
 
@@ -84,31 +124,103 @@ impl BackupManifest {
         Ok(())
     }
 
-    pub fn into_json(self) -> Value {
-        json!({
-            "backup-type": self.snapshot.group().backup_type(),
-            "backup-id": self.snapshot.group().backup_id(),
-            "backup-time": self.snapshot.backup_time().timestamp(),
-            "files": self.files.iter()
-                .fold(Vec::new(), |mut acc, info| {
-                    let mut value = json!({
-                        "filename": info.filename,
-                        "encrypted": info.encrypted,
-                        "size": info.size,
-                        "csum": proxmox::tools::digest_to_hex(&info.csum),
-                    });
-
-                    if let Some(encrypted) = info.encrypted {
-                        value["encrypted"] = encrypted.into();
-                    }
-
-                    acc.push(value);
-                    acc
-                })
-        })
-    }
+    // Generate canonical json
+    fn to_canonical_json(value: &Value, output: &mut String) -> Result<(), Error> {
+        match value {
+            Value::Null => bail!("got unexpected null value"),
+            Value::String(_) => {
+                output.push_str(&serde_json::to_string(value)?);
+            },
+            Value::Number(_) => {
+                output.push_str(&serde_json::to_string(value)?);
+            }
+            Value::Bool(_) => {
+                output.push_str(&serde_json::to_string(value)?);
+            },
+            Value::Array(list) => {
+                output.push('[');
+                for (i, item) in list.iter().enumerate() {
+                    if i != 0 { output.push(','); }
+                    Self::to_canonical_json(item, output)?;
+                }
+                output.push(']');
+            }
+            Value::Object(map) => {
+                output.push('{');
+                let mut keys: Vec<String> = map.keys().map(|s| s.clone()).collect();
+                keys.sort();
+                for (i, key) in keys.iter().enumerate() {
+                    let item = map.get(key).unwrap();
+                    if i != 0 { output.push(','); }
+                    output.push_str(&serde_json::to_string(&Value::String(key.clone()))?);
+                    output.push(':');
+                    Self::to_canonical_json(item, output)?;
+                }
+                output.push('}');
+            }
+        }
+        Ok(())
+    }
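The canonical form exists so the signature input is byte-stable: object keys are emitted in sorted order with no whitespace, so two semantically equal manifests always serialize to the same bytes. A minimal standalone sketch of the same idea (a free function over serde_json::Value; names here are ours, not from the diff, and unlike the manifest code above it does not reject Null):

use serde_json::{json, Value};

// Sketch of canonical serialization: sorted keys, no whitespace.
fn canonical(value: &Value, out: &mut String) {
    match value {
        Value::Object(map) => {
            out.push('{');
            let mut keys: Vec<&String> = map.keys().collect();
            keys.sort();
            for (i, key) in keys.into_iter().enumerate() {
                if i != 0 { out.push(','); }
                out.push_str(&serde_json::to_string(key).unwrap());
                out.push(':');
                canonical(&map[key.as_str()], out);
            }
            out.push('}');
        }
        Value::Array(list) => {
            out.push('[');
            for (i, item) in list.iter().enumerate() {
                if i != 0 { out.push(','); }
                canonical(item, out);
            }
            out.push(']');
        }
        // strings, numbers, bools and null serialize unambiguously on their own
        other => out.push_str(&serde_json::to_string(other).unwrap()),
    }
}

fn main() {
    let mut out = String::new();
    canonical(&json!({"b": 1, "a": [true, "x"]}), &mut out);
    assert_eq!(out, r#"{"a":[true,"x"],"b":1}"#);
}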
+
+    /// Compute manifest signature
+    ///
+    /// By generating an HMAC-SHA256 over the canonical json
+    /// representation. The 'unprotected' property is excluded.
+    pub fn signature(&self, crypt_config: &CryptConfig) -> Result<[u8; 32], Error> {
+        Self::json_signature(&serde_json::to_value(&self)?, crypt_config)
+    }
+
+    fn json_signature(data: &Value, crypt_config: &CryptConfig) -> Result<[u8; 32], Error> {
+
+        let mut signed_data = data.clone();
+
+        signed_data.as_object_mut().unwrap().remove("unprotected"); // exclude
+
+        let mut canonical = String::new();
+        Self::to_canonical_json(&signed_data, &mut canonical)?;
+
+        let sig = crypt_config.compute_auth_tag(canonical.as_bytes());
+
+        Ok(sig)
+    }
+
+    /// Converts the manifest into a json string, and adds a signature if there is a crypt_config.
+    pub fn to_string(&self, crypt_config: Option<&CryptConfig>) -> Result<String, Error> {
+
+        let mut manifest = serde_json::to_value(&self)?;
+
+        if let Some(crypt_config) = crypt_config {
+            let sig = self.signature(crypt_config)?;
+            manifest["signature"] = proxmox::tools::digest_to_hex(&sig).into();
+        }
+
+        let manifest = serde_json::to_string_pretty(&manifest).unwrap().into();
+        Ok(manifest)
+    }
+
+    /// Try to read the manifest. This verifies the signature if there is a crypt_config.
+    pub fn from_data(data: &[u8], crypt_config: Option<&CryptConfig>) -> Result<BackupManifest, Error> {
+        let json: Value = serde_json::from_slice(data)?;
+        let signature = json["signature"].as_str().map(String::from);
+
+        if let Some(ref crypt_config) = crypt_config {
+            if let Some(signature) = signature {
+                let expected_signature = proxmox::tools::digest_to_hex(&Self::json_signature(&json, crypt_config)?);
+                if signature != expected_signature {
+                    bail!("wrong signature in manifest");
+                }
+            } else {
+                // not signed: warn/fail?
+            }
+        }
+
+        let manifest: BackupManifest = serde_json::from_value(json)?;
+        Ok(manifest)
+    }
 }
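compute_auth_tag lives in CryptConfig and is not part of this diff; per the doc comment above it produces an HMAC-SHA256 over the canonical bytes. A hedged standalone sketch of just that step, using the openssl crate, with the key-derivation inside CryptConfig assumed away as auth_key:

use anyhow::Error;
use openssl::hash::MessageDigest;
use openssl::pkey::PKey;
use openssl::sign::Signer;

// Hedged sketch: HMAC-SHA256 over the canonical manifest bytes.
// `auth_key` stands in for whatever secret CryptConfig derives internally;
// the real key handling is not shown in this diff.
fn hmac_sha256(auth_key: &[u8], canonical: &[u8]) -> Result<[u8; 32], Error> {
    let key = PKey::hmac(auth_key)?;
    let mut signer = Signer::new(MessageDigest::sha256(), &key)?;
    signer.update(canonical)?;
    let mut tag = [0u8; 32];
    signer.sign(&mut tag)?;
    Ok(tag)
}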
 
 impl TryFrom<super::DataBlob> for BackupManifest {
     type Error = Error;
 
@@ -117,41 +229,50 @@ impl TryFrom<super::DataBlob> for BackupManifest {
             .map_err(|err| format_err!("decode backup manifest blob failed - {}", err))?;
         let json: Value = serde_json::from_slice(&data[..])
             .map_err(|err| format_err!("unable to parse backup manifest json - {}", err))?;
-        BackupManifest::try_from(json)
+        let manifest: BackupManifest = serde_json::from_value(json)?;
+        Ok(manifest)
     }
 }
 
-impl TryFrom<Value> for BackupManifest {
-    type Error = Error;
-
-    fn try_from(data: Value) -> Result<Self, Error> {
-
-        use crate::tools::{required_string_property, required_integer_property, required_array_property};
-
-        proxmox::try_block!({
-            let backup_type = required_string_property(&data, "backup-type")?;
-            let backup_id = required_string_property(&data, "backup-id")?;
-            let backup_time = required_integer_property(&data, "backup-time")?;
-
-            let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
-
-            let mut manifest = BackupManifest::new(snapshot);
-
-            for item in required_array_property(&data, "files")?.iter() {
-                let filename = required_string_property(item, "filename")?.to_owned();
-                let csum = required_string_property(item, "csum")?;
-                let csum = proxmox::tools::hex_to_digest(csum)?;
-                let size = required_integer_property(item, "size")? as u64;
-                let encrypted = item["encrypted"].as_bool();
-                manifest.add_file(filename, size, csum, encrypted)?;
-            }
-
-            if manifest.files().is_empty() {
-                bail!("manifest does not list any files.");
-            }
-
-            Ok(manifest)
-        }).map_err(|err: Error| format_err!("unable to parse backup manifest - {}", err))
-    }
+#[test]
+fn test_manifest_signature() -> Result<(), Error> {
+
+    use crate::backup::{KeyDerivationConfig};
+
+    let pw = b"test";
+
+    let kdf = KeyDerivationConfig::Scrypt {
+        n: 65536,
+        r: 8,
+        p: 1,
+        salt: Vec::new(),
+    };
+
+    let testkey = kdf.derive_key(pw)?;
+
+    let crypt_config = CryptConfig::new(testkey)?;
+
+    let snapshot: BackupDir = "host/elsa/2020-06-26T13:56:05Z".parse()?;
+
+    let mut manifest = BackupManifest::new(snapshot);
+
+    manifest.add_file("test1.img.fidx".into(), 200, [1u8; 32], CryptMode::Encrypt)?;
+    manifest.add_file("abc.blob".into(), 200, [2u8; 32], CryptMode::None)?;
+
+    manifest.unprotected["note"] = "This is not protected by the signature.".into();
+
+    let text = manifest.to_string(Some(&crypt_config))?;
+
+    let manifest: Value = serde_json::from_str(&text)?;
+    let signature = manifest["signature"].as_str().unwrap().to_string();
+
+    assert_eq!(signature, "d7b446fb7db081662081d4b40fedd858a1d6307a5aff4ecff7d5bf4fd35679e9");
+
+    let manifest: BackupManifest = serde_json::from_value(manifest)?;
+    let expected_signature = proxmox::tools::digest_to_hex(&manifest.signature(&crypt_config)?);
+
+    assert_eq!(signature, expected_signature);
+
+    Ok(())
 }
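The test pins the HMAC to a fixed hex digest, which guards the canonical encoding against accidental change: any re-ordering or re-formatting of the signed fields would alter the tag. A hedged usage sketch of the new API surface (types as imported from proxmox_backup::backup elsewhere in this diff):

use anyhow::Error;
use proxmox_backup::backup::{BackupManifest, CryptConfig};

// Hedged usage sketch: serialize a manifest with a signature, then parse it
// back; from_data() recomputes the HMAC and rejects a tampered manifest.
fn roundtrip(manifest: &BackupManifest, crypt_config: &CryptConfig) -> Result<BackupManifest, Error> {
    let text = manifest.to_string(Some(crypt_config))?;
    BackupManifest::from_data(text.as_bytes(), Some(crypt_config))
}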
@@ -101,7 +101,7 @@ fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &Fi
 pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker: &WorkerTask) -> Result<bool, Error> {
 
     let manifest = match datastore.load_manifest(&backup_dir) {
-        Ok((manifest, _)) => manifest,
+        Ok((manifest, _crypt_mode, _)) => manifest,
         Err(err) => {
             worker.log(format!("verify {}:{} - manifest load error: {}", datastore.name(), backup_dir, err));
             return Ok(false);
@@ -1,6 +1,7 @@
 use std::collections::{HashSet, HashMap};
-use std::io::{self, Write, Seek, SeekFrom};
-use std::os::unix::fs::OpenOptionsExt;
+use std::convert::TryFrom;
+use std::io::{self, Read, Write, Seek, SeekFrom};
+use std::os::unix::io::{FromRawFd, RawFd};
 use std::path::{Path, PathBuf};
 use std::pin::Pin;
 use std::sync::{Arc, Mutex};
@@ -15,9 +16,7 @@ use tokio::sync::mpsc;
 use xdg::BaseDirectories;
 
 use pathpatterns::{MatchEntry, MatchType, PatternFlag};
-use proxmox::{sortable, identity};
 use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
-use proxmox::sys::linux::tty;
 use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
 use proxmox::api::schema::*;
 use proxmox::api::cli::*;
@@ -30,9 +29,7 @@ use proxmox_backup::client::*;
 use proxmox_backup::pxar::catalog::*;
 use proxmox_backup::backup::{
     archive_type,
-    encrypt_key_with_passphrase,
-    load_and_decrypt_key,
-    store_key_config,
+    decrypt_key,
     verify_chunk_size,
     ArchiveType,
     AsyncReadChunk,
@@ -40,17 +37,17 @@ use proxmox_backup::backup::{
     BackupGroup,
     BackupManifest,
     BufferedDynamicReader,
+    CATALOG_NAME,
     CatalogReader,
     CatalogWriter,
-    CATALOG_NAME,
     ChunkStream,
     CryptConfig,
+    CryptMode,
     DataBlob,
     DynamicIndexReader,
     FixedChunkStream,
     FixedIndexReader,
     IndexFile,
-    KeyConfig,
     MANIFEST_BLOB_NAME,
     Shell,
 };
@@ -71,6 +68,11 @@ pub const KEYFILE_SCHEMA: Schema = StringSchema::new(
     "Path to encryption key. All data will be encrypted using this key.")
     .schema();
 
+pub const KEYFD_SCHEMA: Schema = IntegerSchema::new(
+    "Pass an encryption key via an already opened file descriptor.")
+    .minimum(0)
+    .schema();
+
 const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new(
     "Chunk size in KB. Must be a power of 2.")
     .minimum(64)
@@ -271,6 +273,8 @@ async fn backup_directory<P: AsRef<Path>>(
     catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
     exclude_pattern: Vec<MatchEntry>,
     entries_max: usize,
+    compress: bool,
+    encrypt: bool,
 ) -> Result<BackupStats, Error> {
 
     let pxar_stream = PxarBackupStream::open(
@@ -297,7 +301,7 @@ async fn backup_directory<P: AsRef<Path>>(
     });
 
     let stats = client
-        .upload_stream(previous_manifest, archive_name, stream, "dynamic", None)
+        .upload_stream(previous_manifest, archive_name, stream, "dynamic", None, compress, encrypt)
         .await?;
 
     Ok(stats)
@@ -310,6 +314,8 @@ async fn backup_image<P: AsRef<Path>>(
     archive_name: &str,
     image_size: u64,
     chunk_size: Option<usize>,
+    compress: bool,
+    encrypt: bool,
     _verbose: bool,
 ) -> Result<BackupStats, Error> {
 
@@ -323,7 +329,7 @@ async fn backup_image<P: AsRef<Path>>(
     let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));
 
     let stats = client
-        .upload_stream(previous_manifest, archive_name, stream, "fixed", Some(image_size))
+        .upload_stream(previous_manifest, archive_name, stream, "fixed", Some(image_size), compress, encrypt)
         .await?;
 
     Ok(stats)
@@ -546,79 +552,6 @@ fn api_logout(param: Value) -> Result<Value, Error> {
     Ok(Value::Null)
 }
 
-#[api(
-    input: {
-        properties: {
-            repository: {
-                schema: REPO_URL_SCHEMA,
-                optional: true,
-            },
-            snapshot: {
-                type: String,
-                description: "Snapshot path.",
-            },
-        }
-    }
-)]
-/// Dump catalog.
-async fn dump_catalog(param: Value) -> Result<Value, Error> {
-
-    let repo = extract_repository_from_value(&param)?;
-
-    let path = tools::required_string_param(&param, "snapshot")?;
-    let snapshot: BackupDir = path.parse()?;
-
-    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
-
-    let crypt_config = match keyfile {
-        None => None,
-        Some(path) => {
-            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
-            Some(Arc::new(CryptConfig::new(key)?))
-        }
-    };
-
-    let client = connect(repo.host(), repo.user())?;
-
-    let client = BackupReader::start(
-        client,
-        crypt_config.clone(),
-        repo.store(),
-        &snapshot.group().backup_type(),
-        &snapshot.group().backup_id(),
-        snapshot.backup_time(),
-        true,
-    ).await?;
-
-    let manifest = client.download_manifest().await?;
-
-    let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;
-
-    let most_used = index.find_most_used_chunks(8);
-
-    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
-
-    let mut reader = BufferedDynamicReader::new(index, chunk_reader);
-
-    let mut catalogfile = std::fs::OpenOptions::new()
-        .write(true)
-        .read(true)
-        .custom_flags(libc::O_TMPFILE)
-        .open("/tmp")?;
-
-    std::io::copy(&mut reader, &mut catalogfile)
-        .map_err(|err| format_err!("unable to download catalog - {}", err))?;
-
-    catalogfile.seek(SeekFrom::Start(0))?;
-
-    let mut catalog_reader = CatalogReader::new(catalogfile);
-
-    catalog_reader.dump()?;
-
-    record_repository(&repo);
-
-    Ok(Value::Null)
-}
-
 #[api(
     input: {
@@ -706,7 +639,8 @@ async fn start_garbage_collection(param: Value) -> Result<Value, Error> {
 }
 
 fn spawn_catalog_upload(
-    client: Arc<BackupWriter>
+    client: Arc<BackupWriter>,
+    encrypt: bool,
 ) -> Result<
     (
         Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
@@ -724,7 +658,7 @@ fn spawn_catalog_upload(
 
     tokio::spawn(async move {
         let catalog_upload_result = client
-            .upload_stream(None, CATALOG_NAME, catalog_chunk_stream, "dynamic", None)
+            .upload_stream(None, CATALOG_NAME, catalog_chunk_stream, "dynamic", None, true, encrypt)
            .await;
 
         if let Err(ref err) = catalog_upload_result {
@@ -738,6 +672,74 @@ fn spawn_catalog_upload(
     Ok((catalog, catalog_result_rx))
 }
 
+fn keyfile_parameters(param: &Value) -> Result<(Option<Vec<u8>>, CryptMode), Error> {
+    let keyfile = match param.get("keyfile") {
+        Some(Value::String(keyfile)) => Some(keyfile),
+        Some(_) => bail!("bad --keyfile parameter type"),
+        None => None,
+    };
+
+    let key_fd = match param.get("keyfd") {
+        Some(Value::Number(key_fd)) => Some(
+            RawFd::try_from(key_fd
+                .as_i64()
+                .ok_or_else(|| format_err!("bad key fd: {:?}", key_fd))?
+            )
+            .map_err(|err| format_err!("bad key fd: {:?}: {}", key_fd, err))?
+        ),
+        Some(_) => bail!("bad --keyfd parameter type"),
+        None => None,
+    };
+
+    let crypt_mode: Option<CryptMode> = match param.get("crypt-mode") {
+        Some(mode) => Some(serde_json::from_value(mode.clone())?),
+        None => None,
+    };
+
+    let keydata = match (keyfile, key_fd) {
+        (None, None) => None,
+        (Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
+        (Some(keyfile), None) => Some(file_get_contents(keyfile)?),
+        (None, Some(fd)) => {
+            let input = unsafe { std::fs::File::from_raw_fd(fd) };
+            let mut data = Vec::new();
+            let _len: usize = { input }.read_to_end(&mut data)
+                .map_err(|err| {
+                    format_err!("error reading encryption key from fd {}: {}", fd, err)
+                })?;
+            Some(data)
+        }
+    };
+
+    Ok(match (keydata, crypt_mode) {
+        // no parameters:
+        (None, None) => match key::read_optional_default_encryption_key()? {
+            Some(key) => (Some(key), CryptMode::Encrypt),
+            None => (None, CryptMode::None),
+        },
+
+        // just --crypt-mode=none
+        (None, Some(CryptMode::None)) => (None, CryptMode::None),
+
+        // just --crypt-mode other than none
+        (None, Some(crypt_mode)) => match key::read_optional_default_encryption_key()? {
+            None => bail!("--crypt-mode without --keyfile and no default key file available"),
+            Some(key) => (Some(key), crypt_mode),
+        }
+
+        // just --keyfile
+        (Some(key), None) => (Some(key), CryptMode::Encrypt),
+
+        // --keyfile and --crypt-mode=none
+        (Some(_), Some(CryptMode::None)) => {
+            bail!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive");
+        }
+
+        // --keyfile and --crypt-mode other than none
+        (Some(key), Some(crypt_mode)) => (Some(key), crypt_mode),
+    })
+}
+
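The tail of keyfile_parameters() is effectively a truth table over "was key material given" and "was --crypt-mode given". A condensed sketch of just that table, with a locally defined CryptMode stand-in so it runs outside the crate:

use anyhow::{bail, Error};

// Local stand-in for proxmox_backup::backup::CryptMode, only for this sketch.
#[derive(Clone, Copy, PartialEq, Debug)]
enum CryptMode { None, SignOnly, Encrypt }

// The (key material, --crypt-mode) decision table from keyfile_parameters(),
// reduced to a pure function; `default_key` models the optional default key
// looked up via key::read_optional_default_encryption_key().
fn resolve(
    keydata: Option<Vec<u8>>,
    crypt_mode: Option<CryptMode>,
    default_key: Option<Vec<u8>>,
) -> Result<(Option<Vec<u8>>, CryptMode), Error> {
    Ok(match (keydata, crypt_mode) {
        // no parameters: encrypt iff a default key exists
        (None, None) => match default_key {
            Some(key) => (Some(key), CryptMode::Encrypt),
            None => (None, CryptMode::None),
        },
        // just --crypt-mode=none
        (None, Some(CryptMode::None)) => (None, CryptMode::None),
        // just --crypt-mode other than none: requires some key
        (None, Some(mode)) => match default_key {
            Some(key) => (Some(key), mode),
            None => bail!("--crypt-mode without --keyfile and no default key file available"),
        },
        // just --keyfile/--keyfd
        (Some(key), None) => (Some(key), CryptMode::Encrypt),
        // a key plus --crypt-mode=none is contradictory
        (Some(_), Some(CryptMode::None)) => {
            bail!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive")
        }
        // a key plus an explicit mode
        (Some(key), Some(mode)) => (Some(key), mode),
    })
}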
 #[api(
     input: {
         properties: {
@@ -764,6 +766,14 @@ fn spawn_catalog_upload(
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
+           "keyfd": {
+               schema: KEYFD_SCHEMA,
+               optional: true,
+           },
+           "crypt-mode": {
+               type: CryptMode,
+               optional: true,
+           },
            "skip-lost-and-found": {
                type: Boolean,
                description: "Skip lost+found directory.",
@@ -833,7 +843,7 @@ async fn create_backup(
         verify_chunk_size(size)?;
     }
 
-    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
+    let (keydata, crypt_mode) = keyfile_parameters(&param)?;
 
     let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());
 
@@ -932,27 +942,25 @@ async fn create_backup(
 
     println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
 
-    let (crypt_config, rsa_encrypted_key) = match keyfile {
+    let (crypt_config, rsa_encrypted_key) = match keydata {
         None => (None, None),
-        Some(path) => {
-            let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
+        Some(key) => {
+            let (key, created) = decrypt_key(&key, &key::get_encryption_key_password)?;
 
             let crypt_config = CryptConfig::new(key)?;
 
-            let path = master_pubkey_path()?;
-            if path.exists() {
-                let pem_data = file_get_contents(&path)?;
-                let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
-                let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
-                (Some(Arc::new(crypt_config)), Some(enc_key))
-            } else {
-                (Some(Arc::new(crypt_config)), None)
+            match key::find_master_pubkey()? {
+                Some(ref path) if path.exists() => {
+                    let pem_data = file_get_contents(path)?;
+                    let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
+                    let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
+                    (Some(Arc::new(crypt_config)), Some(enc_key))
+                }
+                _ => (Some(Arc::new(crypt_config)), None),
             }
         }
     };
 
-    let is_encrypted = Some(crypt_config.is_some());
-
     let client = BackupWriter::start(
         client,
         crypt_config.clone(),
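When a master public key is present, the session key is additionally uploaded RSA-encrypted so the master key holder can recover it later (the `openssl rsautl -decrypt` comment further down shows the matching manual decryption). generate_rsa_encoded_key is defined in CryptConfig, outside this diff; a rough sketch of the RSA wrapping step alone, with the padding choice an assumption:

use anyhow::Error;
use openssl::rsa::{Padding, Rsa};

// Hedged sketch: wrap a 32-byte session key for the master key holder.
// CryptConfig::generate_rsa_encoded_key() does the real work; this only
// illustrates the public-key encryption step.
fn rsa_encrypt_key(pem: &[u8], key: &[u8; 32]) -> Result<Vec<u8>, Error> {
    let rsa = Rsa::public_key_from_pem(pem)?;
    let mut buf = vec![0u8; rsa.size() as usize];
    let len = rsa.public_encrypt(key, &mut buf, Padding::PKCS1)?;
    buf.truncate(len);
    Ok(buf)
}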
@@ -980,21 +988,21 @@ async fn create_backup(
             BackupSpecificationType::CONFIG => {
                 println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                 let stats = client
-                    .upload_blob_from_file(&filename, &target, true, Some(true))
+                    .upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
                     .await?;
-                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
+                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
             }
             BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
                 println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                 let stats = client
-                    .upload_blob_from_file(&filename, &target, true, Some(true))
+                    .upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
                     .await?;
-                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
+                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
             }
             BackupSpecificationType::PXAR => {
                 // start catalog upload on first use
                 if catalog.is_none() {
-                    let (cat, res) = spawn_catalog_upload(client.clone())?;
+                    let (cat, res) = spawn_catalog_upload(client.clone(), crypt_mode == CryptMode::Encrypt)?;
                     catalog = Some(cat);
                     catalog_result_tx = Some(res);
                 }
@@ -1014,8 +1022,10 @@ async fn create_backup(
                     catalog.clone(),
                     pattern_list.clone(),
                     entries_max as usize,
+                    true,
+                    crypt_mode == CryptMode::Encrypt,
                 ).await?;
-                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
+                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
                 catalog.lock().unwrap().end_directory()?;
             }
             BackupSpecificationType::IMAGE => {
@@ -1027,9 +1037,11 @@ async fn create_backup(
                     &target,
                     size,
                     chunk_size_opt,
+                    true,
+                    crypt_mode == CryptMode::Encrypt,
                     verbose,
                 ).await?;
-                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
+                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
             }
         }
     }
@@ -1046,7 +1058,7 @@ async fn create_backup(
 
     if let Some(catalog_result_rx) = catalog_result_tx {
         let stats = catalog_result_rx.await??;
-        manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, is_encrypted)?;
+        manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypt_mode)?;
     }
 }
 
@@ -1054,9 +1066,9 @@ async fn create_backup(
         let target = "rsa-encrypted.key";
         println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
         let stats = client
-            .upload_blob_from_data(rsa_encrypted_key, target, false, None)
+            .upload_blob_from_data(rsa_encrypted_key, target, false, false)
            .await?;
-        manifest.add_file(format!("{}.blob", target), stats.size, stats.csum, is_encrypted)?;
+        manifest.add_file(format!("{}.blob", target), stats.size, stats.csum, crypt_mode)?;
 
         // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
         /*
@@ -1069,12 +1081,14 @@ async fn create_backup(
     }
 
     // create manifest (index.json)
-    let manifest = manifest.into_json();
+    // manifests are never encrypted, but include a signature
+    let manifest = manifest.to_string(crypt_config.as_ref().map(Arc::as_ref))
+        .map_err(|err| format_err!("unable to format manifest - {}", err))?;
 
     println!("Upload index.json to '{:?}'", repo);
-    let manifest = serde_json::to_string_pretty(&manifest)?.into();
     client
-        .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, true, Some(true))
+        .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, true, false)
         .await?;
 
     client.finish().await?;
@@ -1198,6 +1212,14 @@ We do not extract '.pxar' archives when writing to standard output.
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
+           "keyfd": {
+               schema: KEYFD_SCHEMA,
+               optional: true,
+           },
+           "crypt-mode": {
+               type: CryptMode,
+               optional: true,
+           },
        }
    }
 )]
@@ -1228,12 +1250,12 @@ async fn restore(param: Value) -> Result<Value, Error> {
     let target = tools::required_string_param(&param, "target")?;
     let target = if target == "-" { None } else { Some(target) };
 
-    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
+    let (keydata, _crypt_mode) = keyfile_parameters(&param)?;
 
-    let crypt_config = match keyfile {
+    let crypt_config = match keydata {
         None => None,
-        Some(path) => {
-            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
+        Some(key) => {
+            let (key, _) = decrypt_key(&key, &key::get_encryption_key_password)?;
             Some(Arc::new(CryptConfig::new(key)?))
         }
     };
@@ -1248,18 +1270,17 @@ async fn restore(param: Value) -> Result<Value, Error> {
         true,
     ).await?;
 
-    let manifest = client.download_manifest().await?;
+    let (manifest, backup_index_data) = client.download_manifest().await?;
 
     let (archive_name, archive_type) = parse_archive_type(archive_name);
 
     if archive_name == MANIFEST_BLOB_NAME {
-        let backup_index_data = manifest.into_json().to_string();
         if let Some(target) = target {
-            replace_file(target, backup_index_data.as_bytes(), CreateOptions::new())?;
+            replace_file(target, &backup_index_data, CreateOptions::new())?;
         } else {
             let stdout = std::io::stdout();
             let mut writer = stdout.lock();
-            writer.write_all(backup_index_data.as_bytes())
+            writer.write_all(&backup_index_data)
                 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
         }
 
@@ -1358,6 +1379,14 @@ async fn restore(param: Value) -> Result<Value, Error> {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
+           "keyfd": {
+               schema: KEYFD_SCHEMA,
+               optional: true,
+           },
+           "crypt-mode": {
+               type: CryptMode,
+               optional: true,
+           },
        }
    }
 )]
@@ -1372,12 +1401,12 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
 
     let mut client = connect(repo.host(), repo.user())?;
 
-    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
+    let (keydata, crypt_mode) = keyfile_parameters(&param)?;
 
-    let crypt_config = match keyfile {
+    let crypt_config = match keydata {
         None => None,
-        Some(path) => {
-            let (key, _created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
+        Some(key) => {
+            let (key, _created) = decrypt_key(&key, &key::get_encryption_key_password)?;
             let crypt_config = CryptConfig::new(key)?;
             Some(Arc::new(crypt_config))
         }
@@ -1385,7 +1414,11 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
 
     let data = file_get_contents(logfile)?;
 
-    let blob = DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?;
+    // fixme: howto sign log?
+    let blob = match crypt_mode {
+        CryptMode::None | CryptMode::SignOnly => DataBlob::encode(&data, None, true)?,
+        CryptMode::Encrypt => DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?,
+    };
 
     let raw_data = blob.into_inner();
 
@@ -1742,248 +1775,6 @@ fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
     result
 }
 
-fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
-
-    // fixme: implement other input methods
-
-    use std::env::VarError::*;
-    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
-        Ok(p) => return Ok(p.as_bytes().to_vec()),
-        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
-        Err(NotPresent) => {
-            // Try another method
-        }
-    }
-
-    // If we're on a TTY, query the user for a password
-    if tty::stdin_isatty() {
-        return Ok(tty::read_password("Encryption Key Password: ")?);
-    }
-
-    bail!("no password input mechanism available");
-}
-
-fn key_create(
-    param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
-    let path = tools::required_string_param(&param, "path")?;
-    let path = PathBuf::from(path);
-
-    let kdf = param["kdf"].as_str().unwrap_or("scrypt");
-
-    let key = proxmox::sys::linux::random_data(32)?;
-
-    if kdf == "scrypt" {
-        // always read passphrase from tty
-        if !tty::stdin_isatty() {
-            bail!("unable to read passphrase - no tty");
-        }
-
-        let password = tty::read_and_verify_password("Encryption Key Password: ")?;
-
-        let key_config = encrypt_key_with_passphrase(&key, &password)?;
-
-        store_key_config(&path, false, key_config)?;
-
-        Ok(Value::Null)
-    } else if kdf == "none" {
-        let created = Local.timestamp(Local::now().timestamp(), 0);
-
-        store_key_config(&path, false, KeyConfig {
-            kdf: None,
-            created,
-            modified: created,
-            data: key,
-        })?;
-
-        Ok(Value::Null)
-    } else {
-        unreachable!();
-    }
-}
-
-fn master_pubkey_path() -> Result<PathBuf, Error> {
-    let base = BaseDirectories::with_prefix("proxmox-backup")?;
-
-    // usually $HOME/.config/proxmox-backup/master-public.pem
-    let path = base.place_config_file("master-public.pem")?;
-
-    Ok(path)
-}
-
-fn key_import_master_pubkey(
-    param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
-    let path = tools::required_string_param(&param, "path")?;
-    let path = PathBuf::from(path);
-
-    let pem_data = file_get_contents(&path)?;
-
-    if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
-        bail!("Unable to decode PEM data - {}", err);
-    }
-
-    let target_path = master_pubkey_path()?;
-
-    replace_file(&target_path, &pem_data, CreateOptions::new())?;
-
-    println!("Imported public master key to {:?}", target_path);
-
-    Ok(Value::Null)
-}
-
-fn key_create_master_key(
-    _param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
-    // we need a TTY to query the new password
-    if !tty::stdin_isatty() {
-        bail!("unable to create master key - no tty");
-    }
-
-    let rsa = openssl::rsa::Rsa::generate(4096)?;
-    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;
-
-    let password = String::from_utf8(tty::read_and_verify_password("Master Key Password: ")?)?;
-
-    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
-    let filename_pub = "master-public.pem";
-    println!("Writing public master key to {}", filename_pub);
-    replace_file(filename_pub, pub_key.as_slice(), CreateOptions::new())?;
-
-    let cipher = openssl::symm::Cipher::aes_256_cbc();
-    let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, password.as_bytes())?;
-
-    let filename_priv = "master-private.pem";
-    println!("Writing private master key to {}", filename_priv);
-    replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new())?;
-
-    Ok(Value::Null)
-}
-
-fn key_change_passphrase(
-    param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
-    let path = tools::required_string_param(&param, "path")?;
-    let path = PathBuf::from(path);
-
-    let kdf = param["kdf"].as_str().unwrap_or("scrypt");
-
-    // we need a TTY to query the new password
-    if !tty::stdin_isatty() {
-        bail!("unable to change passphrase - no tty");
-    }
-
-    let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
-
-    if kdf == "scrypt" {
-
-        let password = tty::read_and_verify_password("New Password: ")?;
-
-        let mut new_key_config = encrypt_key_with_passphrase(&key, &password)?;
-        new_key_config.created = created; // keep original value
-
-        store_key_config(&path, true, new_key_config)?;
-
-        Ok(Value::Null)
-    } else if kdf == "none" {
-        let modified = Local.timestamp(Local::now().timestamp(), 0);
-
-        store_key_config(&path, true, KeyConfig {
-            kdf: None,
-            created, // keep original value
-            modified,
-            data: key.to_vec(),
-        })?;
-
-        Ok(Value::Null)
-    } else {
-        unreachable!();
-    }
-}
-
-fn key_mgmt_cli() -> CliCommandMap {
-
-    const KDF_SCHEMA: Schema =
-        StringSchema::new("Key derivation function. Choose 'none' to store the key unecrypted.")
-        .format(&ApiStringFormat::Enum(&[
-            EnumEntry::new("scrypt", "SCrypt"),
-            EnumEntry::new("none", "Do not encrypt the key")]))
-        .default("scrypt")
-        .schema();
-
-    #[sortable]
-    const API_METHOD_KEY_CREATE: ApiMethod = ApiMethod::new(
-        &ApiHandler::Sync(&key_create),
-        &ObjectSchema::new(
-            "Create a new encryption key.",
-            &sorted!([
-                ("path", false, &StringSchema::new("File system path.").schema()),
-                ("kdf", true, &KDF_SCHEMA),
-            ]),
-        )
-    );
-
-    let key_create_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE)
-        .arg_param(&["path"])
-        .completion_cb("path", tools::complete_file_name);
-
-    #[sortable]
-    const API_METHOD_KEY_CHANGE_PASSPHRASE: ApiMethod = ApiMethod::new(
-        &ApiHandler::Sync(&key_change_passphrase),
-        &ObjectSchema::new(
-            "Change the passphrase required to decrypt the key.",
-            &sorted!([
-                ("path", false, &StringSchema::new("File system path.").schema()),
-                ("kdf", true, &KDF_SCHEMA),
-            ]),
-        )
-    );
-
-    let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_KEY_CHANGE_PASSPHRASE)
-        .arg_param(&["path"])
-        .completion_cb("path", tools::complete_file_name);
-
-    const API_METHOD_KEY_CREATE_MASTER_KEY: ApiMethod = ApiMethod::new(
-        &ApiHandler::Sync(&key_create_master_key),
-        &ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.", &[])
-    );
-
-    let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE_MASTER_KEY);
-
-    #[sortable]
-    const API_METHOD_KEY_IMPORT_MASTER_PUBKEY: ApiMethod = ApiMethod::new(
-        &ApiHandler::Sync(&key_import_master_pubkey),
-        &ObjectSchema::new(
-            "Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.",
-            &sorted!([ ("path", false, &StringSchema::new("File system path.").schema()) ]),
-        )
-    );
-
-    let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_KEY_IMPORT_MASTER_PUBKEY)
-        .arg_param(&["path"])
-        .completion_cb("path", tools::complete_file_name);
-
-    CliCommandMap::new()
-        .insert("create", key_create_cmd_def)
-        .insert("create-master-key", key_create_master_key_cmd_def)
-        .insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
-        .insert("change-passphrase", key_change_passphrase_cmd_def)
-}
-
-
 use proxmox_backup::client::RemoteChunkReader;
 /// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
 /// async use!
@@ -2010,7 +1801,6 @@ impl ReadAt for BufferedDynamicReadAt {
         buf: &'a mut [u8],
         offset: u64,
     ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
-        use std::io::Read;
         MaybeReady::Ready(tokio::task::block_in_place(move || {
             let mut reader = self.inner.lock().unwrap();
             reader.seek(SeekFrom::Start(offset))?;
@@ -2026,140 +1816,6 @@ impl ReadAt for BufferedDynamicReadAt {
     }
 }
 
-
-#[api(
-    input: {
-        properties: {
-            "snapshot": {
-                type: String,
-                description: "Group/Snapshot path.",
-            },
-            "archive-name": {
-                type: String,
-                description: "Backup archive name.",
-            },
-            "repository": {
-                optional: true,
-                schema: REPO_URL_SCHEMA,
-            },
-            "keyfile": {
-                optional: true,
-                type: String,
-                description: "Path to encryption key.",
-            },
-        },
-    },
-)]
-/// Shell to interactively inspect and restore snapshots.
-async fn catalog_shell(param: Value) -> Result<(), Error> {
-    let repo = extract_repository_from_value(&param)?;
-    let client = connect(repo.host(), repo.user())?;
-    let path = tools::required_string_param(&param, "snapshot")?;
-    let archive_name = tools::required_string_param(&param, "archive-name")?;
-
-    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
-        let group: BackupGroup = path.parse()?;
-        api_datastore_latest_snapshot(&client, repo.store(), group).await?
-    } else {
-        let snapshot: BackupDir = path.parse()?;
-        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
-    };
-
-    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
-    let crypt_config = match keyfile {
-        None => None,
-        Some(path) => {
-            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
-            Some(Arc::new(CryptConfig::new(key)?))
-        }
-    };
-
-    let server_archive_name = if archive_name.ends_with(".pxar") {
-        format!("{}.didx", archive_name)
-    } else {
-        bail!("Can only mount pxar archives.");
-    };
-
-    let client = BackupReader::start(
-        client,
-        crypt_config.clone(),
-        repo.store(),
-        &backup_type,
-        &backup_id,
-        backup_time,
-        true,
-    ).await?;
-
-    let mut tmpfile = std::fs::OpenOptions::new()
-        .write(true)
-        .read(true)
-        .custom_flags(libc::O_TMPFILE)
-        .open("/tmp")?;
-
-    let manifest = client.download_manifest().await?;
-
-    let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
-    let most_used = index.find_most_used_chunks(8);
-    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
-    let reader = BufferedDynamicReader::new(index, chunk_reader);
-    let archive_size = reader.archive_size();
-    let reader: proxmox_backup::pxar::fuse::Reader =
-        Arc::new(BufferedDynamicReadAt::new(reader));
-    let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
-
-    client.download(CATALOG_NAME, &mut tmpfile).await?;
-    let index = DynamicIndexReader::new(tmpfile)
-        .map_err(|err| format_err!("unable to read catalog index - {}", err))?;
-
-    // Note: do not use values stored in index (not trusted) - instead, computed them again
-    let (csum, size) = index.compute_csum();
-    manifest.verify_file(CATALOG_NAME, &csum, size)?;
-
-    let most_used = index.find_most_used_chunks(8);
-    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
-    let mut reader = BufferedDynamicReader::new(index, chunk_reader);
-    let mut catalogfile = std::fs::OpenOptions::new()
-        .write(true)
-        .read(true)
-        .custom_flags(libc::O_TMPFILE)
-        .open("/tmp")?;
-
-    std::io::copy(&mut reader, &mut catalogfile)
-        .map_err(|err| format_err!("unable to download catalog - {}", err))?;
-
-    catalogfile.seek(SeekFrom::Start(0))?;
-    let catalog_reader = CatalogReader::new(catalogfile);
-    let state = Shell::new(
-        catalog_reader,
-        &server_archive_name,
-        decoder,
-    ).await?;
-
-    println!("Starting interactive shell");
-    state.shell().await?;
-
-    record_repository(&repo);
-
-    Ok(())
-}
-
-fn catalog_mgmt_cli() -> CliCommandMap {
-    let catalog_shell_cmd_def = CliCommand::new(&API_METHOD_CATALOG_SHELL)
-        .arg_param(&["snapshot", "archive-name"])
-        .completion_cb("repository", complete_repository)
-        .completion_cb("archive-name", complete_pxar_archive_name)
-        .completion_cb("snapshot", complete_group_or_snapshot);
-
-    let catalog_dump_cmd_def = CliCommand::new(&API_METHOD_DUMP_CATALOG)
-        .arg_param(&["snapshot"])
-        .completion_cb("repository", complete_repository)
-        .completion_cb("snapshot", complete_backup_snapshot);
-
-    CliCommandMap::new()
-        .insert("dump", catalog_dump_cmd_def)
-        .insert("shell", catalog_shell_cmd_def)
-}
-
 fn main() {
 
     let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
@@ -2235,7 +1891,7 @@ fn main() {
         .insert("snapshots", snapshots_cmd_def)
         .insert("files", files_cmd_def)
         .insert("status", status_cmd_def)
-        .insert("key", key_mgmt_cli())
+        .insert("key", key::cli())
         .insert("mount", mount_cmd_def())
         .insert("catalog", catalog_mgmt_cli())
         .insert("task", task_mgmt_cli())
@@ -19,7 +19,6 @@ use proxmox_backup::client::*;
 use crate::{
     KEYFILE_SCHEMA, REPO_URL_SCHEMA,
     extract_repository_from_value,
-    get_encryption_key_password,
     record_repository,
     connect,
 };
@@ -52,7 +51,7 @@ pub async fn benchmark(
     let crypt_config = match keyfile {
         None => None,
         Some(path) => {
-            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
+            let (key, _) = load_and_decrypt_key(&path, &crate::key::get_encryption_key_password)?;
             let crypt_config = CryptConfig::new(key)?;
             Some(Arc::new(crypt_config))
         }
src/bin/proxmox_backup_client/catalog.rs (new file, 246 lines)
@@ -0,0 +1,246 @@
+use std::os::unix::fs::OpenOptionsExt;
+use std::io::{Seek, SeekFrom};
+use std::path::PathBuf;
+use std::sync::Arc;
+
+use anyhow::{bail, format_err, Error};
+use serde_json::Value;
+
+use proxmox::api::{api, cli::*};
+
+use proxmox_backup::tools;
+
+use proxmox_backup::client::*;
+
+use crate::{
+    REPO_URL_SCHEMA,
+    extract_repository_from_value,
+    record_repository,
+    api_datastore_latest_snapshot,
+    complete_repository,
+    complete_backup_snapshot,
+    complete_group_or_snapshot,
+    complete_pxar_archive_name,
+    connect,
+    BackupDir,
+    BackupGroup,
+    BufferedDynamicReader,
+    BufferedDynamicReadAt,
+    CatalogReader,
+    CATALOG_NAME,
+    CryptConfig,
+    DynamicIndexReader,
+    IndexFile,
+    Shell,
+};
+
+use proxmox_backup::backup::load_and_decrypt_key;
+
+use crate::key::get_encryption_key_password;
+
+#[api(
+    input: {
+        properties: {
+            repository: {
+                schema: REPO_URL_SCHEMA,
+                optional: true,
+            },
+            snapshot: {
+                type: String,
+                description: "Snapshot path.",
+            },
+        }
+    }
+)]
+/// Dump catalog.
+async fn dump_catalog(param: Value) -> Result<Value, Error> {
+
+    let repo = extract_repository_from_value(&param)?;
+
+    let path = tools::required_string_param(&param, "snapshot")?;
+    let snapshot: BackupDir = path.parse()?;
+
+    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
+
+    let crypt_config = match keyfile {
+        None => None,
+        Some(path) => {
+            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
+            Some(Arc::new(CryptConfig::new(key)?))
+        }
+    };
+
+    let client = connect(repo.host(), repo.user())?;
+
+    let client = BackupReader::start(
+        client,
+        crypt_config.clone(),
+        repo.store(),
+        &snapshot.group().backup_type(),
+        &snapshot.group().backup_id(),
+        snapshot.backup_time(),
+        true,
+    ).await?;
+
+    let (manifest, _) = client.download_manifest().await?;
+
+    let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;
+
+    let most_used = index.find_most_used_chunks(8);
+
+    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
+
+    let mut reader = BufferedDynamicReader::new(index, chunk_reader);
+
+    let mut catalogfile = std::fs::OpenOptions::new()
+        .write(true)
+        .read(true)
+        .custom_flags(libc::O_TMPFILE)
+        .open("/tmp")?;
+
+    std::io::copy(&mut reader, &mut catalogfile)
+        .map_err(|err| format_err!("unable to download catalog - {}", err))?;
+
+    catalogfile.seek(SeekFrom::Start(0))?;
+
+    let mut catalog_reader = CatalogReader::new(catalogfile);
+
+    catalog_reader.dump()?;
+
+    record_repository(&repo);
+
+    Ok(Value::Null)
+}
+
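dump_catalog spools the decoded catalog into an anonymous temp file before reading it back: O_TMPFILE opens an unnamed file inside /tmp that vanishes as soon as the handle drops, so there is nothing to clean up on error paths. The pattern, isolated (Linux-specific; needs the libc crate):

use std::fs::{File, OpenOptions};
use std::os::unix::fs::OpenOptionsExt;

// Sketch: an unnamed, auto-deleted scratch file backed by /tmp.
// The file is never linked into the directory, so dropping the
// handle is the only cleanup required.
fn anonymous_tmpfile() -> std::io::Result<File> {
    OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")
}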
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
"snapshot": {
|
||||||
|
type: String,
|
||||||
|
description: "Group/Snapshot path.",
|
||||||
|
},
|
||||||
|
"archive-name": {
|
||||||
|
type: String,
|
||||||
|
description: "Backup archive name.",
|
||||||
|
},
|
||||||
|
"repository": {
|
||||||
|
                optional: true,
                schema: REPO_URL_SCHEMA,
            },
            "keyfile": {
                optional: true,
                type: String,
                description: "Path to encryption key.",
            },
        },
    },
)]
/// Shell to interactively inspect and restore snapshots.
async fn catalog_shell(param: Value) -> Result<(), Error> {
    let repo = extract_repository_from_value(&param)?;
    let client = connect(repo.host(), repo.user())?;
    let path = tools::required_string_param(&param, "snapshot")?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let mut tmpfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    let (manifest, _) = client.download_manifest().await?;

    let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
    let most_used = index.find_most_used_chunks(8);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
    let reader = BufferedDynamicReader::new(index, chunk_reader);
    let archive_size = reader.archive_size();
    let reader: proxmox_backup::pxar::fuse::Reader =
        Arc::new(BufferedDynamicReadAt::new(reader));
    let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;

    client.download(CATALOG_NAME, &mut tmpfile).await?;
    let index = DynamicIndexReader::new(tmpfile)
        .map_err(|err| format_err!("unable to read catalog index - {}", err))?;

    // Note: do not use values stored in the index (not trusted) - instead, compute them again
    let (csum, size) = index.compute_csum();
    manifest.verify_file(CATALOG_NAME, &csum, size)?;

    let most_used = index.find_most_used_chunks(8);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
    let mut reader = BufferedDynamicReader::new(index, chunk_reader);
    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    catalogfile.seek(SeekFrom::Start(0))?;
    let catalog_reader = CatalogReader::new(catalogfile);
    let state = Shell::new(
        catalog_reader,
        &server_archive_name,
        decoder,
    ).await?;

    println!("Starting interactive shell");
    state.shell().await?;

    record_repository(&repo);

    Ok(())
}

pub fn catalog_mgmt_cli() -> CliCommandMap {
    let catalog_shell_cmd_def = CliCommand::new(&API_METHOD_CATALOG_SHELL)
        .arg_param(&["snapshot", "archive-name"])
        .completion_cb("repository", complete_repository)
        .completion_cb("archive-name", complete_pxar_archive_name)
        .completion_cb("snapshot", complete_group_or_snapshot);

    let catalog_dump_cmd_def = CliCommand::new(&API_METHOD_DUMP_CATALOG)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    CliCommandMap::new()
        .insert("dump", catalog_dump_cmd_def)
        .insert("shell", catalog_shell_cmd_def)
}
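For context, a minimal sketch (not part of this changeset) of how this command map would typically be mounted into the client's top-level CLI; the surrounding main() wiring and the `key` module path are assumptions, not shown in the diff:

    // hypothetical wiring in the client binary's main():
    let cmd_def = CliCommandMap::new()
        .insert("catalog", catalog_mgmt_cli())
        .insert("key", key::cli());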
src/bin/proxmox_backup_client/key.rs (new file, 274 lines)
@ -0,0 +1,274 @@
use std::path::PathBuf;

use anyhow::{bail, format_err, Error};
use chrono::{Local, TimeZone};
use serde::{Deserialize, Serialize};

use proxmox::api::api;
use proxmox::api::cli::{CliCommand, CliCommandMap};
use proxmox::sys::linux::tty;
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};

use proxmox_backup::backup::{
    encrypt_key_with_passphrase, load_and_decrypt_key, store_key_config, KeyConfig,
};
use proxmox_backup::tools;

pub const DEFAULT_ENCRYPTION_KEY_FILE_NAME: &str = "encryption-key.json";
pub const MASTER_PUBKEY_FILE_NAME: &str = "master-public.pem";

pub fn find_master_pubkey() -> Result<Option<PathBuf>, Error> {
    super::find_xdg_file(MASTER_PUBKEY_FILE_NAME, "main public key file")
}

pub fn place_master_pubkey() -> Result<PathBuf, Error> {
    super::place_xdg_file(MASTER_PUBKEY_FILE_NAME, "main public key file")
}

pub fn find_default_encryption_key() -> Result<Option<PathBuf>, Error> {
    super::find_xdg_file(DEFAULT_ENCRYPTION_KEY_FILE_NAME, "default encryption key file")
}

pub fn place_default_encryption_key() -> Result<PathBuf, Error> {
    super::place_xdg_file(DEFAULT_ENCRYPTION_KEY_FILE_NAME, "default encryption key file")
}

pub fn read_optional_default_encryption_key() -> Result<Option<Vec<u8>>, Error> {
    find_default_encryption_key()?
        .map(file_get_contents)
        .transpose()
}

pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
    // fixme: implement other input methods

    use std::env::VarError::*;
    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
        Ok(p) => return Ok(p.as_bytes().to_vec()),
        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
        Err(NotPresent) => {
            // Try another method
        }
    }

    // If we're on a TTY, query the user for a password
    if tty::stdin_isatty() {
        return Ok(tty::read_password("Encryption Key Password: ")?);
    }

    bail!("no password input mechanism available");
}

#[api(
    default: "scrypt",
)]
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
/// Key derivation function for password protected encryption keys.
pub enum Kdf {
    /// Do not encrypt the key.
    None,

    /// Encrypt the key with a password using SCrypt.
    Scrypt,
}

impl Default for Kdf {
    #[inline]
    fn default() -> Self {
        Kdf::Scrypt
    }
}

#[api(
    input: {
        properties: {
            kdf: {
                type: Kdf,
                optional: true,
            },
            path: {
                description:
                    "Output file. Without this the key will become the new default encryption key.",
                optional: true,
            }
        },
    },
)]
/// Create a new encryption key.
fn create(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
    let path = match path {
        Some(path) => PathBuf::from(path),
        None => place_default_encryption_key()?,
    };

    let kdf = kdf.unwrap_or_default();

    let key = proxmox::sys::linux::random_data(32)?;

    match kdf {
        Kdf::None => {
            let created = Local.timestamp(Local::now().timestamp(), 0);

            store_key_config(
                &path,
                false,
                KeyConfig {
                    kdf: None,
                    created,
                    modified: created,
                    data: key,
                },
            )?;
        }
        Kdf::Scrypt => {
            // always read passphrase from tty
            if !tty::stdin_isatty() {
                bail!("unable to read passphrase - no tty");
            }

            let password = tty::read_and_verify_password("Encryption Key Password: ")?;

            let key_config = encrypt_key_with_passphrase(&key, &password)?;

            store_key_config(&path, false, key_config)?;
        }
    }

    Ok(())
}

#[api(
    input: {
        properties: {
            kdf: {
                type: Kdf,
                optional: true,
            },
            path: {
                description: "Key file. Without this the default key's password will be changed.",
                optional: true,
            }
        },
    },
)]
/// Change the encryption key's password.
fn change_passphrase(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
    let path = match path {
        Some(path) => PathBuf::from(path),
        None => find_default_encryption_key()?
            .ok_or_else(|| format_err!("no encryption file provided and no default file found"))?,
    };

    let kdf = kdf.unwrap_or_default();

    if !tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;

    match kdf {
        Kdf::None => {
            let modified = Local.timestamp(Local::now().timestamp(), 0);

            store_key_config(
                &path,
                true,
                KeyConfig {
                    kdf: None,
                    created, // keep original value
                    modified,
                    data: key.to_vec(),
                },
            )?;
        }
        Kdf::Scrypt => {
            let password = tty::read_and_verify_password("New Password: ")?;

            let mut new_key_config = encrypt_key_with_passphrase(&key, &password)?;
            new_key_config.created = created; // keep original value

            store_key_config(&path, true, new_key_config)?;
        }
    }

    Ok(())
}

#[api(
    input: {
        properties: {
            path: {
                description: "Path to the PEM formatted RSA public key.",
            },
        },
    },
)]
/// Import an RSA public key used to put an encrypted version of the symmetric backup encryption
/// key onto the backup server along with each backup.
fn import_master_pubkey(path: String) -> Result<(), Error> {
    let pem_data = file_get_contents(&path)?;

    if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
        bail!("Unable to decode PEM data - {}", err);
    }

    let target_path = place_master_pubkey()?;

    replace_file(&target_path, &pem_data, CreateOptions::new())?;

    println!("Imported public master key to {:?}", target_path);

    Ok(())
}

#[api]
/// Create an RSA public/private key pair used to put an encrypted version of the symmetric backup
/// encryption key onto the backup server along with each backup.
fn create_master_key() -> Result<(), Error> {
    // we need a TTY to query the new password
    if !tty::stdin_isatty() {
        bail!("unable to create master key - no tty");
    }

    let rsa = openssl::rsa::Rsa::generate(4096)?;
    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;

    let password = String::from_utf8(tty::read_and_verify_password("Master Key Password: ")?)?;

    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
    let filename_pub = "master-public.pem";
    println!("Writing public master key to {}", filename_pub);
    replace_file(filename_pub, pub_key.as_slice(), CreateOptions::new())?;

    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, password.as_bytes())?;

    let filename_priv = "master-private.pem";
    println!("Writing private master key to {}", filename_priv);
    replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new())?;

    Ok(())
}

pub fn cli() -> CliCommandMap {
    let key_create_cmd_def = CliCommand::new(&API_METHOD_CREATE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_CHANGE_PASSPHRASE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_CREATE_MASTER_KEY);
    let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_IMPORT_MASTER_PUBKEY)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    CliCommandMap::new()
        .insert("create", key_create_cmd_def)
        .insert("create-master-key", key_create_master_key_cmd_def)
        .insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
        .insert("change-passphrase", key_change_passphrase_cmd_def)
}
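A short usage sketch (an assumption, not part of the diff; `create` is private, so this only makes sense inside this module): `Kdf::None` writes the key unencrypted, while omitting `path` places the new default key via `place_default_encryption_key()`:

    // create a plaintext key at an explicit, illustrative path ...
    create(Some(Kdf::None), Some("/tmp/test-key.json".to_string()))?;
    // ... or create a password-protected default key (prompts on the tty):
    create(Some(Kdf::Scrypt), None)?;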
@ -1,6 +1,39 @@
+use anyhow::{Context, Error};
+
 mod benchmark;
 pub use benchmark::*;
 mod mount;
 pub use mount::*;
 mod task;
 pub use task::*;
+
+mod catalog;
+pub use catalog::*;
+
+pub mod key;
+
+pub fn base_directories() -> Result<xdg::BaseDirectories, Error> {
+    xdg::BaseDirectories::with_prefix("proxmox-backup").map_err(Error::from)
+}
+
+/// Convenience helper for better error messages:
+pub fn find_xdg_file(
+    file_name: impl AsRef<std::path::Path>,
+    description: &'static str,
+) -> Result<Option<std::path::PathBuf>, Error> {
+    let file_name = file_name.as_ref();
+    base_directories()
+        .map(|base| base.find_config_file(file_name))
+        .with_context(|| format!("error searching for {}", description))
+}
+
+pub fn place_xdg_file(
+    file_name: impl AsRef<std::path::Path>,
+    description: &'static str,
+) -> Result<std::path::PathBuf, Error> {
+    let file_name = file_name.as_ref();
+    base_directories()
+        .and_then(|base| {
+            base.place_config_file(file_name).map_err(Error::from)
+        })
+        .with_context(|| format!("failed to place {} in xdg home", description))
+}
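A minimal sketch (assumed caller, not in the diff) of how the two helpers divide the work: `find_xdg_file` is for lookups that may legitimately come back empty, `place_xdg_file` for paths that are about to be written (it also creates missing parent directories via the xdg crate):

    fn example() -> Result<(), Error> {
        // look up an existing config file, if any
        if let Some(path) = find_xdg_file("encryption-key.json", "default encryption key file")? {
            println!("found key at {:?}", path);
        }
        // resolve a path we intend to write to
        let target = place_xdg_file("encryption-key.json", "default encryption key file")?;
        println!("would write to {:?}", target);
        Ok(())
    }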
@ -30,7 +30,6 @@ use proxmox_backup::client::*;
 use crate::{
     REPO_URL_SCHEMA,
     extract_repository_from_value,
-    get_encryption_key_password,
     complete_pxar_archive_name,
     complete_group_or_snapshot,
     complete_repository,
@ -119,7 +118,7 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
     let crypt_config = match keyfile {
         None => None,
         Some(path) => {
-            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
+            let (key, _) = load_and_decrypt_key(&path, &crate::key::get_encryption_key_password)?;
             Some(Arc::new(CryptConfig::new(key)?))
         }
     };
@ -140,7 +139,7 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
         true,
     ).await?;

-    let manifest = client.download_manifest().await?;
+    let (manifest, _) = client.download_manifest().await?;

     if server_archive_name.ends_with(".didx") {
         let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
@ -123,18 +123,19 @@ impl BackupReader {
     }

     /// Download backup manifest (index.json)
-    pub async fn download_manifest(&self) -> Result<BackupManifest, Error> {
-
-        use std::convert::TryFrom;
+    ///
+    /// The manifest signature is verified if we have a crypt_config.
+    pub async fn download_manifest(&self) -> Result<(BackupManifest, Vec<u8>), Error> {

         let mut raw_data = Vec::with_capacity(64 * 1024);
         self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
         let blob = DataBlob::from_raw(raw_data)?;
         blob.verify_crc()?;
-        let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
-        let json: Value = serde_json::from_slice(&data[..])?;
-
-        BackupManifest::try_from(json)
+        let data = blob.decode(None)?;
+
+        let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
+
+        Ok((manifest, data))
     }

     /// Download a .blob file
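The new return type hands the caller both the parsed (and, given a crypt config, signature-checked) manifest and the raw blob payload. A sketch of the intended call pattern, mirroring the catalog verification earlier in this changeset (`client` and `index` are assumed to be in scope):

    let (manifest, _raw_manifest_data) = client.download_manifest().await?;
    // verify a downloaded file against the manifest instead of trusting the index
    let (csum, size) = index.compute_csum();
    manifest.verify_file(CATALOG_NAME, &csum, size)?;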
@ -3,7 +3,7 @@ use std::os::unix::fs::OpenOptionsExt;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex};

-use anyhow::{format_err, Error};
+use anyhow::{bail, format_err, Error};
 use chrono::{DateTime, Utc};
 use futures::*;
 use futures::stream::Stream;
@ -163,21 +163,12 @@ impl BackupWriter {
         data: Vec<u8>,
         file_name: &str,
         compress: bool,
-        crypt_or_sign: Option<bool>,
+        encrypt: bool,
     ) -> Result<BackupStats, Error> {
-        let blob = if let Some(ref crypt_config) = self.crypt_config {
-            if let Some(encrypt) = crypt_or_sign {
-                if encrypt {
-                    DataBlob::encode(&data, Some(crypt_config), compress)?
-                } else {
-                    DataBlob::create_signed(&data, crypt_config, compress)?
-                }
-            } else {
-                DataBlob::encode(&data, None, compress)?
-            }
-        } else {
-            DataBlob::encode(&data, None, compress)?
-        };
+        let blob = match (encrypt, &self.crypt_config) {
+            (false, _) => DataBlob::encode(&data, None, compress)?,
+            (true, None) => bail!("requested encryption without a crypt config"),
+            (true, Some(crypt_config)) => DataBlob::encode(&data, Some(crypt_config), compress)?,
+        };

         let raw_data = blob.into_inner();
@ -194,8 +185,8 @@ impl BackupWriter {
         src_path: P,
         file_name: &str,
         compress: bool,
-        crypt_or_sign: Option<bool>,
+        encrypt: bool,
     ) -> Result<BackupStats, Error> {

         let src_path = src_path.as_ref();

@ -209,7 +200,7 @@ impl BackupWriter {
             .await
             .map_err(|err| format_err!("unable to read file {:?} - {}", src_path, err))?;

-        self.upload_blob_from_data(contents, file_name, compress, crypt_or_sign).await
+        self.upload_blob_from_data(contents, file_name, compress, encrypt).await
     }

     pub async fn upload_stream(
@ -219,6 +210,8 @@ impl BackupWriter {
         stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
         prefix: &str,
         fixed_size: Option<u64>,
+        compress: bool,
+        encrypt: bool,
     ) -> Result<BackupStats, Error> {
         let known_chunks = Arc::new(Mutex::new(HashSet::new()));

@ -227,6 +220,10 @@ impl BackupWriter {
             param["size"] = size.into();
         }

+        if encrypt && self.crypt_config.is_none() {
+            bail!("requested encryption without a crypt config");
+        }
+
         let index_path = format!("{}_index", prefix);
         let close_path = format!("{}_close", prefix);

@ -252,7 +249,8 @@ impl BackupWriter {
             stream,
             &prefix,
             known_chunks.clone(),
-            self.crypt_config.clone(),
+            if encrypt { self.crypt_config.clone() } else { None },
+            compress,
             self.verbose,
         )
         .await?;
@ -455,8 +453,6 @@ impl BackupWriter {
     /// Download backup manifest (index.json) of last backup
     pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {

-        use std::convert::TryFrom;
-
         let mut raw_data = Vec::with_capacity(64 * 1024);

         let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
@ -465,8 +461,8 @@ impl BackupWriter {
         let blob = DataBlob::from_raw(raw_data)?;
         blob.verify_crc()?;
         let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
-        let json: Value = serde_json::from_slice(&data[..])?;
-        let manifest = BackupManifest::try_from(json)?;
+        let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;

         Ok(manifest)
     }
@ -478,6 +474,7 @@ impl BackupWriter {
         prefix: &str,
         known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
         crypt_config: Option<Arc<CryptConfig>>,
+        compress: bool,
         verbose: bool,
     ) -> impl Future<Output = Result<(usize, usize, std::time::Duration, usize, [u8; 32]), Error>> {

@ -508,7 +505,7 @@ impl BackupWriter {
         let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;

         let mut chunk_builder = DataChunkBuilder::new(data.as_ref())
-            .compress(true);
+            .compress(compress);

         if let Some(ref crypt_config) = crypt_config {
             chunk_builder = chunk_builder.crypt_config(crypt_config);
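Callers now pass two explicit booleans instead of the old `crypt_or_sign: Option<bool>`, and requesting encryption without a crypt config is a hard error rather than a silent plaintext fallback. A hedged sketch of an updated call site (the blob name here is illustrative only):

    // compress = true, encrypt = true; bails if the writer has no crypt config
    client.upload_blob_from_data(data, "example.conf.blob", true, true).await?;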
@ -10,11 +10,12 @@ use crate::backup::{AsyncReadChunk, CryptConfig, DataBlob, ReadChunk};
 use crate::tools::runtime::block_on;

 /// Read chunks from remote host using ``BackupReader``
+#[derive(Clone)]
 pub struct RemoteChunkReader {
     client: Arc<BackupReader>,
     crypt_config: Option<Arc<CryptConfig>>,
     cache_hint: HashMap<[u8; 32], usize>,
-    cache: Mutex<HashMap<[u8; 32], Vec<u8>>>,
+    cache: Arc<Mutex<HashMap<[u8; 32], Vec<u8>>>>,
 }

 impl RemoteChunkReader {
@ -30,7 +31,7 @@ impl RemoteChunkReader {
             client,
             crypt_config,
             cache_hint,
-            cache: Mutex::new(HashMap::new()),
+            cache: Arc::new(Mutex::new(HashMap::new())),
         }
     }
 }

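With the cache behind an `Arc` and the struct deriving `Clone`, a clone is a cheap handle onto the same shared chunk cache rather than a fresh empty one; a sketch (variable names assumed):

    let reader2 = chunk_reader.clone();
    // reader2 and chunk_reader now share one cache: a chunk fetched through
    // either is served from memory when requested through the other.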
@ -452,10 +452,10 @@ impl<'a, 'b> Archiver<'a, 'b> {
         use pxar::format::mode;

         let file_mode = stat.st_mode & libc::S_IFMT;
-        let open_mode = if !(file_mode == libc::S_IFREG || file_mode == libc::S_IFDIR) {
-            OFlag::O_PATH
-        } else {
+        let open_mode = if file_mode == libc::S_IFREG || file_mode == libc::S_IFDIR {
             OFlag::empty()
+        } else {
+            OFlag::O_PATH
         };

         let fd = self.open_file(
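The old condition had the branches inverted: regular files and directories, which must actually be read, were opened with `O_PATH`, while special files were opened for real I/O. Restated as a hypothetical standalone helper for clarity (not code from the diff):

    use nix::fcntl::OFlag;

    // Only regular files and directories need real data access; device nodes,
    // fifos, sockets and symlinks are opened as path-only handles.
    fn open_mode_for(file_mode: u32) -> OFlag {
        if file_mode == libc::S_IFREG || file_mode == libc::S_IFDIR {
            OFlag::empty()
        } else {
            OFlag::O_PATH
        }
    }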
@ -56,30 +56,41 @@ extern {
 ///
 /// This makes sure that tokio's worker threads are marked for us so that we know whether we
 /// can/need to use `block_in_place` in our `block_on` helper.
-pub fn get_runtime() -> Arc<Runtime> {
+pub fn get_runtime_with_builder<F: Fn() -> runtime::Builder>(get_builder: F) -> Arc<Runtime> {

     let mut guard = RUNTIME.lock().unwrap();

     if let Some(rt) = guard.upgrade() { return rt; }

-    let rt = Arc::new(
-        runtime::Builder::new()
-            .on_thread_stop(|| {
-                // avoid openssl bug: https://github.com/openssl/openssl/issues/6214
-                // call OPENSSL_thread_stop to avoid race with openssl cleanup handlers
-                unsafe { OPENSSL_thread_stop(); }
-            })
-            .threaded_scheduler()
-            .enable_all()
-            .build()
-            .expect("failed to spawn tokio runtime")
-    );
+    let mut builder = get_builder();
+    builder.on_thread_stop(|| {
+        // avoid openssl bug: https://github.com/openssl/openssl/issues/6214
+        // call OPENSSL_thread_stop to avoid race with openssl cleanup handlers
+        unsafe { OPENSSL_thread_stop(); }
+    });
+
+    let runtime = builder.build().expect("failed to spawn tokio runtime");
+    let rt = Arc::new(runtime);

     *guard = Arc::downgrade(&rt.clone());

     rt
 }

+/// Get or create the current main tokio runtime.
+///
+/// This calls get_runtime_with_builder() using the tokio default threaded scheduler
+pub fn get_runtime() -> Arc<Runtime> {
+
+    get_runtime_with_builder(|| {
+        let mut builder = runtime::Builder::new();
+        builder.threaded_scheduler();
+        builder.enable_all();
+        builder
+    })
+}
+
 /// Block on a synchronous piece of code.
 pub fn block_in_place<R>(fut: impl FnOnce() -> R) -> R {
     // don't double-exit the context (tokio doesn't like that)
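A sketch of what this refactor enables (not part of the diff): a caller that prefers tokio 0.2's single-threaded scheduler can now supply its own builder instead of always getting the threaded default:

    let rt = get_runtime_with_builder(|| {
        let mut builder = runtime::Builder::new();
        builder.basic_scheduler(); // single-threaded scheduler, tokio 0.2 API
        builder.enable_all();
        builder
    });
    // rt can then drive futures via the crate's block_on helper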
@ -78,24 +78,6 @@ fn test_compressed_blob_writer() -> Result<(), Error> {
     verify_test_blob(blob_writer.finish()?)
 }

-#[test]
-fn test_signed_blob_writer() -> Result<(), Error> {
-    let tmp = Cursor::new(Vec::<u8>::new());
-    let mut blob_writer = DataBlobWriter::new_signed(tmp, CRYPT_CONFIG.clone())?;
-    blob_writer.write_all(&TEST_DATA)?;
-
-    verify_test_blob(blob_writer.finish()?)
-}
-
-#[test]
-fn test_signed_compressed_blob_writer() -> Result<(), Error> {
-    let tmp = Cursor::new(Vec::<u8>::new());
-    let mut blob_writer = DataBlobWriter::new_signed_compressed(tmp, CRYPT_CONFIG.clone())?;
-    blob_writer.write_all(&TEST_DATA)?;
-
-    verify_test_blob(blob_writer.finish()?)
-}
-
 #[test]
 fn test_encrypted_blob_writer() -> Result<(), Error> {
     let tmp = Cursor::new(Vec::<u8>::new());
@ -10,7 +10,7 @@ Ext.define('pbs-data-store-snapshots', {
     },
     'files',
     'owner',
-    { name: 'size', type: 'int' },
+    { name: 'size', type: 'int', allowNull: true, },
     {
         name: 'encrypted',
         type: 'boolean',
@ -154,7 +154,7 @@ Ext.define('PBS.DataStoreContent', {
             if (item.encrypted > 0) {
                 encrypted++;
             }
-            if (item["backup-time"] > last_backup) {
+            if (item["backup-time"] > last_backup && item.size !== null) {
                 last_backup = item["backup-time"];
                 group["backup-time"] = last_backup;
                 group.files = item.files;
@ -343,7 +343,13 @@ Ext.define('PBS.DataStoreContent', {
             header: gettext("Size"),
             sortable: true,
             dataIndex: 'size',
-            renderer: Proxmox.Utils.format_size,
+            renderer: (v, meta, record) => {
+                if (v === undefined || v === null) {
+                    meta.tdCls = "x-grid-row-loading";
+                    return '';
+                }
+                return Proxmox.Utils.format_size(v);
+            },
         },
         {
             xtype: 'numbercolumn',
@ -396,12 +402,13 @@ Ext.define('PBS.DataStoreContent', {
             iconCls: 'fa fa-refresh',
             handler: 'reload',
         },
+        '-',
         {
             xtype: 'proxmoxButton',
             text: gettext('Verify'),
             disabled: true,
             parentXType: 'pbsDataStoreContent',
-            enableFn: function(record) { return !!record.data; },
+            enableFn: (rec) => !!rec.data && rec.data.size !== null,
             handler: 'onVerify',
         },
         {
@ -409,7 +416,7 @@ Ext.define('PBS.DataStoreContent', {
             text: gettext('Prune'),
             disabled: true,
             parentXType: 'pbsDataStoreContent',
-            enableFn: function(record) { return !record.data.leaf; },
+            enableFn: (rec) => !rec.data.leaf,
             handler: 'onPrune',
         },
         {
@ -418,24 +425,22 @@ Ext.define('PBS.DataStoreContent', {
             disabled: true,
             parentXType: 'pbsDataStoreContent',
             handler: 'onForget',
+            dangerous: true,
             confirmMsg: function(record) {
-                console.log(record);
+                //console.log(record);
                 let name = record.data.text;
                 return Ext.String.format(gettext('Are you sure you want to remove snapshot {0}'), `'${name}'`);
             },
-            enableFn: function(record) {
-                return !!record.data.leaf;
-            },
+            enableFn: (rec) => !!rec.data.leaf && rec.data.size !== null,
         },
+        '-',
         {
             xtype: 'proxmoxButton',
             text: gettext('Download Files'),
             disabled: true,
             parentXType: 'pbsDataStoreContent',
             handler: 'openBackupFileDownloader',
-            enableFn: function(record) {
-                return !!record.data.leaf;
-            },
+            enableFn: (rec) => !!rec.data.leaf && rec.data.size !== null,
         },
         {
             xtype: "proxmoxButton",
@ -444,7 +449,7 @@ Ext.define('PBS.DataStoreContent', {
             handler: 'openPxarBrowser',
             parentXType: 'pbsDataStoreContent',
             enableFn: function(record) {
-                return !!record.data.leaf && record.data.files.some(el => el.filename.endsWith('pxar.didx'));
+                return !!record.data.leaf && record.size !== null && record.data.files.some(el => el.filename.endsWith('pxar.didx'));
             },
         }
     ],
@ -200,7 +200,13 @@ Ext.define('PBS.MainView', {
                 xtype: 'versioninfo'
             },
             {
-                flex: 1
+                padding: 5,
+                html: '<a href="https://bugzilla.proxmox.com" target="_blank">BETA</a>',
+                baseCls: 'x-plain',
+            },
+            {
+                flex: 1,
+                baseCls: 'x-plain',
             },
             {
                 baseCls: 'x-plain',
@ -19,6 +19,7 @@ JSSRC= \
 	window/ACLEdit.js \
 	window/DataStoreEdit.js \
 	window/CreateDirectory.js \
+	window/ZFSCreate.js \
 	window/FileBrowser.js \
 	window/BackupFileDownloader.js \
 	dashboard/DataStoreStatistics.js \
@ -117,8 +117,7 @@ Ext.define('PBS.admin.ZFSList', {
             text: gettext('Detail'),
             xtype: 'proxmoxButton',
             disabled: true,
-            handler: function() {
-            }
+            handler: 'openDetailWindow',
         }
     ],
@ -23,7 +23,8 @@ Ext.define('PBS.window.CreateZFS', {
             xtype: 'proxmoxtextfield',
             name: 'name',
             fieldLabel: gettext('Name'),
-            allowBlank: false
+            minLength: 3,
+            allowBlank: false,
         },
         {
             xtype: 'proxmoxcheckbox',
@ -75,7 +76,7 @@ Ext.define('PBS.window.CreateZFS', {
             xtype: 'pmxMultiDiskSelector',
             name: 'devices',
             nodename: 'localhost',
-            typeParam: 'usage-type',
+            typeParameter: 'usage-type',
             valueField: 'name',
             height: 200,
             emptyText: gettext('No Disks unused'),