Compare commits

80 Commits

cf063c1973  f58233a73a  d257c2ecbd  e4ee7b7ac8  1f0d23f792  bfcef26a99  ec01eeadc6  660a34892d
d86034afec  62593aba1e  0eaef8eb84  e39974afbf  dde18bbb85  a40e1b0e8b  a0eb0cd372  41067870c6
33a87bc39a  bed3e15f16  c687da9e8e  be30e7d269  106603c58f  7ba2c1c386  4327a8462a  e193544b8e
323b2f3dd6  7884e7ef4f  fae11693f0  22231524e2  9634ca07db  62f6a7e3d9  86443141b5  f6e964b96e
c8bed1b4d7  a3970d6c1e  cc83c13660  bf7e2a4648  e284073e4a  3ec99affc8  a9649ddc44  4f9096a211
c3a4b5e2e1  7957fabff2  20a4e4e252  2774566b03  4459ffe30e  d16ed66c88  3ec6e249b3  dfa517ad6c
8b2ad84a25  3dacedce71  512d50a455  b53f637914  152a926149  7f388acea8  b2bfb46835  24406ebc0c
1f24d9114c  859fe9c1fb  2107a5aebc  3638341aa4  067fe514e6  8c6e5ce23c  0351f23ba4  c1ff544eff
69e5d71961  48e22a8900  a7a5f56daa  05389a0109  b65390ebc9  3bad3e6e52  24be37e3f6  1008a69a13
521a0acb2e  3b66040de6  af3a0ae7b1  4e36f78438  f28d9088ed  56b814e378  0c136efe30  cdead6cd12
Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.8.0"
+version = "0.8.6"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
@@ -37,8 +37,8 @@ pam = "0.7"
 pam-sys = "0.5"
 percent-encoding = "2.1"
 pin-utils = "0.1.0"
-pathpatterns = "0.1.1"
-proxmox = { version = "0.1.42", features = [ "sortable-macro", "api-macro" ] }
+pathpatterns = "0.1.2"
+proxmox = { version = "0.2.0", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro" ] }
 proxmox-fuse = "0.1.0"
Makefile (10 changed lines)

@@ -40,10 +40,12 @@ COMPILED_BINS := \
 export DEB_VERSION DEB_VERSION_UPSTREAM
 
 SERVER_DEB=${PACKAGE}-server_${DEB_VERSION}_${ARCH}.deb
+SERVER_DBG_DEB=${PACKAGE}-server-dbgsym_${DEB_VERSION}_${ARCH}.deb
 CLIENT_DEB=${PACKAGE}-client_${DEB_VERSION}_${ARCH}.deb
+CLIENT_DBG_DEB=${PACKAGE}-client-dbgsym_${DEB_VERSION}_${ARCH}.deb
 DOC_DEB=${PACKAGE}-docs_${DEB_VERSION}_all.deb
 
-DEBS=${SERVER_DEB} ${CLIENT_DEB}
+DEBS=${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB}
 
 DSC = rust-${PACKAGE}_${DEB_VERSION}.dsc
 
@@ -58,7 +60,7 @@ $(SUBDIRS):
 test:
 	#cargo test test_broadcast_future
 	#cargo test $(CARGO_BUILD_ARGS)
-	$(CARGO) test $(tests) $(CARGO_BUILD_ARGS)
+	#$(CARGO) test $(tests) $(CARGO_BUILD_ARGS)
 
 doc:
 	$(CARGO) doc --no-deps $(CARGO_BUILD_ARGS)
@@ -142,5 +144,5 @@ install: $(COMPILED_BINS)
 upload: ${SERVER_DEB} ${CLIENT_DEB} ${DOC_DEB}
 	# check if working directory is clean
 	git diff --exit-code --stat && git diff --exit-code --stat --staged
-	tar cf - ${SERVER_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
-	tar cf - ${CLIENT_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve" --dist buster
+	tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
+	tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve" --dist buster
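
For illustration, with ``PACKAGE=proxmox-backup``, ``ARCH=amd64`` and ``DEB_VERSION=0.8.6-1`` (these concrete values are assumptions based on the version bump above, not part of this diff), the new debug-symbol variables expand to:

  SERVER_DEB     = proxmox-backup-server_0.8.6-1_amd64.deb
  SERVER_DBG_DEB = proxmox-backup-server-dbgsym_0.8.6-1_amd64.deb
  CLIENT_DEB     = proxmox-backup-client_0.8.6-1_amd64.deb
  CLIENT_DBG_DEB = proxmox-backup-client-dbgsym_0.8.6-1_amd64.deb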
debian/changelog (vendored, 71 changed lines)

@@ -1,3 +1,74 @@
+rust-proxmox-backup (0.8.6-1) unstable; urgency=medium
+
+  * ui: add button for easily showing the server fingerprint dashboard
+
+  * proxmox-backup-client benchmark: add --verbose flag and improve output
+    format
+
+  * docs: reference PDF variant in HTML output
+
+  * proxmox-backup-client: add simple version command
+
+  * improve keyfile and signature handling in catalog and manifest
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 10 Jul 2020 11:34:14 +0200
+
+rust-proxmox-backup (0.8.5-1) unstable; urgency=medium
+
+  * fix cross process task listing
+
+  * docs: expand datastore documentation
+
+  * docs: add remotes and sync-jobs and schedules
+
+  * bump pathpatterns to 0.1.2
+
+  * ui: align version and user-menu spacing with pve/pmg
+
+  * ui: make username a menu-button
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 15:32:39 +0200
+
+rust-proxmox-backup (0.8.4-1) unstable; urgency=medium
+
+  * add TaskButton in header
+
+  * simpler lost+found pattern
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 14:28:24 +0200
+
+rust-proxmox-backup (0.8.3-1) unstable; urgency=medium
+
+  * get_disks: don't fail on zfs_devices
+
+  * allow some more characters for zpool list
+
+  * ui: adapt for new sign-only crypt mode
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 13:55:06 +0200
+
+rust-proxmox-backup (0.8.2-1) unstable; urgency=medium
+
+  * buildsys: also upload debug packages
+
+  * src/backup/manifest.rs: rename into_string -> to_string
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 11:58:51 +0200
+
+rust-proxmox-backup (0.8.1-1) unstable; urgency=medium
+
+  * remove authenticated data blobs (not needed)
+
+  * add signature to manifest
+
+  * improve docs
+
+  * client: introduce --keyfd parameter
+
+  * ui improvements
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 10:01:25 +0200
+
 rust-proxmox-backup (0.8.0-1) unstable; urgency=medium
 
   * implement get_runtime_with_builder
debian/lintian-overrides (vendored, new file, 1 line)

@@ -0,0 +1 @@
+proxmox-backup-server: package-installs-apt-sources etc/apt/sources.list.d/pbstest-beta.list
debian/proxmox-backup-docs.links (vendored, new file, 1 line)

@@ -0,0 +1 @@
+/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/html/proxmox-backup.pdf
debian/proxmox-backup-server.install (vendored, 1 changed line)

@@ -1,6 +1,7 @@
 etc/proxmox-backup-proxy.service /lib/systemd/system/
 etc/proxmox-backup.service /lib/systemd/system/
 etc/proxmox-backup-banner.service /lib/systemd/system/
+etc/pbstest-beta.list /etc/apt/sources.list.d/
 usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-api
 usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
 usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
docs/administration-guide.rst

@@ -1,9 +1,8 @@
-Administration Guide
-====================
+Backup Management
+=================
 
-The administration guide.
-
-.. todo:: either add a bit more explanation or remove the previous sentence
+.. The administration guide.
+.. todo:: either add a bit more explanation or remove the previous sentence
 
 Terminology
 -----------
@@ -13,16 +12,16 @@ Backup Content
 
 When doing deduplication, there are different strategies to get
 optimal results in terms of performance and/or deduplication rates.
-Depending on the type of data, one can split data into *fixed* or *variable*
+Depending on the type of data, it can be split into *fixed* or *variable*
 sized chunks.
 
-Fixed sized chunking needs almost no CPU performance, and is used to
+Fixed sized chunking requires minimal CPU power, and is used to
 backup virtual machine images.
 
 Variable sized chunking needs more CPU power, but is essential to get
 good deduplication rates for file archives.
 
-The backup server supports both strategies.
+The Proxmox Backup Server supports both strategies.
 
 
 File Archives: ``<name>.pxar``
@@ -31,7 +30,7 @@ File Archives: ``<name>.pxar``
 
 .. see https://moinakg.wordpress.com/2013/06/22/high-performance-content-defined-chunking/
 
 A file archive stores a full directory tree. Content is stored using
-the :ref:`pxar-format`, split into variable sized chunks. The format
+the :ref:`pxar-format`, split into variable-sized chunks. The format
 is optimized to achieve good deduplication rates.
 
 
@@ -39,7 +38,7 @@ Image Archives: ``<name>.img``
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 This is used for virtual machine images and other large binary
-data. Content is split into fixed sized chunks.
+data. Content is split into fixed-sized chunks.
 
 
 Binary Data (BLOBs)
@@ -56,7 +55,7 @@ Catalog File: ``catalog.pcat1``
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 The catalog file is an index for file archives. It contains
-the list of files and is used to speed-up search operations.
+the list of files and is used to speed up search operations.
 
 
 The Manifest: ``index.json``
@@ -74,12 +73,12 @@ The backup server groups backups by *type*, where *type* is one of:
 
 ``vm``
   This type is used for :term:`virtual machine`\ s. Typically
-  contains the virtual machine's configuration and an image archive
+  consists of the virtual machine's configuration file and an image archive
   for each disk.
 
 ``ct``
-  This type is used for :term:`container`\ s. Contains the container's
-  configuration and a single file archive for the container content.
+  This type is used for :term:`container`\ s. Consists of the container's
+  configuration and a single file archive for the filesystem content.
 
 ``host``
   This type is used for backups created from within the backed up machine.
@@ -90,7 +89,7 @@ The backup server groups backups by *type*, where *type* is one of:
 Backup ID
 ~~~~~~~~~
 
-An unique ID. Usually the virtual machine or container ID. ``host``
+A unique ID. Usually the virtual machine or container ID. ``host``
 type backups normally use the hostname.
 
 
@@ -122,6 +121,13 @@ uniquely identifies a specific backup within a datastore.
 As you can see, the time format is RFC3339_ with Coordinated
 Universal Time (UTC_, identified by the trailing *Z*).
 
+Backup Server Management
+------------------------
+
+The command line tool to configure and manage the backup server is called
+:command:`proxmox-backup-manager`.
+
+
 :term:`DataStore`
 ~~~~~~~~~~~~~~~~~
@@ -134,20 +140,18 @@ Datastores are identified by a simple *ID*. You can configure it
 when setting up the backup server.
 
 
-Backup Server Management
-------------------------
-
-The command line tool to configure and manage the backup server is called
-:command:`proxmox-backup-manager`.
-
-
 Datastore Configuration
 ~~~~~~~~~~~~~~~~~~~~~~~
 
-A :term:`datastore` is a place to store backups. You can configure
-multiple datastores. At least one datastore needs to be
-configured. The datastore is identified by a simple `name` and points
-to a directory.
+You can configure multiple datastores. At least one datastore needs to be
+configured. The datastore is identified by a simple `name` and points to a
+directory on the filesystem. Each datastore also has associated retention
+settings of how many backup snapshots to keep for each interval of ``hourly``,
+``daily``, ``weekly``, ``monthly``, ``yearly``, as well as a time-independent
+number of backups to keep in that store. :ref:`Pruning <pruning>` and
+:ref:`garbage collection <garbage-collection>` can also be configured to run
+periodically, based on a configured :term:`schedule` per datastore.
 
 The following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
@@ -166,6 +170,30 @@ To list existing datastores run:
   │ store1 │ /backup/disk1/store1 │ This is my default storage. │
   └────────┴──────────────────────┴─────────────────────────────┘
 
+You can change the settings of a datastore, for example to set a prune and
+garbage collection schedule or retention settings, using the ``update``
+subcommand, and view a datastore with the ``show`` subcommand:
+
+.. code-block:: console
+
+  # proxmox-backup-manager datastore update store1 --keep-last 7 --prune-schedule daily --gc-schedule 'Tue 04:27'
+  # proxmox-backup-manager datastore show store1
+  ┌────────────────┬─────────────────────────────┐
+  │ Name           │ Value                       │
+  ╞════════════════╪═════════════════════════════╡
+  │ name           │ store1                      │
+  ├────────────────┼─────────────────────────────┤
+  │ path           │ /backup/disk1/store1        │
+  ├────────────────┼─────────────────────────────┤
+  │ comment        │ This is my default storage. │
+  ├────────────────┼─────────────────────────────┤
+  │ gc-schedule    │ Tue 04:27                   │
+  ├────────────────┼─────────────────────────────┤
+  │ keep-last      │ 7                           │
+  ├────────────────┼─────────────────────────────┤
+  │ prune-schedule │ daily                       │
+  └────────────────┴─────────────────────────────┘
+
 Finally, it is possible to remove the datastore configuration:
 
 .. code-block:: console
@@ -179,17 +207,58 @@ Finally, it is possible to remove the datastore configuration:
 
 File Layout
 ^^^^^^^^^^^
 
-.. todo:: Add datastore file layout example
+After creating a datastore, the following default layout will appear:
+
+.. code-block:: console
+
+  # ls -arilh /backup/disk1/store1
+  276493 -rw-r--r-- 1 backup backup 0 Jul 8 12:35 .lock
+  276490 drwxr-x--- 1 backup backup 1064960 Jul 8 12:35 .chunks
+
+`.lock` is an empty file used for process locking.
+
+The `.chunks` directory contains folders named with hexadecimal values from
+`0000` through `ffff`. These directories store the chunked data after a backup
+operation has been executed.
+
+.. code-block:: console
+
+  # ls -arilh /backup/disk1/store1/.chunks
+  545824 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 ffff
+  545823 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffe
+  415621 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffd
+  415620 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffc
+  353187 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffb
+  344995 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffa
+  144079 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fff9
+  144078 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fff8
+  144077 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fff7
+  ...
+  403180 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 000c
+  403179 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 000b
+  403177 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 000a
+  402530 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0009
+  402513 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0008
+  402509 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0007
+  276509 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0006
+  276508 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0005
+  276507 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0004
+  276501 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0003
+  276499 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0002
+  276498 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0001
+  276494 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0000
+  276489 drwxr-xr-x 3 backup backup 4.0K Jul 8 12:35 ..
+  276490 drwxr-x--- 1 backup backup 1.1M Jul 8 12:35 .
+
+
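As an illustration of this layout (the naming rule here is an assumption inferred from the directory listing above, not stated in this diff): a chunk is addressed by its SHA-256 digest, and the four leading hex digits of that digest select the ``.chunks`` subdirectory, so a chunk's path could be derived along these lines:

.. code-block:: console

  # digest=f4e2...                       # 64 hex digits, abbreviated here
  # echo /backup/disk1/store1/.chunks/${digest:0:4}/${digest}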
 User Management
 ~~~~~~~~~~~~~~~
 
-Proxmox Backup support several authentication realms, and you need to
+Proxmox Backup Server supports several authentication realms, and you need to
 choose the realm when you add a new user. Possible realms are:
 
 :pam: Linux PAM standard authentication. Use this if you want to
-   authenticate as Linux system user (Users needs to exist on the
+   authenticate as Linux system user (Users need to exist on the
    system).
 
 :pbs: Proxmox Backup Server realm. This type stores hashed passwords in
@@ -216,8 +285,8 @@ normally want to add other users with less privileges:
 
   # proxmox-backup-manager user create john@pbs --email john@example.com
 
-The create command lets you specify many option like ``--email`` or
-``--password``, but you can update or change any of them using the
+The create command lets you specify many options like ``--email`` or
+``--password``. You can update or change any of them using the
 update command later:
 
 .. code-block:: console
@@ -225,11 +294,10 @@ update command later:
 
   # proxmox-backup-manager user update john@pbs --firstname John --lastname Smith
   # proxmox-backup-manager user update john@pbs --comment "An example user."
 
-
 .. todo:: Mention how to set password without passing plaintext password as cli argument.
 
 
-The resulting use list looks like this:
+The resulting user list looks like this:
 
 .. code-block:: console
@@ -242,16 +310,16 @@ The resulting use list looks like this:
 
   │ root@pam │ 1 │ │ │ │ │ Superuser │
   └──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘
 
-Newly created users do not have an permissions. Please read the next
+Newly created users do not have any permissions. Please read the next
 section to learn how to set access permissions.
 
-If you want to disable an user account, you can do that by setting ``--enable`` to ``0``
+If you want to disable a user account, you can do that by setting ``--enable`` to ``0``
 
 .. code-block:: console
 
   # proxmox-backup-manager user update john@pbs --enable 0
 
-Or completely remove the users with:
+Or completely remove the user with:
 
 .. code-block:: console
@@ -261,20 +329,20 @@ Or completely remove the users with:
 
 Access Control
 ~~~~~~~~~~~~~~
 
-Users do not have any permission by default. Instead you need to
-specify what is allowed and what not. You can do this by assigning
+By default new users do not have any permission. Instead you need to
+specify what is allowed and what is not. You can do this by assigning
 roles to users on specific objects like datastores or remotes. The
 following roles exist:
 
+**NoAccess**
+  Disable Access - nothing is allowed.
+
 **Admin**
   The Administrator can do anything.
 
 **Audit**
   An Auditor can view things, but is not allowed to change settings.
 
-**NoAccess**
-  Disable Access - nothing is allowed.
-
 **DatastoreAdmin**
   Can do anything on datastores.
 
@@ -301,6 +369,63 @@ following roles exist:
   Is allowed to read data from a remote.
 
 
+:term:`Remote`
+~~~~~~~~~~~~~~
+
+A remote is a different Proxmox Backup Server installation and a user on that
+installation, from which you can `sync` datastores to a local datastore with a
+`Sync Job`.
+
+To add a remote, you need its hostname or IP address, a userid and password on
+the remote, and its certificate fingerprint. To get the fingerprint, use the
+``proxmox-backup-manager cert info`` command on the remote.
+
+.. code-block:: console
+
+  # proxmox-backup-manager cert info | grep Fingerprint
+  Fingerprint (sha256): 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
+
+With the needed information, add the remote with:
+
+.. code-block:: console
+
+  # proxmox-backup-manager remote create pbs2 --host pbs2.mydomain.example --userid sync@pam --password 'SECRET' --fingerprint 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
+
+Use the ``list``, ``show``, ``update`` and ``remove`` subcommands of
+``proxmox-backup-manager remote`` to manage your remotes:
+
+.. code-block:: console
+
+  # proxmox-backup-manager remote update pbs2 --host pbs2.example
+  # proxmox-backup-manager remote list
+  ┌──────┬──────────────┬──────────┬────────────────────────────────────────────┬─────────┐
+  │ name │ host         │ userid   │ fingerprint                                │ comment │
+  ╞══════╪══════════════╪══════════╪════════════════════════════════════════════╪═════════╡
+  │ pbs2 │ pbs2.example │ sync@pam │ 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe │         │
+  └──────┴──────────────┴──────────┴────────────────────────────────────────────┴─────────┘
+  # proxmox-backup-manager remote remove pbs2
+
+
+Sync Jobs
+~~~~~~~~~
+
+Sync jobs are configured to pull the contents of a datastore on a `Remote` to a
+local datastore. You can either start the sync job manually on the GUI or
+provide it with a :term:`schedule` to run regularly. The
+``proxmox-backup-manager sync-job`` command is used to manage sync jobs:
+
+.. code-block:: console
+
+  # proxmox-backup-manager sync-job create pbs2-local --remote pbs2 --remote-store local --store local --schedule 'Wed 02:30'
+  # proxmox-backup-manager sync-job update pbs2-local --comment 'offsite'
+  # proxmox-backup-manager sync-job list
+  ┌────────────┬───────┬────────┬──────────────┬───────────┬─────────┐
+  │ id         │ store │ remote │ remote-store │ schedule  │ comment │
+  ╞════════════╪═══════╪════════╪══════════════╪═══════════╪═════════╡
+  │ pbs2-local │ local │ pbs2   │ local        │ Wed 02:30 │ offsite │
+  └────────────┴───────┴────────┴──────────────┴───────────┴─────────┘
+  # proxmox-backup-manager sync-job remove pbs2-local
+
+
 Backup Client usage
 -------------------
@@ -316,8 +441,8 @@ on the backup server.
 
   [[username@]server:]datastore
 
-The default value for ``username`` ist ``root``. If no server is specified, the
-default is the local host (``localhost``).
+The default value for ``username`` is ``root``. If no server is specified,
+the default is the local host (``localhost``).
 
 You can pass the repository with the ``--repository`` command
 line option, or by setting the ``PBS_REPOSITORY`` environment
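For example, with a hypothetical server ``backup-server.example.org`` and the ``store1`` datastore used throughout this guide, the repository can be set once via the environment and then used implicitly by subsequent client calls (``proxmox-backup-client snapshots`` here simply lists the snapshots in that repository):

.. code-block:: console

  # export PBS_REPOSITORY=backup-user@backup-server.example.org:store1
  # proxmox-backup-client snapshots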
@@ -381,7 +506,7 @@ This section explains how to create a backup from within the machine. This can
 be a physical host, a virtual machine, or a container. Such backups may contain file
 and image archives. There are no restrictions in this case.
 
-.. note:: If you want to backup virtual machines or containers on Proxmov VE, see :ref:`pve-integration`.
+.. note:: If you want to backup virtual machines or containers on Proxmox VE, see :ref:`pve-integration`.
 
 For the following example you need to have a backup server set up, working
 credentials and need to know the repository name.
@@ -726,6 +851,8 @@ To remove the ticket, issue a logout:
 
   # proxmox-backup-client logout
 
+
+.. _pruning:
 
 Pruning and Removing Backups
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -896,7 +1023,3 @@ After that you should be able to see storage status with:
 .. include:: command-line-tools.rst
 
 .. include:: services.rst
-
-.. include host system admin at the end
-
-.. include:: sysadmin.rst
docs/conf.py

@@ -112,7 +112,7 @@ exclude_patterns = [
     'pxar/man1.rst',
     'epilog.rst',
     'pbs-copyright.rst',
     'sysadmin.rst',
-    'local-zfs.rst'
+    'local-zfs.rst',
+    'package-repositories.rst',
 ]
 
docs/epilog.rst

@@ -11,8 +11,10 @@
 .. _Container: https://en.wikipedia.org/wiki/Container_(virtualization)
 .. _Zstandard: https://en.wikipedia.org/wiki/Zstandard
 .. _Proxmox: https://www.proxmox.com
+.. _Proxmox Community Forum: https://forum.proxmox.com
 .. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
 .. _Proxmox Backup: https://www.proxmox.com/proxmox-backup
+.. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
 .. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
 .. _Rust: https://www.rust-lang.org/
 .. _SHA-256: https://en.wikipedia.org/wiki/SHA-2
docs/glossary.rst

@@ -46,3 +46,19 @@ Glossary
     kernel driver handles filesystem requests and sends them to a
     userspace application.
 
+  Remote
+
+    A remote Proxmox Backup Server installation and credentials for a user on it.
+    You can pull datastores from a remote to a local datastore in order to
+    have redundant backups.
+
+  Schedule
+
+    Certain tasks, for example pruning and garbage collection, need to be
+    performed on a regular basis. Proxmox Backup Server uses a subset of the
+    `systemd Time and Date Specification
+    <https://www.freedesktop.org/software/systemd/man/systemd.time.html#>`_.
+    The subset currently supports time of day specifications and weekdays, in
+    addition to the shorthand expressions 'minutely', 'hourly', 'daily'.
+    There is no support for specifying timezones; the tasks are run in the
+    timezone configured on the server.
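For example, valid schedule values of the supported forms include the ones used elsewhere in this guide:

.. code-block:: console

  daily        # shorthand expression
  Tue 04:27    # weekday plus time of day (used above as a gc-schedule)
  Wed 02:30    # weekday plus time of day (used above as a sync-job schedule)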
docs/index.rst

@@ -12,6 +12,10 @@ Front-Cover Texts, and no Back-Cover Texts. A copy of the license is included
 in the section entitled "GNU Free Documentation License".
 
 
+.. only:: html
+
+  A `PDF` version of the documentation is `also available here <./proxmox-backup.pdf>`_
+
 .. toctree::
   :maxdepth: 3
   :caption: Table of Contents
@@ -19,6 +23,7 @@ in the section entitled "GNU Free Documentation License".
 
   introduction.rst
   installation.rst
   administration-guide.rst
+  sysadmin.rst
 
 .. raw:: latex
 
docs/introduction.rst

@@ -1,10 +1,6 @@
 Introduction
 ============
 
-This documentation is written in :term:`reStructuredText` and formatted with
-:term:`Sphinx`.
-
-
 What is Proxmox Backup Server
 -----------------------------
 
@@ -57,7 +53,7 @@ Main Features
 :Incremental backups: Changes between backups are typically low. Reading and
   sending only the delta reduces storage and network impact of backups.
 
-:Data Integrity: The built in `SHA-256`_ checksum algorithm assures the
+:Data Integrity: The built-in `SHA-256`_ checksum algorithm assures the
   accuracy and consistency of your backups.
 
 :Remote Sync: It is possible to efficiently synchronize data to remote
@@ -67,16 +63,17 @@ Main Features
   several gigabytes of data per second.
 
 :Encryption: Backups can be encrypted on the client-side using AES-256 in
-  GCM_ mode. This authenticated encryption mode (AE_) provides very
-  high performance on modern hardware.
+  Galois/Counter Mode (GCM_). This authenticated encryption (AE_) mode
+  provides very high performance on modern hardware.
 
-:Web interface: Manage Proxmox backups with the integrated web-based user
-  interface.
+:Web interface: Manage the Proxmox Backup Server with the integrated web-based
+  user interface.
 
 :Open Source: No secrets. Proxmox Backup Server is free and open-source
   software. The source code is licensed under AGPL, v3.
 
-:Support: Enterprise support is available from `Proxmox`_.
+:Support: Enterprise support will be available from `Proxmox`_ once the beta
+  phase is over.
 
 
 Reasons for Data Backup?
@@ -105,8 +102,56 @@ Therefore, ensure that you perform regular backups and run restore tests.
 Software Stack
 --------------
 
-.. todo:: Eplain why we use Rust (and Flutter)
+Proxmox Backup Server consists of multiple components:
+
+* A server daemon providing, among other things, a RESTful API, super-fast
+  asynchronous tasks, lightweight usage statistic collection, and scheduling
+  events, with strict separation of privileged and unprivileged execution
+  environments
+* A JavaScript management web interface
+* A management CLI tool for the server (`proxmox-backup-manager`)
+* A client CLI tool (`proxmox-backup-client`) to access the server easily from
+  any `Linux amd64` environment
+
+Everything besides the web interface is written in the Rust programming
+language.
+
+  "The Rust programming language helps you write faster, more reliable software.
+  High-level ergonomics and low-level control are often at odds in programming
+  language design; Rust challenges that conflict. Through balancing powerful
+  technical capacity and a great developer experience, Rust gives you the option
+  to control low-level details (such as memory usage) without all the hassle
+  traditionally associated with such control."
+
+  -- `The Rust Programming Language <https://doc.rust-lang.org/book/ch00-00-introduction.html>`_
+
+.. todo:: further explain the software stack
+
 Getting Help
 ------------
 
+Community Support Forum
+~~~~~~~~~~~~~~~~~~~~~~~
+
+We always encourage our users to discuss and share their knowledge using the
+`Proxmox Community Forum`_. The forum is moderated by the Proxmox support team.
+The large user base is spread out all over the world. Needless to say that such
+a large forum is a great place to get information.
+
+Mailing Lists
+~~~~~~~~~~~~~
+
+Proxmox Backup Server is fully open-source and contributions are welcome! Here
+is the primary communication channel for developers:
+
+:Mailing list for developers: `PBS Development List`_
+
+Bug Tracker
+~~~~~~~~~~~
+
+Proxmox runs a public bug tracker at `<https://bugzilla.proxmox.com>`_. If an
+issue appears, file your report there. An issue can be a bug as well as a
+request for a new feature or enhancement. The bug tracker helps to keep track
+of the issue and will send a notification once it has been solved.
+
 License
 -------
 
docs/local-zfs.rst (new file, 401 lines)

@@ -0,0 +1,401 @@
+ZFS on Linux
+------------
+
+ZFS is a combined file system and logical volume manager designed by
+Sun Microsystems. There is no need to manually compile ZFS modules - all
+packages are included.
+
+By using ZFS, it's possible to achieve maximum enterprise features with
+low budget hardware, but also high performance systems by leveraging
+SSD caching or even SSD only setups. ZFS can replace expensive hardware
+RAID cards with moderate CPU and memory load, combined with easy
+management.
+
+General ZFS advantages
+
+* Easy configuration and management with GUI and CLI.
+* Reliable
+* Protection against data corruption
+* Data compression on file system level
+* Snapshots
+* Copy-on-write clone
+* Various raid levels: RAID0, RAID1, RAID10, RAIDZ-1, RAIDZ-2 and RAIDZ-3
+* Can use SSD for cache
+* Self healing
+* Continuous integrity checking
+* Designed for high storage capacities
+* Asynchronous replication over network
+* Open Source
+* Encryption
+
+Hardware
+~~~~~~~~
+
+ZFS depends heavily on memory, so you need at least 8GB to start. In
+practice, use as much as you can get for your hardware/budget. To prevent
+data corruption, we recommend the use of high quality ECC RAM.
+
+If you use a dedicated cache and/or log disk, you should use an
+enterprise class SSD (e.g. Intel SSD DC S3700 Series). This can
+increase the overall performance significantly.
+
+IMPORTANT: Do not use ZFS on top of a hardware controller which has its
+own cache management. ZFS needs to communicate directly with the disks. An
+HBA adapter, or something like an LSI controller flashed in ``IT`` mode, is
+the way to go.
+
+ZFS Administration
+~~~~~~~~~~~~~~~~~~
+
+This section gives you some usage examples for common tasks. ZFS
+itself is really powerful and provides many options. The main commands
+to manage ZFS are `zfs` and `zpool`. Both commands come with great
+manual pages, which can be read with:
+
+.. code-block:: console
+
+  # man zpool
+  # man zfs
+
+Create a new zpool
+^^^^^^^^^^^^^^^^^^
+
+To create a new pool, at least one disk is needed. The `ashift` should
+match the sector size of the underlying disk (2 to the power of `ashift`),
+or be larger.
+
+.. code-block:: console
+
+  # zpool create -f -o ashift=12 <pool> <device>
+
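For example, ``ashift=12`` corresponds to 2^12 = 4096-byte sectors, matching common 4K-sector ("Advanced Format") disks; a quick shell check of the arithmetic:

.. code-block:: console

  # echo $((2**12))
  4096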
+Create a new pool with RAID-0
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Minimum 1 disk
+
+.. code-block:: console
+
+  # zpool create -f -o ashift=12 <pool> <device1> <device2>
+
+Create a new pool with RAID-1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Minimum 2 disks
+
+.. code-block:: console
+
+  # zpool create -f -o ashift=12 <pool> mirror <device1> <device2>
+
+Create a new pool with RAID-10
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Minimum 4 disks
+
+.. code-block:: console
+
+  # zpool create -f -o ashift=12 <pool> mirror <device1> <device2> mirror <device3> <device4>
+
+Create a new pool with RAIDZ-1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Minimum 3 disks
+
+.. code-block:: console
+
+  # zpool create -f -o ashift=12 <pool> raidz1 <device1> <device2> <device3>
+
+Create a new pool with RAIDZ-2
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Minimum 4 disks
+
+.. code-block:: console
+
+  # zpool create -f -o ashift=12 <pool> raidz2 <device1> <device2> <device3> <device4>
+
+Create a new pool with cache (L2ARC)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+It is possible to use a dedicated cache drive partition to increase
+the performance (use SSD).
+
+For `<device>` it is possible to use multiple devices, as shown in
+"Create a new pool with RAID*".
+
+.. code-block:: console
+
+  # zpool create -f -o ashift=12 <pool> <device> cache <cache_device>
+
+Create a new pool with log (ZIL)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+It is possible to use a dedicated log drive partition to increase
+the performance (use SSD).
+
+For `<device>` it is possible to use multiple devices, as shown in
+"Create a new pool with RAID*".
+
+.. code-block:: console
+
+  # zpool create -f -o ashift=12 <pool> <device> log <log_device>
+
+Add cache and log to an existing pool
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you have a pool without cache and log, first partition the SSD into
+2 partitions with `parted` or `gdisk`.
+
+.. important:: Always use GPT partition tables.
+
+The maximum size of a log device should be about half the size of
+physical memory, so this is usually quite small. The rest of the SSD
+can be used as cache.
+
+.. code-block:: console
+
+  # zpool add -f <pool> log <device-part1> cache <device-part2>
+
+
+Changing a failed device
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: console
+
+  # zpool replace -f <pool> <old device> <new device>
+
+
+Changing a failed bootable device
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Depending on how Proxmox Backup was installed, it is either using `grub` or
+`systemd-boot` as bootloader.
+
+The first steps of copying the partition table, reissuing GUIDs and replacing
+the ZFS partition are the same. To make the system bootable from the new disk,
+different steps are needed which depend on the bootloader in use.
+
+.. code-block:: console
+
+  # sgdisk <healthy bootable device> -R <new device>
+  # sgdisk -G <new device>
+  # zpool replace -f <pool> <old zfs partition> <new zfs partition>
+
+.. NOTE:: Use the `zpool status -v` command to monitor how far the resilvering process of the new disk has progressed.
+
+With `systemd-boot`:
+
+.. code-block:: console
+
+  # pve-efiboot-tool format <new disk's ESP>
+  # pve-efiboot-tool init <new disk's ESP>
+
+.. NOTE:: `ESP` stands for EFI System Partition, which is set up as partition #2 on
+  bootable disks set up by the Proxmox VE installer since version 5.4. For details,
+  see *Setting up a new partition for use as synced ESP* in the Proxmox VE
+  documentation.
+
+With `grub`:
+
+Usually `grub.cfg` is located in `/boot/grub/grub.cfg`
+
+.. code-block:: console
+
+  # grub-install <new disk>
+  # grub-mkconfig -o /path/to/grub.cfg
+
+
+Activate E-Mail Notification
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ZFS comes with an event daemon, which monitors events generated by the
+ZFS kernel module. The daemon can also send emails on ZFS events like
+pool errors. Newer ZFS packages ship the daemon in a separate package,
+and you can install it using `apt-get`:
+
+.. code-block:: console
+
+  # apt-get install zfs-zed
+
+To activate the daemon, it is necessary to edit `/etc/zfs/zed.d/zed.rc` with your
+favourite editor, and uncomment the `ZED_EMAIL_ADDR` setting:
+
+.. code-block:: console
+
+  ZED_EMAIL_ADDR="root"
+
+Please note that Proxmox Backup forwards mails sent to `root` to the email
+address configured for the root user.
+
+IMPORTANT: The only setting that is required is `ZED_EMAIL_ADDR`. All
+other settings are optional.
+
+Limit ZFS Memory Usage
+^^^^^^^^^^^^^^^^^^^^^^
+
+It is good to use at most 50 percent (which is the default) of the
+system memory for the ZFS ARC, to prevent performance degradation of the
+host. Use your preferred editor to change the configuration in
+`/etc/modprobe.d/zfs.conf` and insert:
+
+.. code-block:: console
+
+  options zfs zfs_arc_max=8589934592
+
+This example setting limits the usage to 8GB.
+
+.. IMPORTANT:: If your root file system is ZFS, you must update your initramfs every time this value changes:
+
+.. code-block:: console
+
+  # update-initramfs -u
+
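As a quick sanity check of that example value, 8 GiB is 8 * 1024^3 bytes, which a shell can confirm:

.. code-block:: console

  # echo $((8 * 1024**3))
  8589934592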
+SWAP on ZFS
+^^^^^^^^^^^
+
+Swap-space created on a zvol may cause some problems, such as blocking the
+server or generating a high IO load, often seen when starting a backup
+to an external storage.
+
+We strongly recommend using enough memory, so that you normally do not
+run into low memory situations. Should you need or want to add swap, it is
+preferred to create a partition on a physical disk and use it as a swap device.
+You can leave some space free for this purpose in the advanced options of the
+installer. Additionally, you can lower the `swappiness` value.
+A good value for servers is 10:
+
+.. code-block:: console
+
+  # sysctl -w vm.swappiness=10
+
+To make the swappiness persistent, open `/etc/sysctl.conf` with
+an editor of your choice and add the following line:
+
+.. code-block:: console
+
+  vm.swappiness = 10
+
+.. table:: Linux kernel `swappiness` parameter values
+  :widths: auto
+
+  ==================== ===============================================================
+  Value                Strategy
+  ==================== ===============================================================
+  vm.swappiness = 0    The kernel will swap only to avoid an 'out of memory' condition
+  vm.swappiness = 1    Minimum amount of swapping without disabling it entirely.
+  vm.swappiness = 10   Sometimes recommended to improve performance when sufficient memory exists in a system.
+  vm.swappiness = 60   The default value.
+  vm.swappiness = 100  The kernel will swap aggressively.
+  ==================== ===============================================================
+
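To verify the value after applying it, standard ``sysctl`` usage suffices:

.. code-block:: console

  # sysctl vm.swappiness
  vm.swappiness = 10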
+ZFS Compression
+^^^^^^^^^^^^^^^
+
+To activate compression:
+
+.. code-block:: console
+
+  # zpool set compression=lz4 <pool>
+
+We recommend using the `lz4` algorithm, since it adds very little CPU overhead.
+Other algorithms such as `lzjb` and `gzip-N` (where `N` is an integer `1-9`
+representing the compression ratio, 1 is fastest and 9 is best compression)
+are also available. Depending on the algorithm and how compressible the data
+is, having compression enabled can even increase I/O performance.
+
+You can disable compression at any time with:
+
+.. code-block:: console
+
+  # zfs set compression=off <dataset>
+
+Only new blocks will be affected by this change.
+
+ZFS Special Device
+^^^^^^^^^^^^^^^^^^
+
+Since version 0.8.0, ZFS supports `special` devices. A `special` device in a
+pool is used to store metadata, deduplication tables, and optionally small
+file blocks.
+
+A `special` device can improve the speed of a pool consisting of slow spinning
+hard disks with a lot of metadata changes. For example, workloads that involve
+creating, updating or deleting a large number of files will benefit from the
+presence of a `special` device. ZFS datasets can also be configured to store
+whole small files on the `special` device, which can further improve the
+performance. Use fast SSDs for the `special` device.
+
+.. IMPORTANT:: The redundancy of the `special` device should match the one of the
+  pool, since the `special` device is a point of failure for the whole pool.
+
+.. WARNING:: Adding a `special` device to a pool cannot be undone!
+
+Create a pool with `special` device and RAID-1:
+
+.. code-block:: console
+
+  # zpool create -f -o ashift=12 <pool> mirror <device1> <device2> special mirror <device3> <device4>
+
+Adding a `special` device to an existing pool with RAID-1:
+
+.. code-block:: console
+
+  # zpool add <pool> special mirror <device1> <device2>
+
+ZFS datasets expose the `special_small_blocks=<size>` property. `size` can be
+`0` to disable storing small file blocks on the `special` device, or a power of
+two in the range between `512B` and `128K`. After setting the property, new file
+blocks smaller than `size` will be allocated on the `special` device.
+
+.. IMPORTANT:: If the value for `special_small_blocks` is greater than or equal to
+  the `recordsize` (default `128K`) of the dataset, *all* data will be written to
+  the `special` device, so be careful!
+
+Setting the `special_small_blocks` property on a pool will change the default
+value of that property for all child ZFS datasets (for example, all containers
+in the pool will opt in for small file blocks).
+
+Opt in for all files smaller than 4K blocks pool-wide:
+
+.. code-block:: console
+
+  # zfs set special_small_blocks=4K <pool>
+
+Opt in for small file blocks for a single dataset:
+
+.. code-block:: console
+
+  # zfs set special_small_blocks=4K <pool>/<filesystem>
+
+Opt out from small file blocks for a single dataset:
+
+.. code-block:: console
+
+  # zfs set special_small_blocks=0 <pool>/<filesystem>
+
+Troubleshooting
+^^^^^^^^^^^^^^^
+
+Corrupted cachefile
+
+Sometimes the ZFS cachefile can get corrupted, and `zfs-import-cache.service`
+doesn't import the pools that aren't present in the cachefile. In that case,
+some volumes may not be mounted during boot until mounted manually later.
+
+For each pool, run:
+
+.. code-block:: console
+
+  # zpool set cachefile=/etc/zfs/zpool.cache POOLNAME
+
+and afterwards update the `initramfs` by running:
+
+.. code-block:: console
+
+  # update-initramfs -u -k all
+
+and finally reboot your node.
+
+Another workaround to this problem is enabling the `zfs-import-scan.service`,
+which searches and imports pools via device scanning (usually slower).
docs/package-repositories.rst

@@ -3,100 +3,110 @@
 Debian Package Repositories
 ---------------------------
 
-All Debian based systems use APT_ as package
-management tool. The list of repositories is defined in
-``/etc/apt/sources.list`` and ``.list`` files found in the
-``/etc/apt/sources.d/`` directory. Updates can be installed directly with
-the ``apt`` command line tool, or via the GUI.
+All Debian based systems use APT_ as package management tool. The list of
+repositories is defined in ``/etc/apt/sources.list`` and ``.list`` files found
+in the ``/etc/apt/sources.d/`` directory. Updates can be installed directly
+with the ``apt`` command line tool, or via the GUI.
 
-APT_ ``sources.list`` files list one package repository per line, with
-the most preferred source listed first. Empty lines are ignored and a
-``#`` character anywhere on a line marks the remainder of that line as a
-comment. The information available from the configured sources is
-acquired by ``apt update``.
+APT_ ``sources.list`` files list one package repository per line, with the most
+preferred source listed first. Empty lines are ignored and a ``#`` character
+anywhere on a line marks the remainder of that line as a comment. The
+information available from the configured sources is acquired by ``apt
+update``.
 
 .. code-block:: sources.list
   :caption: File: ``/etc/apt/sources.list``
 
   deb http://ftp.debian.org/debian buster main contrib
   deb http://ftp.debian.org/debian buster-updates main contrib
 
   # security updates
   deb http://security.debian.org/debian-security buster/updates main contrib
 
 
 .. FIXME for 7.0: change security update suite to bullseye-security
 
-In addition, Proxmox provides three different package repositories for
-the backup server binaries.
+In addition, you need a package repository from Proxmox to get the backup
+server updates.
 
-`Proxmox Backup`_ Enterprise Repository
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+During the Proxmox Backup beta phase only one repository (pbstest) will be
+available. Once released, an Enterprise repository for production use and a
+no-subscription repository will be provided.
 
-This is the default, stable, and recommended repository. It is available for
-all `Proxmox Backup`_ subscription users. It contains the most stable packages,
-and is suitable for production use. The ``pbs-enterprise`` repository is
-enabled by default:
+.. comment
+  `Proxmox Backup`_ Enterprise Repository
+  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. code-block:: sources.list
-  :caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
+  This will be the default, stable, and recommended repository. It is available for
+  all `Proxmox Backup`_ subscription users. It contains the most stable packages,
+  and is suitable for production use. The ``pbs-enterprise`` repository is
+  enabled by default:
 
-  deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
+  .. note:: During the Proxmox Backup beta phase only one repository (pbstest)
+    will be available.
+
+  .. code-block:: sources.list
+    :caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
+
+    deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
 
-To never miss important security fixes, the superuser (``root@pam`` user) is
-notified via email about new packages as soon as they are available. The
-change-log and details of each package can be viewed in the GUI (if available).
+  To never miss important security fixes, the superuser (``root@pam`` user) is
+  notified via email about new packages as soon as they are available. The
+  change-log and details of each package can be viewed in the GUI (if available).
 
-Please note that you need a valid subscription key to access this
-repository. More information regarding subscription levels and pricing can be
-found at https://www.proxmox.com/en/proxmox-backup/pricing.
+  Please note that you need a valid subscription key to access this
+  repository. More information regarding subscription levels and pricing can be
+  found at https://www.proxmox.com/en/proxmox-backup/pricing.
 
-.. note:: You can disable this repository by commenting out the above
-  line using a `#` (at the start of the line). This prevents error
-  messages if you do not have a subscription key. Please configure the
-  ``pbs-no-subscription`` repository in that case.
+  .. note:: You can disable this repository by commenting out the above
+    line using a `#` (at the start of the line). This prevents error
+    messages if you do not have a subscription key. Please configure the
+    ``pbs-no-subscription`` repository in that case.
 
-`Proxmox Backup`_ No-Subscription Repository
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  `Proxmox Backup`_ No-Subscription Repository
+  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-As the name suggests, you do not need a subscription key to access
-this repository. It can be used for testing and non-production
-use. It is not recommended to use it on production servers, because these
-packages are not always heavily tested and validated.
+  As the name suggests, you do not need a subscription key to access
+  this repository. It can be used for testing and non-production
+  use. It is not recommended to use it on production servers, because these
+  packages are not always heavily tested and validated.
 
-We recommend to configure this repository in ``/etc/apt/sources.list``.
+  We recommend to configure this repository in ``/etc/apt/sources.list``.
 
-.. code-block:: sources.list
-  :caption: File: ``/etc/apt/sources.list``
+  .. code-block:: sources.list
+    :caption: File: ``/etc/apt/sources.list``
 
-  deb http://ftp.debian.org/debian buster main contrib
-  deb http://ftp.debian.org/debian buster-updates main contrib
+    deb http://ftp.debian.org/debian buster main contrib
+    deb http://ftp.debian.org/debian buster-updates main contrib
 
-  # PBS pbs-no-subscription repository provided by proxmox.com,
-  # NOT recommended for production use
-  deb http://download.proxmox.com/debian/bps buster pbs-no-subscription
+    # PBS pbs-no-subscription repository provided by proxmox.com,
+    # NOT recommended for production use
+    deb http://download.proxmox.com/debian/pbs buster pbs-no-subscription
 
-  # security updates
-  deb http://security.debian.org/debian-security buster/updates main contrib
+    # security updates
+    deb http://security.debian.org/debian-security buster/updates main contrib
 
 
-`Proxmox Backup`_ Test Repository
+`Proxmox Backup`_ Beta Repository
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Finally, there is a repository called ``pbstest``. This one contains the
-latest packages and is heavily used by developers to test new
+During the public beta, there is a repository called ``pbstest``. This one
+contains the latest packages and is heavily used by developers to test new
 features.
 
-.. warning:: the ``pbstest`` repository should (as the name implies)
+.. .. warning:: the ``pbstest`` repository should (as the name implies)
   only be used to test new features or bug fixes.
 
-You can configure this using ``/etc/apt/sources.list`` by
-adding the following line:
+You can configure this using ``/etc/apt/sources.list`` by adding the following
+line:
 
 .. code-block:: sources.list
   :caption: sources.list entry for ``pbstest``
 
-  deb http://download.proxmox.com/debian/bps buster pbstest
+  deb http://download.proxmox.com/debian/pbs buster pbstest
+
+If you installed Proxmox Backup Server from the official beta ISO you should
+have this repository already configured in
+``/etc/apt/sources.list.d/pbstest-beta.list``
docs/sysadmin.rst

@@ -1,5 +1,5 @@
 Host System Administration
---------------------------
+==========================
 
 `Proxmox Backup`_ is based on the famous Debian_ Linux
 distribution. That means that you have access to the whole world of
@@ -23,8 +23,4 @@ either explain things which are different on `Proxmox Backup`_, or
 tasks which are commonly used on `Proxmox Backup`_. For other topics,
 please refer to the standard Debian documentation.
 
-ZFS
-~~~
-
-.. todo:: Add local ZFS admin guide (local.zfs.adoc)
+.. include:: local-zfs.rst
docs/todos.rst (new file, 6 lines)

@@ -0,0 +1,6 @@
+Documentation Todo List
+=======================
+
+This is an auto-generated list of the todo references in the documentation.
+
+.. todolist::
etc/Makefile

@@ -7,7 +7,7 @@ DYNAMIC_UNITS := \
 	proxmox-backup.service \
 	proxmox-backup-proxy.service
 
-all: $(UNITS) $(DYNAMIC_UNITS)
+all: $(UNITS) $(DYNAMIC_UNITS) pbstest-beta.list
 
 clean:
 	rm -f $(DYNAMIC_UNITS)
etc/pbstest-beta.list (new file, 1 line)

@@ -0,0 +1 @@
+deb http://download.proxmox.com/debian/pbs buster pbstest
src/api2/admin/datastore.rs

@@ -46,20 +46,20 @@ fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> R
 
 fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
 
-    let (manifest, index_size) = store.load_manifest(backup_dir)?;
+    let (manifest, manifest_crypt_mode, index_size) = store.load_manifest(backup_dir)?;
 
     let mut result = Vec::new();
     for item in manifest.files() {
         result.push(BackupContent {
             filename: item.filename.clone(),
-            encrypted: item.encrypted,
+            crypt_mode: Some(item.crypt_mode),
             size: Some(item.size),
         });
     }
 
     result.push(BackupContent {
         filename: MANIFEST_BLOB_NAME.to_string(),
-        encrypted: Some(false),
+        crypt_mode: Some(manifest_crypt_mode),
         size: Some(index_size),
     });
 
@@ -79,7 +79,11 @@ fn get_all_snapshot_files(
 
     for file in &info.files {
         if file_set.contains(file) { continue; }
-        files.push(BackupContent { filename: file.to_string(), size: None, encrypted: None });
+        files.push(BackupContent {
+            filename: file.to_string(),
+            size: None,
+            crypt_mode: None,
+        });
     }
 
     Ok(files)
@@ -350,7 +354,15 @@ pub fn list_snapshots (
         },
         Err(err) => {
             eprintln!("error during snapshot file listing: '{}'", err);
-            info.files.iter().map(|x| BackupContent { filename: x.to_string(), size: None, encrypted: None }).collect()
+            info
+                .files
+                .iter()
+                .map(|x| BackupContent {
+                    filename: x.to_string(),
+                    size: None,
+                    crypt_mode: None,
+                })
+                .collect()
         },
     };
 
@@ -523,7 +535,7 @@ macro_rules! add_common_prune_prameters {
 
 pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
     "Returns the list of snapshots and a flag indicating if there are kept or removed.",
-    PruneListItem::API_SCHEMA
+    &PruneListItem::API_SCHEMA
 ).schema();
 
 const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
@@ -902,7 +914,7 @@ fn download_file_decoded(
 
     let files = read_backup_index(&datastore, &backup_dir)?;
     for file in files {
-        if file.filename == file_name && file.encrypted == Some(true) {
+        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
             bail!("cannot decode '{}' - is encrypted", file_name);
         }
     }
@ -41,6 +41,9 @@ pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new(
    .default(12)
    .schema();

pub const ZPOOL_NAME_SCHEMA: Schema = StringSchema::new("ZFS Pool Name")
    .format(&ApiStringFormat::Pattern(&ZPOOL_NAME_REGEX))
    .schema();

#[api(
    default: "On",

@ -157,7 +160,7 @@ pub fn list_zpools() -> Result<Vec<ZpoolListItem>, Error> {
            schema: NODE_SCHEMA,
        },
        name: {
            schema: DATASTORE_SCHEMA,
            schema: ZPOOL_NAME_SCHEMA,
        },
    },
},
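The new ``ZPOOL_NAME_REGEX`` deserves a quick illustration. A minimal sketch of what the pattern accepts and rejects (assuming the ``regex`` crate; the pool names are made up)::

    use regex::Regex;

    fn main() {
        // same pattern as ZPOOL_NAME_REGEX in the hunk above
        let re = Regex::new(r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$").unwrap();
        assert!(re.is_match("tank"));
        assert!(re.is_match("rpool-data_01:a.b"));
        assert!(!re.is_match("1tank")); // must start with a letter
        assert!(!re.is_match("t"));     // needs at least two characters
    }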
@ -10,6 +10,7 @@ use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};

use crate::api2::types::*;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT};
use crate::tools::cert::CertInfo;

#[api(
    input: {

@ -46,14 +47,24 @@ use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT};
                description: "Total CPU usage since last query.",
                optional: true,
            },
        }
            info: {
                type: Object,
                description: "contains node information",
                properties: {
                    fingerprint: {
                        description: "The SSL Fingerprint",
                        type: String,
                    },
                },
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "status"], PRIV_SYS_AUDIT, false),
    },
)]
/// Read node memory, CPU and (root) disk usage
fn get_usage(
fn get_status(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,

@ -63,6 +74,10 @@ fn get_usage(
    let kstat: procfs::ProcFsStat = procfs::read_proc_stat()?;
    let disk_usage = crate::tools::disks::disk_usage(Path::new("/"))?;

    // get fingerprint
    let cert = CertInfo::new()?;
    let fp = cert.fingerprint()?;

    Ok(json!({
        "memory": {
            "total": meminfo.memtotal,

@ -74,7 +89,10 @@ fn get_usage(
            "total": disk_usage.total,
            "used": disk_usage.used,
            "free": disk_usage.avail,
        }
        },
        "info": {
            "fingerprint": fp,
        },
    }))
}

@ -122,5 +140,5 @@ fn reboot_or_shutdown(command: NodePowerCommand) -> Result<(), Error> {
}

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_USAGE)
    .get(&API_METHOD_GET_STATUS)
    .post(&API_METHOD_REBOOT_OR_SHUTDOWN);
@ -5,6 +5,8 @@ use proxmox::api::{api, schema::*};
use proxmox::const_regex;
use proxmox::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};

use crate::backup::CryptMode;

// File names: may not contain slashes, may not start with "."
pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
    if name.starts_with('.') {

@ -76,6 +78,8 @@ const_regex!{
    pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");

    pub BLOCKDEVICE_NAME_REGEX = r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$";

    pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
}

pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =

@ -496,6 +500,10 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new(
        "filename": {
            schema: BACKUP_ARCHIVE_NAME_SCHEMA,
        },
        "crypt-mode": {
            type: CryptMode,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]

@ -503,9 +511,9 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new(
/// Basic information about archive files inside a backup snapshot.
pub struct BackupContent {
    pub filename: String,
    /// Info if file is encrypted (or empty if we do not have that info)
    /// Info if file is encrypted, signed, or neither.
    #[serde(skip_serializing_if="Option::is_none")]
    pub encrypted: Option<bool>,
    pub crypt_mode: Option<CryptMode>,
    /// Archive size (from backup manifest).
    #[serde(skip_serializing_if="Option::is_none")]
    pub size: Option<u64>,
@ -80,8 +80,9 @@ impl ChunkStore {

        let default_options = CreateOptions::new();

        if let Err(err) = create_path(&base, Some(default_options.clone()), Some(options.clone())) {
            bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err);
        match create_path(&base, Some(default_options.clone()), Some(options.clone())) {
            Err(err) => bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err),
            Ok(res) => if ! res { nix::unistd::chown(&base, Some(uid), Some(gid))? },
        }

        if let Err(err) = create_dir(&chunk_dir, options.clone()) {
@ -6,12 +6,30 @@
//! See the Wikipedia article for [Authenticated
//! encryption](https://en.wikipedia.org/wiki/Authenticated_encryption)
//! for a short introduction.

use anyhow::{bail, Error};
use openssl::pkcs5::pbkdf2_hmac;
use openssl::hash::MessageDigest;
use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};

use std::io::Write;

use anyhow::{bail, Error};
use chrono::{Local, TimeZone, DateTime};
use openssl::hash::MessageDigest;
use openssl::pkcs5::pbkdf2_hmac;
use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};
use serde::{Deserialize, Serialize};

use proxmox::api::api;

#[api(default: "encrypt")]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
/// Defines whether data is encrypted (using an AEAD cipher), only signed, or neither.
pub enum CryptMode {
    /// Don't encrypt.
    None,
    /// Encrypt.
    Encrypt,
    /// Only sign.
    SignOnly,
}

/// Encryption Configuration with secret key
///

@ -26,7 +44,6 @@ pub struct CryptConfig {
    id_pkey: openssl::pkey::PKey<openssl::pkey::Private>,
    // The private key used by the cipher.
    enc_key: [u8; 32],

}

impl CryptConfig {

@ -63,10 +80,9 @@ impl CryptConfig {
    /// chunk digest values do not clash with values computed for
    /// other secret keys.
    pub fn compute_digest(&self, data: &[u8]) -> [u8; 32] {
        // FIXME: use HMAC-SHA256 instead??
        let mut hasher = openssl::sha::Sha256::new();
        hasher.update(&self.id_key);
        hasher.update(data);
        hasher.update(&self.id_key); // at the end, to avoid length extension attacks
        hasher.finish()
    }

@ -203,7 +219,7 @@ impl CryptConfig {
        created: DateTime<Local>,
    ) -> Result<Vec<u8>, Error> {

        let modified = Local.timestamp(Local::now().timestamp(), 0);
        let key_config = super::KeyConfig { kdf: None, created, modified, data: self.enc_key.to_vec() };
        let data = serde_json::to_string(&key_config)?.as_bytes().to_vec();
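Because ``CryptMode`` is tagged ``#[serde(rename_all = "kebab-case")]``, variant names travel over the API in kebab case. A minimal round-trip sketch (a standalone restatement of the enum above, using only ``serde``/``serde_json``)::

    use serde::{Deserialize, Serialize};

    #[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
    #[serde(rename_all = "kebab-case")]
    enum CryptMode { None, Encrypt, SignOnly }

    fn main() -> Result<(), serde_json::Error> {
        // SignOnly becomes "sign-only" on the wire
        assert_eq!(serde_json::to_string(&CryptMode::SignOnly)?, r#""sign-only""#);
        let mode: CryptMode = serde_json::from_str(r#""encrypt""#)?;
        assert_eq!(mode, CryptMode::Encrypt);
        Ok(())
    }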
@ -3,10 +3,10 @@ use std::convert::TryInto;

use proxmox::tools::io::{ReadExt, WriteExt};

const MAX_BLOB_SIZE: usize = 128*1024*1024;

use super::file_formats::*;
use super::CryptConfig;
use super::{CryptConfig, CryptMode};

const MAX_BLOB_SIZE: usize = 128*1024*1024;

/// Encoded data chunk with digest and positional information
pub struct ChunkInfo {

@ -166,6 +166,19 @@ impl DataBlob {
        Ok(blob)
    }

    /// Get the encryption mode for this blob.
    pub fn crypt_mode(&self) -> Result<CryptMode, Error> {
        let magic = self.magic();

        Ok(if magic == &UNCOMPRESSED_BLOB_MAGIC_1_0 || magic == &COMPRESSED_BLOB_MAGIC_1_0 {
            CryptMode::None
        } else if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
            CryptMode::Encrypt
        } else {
            bail!("Invalid blob magic number.");
        })
    }

    /// Decode blob data
    pub fn decode(&self, config: Option<&CryptConfig>) -> Result<Vec<u8>, Error> {

@ -194,75 +207,11 @@ impl DataBlob {
            } else {
                bail!("unable to decrypt blob - missing CryptConfig");
            }
        } else if magic == &AUTH_COMPR_BLOB_MAGIC_1_0 || magic == &AUTHENTICATED_BLOB_MAGIC_1_0 {
            let header_len = std::mem::size_of::<AuthenticatedDataBlobHeader>();
            let head = unsafe {
                (&self.raw_data[..header_len]).read_le_value::<AuthenticatedDataBlobHeader>()?
            };

            let data_start = std::mem::size_of::<AuthenticatedDataBlobHeader>();

            // Note: only verify if we have a crypt config
            if let Some(config) = config {
                let signature = config.compute_auth_tag(&self.raw_data[data_start..]);
                if signature != head.tag {
                    bail!("verifying blob signature failed");
                }
            }

            if magic == &AUTH_COMPR_BLOB_MAGIC_1_0 {
                let data = zstd::block::decompress(&self.raw_data[data_start..], 16*1024*1024)?;
                Ok(data)
            } else {
                Ok(self.raw_data[data_start..].to_vec())
            }
        } else {
            bail!("Invalid blob magic number.");
        }
    }

    /// Create a signed DataBlob, optionally compressed
    pub fn create_signed(
        data: &[u8],
        config: &CryptConfig,
        compress: bool,
    ) -> Result<Self, Error> {

        if data.len() > MAX_BLOB_SIZE {
            bail!("data blob too large ({} bytes).", data.len());
        }

        let compr_data;
        let (_compress, data, magic) = if compress {
            compr_data = zstd::block::compress(data, 1)?;
            // Note: We only use compression if result is shorter
            if compr_data.len() < data.len() {
                (true, &compr_data[..], AUTH_COMPR_BLOB_MAGIC_1_0)
            } else {
                (false, data, AUTHENTICATED_BLOB_MAGIC_1_0)
            }
        } else {
            (false, data, AUTHENTICATED_BLOB_MAGIC_1_0)
        };

        let header_len = std::mem::size_of::<AuthenticatedDataBlobHeader>();
        let mut raw_data = Vec::with_capacity(data.len() + header_len);

        let head = AuthenticatedDataBlobHeader {
            head: DataBlobHeader { magic, crc: [0; 4] },
            tag: config.compute_auth_tag(data),
        };
        unsafe {
            raw_data.write_le_value(head)?;
        }
        raw_data.extend_from_slice(data);

        let mut blob = DataBlob { raw_data };
        blob.set_crc(blob.compute_crc());

        Ok(blob)
    }

    /// Load blob from ``reader``
    pub fn load(reader: &mut dyn std::io::Read) -> Result<Self, Error> {

@ -294,14 +243,6 @@ impl DataBlob {

            let blob = DataBlob { raw_data: data };

            Ok(blob)
        } else if magic == AUTH_COMPR_BLOB_MAGIC_1_0 || magic == AUTHENTICATED_BLOB_MAGIC_1_0 {
            if data.len() < std::mem::size_of::<AuthenticatedDataBlobHeader>() {
                bail!("authenticated blob too small ({} bytes).", data.len());
            }

            let blob = DataBlob { raw_data: data };

            Ok(blob)
        } else {
            bail!("unable to parse raw blob - wrong magic");

@ -376,7 +317,7 @@ impl <'a, 'b> DataChunkBuilder<'a, 'b> {

    /// Set encryption Configuration
    ///
    /// If set, chunks are encrypted.
    /// If set, chunks are encrypted
    pub fn crypt_config(mut self, value: &'b CryptConfig) -> Self {
        if self.digest_computed {
            panic!("unable to set crypt_config after compute_digest().");

@ -415,12 +356,7 @@ impl <'a, 'b> DataChunkBuilder<'a, 'b> {
            self.compute_digest();
        }

        let chunk = DataBlob::encode(
            self.orig_data,
            self.config,
            self.compress,
        )?;

        let chunk = DataBlob::encode(self.orig_data, self.config, self.compress)?;
        Ok((chunk, self.digest))
    }
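How a caller is expected to use the new ``crypt_mode()`` accessor, sketched under the assumption that ``DataBlob::load`` and ``decode`` behave as shown in the hunks above (the function itself is hypothetical)::

    use anyhow::{bail, Error};
    use proxmox_backup::backup::{CryptConfig, CryptMode, DataBlob};

    fn print_blob(file: &mut std::fs::File, crypt_config: &CryptConfig) -> Result<(), Error> {
        let blob = DataBlob::load(file)?;
        match blob.crypt_mode()? {
            // plain or compressed blobs decode without any key material
            CryptMode::None => println!("{} bytes", blob.decode(None)?.len()),
            // encrypted blobs need the CryptConfig
            CryptMode::Encrypt => println!("{} bytes (decrypted)", blob.decode(Some(crypt_config))?.len()),
            // blobs no longer carry a signed-only on-disk format after this change
            CryptMode::SignOnly => bail!("unexpected signed-only blob"),
        }
        Ok(())
    }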
@ -1,4 +1,4 @@
use anyhow::{bail, Error};
use anyhow::{bail, format_err, Error};
use std::sync::Arc;
use std::io::{Read, BufReader};
use proxmox::tools::io::ReadExt;

@ -8,8 +8,6 @@ use super::*;
enum BlobReaderState<R: Read> {
    Uncompressed { expected_crc: u32, csum_reader: ChecksumReader<R> },
    Compressed { expected_crc: u32, decompr: zstd::stream::read::Decoder<BufReader<ChecksumReader<R>>> },
    Signed { expected_crc: u32, expected_hmac: [u8; 32], csum_reader: ChecksumReader<R> },
    SignedCompressed { expected_crc: u32, expected_hmac: [u8; 32], decompr: zstd::stream::read::Decoder<BufReader<ChecksumReader<R>>> },
    Encrypted { expected_crc: u32, decrypt_reader: CryptReader<BufReader<ChecksumReader<R>>> },
    EncryptedCompressed { expected_crc: u32, decompr: zstd::stream::read::Decoder<BufReader<CryptReader<BufReader<ChecksumReader<R>>>>> },
}

@ -41,40 +39,26 @@ impl <R: Read> DataBlobReader<R> {
                let decompr = zstd::stream::read::Decoder::new(csum_reader)?;
                Ok(Self { state: BlobReaderState::Compressed { expected_crc, decompr }})
            }
            AUTHENTICATED_BLOB_MAGIC_1_0 => {
                let expected_crc = u32::from_le_bytes(head.crc);
                let mut expected_hmac = [0u8; 32];
                reader.read_exact(&mut expected_hmac)?;
                let csum_reader = ChecksumReader::new(reader, config);
                Ok(Self { state: BlobReaderState::Signed { expected_crc, expected_hmac, csum_reader }})
            }
            AUTH_COMPR_BLOB_MAGIC_1_0 => {
                let expected_crc = u32::from_le_bytes(head.crc);
                let mut expected_hmac = [0u8; 32];
                reader.read_exact(&mut expected_hmac)?;
                let csum_reader = ChecksumReader::new(reader, config);

                let decompr = zstd::stream::read::Decoder::new(csum_reader)?;
                Ok(Self { state: BlobReaderState::SignedCompressed { expected_crc, expected_hmac, decompr }})
            }
            ENCRYPTED_BLOB_MAGIC_1_0 => {
                let config = config.ok_or_else(|| format_err!("unable to read encrypted blob without key"))?;
                let expected_crc = u32::from_le_bytes(head.crc);
                let mut iv = [0u8; 16];
                let mut expected_tag = [0u8; 16];
                reader.read_exact(&mut iv)?;
                reader.read_exact(&mut expected_tag)?;
                let csum_reader = ChecksumReader::new(reader, None);
                let decrypt_reader = CryptReader::new(BufReader::with_capacity(64*1024, csum_reader), iv, expected_tag, config.unwrap())?;
                let decrypt_reader = CryptReader::new(BufReader::with_capacity(64*1024, csum_reader), iv, expected_tag, config)?;
                Ok(Self { state: BlobReaderState::Encrypted { expected_crc, decrypt_reader }})
            }
            ENCR_COMPR_BLOB_MAGIC_1_0 => {
                let config = config.ok_or_else(|| format_err!("unable to read encrypted blob without key"))?;
                let expected_crc = u32::from_le_bytes(head.crc);
                let mut iv = [0u8; 16];
                let mut expected_tag = [0u8; 16];
                reader.read_exact(&mut iv)?;
                reader.read_exact(&mut expected_tag)?;
                let csum_reader = ChecksumReader::new(reader, None);
                let decrypt_reader = CryptReader::new(BufReader::with_capacity(64*1024, csum_reader), iv, expected_tag, config.unwrap())?;
                let decrypt_reader = CryptReader::new(BufReader::with_capacity(64*1024, csum_reader), iv, expected_tag, config)?;
                let decompr = zstd::stream::read::Decoder::new(decrypt_reader)?;
                Ok(Self { state: BlobReaderState::EncryptedCompressed { expected_crc, decompr }})
            }

@ -99,31 +83,6 @@ impl <R: Read> DataBlobReader<R> {
                }
                Ok(reader)
            }
            BlobReaderState::Signed { csum_reader, expected_crc, expected_hmac } => {
                let (reader, crc, hmac) = csum_reader.finish()?;
                if crc != expected_crc {
                    bail!("blob crc check failed");
                }
                if let Some(hmac) = hmac {
                    if hmac != expected_hmac {
                        bail!("blob signature check failed");
                    }
                }
                Ok(reader)
            }
            BlobReaderState::SignedCompressed { expected_crc, expected_hmac, decompr } => {
                let csum_reader = decompr.finish().into_inner();
                let (reader, crc, hmac) = csum_reader.finish()?;
                if crc != expected_crc {
                    bail!("blob crc check failed");
                }
                if let Some(hmac) = hmac {
                    if hmac != expected_hmac {
                        bail!("blob signature check failed");
                    }
                }
                Ok(reader)
            }
            BlobReaderState::Encrypted { expected_crc, decrypt_reader } => {
                let csum_reader = decrypt_reader.finish()?.into_inner();
                let (reader, crc, _) = csum_reader.finish()?;

@ -155,12 +114,6 @@ impl <R: Read> Read for DataBlobReader<R> {
            BlobReaderState::Compressed { decompr, .. } => {
                decompr.read(buf)
            }
            BlobReaderState::Signed { csum_reader, .. } => {
                csum_reader.read(buf)
            }
            BlobReaderState::SignedCompressed { decompr, .. } => {
                decompr.read(buf)
            }
            BlobReaderState::Encrypted { decrypt_reader, .. } => {
                decrypt_reader.read(buf)
            }
@ -8,8 +8,6 @@ use super::*;
enum BlobWriterState<W: Write> {
    Uncompressed { csum_writer: ChecksumWriter<W> },
    Compressed { compr: zstd::stream::write::Encoder<ChecksumWriter<W>> },
    Signed { csum_writer: ChecksumWriter<W> },
    SignedCompressed { compr: zstd::stream::write::Encoder<ChecksumWriter<W>> },
    Encrypted { crypt_writer: CryptWriter<ChecksumWriter<W>> },
    EncryptedCompressed { compr: zstd::stream::write::Encoder<CryptWriter<ChecksumWriter<W>>> },
}

@ -42,33 +40,6 @@ impl <W: Write + Seek> DataBlobWriter<W> {
        Ok(Self { state: BlobWriterState::Compressed { compr }})
    }

    pub fn new_signed(mut writer: W, config: Arc<CryptConfig>) -> Result<Self, Error> {
        writer.seek(SeekFrom::Start(0))?;
        let head = AuthenticatedDataBlobHeader {
            head: DataBlobHeader { magic: AUTHENTICATED_BLOB_MAGIC_1_0, crc: [0; 4] },
            tag: [0u8; 32],
        };
        unsafe {
            writer.write_le_value(head)?;
        }
        let csum_writer = ChecksumWriter::new(writer, Some(config));
        Ok(Self { state: BlobWriterState::Signed { csum_writer }})
    }

    pub fn new_signed_compressed(mut writer: W, config: Arc<CryptConfig>) -> Result<Self, Error> {
        writer.seek(SeekFrom::Start(0))?;
        let head = AuthenticatedDataBlobHeader {
            head: DataBlobHeader { magic: AUTH_COMPR_BLOB_MAGIC_1_0, crc: [0; 4] },
            tag: [0u8; 32],
        };
        unsafe {
            writer.write_le_value(head)?;
        }
        let csum_writer = ChecksumWriter::new(writer, Some(config));
        let compr = zstd::stream::write::Encoder::new(csum_writer, 1)?;
        Ok(Self { state: BlobWriterState::SignedCompressed { compr }})
    }

    pub fn new_encrypted(mut writer: W, config: Arc<CryptConfig>) -> Result<Self, Error> {
        writer.seek(SeekFrom::Start(0))?;
        let head = EncryptedDataBlobHeader {

@ -129,37 +100,6 @@ impl <W: Write + Seek> DataBlobWriter<W> {

                Ok(writer)
            }
            BlobWriterState::Signed { csum_writer } => {
                let (mut writer, crc, tag) = csum_writer.finish()?;

                let head = AuthenticatedDataBlobHeader {
                    head: DataBlobHeader { magic: AUTHENTICATED_BLOB_MAGIC_1_0, crc: crc.to_le_bytes() },
                    tag: tag.unwrap(),
                };

                writer.seek(SeekFrom::Start(0))?;
                unsafe {
                    writer.write_le_value(head)?;
                }

                Ok(writer)
            }
            BlobWriterState::SignedCompressed { compr } => {
                let csum_writer = compr.finish()?;
                let (mut writer, crc, tag) = csum_writer.finish()?;

                let head = AuthenticatedDataBlobHeader {
                    head: DataBlobHeader { magic: AUTH_COMPR_BLOB_MAGIC_1_0, crc: crc.to_le_bytes() },
                    tag: tag.unwrap(),
                };

                writer.seek(SeekFrom::Start(0))?;
                unsafe {
                    writer.write_le_value(head)?;
                }

                Ok(writer)
            }
            BlobWriterState::Encrypted { crypt_writer } => {
                let (csum_writer, iv, tag) = crypt_writer.finish()?;
                let (mut writer, crc, _) = csum_writer.finish()?;

@ -203,12 +143,6 @@ impl <W: Write + Seek> Write for DataBlobWriter<W> {
            BlobWriterState::Compressed { ref mut compr } => {
                compr.write(buf)
            }
            BlobWriterState::Signed { ref mut csum_writer } => {
                csum_writer.write(buf)
            }
            BlobWriterState::SignedCompressed { ref mut compr } => {
                compr.write(buf)
            }
            BlobWriterState::Encrypted { ref mut crypt_writer } => {
                crypt_writer.write(buf)
            }

@ -226,13 +160,7 @@ impl <W: Write + Seek> Write for DataBlobWriter<W> {
            BlobWriterState::Compressed { ref mut compr } => {
                compr.flush()
            }
            BlobWriterState::Signed { ref mut csum_writer } => {
                csum_writer.flush()
            }
            BlobWriterState::SignedCompressed { ref mut compr } => {
                compr.flush()
            }
            BlobWriterState::Encrypted { ref mut crypt_writer } => {
                crypt_writer.flush()
            }
            BlobWriterState::EncryptedCompressed { ref mut compr } => {
@ -15,6 +15,7 @@ use super::fixed_index::{FixedIndexReader, FixedIndexWriter};
use super::manifest::{MANIFEST_BLOB_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
use super::index::*;
use super::{DataBlob, ArchiveType, archive_type};
use crate::backup::CryptMode;
use crate::config::datastore;
use crate::server::WorkerTask;
use crate::tools;

@ -494,9 +495,13 @@ impl DataStore {
        Ok((blob, raw_size))
    }

    pub fn load_manifest(&self, backup_dir: &BackupDir) -> Result<(BackupManifest, u64), Error> {
    pub fn load_manifest(
        &self,
        backup_dir: &BackupDir,
    ) -> Result<(BackupManifest, CryptMode, u64), Error> {
        let (blob, raw_size) = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
        let crypt_mode = blob.crypt_mode()?;
        let manifest = BackupManifest::try_from(blob)?;
        Ok((manifest, raw_size))
        Ok((manifest, crypt_mode, raw_size))
    }
}
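Callers of the widened ``load_manifest`` now destructure three values. A sketch of an adjusted call site (the function is hypothetical; ``manifest.files()`` and the field names follow the hunks above)::

    use anyhow::Error;
    use proxmox_backup::backup::{BackupDir, CryptMode, DataStore};

    fn list_snapshot_files(datastore: &DataStore, backup_dir: &BackupDir) -> Result<(), Error> {
        let (manifest, crypt_mode, raw_size) = datastore.load_manifest(backup_dir)?;
        // crypt_mode describes the manifest blob itself, not the archives it lists
        if crypt_mode == CryptMode::Encrypt {
            println!("manifest stored encrypted ({} bytes)", raw_size);
        }
        for item in manifest.files() {
            println!("{}", item.filename);
        }
        Ok(())
    }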
@ -17,12 +17,6 @@ pub const ENCRYPTED_BLOB_MAGIC_1_0: [u8; 8] = [123, 103, 133, 190, 34, 45, 76, 2
// openssl::sha::sha256(b"Proxmox Backup zstd compressed encrypted blob v1.0")[0..8]
pub const ENCR_COMPR_BLOB_MAGIC_1_0: [u8; 8] = [230, 89, 27, 191, 11, 191, 216, 11];

//openssl::sha::sha256(b"Proxmox Backup authenticated blob v1.0")[0..8]
pub const AUTHENTICATED_BLOB_MAGIC_1_0: [u8; 8] = [31, 135, 238, 226, 145, 206, 5, 2];

//openssl::sha::sha256(b"Proxmox Backup zstd compressed authenticated blob v1.0")[0..8]
pub const AUTH_COMPR_BLOB_MAGIC_1_0: [u8; 8] = [126, 166, 15, 190, 145, 31, 169, 96];

// openssl::sha::sha256(b"Proxmox Backup fixed sized chunk index v1.0")[0..8]
pub const FIXED_SIZED_CHUNK_INDEX_1_0: [u8; 8] = [47, 127, 65, 237, 145, 253, 15, 205];

@ -50,19 +44,6 @@ pub struct DataBlobHeader {
    pub crc: [u8; 4],
}

/// Authenticated data blob binary storage format
///
/// The ``DataBlobHeader`` for authenticated blobs additionally contains
/// a 16 byte HMAC tag, followed by the data:
///
/// (MAGIC || CRC32 || TAG || Data).
#[derive(Endian)]
#[repr(C,packed)]
pub struct AuthenticatedDataBlobHeader {
    pub head: DataBlobHeader,
    pub tag: [u8; 32],
}

/// Encrypted data blob binary storage format
///
/// The ``DataBlobHeader`` for encrypted blobs additionally contains

@ -87,8 +68,6 @@ pub fn header_size(magic: &[u8; 8]) -> usize {
        &COMPRESSED_BLOB_MAGIC_1_0 => std::mem::size_of::<DataBlobHeader>(),
        &ENCRYPTED_BLOB_MAGIC_1_0 => std::mem::size_of::<EncryptedDataBlobHeader>(),
        &ENCR_COMPR_BLOB_MAGIC_1_0 => std::mem::size_of::<EncryptedDataBlobHeader>(),
        &AUTHENTICATED_BLOB_MAGIC_1_0 => std::mem::size_of::<AuthenticatedDataBlobHeader>(),
        &AUTH_COMPR_BLOB_MAGIC_1_0 => std::mem::size_of::<AuthenticatedDataBlobHeader>(),
        _ => panic!("unknown blob magic"),
    }
}
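The comments above document where the magic constants come from: the first eight bytes of a SHA-256 over a fixed description string. A small sketch that re-derives one of them (using the ``openssl`` crate, as the comments do; the helper name is made up)::

    use openssl::sha::sha256;

    fn blob_magic(description: &[u8]) -> [u8; 8] {
        let digest = sha256(description);
        let mut magic = [0u8; 8];
        magic.copy_from_slice(&digest[0..8]);
        magic
    }

    fn main() {
        // should reproduce ENCR_COMPR_BLOB_MAGIC_1_0 from the hunk above
        let magic = blob_magic(b"Proxmox Backup zstd compressed encrypted blob v1.0");
        assert_eq!(magic, [230, 89, 27, 191, 11, 191, 216, 11]);
    }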
@ -1,4 +1,4 @@
use anyhow::{bail, format_err, Error};
use anyhow::{bail, format_err, Context, Error};

use serde::{Deserialize, Serialize};
use chrono::{Local, TimeZone, DateTime};

@ -146,12 +146,26 @@ pub fn encrypt_key_with_passphrase(
    })
}

pub fn load_and_decrypt_key(path: &std::path::Path, passphrase: &dyn Fn() -> Result<Vec<u8>, Error>) -> Result<([u8;32], DateTime<Local>), Error> {
pub fn load_and_decrypt_key(
    path: &std::path::Path,
    passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
) -> Result<([u8;32], DateTime<Local>), Error> {
    do_load_and_decrypt_key(path, passphrase)
        .with_context(|| format!("failed to load decryption key from {:?}", path))
}

    let raw = file_get_contents(&path)?;
    let data = String::from_utf8(raw)?;
fn do_load_and_decrypt_key(
    path: &std::path::Path,
    passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
) -> Result<([u8;32], DateTime<Local>), Error> {
    decrypt_key(&file_get_contents(&path)?, passphrase)
}

    let key_config: KeyConfig = serde_json::from_str(&data)?;
pub fn decrypt_key(
    mut keydata: &[u8],
    passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
) -> Result<([u8;32], DateTime<Local>), Error> {
    let key_config: KeyConfig = serde_json::from_reader(&mut keydata)?;

    let raw_data = key_config.data;
    let created = key_config.created;
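The split into ``decrypt_key`` (pure, works on bytes) and a thin loading wrapper makes the key logic usable without touching the filesystem. A usage sketch (the key path and passphrase are placeholders)::

    use anyhow::Error;
    use proxmox_backup::backup::decrypt_key;

    fn example() -> Result<(), Error> {
        // load_and_decrypt_key wraps exactly this, plus the
        // "failed to load decryption key from ..." error context
        let keydata = std::fs::read("/root/.config/proxmox-backup/encryption-key.json")?; // placeholder path
        let passphrase = || Ok(b"correct horse battery staple".to_vec());
        let (key, created) = decrypt_key(&keydata, &passphrase)?;
        println!("got {}-byte key, created {}", key.len(), created);
        Ok(())
    }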
@ -3,22 +3,61 @@ use std::convert::TryFrom;
use std::path::Path;

use serde_json::{json, Value};
use ::serde::{Deserialize, Serialize};

use crate::backup::BackupDir;
use crate::backup::{BackupDir, CryptMode, CryptConfig};

pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";

mod hex_csum {
    use serde::{self, Deserialize, Serializer, Deserializer};

    pub fn serialize<S>(
        csum: &[u8; 32],
        serializer: S,
    ) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let s = proxmox::tools::digest_to_hex(csum);
        serializer.serialize_str(&s)
    }

    pub fn deserialize<'de, D>(
        deserializer: D,
    ) -> Result<[u8; 32], D::Error>
    where
        D: Deserializer<'de>,
    {
        let s = String::deserialize(deserializer)?;
        proxmox::tools::hex_to_digest(&s).map_err(serde::de::Error::custom)
    }
}

fn crypt_mode_none() -> CryptMode { CryptMode::None }
fn empty_value() -> Value { json!({}) }

#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
pub struct FileInfo {
    pub filename: String,
    pub encrypted: Option<bool>,
    #[serde(default="crypt_mode_none")] // to be compatible with < 0.8.0 backups
    pub crypt_mode: CryptMode,
    pub size: u64,
    #[serde(with = "hex_csum")]
    pub csum: [u8; 32],
}

#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
pub struct BackupManifest {
    snapshot: BackupDir,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    files: Vec<FileInfo>,
    #[serde(default="empty_value")] // to be compatible with < 0.8.0 backups
    pub unprotected: Value,
}

#[derive(PartialEq)]

@ -46,12 +85,18 @@ pub fn archive_type<P: AsRef<Path>>(
impl BackupManifest {

    pub fn new(snapshot: BackupDir) -> Self {
        Self { files: Vec::new(), snapshot }
        Self {
            backup_type: snapshot.group().backup_type().into(),
            backup_id: snapshot.group().backup_id().into(),
            backup_time: snapshot.backup_time().timestamp(),
            files: Vec::new(),
            unprotected: json!({}),
        }
    }

    pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32], encrypted: Option<bool>) -> Result<(), Error> {
    pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32], crypt_mode: CryptMode) -> Result<(), Error> {
        let _archive_type = archive_type(&filename)?; // check type
        self.files.push(FileInfo { filename, size, csum, encrypted });
        self.files.push(FileInfo { filename, size, csum, crypt_mode });
        Ok(())
    }

@ -84,31 +129,111 @@ impl BackupManifest {
        Ok(())
    }

    pub fn into_json(self) -> Value {
        json!({
            "backup-type": self.snapshot.group().backup_type(),
            "backup-id": self.snapshot.group().backup_id(),
            "backup-time": self.snapshot.backup_time().timestamp(),
            "files": self.files.iter()
                .fold(Vec::new(), |mut acc, info| {
                    let mut value = json!({
                        "filename": info.filename,
                        "encrypted": info.encrypted,
                        "size": info.size,
                        "csum": proxmox::tools::digest_to_hex(&info.csum),
                    });

                    if let Some(encrypted) = info.encrypted {
                        value["encrypted"] = encrypted.into();
                    }

                    acc.push(value);
                    acc
                })
        })
    // Generate canonical json
    fn to_canonical_json(value: &Value) -> Result<Vec<u8>, Error> {
        let mut data = Vec::new();
        Self::write_canonical_json(value, &mut data)?;
        Ok(data)
    }

    fn write_canonical_json(value: &Value, output: &mut Vec<u8>) -> Result<(), Error> {
        match value {
            Value::Null => bail!("got unexpected null value"),
            Value::String(_) | Value::Number(_) | Value::Bool(_) => {
                serde_json::to_writer(output, &value)?;
            }
            Value::Array(list) => {
                output.push(b'[');
                let mut iter = list.iter();
                if let Some(item) = iter.next() {
                    Self::write_canonical_json(item, output)?;
                    for item in iter {
                        output.push(b',');
                        Self::write_canonical_json(item, output)?;
                    }
                }
                output.push(b']');
            }
            Value::Object(map) => {
                output.push(b'{');
                let mut keys: Vec<&str> = map.keys().map(String::as_str).collect();
                keys.sort();
                let mut iter = keys.into_iter();
                if let Some(key) = iter.next() {
                    output.extend(key.as_bytes());
                    output.push(b':');
                    Self::write_canonical_json(&map[key], output)?;
                    for key in iter {
                        output.push(b',');
                        output.extend(key.as_bytes());
                        output.push(b':');
                        Self::write_canonical_json(&map[key], output)?;
                    }
                }
                output.push(b'}');
            }
        }
        Ok(())
    }

    /// Compute manifest signature
    ///
    /// By generating a HMAC SHA256 over the canonical json
    /// representation. The 'unprotected' property is excluded.
    pub fn signature(&self, crypt_config: &CryptConfig) -> Result<[u8; 32], Error> {
        Self::json_signature(&serde_json::to_value(&self)?, crypt_config)
    }

    fn json_signature(data: &Value, crypt_config: &CryptConfig) -> Result<[u8; 32], Error> {

        let mut signed_data = data.clone();

        signed_data.as_object_mut().unwrap().remove("unprotected"); // exclude
        signed_data.as_object_mut().unwrap().remove("signature"); // exclude

        let canonical = Self::to_canonical_json(&signed_data)?;

        let sig = crypt_config.compute_auth_tag(&canonical);

        Ok(sig)
    }

    /// Converts the Manifest into a json string, and adds a signature if there is a crypt_config.
    pub fn to_string(&self, crypt_config: Option<&CryptConfig>) -> Result<String, Error> {

        let mut manifest = serde_json::to_value(&self)?;

        if let Some(crypt_config) = crypt_config {
            let sig = self.signature(crypt_config)?;
            manifest["signature"] = proxmox::tools::digest_to_hex(&sig).into();
        }

        let manifest = serde_json::to_string_pretty(&manifest).unwrap().into();
        Ok(manifest)
    }

    /// Try to read the manifest. This verifies the signature if there is a crypt_config.
    pub fn from_data(data: &[u8], crypt_config: Option<&CryptConfig>) -> Result<BackupManifest, Error> {
        let json: Value = serde_json::from_slice(data)?;
        let signature = json["signature"].as_str().map(String::from);

        if let Some(ref crypt_config) = crypt_config {
            if let Some(signature) = signature {
                let expected_signature = proxmox::tools::digest_to_hex(&Self::json_signature(&json, crypt_config)?);
                if signature != expected_signature {
                    bail!("wrong signature in manifest");
                }
            } else {
                // not signed: warn/fail?
            }
        }

        let manifest: BackupManifest = serde_json::from_value(json)?;
        Ok(manifest)
    }
}


impl TryFrom<super::DataBlob> for BackupManifest {
    type Error = Error;

@ -117,41 +242,50 @@ impl TryFrom<super::DataBlob> for BackupManifest {
            .map_err(|err| format_err!("decode backup manifest blob failed - {}", err))?;
        let json: Value = serde_json::from_slice(&data[..])
            .map_err(|err| format_err!("unable to parse backup manifest json - {}", err))?;
        BackupManifest::try_from(json)
        let manifest: BackupManifest = serde_json::from_value(json)?;
        Ok(manifest)
    }
}

impl TryFrom<Value> for BackupManifest {
    type Error = Error;

    fn try_from(data: Value) -> Result<Self, Error> {

        use crate::tools::{required_string_property, required_integer_property, required_array_property};

        proxmox::try_block!({
            let backup_type = required_string_property(&data, "backup-type")?;
            let backup_id = required_string_property(&data, "backup-id")?;
            let backup_time = required_integer_property(&data, "backup-time")?;

            let snapshot = BackupDir::new(backup_type, backup_id, backup_time);

            let mut manifest = BackupManifest::new(snapshot);

            for item in required_array_property(&data, "files")?.iter() {
                let filename = required_string_property(item, "filename")?.to_owned();
                let csum = required_string_property(item, "csum")?;
                let csum = proxmox::tools::hex_to_digest(csum)?;
                let size = required_integer_property(item, "size")? as u64;
                let encrypted = item["encrypted"].as_bool();
                manifest.add_file(filename, size, csum, encrypted)?;
            }

            if manifest.files().is_empty() {
                bail!("manifest does not list any files.");
            }

            Ok(manifest)
        }).map_err(|err: Error| format_err!("unable to parse backup manifest - {}", err))
    }
}

#[test]
fn test_manifest_signature() -> Result<(), Error> {

    use crate::backup::{KeyDerivationConfig};

    let pw = b"test";

    let kdf = KeyDerivationConfig::Scrypt {
        n: 65536,
        r: 8,
        p: 1,
        salt: Vec::new(),
    };

    let testkey = kdf.derive_key(pw)?;

    let crypt_config = CryptConfig::new(testkey)?;

    let snapshot: BackupDir = "host/elsa/2020-06-26T13:56:05Z".parse()?;

    let mut manifest = BackupManifest::new(snapshot);

    manifest.add_file("test1.img.fidx".into(), 200, [1u8; 32], CryptMode::Encrypt)?;
    manifest.add_file("abc.blob".into(), 200, [2u8; 32], CryptMode::None)?;

    manifest.unprotected["note"] = "This is not protected by the signature.".into();

    let text = manifest.to_string(Some(&crypt_config))?;

    let manifest: Value = serde_json::from_str(&text)?;
    let signature = manifest["signature"].as_str().unwrap().to_string();

    assert_eq!(signature, "d7b446fb7db081662081d4b40fedd858a1d6307a5aff4ecff7d5bf4fd35679e9");

    let manifest: BackupManifest = serde_json::from_value(manifest)?;
    let expected_signature = proxmox::tools::digest_to_hex(&manifest.signature(&crypt_config)?);

    assert_eq!(signature, expected_signature);

    Ok(())
}
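The signature is only stable because the JSON is canonicalized first: object keys sorted, no whitespace, and (as the writer above shows) keys emitted without quotes. A self-contained restatement of ``write_canonical_json`` with a worked example::

    use anyhow::{bail, Error};
    use serde_json::{json, Value};

    // free-function restatement of BackupManifest::write_canonical_json above
    fn write_canonical_json(value: &Value, output: &mut Vec<u8>) -> Result<(), Error> {
        match value {
            Value::Null => bail!("got unexpected null value"),
            Value::String(_) | Value::Number(_) | Value::Bool(_) => {
                serde_json::to_writer(&mut *output, value)?;
            }
            Value::Array(list) => {
                output.push(b'[');
                for (i, item) in list.iter().enumerate() {
                    if i > 0 { output.push(b','); }
                    write_canonical_json(item, output)?;
                }
                output.push(b']');
            }
            Value::Object(map) => {
                output.push(b'{');
                let mut keys: Vec<&str> = map.keys().map(String::as_str).collect();
                keys.sort();
                for (i, key) in keys.iter().enumerate() {
                    if i > 0 { output.push(b','); }
                    output.extend(key.as_bytes()); // note: keys are written unquoted
                    output.push(b':');
                    write_canonical_json(&map[*key], output)?;
                }
                output.push(b'}');
            }
        }
        Ok(())
    }

    fn main() -> Result<(), Error> {
        let value = json!({"b": 1, "a": [true, "x"]});
        let mut out = Vec::new();
        write_canonical_json(&value, &mut out)?;
        // keys sorted, no whitespace: a byte-stable input for the HMAC
        assert_eq!(out, br#"{a:[true,"x"],b:1}"#.to_vec());
        Ok(())
    }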
@ -101,7 +101,7 @@ fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &Fi
pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker: &WorkerTask) -> Result<bool, Error> {

    let manifest = match datastore.load_manifest(&backup_dir) {
        Ok((manifest, _)) => manifest,
        Ok((manifest, _crypt_mode, _)) => manifest,
        Err(err) => {
            worker.log(format!("verify {}:{} - manifest load error: {}", datastore.name(), backup_dir, err));
            return Ok(false);
@ -1,5 +1,7 @@
use std::collections::{HashSet, HashMap};
use std::io::{self, Write, Seek, SeekFrom};
use std::convert::TryFrom;
use std::io::{self, Read, Write, Seek, SeekFrom};
use std::os::unix::io::{FromRawFd, RawFd};
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::{Arc, Mutex};

@ -23,11 +25,12 @@ use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

use proxmox_backup::tools;
use proxmox_backup::api2::types::*;
use proxmox_backup::api2::version;
use proxmox_backup::client::*;
use proxmox_backup::pxar::catalog::*;
use proxmox_backup::backup::{
    archive_type,
    load_and_decrypt_key,
    decrypt_key,
    verify_chunk_size,
    ArchiveType,
    AsyncReadChunk,

@ -35,11 +38,12 @@ use proxmox_backup::backup::{
    BackupGroup,
    BackupManifest,
    BufferedDynamicReader,
    CATALOG_NAME,
    CatalogReader,
    CatalogWriter,
    CATALOG_NAME,
    ChunkStream,
    CryptConfig,
    CryptMode,
    DataBlob,
    DynamicIndexReader,
    FixedChunkStream,

@ -65,9 +69,9 @@ pub const KEYFILE_SCHEMA: Schema = StringSchema::new(
    "Path to encryption key. All data will be encrypted using this key.")
    .schema();

pub const ENCRYPTION_SCHEMA: Schema = BooleanSchema::new(
    "Explicitly enable or disable encryption. \
    (Allows disabling encryption when a default key file is present.)")
pub const KEYFD_SCHEMA: Schema = IntegerSchema::new(
    "Pass an encryption key via an already opened file descriptor.")
    .minimum(0)
    .schema();

const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new(

@ -270,6 +274,8 @@ async fn backup_directory<P: AsRef<Path>>(
    catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
    exclude_pattern: Vec<MatchEntry>,
    entries_max: usize,
    compress: bool,
    encrypt: bool,
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(

@ -296,7 +302,7 @@ async fn backup_directory<P: AsRef<Path>>(
    });

    let stats = client
        .upload_stream(previous_manifest, archive_name, stream, "dynamic", None)
        .upload_stream(previous_manifest, archive_name, stream, "dynamic", None, compress, encrypt)
        .await?;

    Ok(stats)

@ -309,6 +315,8 @@ async fn backup_image<P: AsRef<Path>>(
    archive_name: &str,
    image_size: u64,
    chunk_size: Option<usize>,
    compress: bool,
    encrypt: bool,
    _verbose: bool,
) -> Result<BackupStats, Error> {

@ -322,7 +330,7 @@ async fn backup_image<P: AsRef<Path>>(
    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    let stats = client
        .upload_stream(previous_manifest, archive_name, stream, "fixed", Some(image_size))
        .upload_stream(previous_manifest, archive_name, stream, "fixed", Some(image_size), compress, encrypt)
        .await?;

    Ok(stats)
@ -545,6 +553,56 @@ fn api_logout(param: Value) -> Result<Value, Error> {
    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Show client and optional server version
async fn api_version(param: Value) -> Result<(), Error> {

    let output_format = get_output_format(&param);

    let mut version_info = json!({
        "client": {
            "version": version::PROXMOX_PKG_VERSION,
            "release": version::PROXMOX_PKG_RELEASE,
            "repoid": version::PROXMOX_PKG_REPOID,
        }
    });

    let repo = extract_repository_from_value(&param);
    if let Ok(repo) = repo {
        let client = connect(repo.host(), repo.user())?;

        match client.get("api2/json/version", None).await {
            Ok(mut result) => version_info["server"] = result["data"].take(),
            Err(e) => eprintln!("could not connect to server - {}", e),
        }
    }
    if output_format == "text" {
        println!("client version: {}.{}", version::PROXMOX_PKG_VERSION, version::PROXMOX_PKG_RELEASE);
        if let Some(server) = version_info["server"].as_object() {
            let server_version = server["version"].as_str().unwrap();
            let server_release = server["release"].as_str().unwrap();
            println!("server version: {}.{}", server_version, server_release);
        }
    } else {
        format_and_print_result(&version_info, &output_format);
    }

    Ok(())
}


#[api(
    input: {

@ -632,7 +690,8 @@ async fn start_garbage_collection(param: Value) -> Result<Value, Error> {
}

fn spawn_catalog_upload(
    client: Arc<BackupWriter>
    client: Arc<BackupWriter>,
    encrypt: bool,
) -> Result<
    (
        Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,

@ -650,7 +709,7 @@ fn spawn_catalog_upload(

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(None, CATALOG_NAME, catalog_chunk_stream, "dynamic", None)
            .upload_stream(None, CATALOG_NAME, catalog_chunk_stream, "dynamic", None, true, encrypt)
            .await;

        if let Err(ref err) = catalog_upload_result {
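The interesting detail in ``api_version`` is the merge: ``result["data"].take()`` moves the server's reply into the client-side report without cloning. A minimal sketch of the same pattern (values are illustrative)::

    use serde_json::json;

    fn main() {
        let mut version_info = json!({
            "client": { "version": "0.8", "release": "6" }
        });
        // pretend this came back from api2/json/version
        let mut result = json!({ "data": { "version": "0.8", "release": "6" } });

        // take() leaves Value::Null behind and hands us the object by value
        version_info["server"] = result["data"].take();

        assert_eq!(version_info["server"]["version"], "0.8");
        assert!(result["data"].is_null());
    }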
@ -664,34 +723,71 @@ fn spawn_catalog_upload(
    Ok((catalog, catalog_result_rx))
}

fn keyfile_parameters(param: &Value) -> Result<Option<PathBuf>, Error> {
    Ok(match (param.get("keyfile"), param.get("encryption")) {
fn keyfile_parameters(param: &Value) -> Result<(Option<Vec<u8>>, CryptMode), Error> {
    let keyfile = match param.get("keyfile") {
        Some(Value::String(keyfile)) => Some(keyfile),
        Some(_) => bail!("bad --keyfile parameter type"),
        None => None,
    };

    let key_fd = match param.get("keyfd") {
        Some(Value::Number(key_fd)) => Some(
            RawFd::try_from(key_fd
                .as_i64()
                .ok_or_else(|| format_err!("bad key fd: {:?}", key_fd))?
            )
            .map_err(|err| format_err!("bad key fd: {:?}: {}", key_fd, err))?
        ),
        Some(_) => bail!("bad --keyfd parameter type"),
        None => None,
    };

    let crypt_mode: Option<CryptMode> = match param.get("crypt-mode") {
        Some(mode) => Some(serde_json::from_value(mode.clone())?),
        None => None,
    };

    let keydata = match (keyfile, key_fd) {
        (None, None) => None,
        (Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
        (Some(keyfile), None) => Some(file_get_contents(keyfile)?),
        (None, Some(fd)) => {
            let input = unsafe { std::fs::File::from_raw_fd(fd) };
            let mut data = Vec::new();
            let _len: usize = { input }.read_to_end(&mut data)
                .map_err(|err| {
                    format_err!("error reading encryption key from fd {}: {}", fd, err)
                })?;
            Some(data)
        }
    };

    Ok(match (keydata, crypt_mode) {
        // no parameters:
        (None, None) => key::optional_default_key_path()?,
        (None, None) => match key::read_optional_default_encryption_key()? {
            Some(key) => (Some(key), CryptMode::Encrypt),
            None => (None, CryptMode::None),
        },

        // just --encryption=false
        (None, Some(Value::Bool(false))) => None,
        // just --crypt-mode=none
        (None, Some(CryptMode::None)) => (None, CryptMode::None),

        // just --encryption=true
        (None, Some(Value::Bool(true))) => match key::optional_default_key_path()? {
            None => bail!("--encryption=false without --keyfile and no default key file available"),
            Some(path) => Some(path),
        // just --crypt-mode other than none
        (None, Some(crypt_mode)) => match key::read_optional_default_encryption_key()? {
            None => bail!("--crypt-mode without --keyfile and no default key file available"),
            Some(key) => (Some(key), crypt_mode),
        }

        // just --keyfile
        (Some(Value::String(keyfile)), None) => Some(PathBuf::from(keyfile)),
        (Some(key), None) => (Some(key), CryptMode::Encrypt),

        // --keyfile and --encryption=false
        (Some(Value::String(_)), Some(Value::Bool(false))) => {
            bail!("--keyfile and --encryption=false are mutually exclusive");
        // --keyfile and --crypt-mode=none
        (Some(_), Some(CryptMode::None)) => {
            bail!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive");
        }

        // --keyfile and --encryption=true
        (Some(Value::String(keyfile)), Some(Value::Bool(true))) => Some(PathBuf::from(keyfile)),

        // wrong value types:
        (Some(_), _) => bail!("bad --keyfile parameter"),
        (_, Some(_)) => bail!("bad --encryption parameter"),
        // --keyfile and --crypt-mode other than none
        (Some(key), Some(crypt_mode)) => (Some(key), crypt_mode),
    })
}
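The ``--keyfd`` branch above takes ownership of an inherited file descriptor and reads the key material from it. Isolated as a helper, the same idea looks like this (a sketch; the function name is made up)::

    use std::fs::File;
    use std::io::Read;
    use std::os::unix::io::{FromRawFd, RawFd};

    fn read_key_from_fd(fd: RawFd) -> std::io::Result<Vec<u8>> {
        // Safety: the caller must pass an fd it owns and no longer uses;
        // the File takes ownership and closes it on drop.
        let mut input = unsafe { File::from_raw_fd(fd) };
        let mut data = Vec::new();
        input.read_to_end(&mut data)?;
        Ok(data)
    }

Shell-side this pairs with a redirection such as ``3<keyfile``, so the key never appears on the command line or in the process list.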
@ -721,8 +817,12 @@ fn keyfile_parameters(param: &Value) -> Result<Option<PathBuf>, Error> {
            schema: KEYFILE_SCHEMA,
            optional: true,
        },
        encryption: {
            schema: ENCRYPTION_SCHEMA,
        "keyfd": {
            schema: KEYFD_SCHEMA,
            optional: true,
        },
        "crypt-mode": {
            type: CryptMode,
            optional: true,
        },
        "skip-lost-and-found": {

@ -794,7 +894,7 @@ async fn create_backup(
        verify_chunk_size(size)?;
    }

    let keyfile = keyfile_parameters(&param)?;
    let (keydata, crypt_mode) = keyfile_parameters(&param)?;

    let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());

@ -893,27 +993,25 @@ async fn create_backup(

    println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    let (crypt_config, rsa_encrypted_key) = match keyfile {
    let (crypt_config, rsa_encrypted_key) = match keydata {
        None => (None, None),
        Some(path) => {
            let (key, created) = load_and_decrypt_key(&path, &key::get_encryption_key_password)?;
        Some(key) => {
            let (key, created) = decrypt_key(&key, &key::get_encryption_key_password)?;

            let crypt_config = CryptConfig::new(key)?;

            let path = master_pubkey_path()?;
            if path.exists() {
                let pem_data = file_get_contents(&path)?;
                let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                (Some(Arc::new(crypt_config)), Some(enc_key))
            } else {
                (Some(Arc::new(crypt_config)), None)
            match key::find_master_pubkey()? {
                Some(ref path) if path.exists() => {
                    let pem_data = file_get_contents(path)?;
                    let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                    let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                    (Some(Arc::new(crypt_config)), Some(enc_key))
                }
                _ => (Some(Arc::new(crypt_config)), None),
            }
        }
    };

    let is_encrypted = Some(crypt_config.is_some());

    let client = BackupWriter::start(
        client,
        crypt_config.clone(),

@ -941,21 +1039,21 @@ async fn create_backup(
            BackupSpecificationType::CONFIG => {
                println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, true, Some(true))
                    .upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
            }
            BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
                println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, true, Some(true))
                    .upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
            }
            BackupSpecificationType::PXAR => {
                // start catalog upload on first use
                if catalog.is_none() {
                    let (cat, res) = spawn_catalog_upload(client.clone())?;
                    let (cat, res) = spawn_catalog_upload(client.clone(), crypt_mode == CryptMode::Encrypt)?;
                    catalog = Some(cat);
                    catalog_result_tx = Some(res);
                }

@ -975,8 +1073,10 @@ async fn create_backup(
                    catalog.clone(),
                    pattern_list.clone(),
                    entries_max as usize,
                    true,
                    crypt_mode == CryptMode::Encrypt,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
                catalog.lock().unwrap().end_directory()?;
            }
            BackupSpecificationType::IMAGE => {

@ -988,9 +1088,11 @@ async fn create_backup(
                    &target,
                    size,
                    chunk_size_opt,
                    true,
                    crypt_mode == CryptMode::Encrypt,
                    verbose,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
            }
        }
    }

@ -1007,7 +1109,7 @@ async fn create_backup(

    if let Some(catalog_result_rx) = catalog_result_tx {
        let stats = catalog_result_rx.await??;
        manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, is_encrypted)?;
        manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypt_mode)?;
    }
}

@ -1015,9 +1117,9 @@ async fn create_backup(
    let target = "rsa-encrypted.key";
    println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
    let stats = client
        .upload_blob_from_data(rsa_encrypted_key, target, false, None)
        .upload_blob_from_data(rsa_encrypted_key, target, false, false)
        .await?;
    manifest.add_file(format!("{}.blob", target), stats.size, stats.csum, is_encrypted)?;
    manifest.add_file(format!("{}.blob", target), stats.size, stats.csum, crypt_mode)?;

    // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
    /*

@ -1030,12 +1132,14 @@ async fn create_backup(
    }

    // create manifest (index.json)
    let manifest = manifest.into_json();
    // manifests are never encrypted, but include a signature
    let manifest = manifest.to_string(crypt_config.as_ref().map(Arc::as_ref))
        .map_err(|err| format_err!("unable to format manifest - {}", err))?;

    println!("Upload index.json to '{:?}'", repo);
    let manifest = serde_json::to_string_pretty(&manifest)?.into();
    client
        .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, true, Some(true))
        .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, true, false)
        .await?;

    client.finish().await?;
@ -1159,8 +1263,12 @@ We do not extract '.pxar' archives when writing to standard output.
        schema: KEYFILE_SCHEMA,
        optional: true,
    },
    encryption: {
        schema: ENCRYPTION_SCHEMA,
    "keyfd": {
        schema: KEYFD_SCHEMA,
        optional: true,
    },
    "crypt-mode": {
        type: CryptMode,
        optional: true,
    },
}

@ -1193,12 +1301,12 @@ async fn restore(param: Value) -> Result<Value, Error> {
    let target = tools::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };

    let keyfile = keyfile_parameters(&param)?;
    let (keydata, _crypt_mode) = keyfile_parameters(&param)?;

    let crypt_config = match keyfile {
    let crypt_config = match keydata {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &key::get_encryption_key_password)?;
        Some(key) => {
            let (key, _) = decrypt_key(&key, &key::get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

@ -1213,18 +1321,17 @@ async fn restore(param: Value) -> Result<Value, Error> {
        true,
    ).await?;

    let manifest = client.download_manifest().await?;
    let (manifest, backup_index_data) = client.download_manifest().await?;

    let (archive_name, archive_type) = parse_archive_type(archive_name);

    if archive_name == MANIFEST_BLOB_NAME {
        let backup_index_data = manifest.into_json().to_string();
        if let Some(target) = target {
            replace_file(target, backup_index_data.as_bytes(), CreateOptions::new())?;
            replace_file(target, &backup_index_data, CreateOptions::new())?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(backup_index_data.as_bytes())
            writer.write_all(&backup_index_data)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
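With ``download_manifest`` now returning the raw blob data alongside the parsed manifest, a client that holds the key can re-verify the signature before trusting the file list. A fragment-style sketch of the call-site shape inside ``restore`` (``crypt_config`` as set up a few lines earlier)::

    // sketch: after the reader client is set up in restore()
    let (manifest, backup_index_data) = client.download_manifest().await?;

    // re-check the embedded signature against our own key; from_data bails
    // with "wrong signature in manifest" on a mismatch
    BackupManifest::from_data(&backup_index_data, crypt_config.as_ref().map(Arc::as_ref))?;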
@ -1323,8 +1430,12 @@ async fn restore(param: Value) -> Result<Value, Error> {
        schema: KEYFILE_SCHEMA,
        optional: true,
    },
    encryption: {
        schema: ENCRYPTION_SCHEMA,
    "keyfd": {
        schema: KEYFD_SCHEMA,
        optional: true,
    },
    "crypt-mode": {
        type: CryptMode,
        optional: true,
    },
}

@ -1341,12 +1452,12 @@ async fn upload_log(param: Value) -> Result<Value, Error> {

    let mut client = connect(repo.host(), repo.user())?;

    let keyfile = keyfile_parameters(&param)?;
    let (keydata, crypt_mode) = keyfile_parameters(&param)?;

    let crypt_config = match keyfile {
    let crypt_config = match keydata {
        None => None,
        Some(path) => {
            let (key, _created) = load_and_decrypt_key(&path, &key::get_encryption_key_password)?;
        Some(key) => {
            let (key, _created) = decrypt_key(&key, &key::get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }

@ -1354,7 +1465,11 @@ async fn upload_log(param: Value) -> Result<Value, Error> {

    let data = file_get_contents(logfile)?;

    let blob = DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?;
    // fixme: how to sign the log?
    let blob = match crypt_mode {
        CryptMode::None | CryptMode::SignOnly => DataBlob::encode(&data, None, true)?,
        CryptMode::Encrypt => DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?,
    };

    let raw_data = blob.into_inner();
@ -1711,15 +1826,6 @@ fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<Stri
|
||||
result
|
||||
}
|
||||
|
||||
fn master_pubkey_path() -> Result<PathBuf, Error> {
|
||||
let base = BaseDirectories::with_prefix("proxmox-backup")?;
|
||||
|
||||
// usually $HOME/.config/proxmox-backup/master-public.pem
|
||||
let path = base.place_config_file("master-public.pem")?;
|
||||
|
||||
Ok(path)
|
||||
}
|
||||
|
||||
use proxmox_backup::client::RemoteChunkReader;
|
||||
/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
|
||||
/// async use!
|
||||
@ -1746,7 +1852,6 @@ impl ReadAt for BufferedDynamicReadAt {
|
||||
buf: &'a mut [u8],
|
||||
offset: u64,
|
||||
) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
|
||||
use std::io::Read;
|
||||
MaybeReady::Ready(tokio::task::block_in_place(move || {
|
||||
let mut reader = self.inner.lock().unwrap();
|
||||
reader.seek(SeekFrom::Start(offset))?;
|
||||
@ -1824,6 +1929,9 @@ fn main() {
|
||||
let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
|
||||
.completion_cb("repository", complete_repository);
|
||||
|
||||
let version_cmd_def = CliCommand::new(&API_METHOD_API_VERSION)
|
||||
.completion_cb("repository", complete_repository);
|
||||
|
||||
let cmd_def = CliCommandMap::new()
|
||||
.insert("backup", backup_cmd_def)
|
||||
.insert("upload-log", upload_log_cmd_def)
|
||||
@ -1841,6 +1949,7 @@ fn main() {
|
||||
.insert("mount", mount_cmd_def())
|
||||
.insert("catalog", catalog_mgmt_cli())
|
||||
.insert("task", task_mgmt_cli())
|
||||
.insert("version", version_cmd_def)
|
||||
.insert("benchmark", benchmark_cmd_def);
|
||||
|
||||
let rpcenv = CliEnvironment::new();
|
||||
|
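
The restore and upload-log hunks above swap path-based key handling (load_and_decrypt_key on a PathBuf) for raw in-memory keydata. A minimal sketch of the resulting flow, assuming a toy decrypt_key stand-in; the real routine runs a KDF plus AES decryption:

// Sketch only: this decrypt_key is a placeholder, not the real implementation.
fn decrypt_key(data: &[u8], passphrase: &[u8]) -> Result<Vec<u8>, String> {
    if passphrase.is_empty() {
        return Err("empty passphrase".into());
    }
    Ok(data.to_vec())
}

fn main() {
    // keyfile_parameters() now yields raw key bytes instead of a path
    let keydata: Option<Vec<u8>> = Some(b"raw key material".to_vec());
    let key = match keydata {
        None => None,
        Some(data) => Some(decrypt_key(&data, b"secret").expect("decrypt failed")),
    };
    assert!(key.is_some());
}
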
@ -127,7 +127,7 @@ async fn garbage_collection_status(param: Value) -> Result<Value, Error> {

let mut result = client.get(&path, None).await?;
let mut data = result["data"].take();
let schema = api2::admin::datastore::API_RETURN_SCHEMA_GARBAGE_COLLECTION_STATUS;
let schema = &api2::admin::datastore::API_RETURN_SCHEMA_GARBAGE_COLLECTION_STATUS;

let options = default_table_format_options();

@ -193,7 +193,7 @@ async fn task_list(param: Value) -> Result<Value, Error> {
let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;

let mut data = result["data"].take();
let schema = api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;
let schema = &api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;

let options = default_table_format_options()
.column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch))
@ -4,14 +4,24 @@ use std::sync::Arc;
use anyhow::{Error};
use serde_json::Value;
use chrono::{TimeZone, Utc};
use serde::Serialize;

use proxmox::api::{ApiMethod, RpcEnvironment};
use proxmox::api::api;
use proxmox::api::{
api,
cli::{
OUTPUT_FORMAT,
ColumnConfig,
get_output_format,
format_and_print_result_full,
default_table_format_options,
},
};

use proxmox_backup::backup::{
load_and_decrypt_key,
CryptConfig,

load_and_decrypt_key,
CryptConfig,
KeyDerivationConfig,
};

use proxmox_backup::client::*;
@ -23,6 +33,75 @@ use crate::{
connect,
};

#[api()]
#[derive(Copy, Clone, Serialize)]
/// Speed test result
struct Speed {
/// The measured speed in Bytes/second
#[serde(skip_serializing_if="Option::is_none")]
speed: Option<f64>,
/// Top result we want to compare with
top: f64,
}

#[api(
properties: {
"tls": {
type: Speed,
},
"sha256": {
type: Speed,
},
"compress": {
type: Speed,
},
"decompress": {
type: Speed,
},
"aes256_gcm": {
type: Speed,
},
},
)]
#[derive(Copy, Clone, Serialize)]
/// Benchmark Results
struct BenchmarkResult {
/// TLS upload speed
tls: Speed,
/// SHA256 checksum computation speed
sha256: Speed,
/// ZStd level 1 compression speed
compress: Speed,
/// ZStd level 1 decompression speed
decompress: Speed,
/// AES256 GCM encryption speed
aes256_gcm: Speed,
}


static BENCHMARK_RESULT_2020_TOP: BenchmarkResult = BenchmarkResult {
tls: Speed {
speed: None,
top: 1_000_000.0 * 590.0, // TLS to localhost, AMD Ryzen 7 2700X
},
sha256: Speed {
speed: None,
top: 1_000_000.0 * 2120.0, // AMD Ryzen 7 2700X
},
compress: Speed {
speed: None,
top: 1_000_000.0 * 2158.0, // AMD Ryzen 7 2700X
},
decompress: Speed {
speed: None,
top: 1_000_000.0 * 8062.0, // AMD Ryzen 7 2700X
},
aes256_gcm: Speed {
speed: None,
top: 1_000_000.0 * 3803.0, // AMD Ryzen 7 2700X
},
};

#[api(
input: {
properties: {
@ -30,10 +109,19 @@ use crate::{
schema: REPO_URL_SCHEMA,
optional: true,
},
verbose: {
description: "Verbose output.",
type: bool,
optional: true,
},
keyfile: {
schema: KEYFILE_SCHEMA,
optional: true,
},
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
@ -44,10 +132,14 @@ pub async fn benchmark(
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

let repo = extract_repository_from_value(&param)?;
let repo = extract_repository_from_value(&param).ok();

let keyfile = param["keyfile"].as_str().map(PathBuf::from);

let verbose = param["verbose"].as_bool().unwrap_or(false);

let output_format = get_output_format(&param);

let crypt_config = match keyfile {
None => None,
Some(path) => {
@ -57,25 +149,178 @@ pub async fn benchmark(
}
};

let mut benchmark_result = BENCHMARK_RESULT_2020_TOP;

// do repo tests first, because this may prompt for a password
if let Some(repo) = repo {
test_upload_speed(&mut benchmark_result, repo, crypt_config.clone(), verbose).await?;
}

test_crypt_speed(&mut benchmark_result, verbose)?;

render_result(&output_format, &benchmark_result)?;

Ok(())
}

// print comparison table
fn render_result(
output_format: &str,
benchmark_result: &BenchmarkResult,
) -> Result<(), Error> {

let mut data = serde_json::to_value(benchmark_result)?;
let schema = &BenchmarkResult::API_SCHEMA;

let render_speed = |value: &Value, _record: &Value| -> Result<String, Error> {
match value["speed"].as_f64() {
None => Ok(String::from("not tested")),
Some(speed) => {
let top = value["top"].as_f64().unwrap();
Ok(format!("{:.2} MB/s ({:.0}%)", speed/1_000_000.0, (speed*100.0)/top))
}
}
};

let options = default_table_format_options()
.column(ColumnConfig::new("tls")
.header("TLS (maximal backup upload speed)")
.right_align(false).renderer(render_speed))
.column(ColumnConfig::new("sha256")
.header("SHA256 checksum computation speed")
.right_align(false).renderer(render_speed))
.column(ColumnConfig::new("compress")
.header("ZStd level 1 compression speed")
.right_align(false).renderer(render_speed))
.column(ColumnConfig::new("decompress")
.header("ZStd level 1 decompression speed")
.right_align(false).renderer(render_speed))
.column(ColumnConfig::new("aes256_gcm")
.header("AES256 GCM encryption speed")
.right_align(false).renderer(render_speed));


format_and_print_result_full(&mut data, schema, output_format, &options);

Ok(())
}

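A worked example of the render_speed formula used above, with invented inputs; it prints the measured MB/s together with the percentage of the reference top value:

fn render_speed(speed: f64, top: f64) -> String {
    // same formula as the closure above
    format!("{:.2} MB/s ({:.0}%)", speed / 1_000_000.0, speed * 100.0 / top)
}

fn main() {
    // 1050 MB/s measured against the 2120 MB/s sha256 reference rounds to 50%
    println!("{}", render_speed(1_050_000_000.0, 1_000_000.0 * 2120.0));
}
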
async fn test_upload_speed(
benchmark_result: &mut BenchmarkResult,
repo: BackupRepository,
crypt_config: Option<Arc<CryptConfig>>,
verbose: bool,
) -> Result<(), Error> {

let backup_time = Utc.timestamp(Utc::now().timestamp(), 0);

let client = connect(repo.host(), repo.user())?;
record_repository(&repo);

if verbose { eprintln!("Connecting to backup server"); }
let client = BackupWriter::start(
client,
crypt_config.clone(),
repo.store(),
"host",
"benshmark",
"benchmark",
backup_time,
false,
).await?;

println!("Start upload speed test");
let speed = client.upload_speedtest().await?;
if verbose { eprintln!("Start TLS speed test"); }
let speed = client.upload_speedtest(verbose).await?;

println!("Upload speed: {} MiB/s", speed);
eprintln!("TLS speed: {:.2} MB/s", speed/1_000_000.0);

benchmark_result.tls.speed = Some(speed);

Ok(())
}

// test hash/crypt/compress speed
fn test_crypt_speed(
benchmark_result: &mut BenchmarkResult,
_verbose: bool,
) -> Result<(), Error> {

let pw = b"test";

let kdf = KeyDerivationConfig::Scrypt {
n: 65536,
r: 8,
p: 1,
salt: Vec::new(),
};

let testkey = kdf.derive_key(pw)?;

let crypt_config = CryptConfig::new(testkey)?;

let random_data = proxmox::sys::linux::random_data(1024*1024)?;

let start_time = std::time::Instant::now();

let mut bytes = 0;
loop {
openssl::sha::sha256(&random_data);
bytes += random_data.len();
if start_time.elapsed().as_micros() > 1_000_000 { break; }
}
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
benchmark_result.sha256.speed = Some(speed);

eprintln!("SHA256 speed: {:.2} MB/s", speed/1_000_000.0);


let start_time = std::time::Instant::now();

let mut bytes = 0;
loop {
let mut reader = &random_data[..];
zstd::stream::encode_all(&mut reader, 1)?;
bytes += random_data.len();
if start_time.elapsed().as_micros() > 3_000_000 { break; }
}
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
benchmark_result.compress.speed = Some(speed);

eprintln!("Compression speed: {:.2} MB/s", speed/1_000_000.0);


let start_time = std::time::Instant::now();

let compressed_data = {
let mut reader = &random_data[..];
zstd::stream::encode_all(&mut reader, 1)?
};

let mut bytes = 0;
loop {
let mut reader = &compressed_data[..];
let data = zstd::stream::decode_all(&mut reader)?;
bytes += data.len();
if start_time.elapsed().as_micros() > 1_000_000 { break; }
}
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
benchmark_result.decompress.speed = Some(speed);

eprintln!("Decompress speed: {:.2} MB/s", speed/1_000_000.0);


let start_time = std::time::Instant::now();

let mut bytes = 0;
loop {
let mut out = Vec::new();
crypt_config.encrypt_to(&random_data, &mut out)?;
bytes += random_data.len();
if start_time.elapsed().as_micros() > 1_000_000 { break; }
}
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
benchmark_result.aes256_gcm.speed = Some(speed);

eprintln!("AES256/GCM speed: {:.2} MB/s", speed/1_000_000.0);

Ok(())
}

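Each crypt-speed test above follows the same pattern: run the workload in a loop for about a second, count processed bytes, and divide by elapsed time. A dependency-free sketch of the pattern, with a trivial xor fold standing in for sha256, zstd and AES:

use std::time::Instant;

fn main() {
    let data = vec![0u8; 1024 * 1024]; // 1 MiB of input, like random_data above
    let start = Instant::now();
    let mut bytes = 0usize;
    let mut sink = 0u8;
    loop {
        // stand-in workload; the real code hashes, compresses or encrypts here
        sink ^= data.iter().fold(0u8, |acc, b| acc ^ b);
        bytes += data.len();
        if start.elapsed().as_micros() > 1_000_000 { break; }
    }
    let speed = bytes as f64 / start.elapsed().as_secs_f64();
    println!("throughput: {:.2} MB/s (sink {})", speed / 1_000_000.0, sink);
}
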
@ -1,6 +1,5 @@
use std::os::unix::fs::OpenOptionsExt;
use std::io::{Seek, SeekFrom};
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
@ -14,9 +13,12 @@ use proxmox_backup::client::*;

use crate::{
REPO_URL_SCHEMA,
KEYFD_SCHEMA,
extract_repository_from_value,
record_repository,
load_and_decrypt_key,
keyfile_parameters,
key::get_encryption_key_password,
decrypt_key,
api_datastore_latest_snapshot,
complete_repository,
complete_backup_snapshot,
@ -35,8 +37,6 @@ use crate::{
Shell,
};

use crate::key::get_encryption_key_password;

#[api(
input: {
properties: {
@ -48,6 +48,15 @@ use crate::key::get_encryption_key_password;
type: String,
description: "Snapshot path.",
},
"keyfile": {
optional: true,
type: String,
description: "Path to encryption key.",
},
"keyfd": {
schema: KEYFD_SCHEMA,
optional: true,
},
}
}
)]
@ -59,13 +68,14 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
let path = tools::required_string_param(&param, "snapshot")?;
let snapshot: BackupDir = path.parse()?;

let keyfile = param["keyfile"].as_str().map(PathBuf::from);
let (keydata, _) = keyfile_parameters(&param)?;

let crypt_config = match keyfile {
let crypt_config = match keydata {
None => None,
Some(path) => {
let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
Some(Arc::new(CryptConfig::new(key)?))
Some(key) => {
let (key, _created) = decrypt_key(&key, &get_encryption_key_password)?;
let crypt_config = CryptConfig::new(key)?;
Some(Arc::new(crypt_config))
}
};

@ -81,7 +91,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
true,
).await?;

let manifest = client.download_manifest().await?;
let (manifest, _) = client.download_manifest().await?;

let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;

@ -131,7 +141,11 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
type: String,
description: "Path to encryption key.",
},
},
"keyfd": {
schema: KEYFD_SCHEMA,
optional: true,
},
},
},
)]
/// Shell to interactively inspect and restore snapshots.
@ -149,12 +163,14 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
(snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
};

let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
let crypt_config = match keyfile {
let (keydata, _) = keyfile_parameters(&param)?;

let crypt_config = match keydata {
None => None,
Some(path) => {
let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
Some(Arc::new(CryptConfig::new(key)?))
Some(key) => {
let (key, _created) = decrypt_key(&key, &get_encryption_key_password)?;
let crypt_config = CryptConfig::new(key)?;
Some(Arc::new(crypt_config))
}
};

@ -180,7 +196,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
.custom_flags(libc::O_TMPFILE)
.open("/tmp")?;

let manifest = client.download_manifest().await?;
let (manifest, _) = client.download_manifest().await?;

let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
let most_used = index.find_most_used_chunks(8);

@ -1,9 +1,8 @@
use std::path::PathBuf;

use anyhow::{bail, Error};
use anyhow::{bail, format_err, Error};
use chrono::{Local, TimeZone};
use serde::{Deserialize, Serialize};
use xdg::BaseDirectories;

use proxmox::api::api;
use proxmox::api::cli::{CliCommand, CliCommandMap};
@ -15,22 +14,29 @@ use proxmox_backup::backup::{
};
use proxmox_backup::tools;

pub fn master_pubkey_path() -> Result<PathBuf, Error> {
let base = BaseDirectories::with_prefix("proxmox-backup")?;
pub const DEFAULT_ENCRYPTION_KEY_FILE_NAME: &str = "encryption-key.json";
pub const MASTER_PUBKEY_FILE_NAME: &str = "master-public.pem";

// usually $HOME/.config/proxmox-backup/master-public.pem
let path = base.place_config_file("master-public.pem")?;

Ok(path)
pub fn find_master_pubkey() -> Result<Option<PathBuf>, Error> {
super::find_xdg_file(MASTER_PUBKEY_FILE_NAME, "main public key file")
}

pub fn default_encryption_key_path() -> Result<PathBuf, Error> {
let base = BaseDirectories::with_prefix("proxmox-backup")?;
pub fn place_master_pubkey() -> Result<PathBuf, Error> {
super::place_xdg_file(MASTER_PUBKEY_FILE_NAME, "main public key file")
}

// usually $HOME/.config/proxmox-backup/encryption-key.json
let path = base.place_config_file("encryption-key.json")?;
pub fn find_default_encryption_key() -> Result<Option<PathBuf>, Error> {
super::find_xdg_file(DEFAULT_ENCRYPTION_KEY_FILE_NAME, "default encryption key file")
}

Ok(path)
pub fn place_default_encryption_key() -> Result<PathBuf, Error> {
super::place_xdg_file(DEFAULT_ENCRYPTION_KEY_FILE_NAME, "default encryption key file")
}

pub fn read_optional_default_encryption_key() -> Result<Option<Vec<u8>>, Error> {
find_default_encryption_key()?
.map(file_get_contents)
.transpose()
}

pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
@ -53,16 +59,6 @@ pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
bail!("no password input mechanism available");
}

/// Convenience helper to get the default key file path only if it exists.
pub fn optional_default_key_path() -> Result<Option<PathBuf>, Error> {
let path = default_encryption_key_path()?;
Ok(if path.exists() {
Some(path)
} else {
None
})
}

#[api(
default: "scrypt",
)]
@ -103,7 +99,11 @@ impl Default for Kdf {
fn create(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
let path = match path {
Some(path) => PathBuf::from(path),
None => default_encryption_key_path()?,
None => {
let path = place_default_encryption_key()?;
println!("creating default key at: {:?}", path);
path
}
};

let kdf = kdf.unwrap_or_default();
@ -160,7 +160,14 @@ fn create(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
fn change_passphrase(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
let path = match path {
Some(path) => PathBuf::from(path),
None => default_encryption_key_path()?,
None => {
let path = find_default_encryption_key()?
.ok_or_else(|| {
format_err!("no encryption file provided and no default file found")
})?;
println!("updating default key at: {:?}", path);
path
}
};

let kdf = kdf.unwrap_or_default();
@ -217,7 +224,7 @@ fn import_master_pubkey(path: String) -> Result<(), Error> {
bail!("Unable to decode PEM data - {}", err);
}

let target_path = master_pubkey_path()?;
let target_path = place_master_pubkey()?;

replace_file(&target_path, &pem_data, CreateOptions::new())?;

@ -1,3 +1,5 @@
use anyhow::{Context, Error};

mod benchmark;
pub use benchmark::*;
mod mount;
@ -8,3 +10,30 @@ mod catalog;
pub use catalog::*;

pub mod key;

pub fn base_directories() -> Result<xdg::BaseDirectories, Error> {
xdg::BaseDirectories::with_prefix("proxmox-backup").map_err(Error::from)
}

/// Convenience helper for better error messages:
pub fn find_xdg_file(
file_name: impl AsRef<std::path::Path>,
description: &'static str,
) -> Result<Option<std::path::PathBuf>, Error> {
let file_name = file_name.as_ref();
base_directories()
.map(|base| base.find_config_file(file_name))
.with_context(|| format!("error searching for {}", description))
}

pub fn place_xdg_file(
file_name: impl AsRef<std::path::Path>,
description: &'static str,
) -> Result<std::path::PathBuf, Error> {
let file_name = file_name.as_ref();
base_directories()
.and_then(|base| {
base.place_config_file(file_name).map_err(Error::from)
})
.with_context(|| format!("failed to place {} in xdg home", description))
}

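A std-only sketch of the find/place split introduced above: find reports a missing file as None, while place ensures the parent directory exists so the file can be created. The real helpers use the xdg crate; the XDG_CONFIG_HOME handling below is simplified and assumed:

use std::io;
use std::path::PathBuf;

fn config_dir() -> Option<PathBuf> {
    std::env::var_os("XDG_CONFIG_HOME")
        .map(|p| PathBuf::from(p).join("proxmox-backup"))
}

// "find": absence is not an error, just None
fn find_config_file(name: &str) -> Option<PathBuf> {
    let path = config_dir()?.join(name);
    if path.exists() { Some(path) } else { None }
}

// "place": make sure the directory exists so the caller can create the file
fn place_config_file(name: &str) -> io::Result<PathBuf> {
    let dir = config_dir()
        .ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "XDG_CONFIG_HOME not set"))?;
    std::fs::create_dir_all(&dir)?;
    Ok(dir.join(name))
}

fn main() {
    println!("found: {:?}", find_config_file("encryption-key.json"));
    println!("place: {:?}", place_config_file("encryption-key.json"));
}
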
@ -139,7 +139,7 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
true,
).await?;

let manifest = client.download_manifest().await?;
let (manifest, _) = client.download_manifest().await?;

if server_archive_name.ends_with(".didx") {
let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;

@ -1,32 +1,18 @@
use std::path::PathBuf;

use anyhow::{bail, Error};

use proxmox::api::{api, cli::*};

use proxmox_backup::config;
use proxmox_backup::configdir;
use proxmox_backup::auth_helpers::*;

fn x509name_to_string(name: &openssl::x509::X509NameRef) -> Result<String, Error> {
let mut parts = Vec::new();
for entry in name.entries() {
parts.push(format!("{} = {}", entry.object().nid().short_name()?, entry.data().as_utf8()?));
}
Ok(parts.join(", "))
}
use proxmox_backup::tools::cert::CertInfo;

#[api]
/// Display node certificate information.
fn cert_info() -> Result<(), Error> {

let cert_path = PathBuf::from(configdir!("/proxy.pem"));
let cert = CertInfo::new()?;

let cert_pem = proxmox::tools::fs::file_get_contents(&cert_path)?;

let cert = openssl::x509::X509::from_pem(&cert_pem)?;

println!("Subject: {}", x509name_to_string(cert.subject_name())?);
println!("Subject: {}", cert.subject_name()?);

if let Some(san) = cert.subject_alt_names() {
for name in san.iter() {
@ -42,17 +28,12 @@ fn cert_info() -> Result<(), Error> {
}
}

println!("Issuer: {}", x509name_to_string(cert.issuer_name())?);
println!("Issuer: {}", cert.issuer_name()?);
println!("Validity:");
println!(" Not Before: {}", cert.not_before());
println!(" Not After : {}", cert.not_after());

let fp = cert.digest(openssl::hash::MessageDigest::sha256())?;
let fp_string = proxmox::tools::digest_to_hex(&fp);
let fp_string = fp_string.as_bytes().chunks(2).map(|v| std::str::from_utf8(v).unwrap())
.collect::<Vec<&str>>().join(":");

println!("Fingerprint (sha256): {}", fp_string);
println!("Fingerprint (sha256): {}", cert.fingerprint()?);

let pubkey = cert.public_key()?;
println!("Public key type: {}", openssl::nid::Nid::from_raw(pubkey.id().as_raw()).long_name()?);

@ -123,18 +123,19 @@ impl BackupReader {
}

/// Download backup manifest (index.json)
pub async fn download_manifest(&self) -> Result<BackupManifest, Error> {

use std::convert::TryFrom;
///
/// The manifest signature is verified if we have a crypt_config.
pub async fn download_manifest(&self) -> Result<(BackupManifest, Vec<u8>), Error> {

let mut raw_data = Vec::with_capacity(64 * 1024);
self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
let blob = DataBlob::from_raw(raw_data)?;
blob.verify_crc()?;
let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
let json: Value = serde_json::from_slice(&data[..])?;
let data = blob.decode(None)?;

BackupManifest::try_from(json)
let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;

Ok((manifest, data))
}

/// Download a .blob file

@ -3,7 +3,7 @@ use std::os::unix::fs::OpenOptionsExt;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};

use anyhow::{format_err, Error};
use anyhow::{bail, format_err, Error};
use chrono::{DateTime, Utc};
use futures::*;
use futures::stream::Stream;
@ -163,21 +163,12 @@ impl BackupWriter {
data: Vec<u8>,
file_name: &str,
compress: bool,
crypt_or_sign: Option<bool>,
) -> Result<BackupStats, Error> {

let blob = if let Some(ref crypt_config) = self.crypt_config {
if let Some(encrypt) = crypt_or_sign {
if encrypt {
DataBlob::encode(&data, Some(crypt_config), compress)?
} else {
DataBlob::create_signed(&data, crypt_config, compress)?
}
} else {
DataBlob::encode(&data, None, compress)?
}
} else {
DataBlob::encode(&data, None, compress)?
encrypt: bool,
) -> Result<BackupStats, Error> {
let blob = match (encrypt, &self.crypt_config) {
(false, _) => DataBlob::encode(&data, None, compress)?,
(true, None) => bail!("requested encryption without a crypt config"),
(true, Some(crypt_config)) => DataBlob::encode(&data, Some(crypt_config), compress)?,
};

let raw_data = blob.into_inner();
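
The nested if/else above collapses into a single match on the (encrypt, crypt_config) pair, so that requesting encryption without key material is now a hard error. A self-contained sketch using placeholder types instead of the real DataBlob and CryptConfig:

struct CryptConfig; // placeholder for the real key material

fn encode(data: &[u8], encrypt: bool, crypt_config: Option<&CryptConfig>) -> Result<Vec<u8>, String> {
    match (encrypt, crypt_config) {
        (false, _) => Ok(data.to_vec()), // plain (possibly compressed) encoding
        (true, None) => Err("requested encryption without a crypt config".into()),
        (true, Some(_cfg)) => Ok(data.iter().map(|b| b ^ 0xAA).collect()), // stand-in for AES-GCM
    }
}

fn main() {
    assert!(encode(b"data", true, None).is_err()); // encryption now fails loudly
    assert!(encode(b"data", false, None).is_ok());
}
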
@ -194,8 +185,8 @@ impl BackupWriter {
src_path: P,
file_name: &str,
compress: bool,
crypt_or_sign: Option<bool>,
) -> Result<BackupStats, Error> {
encrypt: bool,
) -> Result<BackupStats, Error> {

let src_path = src_path.as_ref();

@ -209,7 +200,7 @@ impl BackupWriter {
.await
.map_err(|err| format_err!("unable to read file {:?} - {}", src_path, err))?;

self.upload_blob_from_data(contents, file_name, compress, crypt_or_sign).await
self.upload_blob_from_data(contents, file_name, compress, encrypt).await
}

pub async fn upload_stream(
@ -219,6 +210,8 @@ impl BackupWriter {
stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
prefix: &str,
fixed_size: Option<u64>,
compress: bool,
encrypt: bool,
) -> Result<BackupStats, Error> {
let known_chunks = Arc::new(Mutex::new(HashSet::new()));

@ -227,6 +220,10 @@ impl BackupWriter {
param["size"] = size.into();
}

if encrypt && self.crypt_config.is_none() {
bail!("requested encryption without a crypt config");
}

let index_path = format!("{}_index", prefix);
let close_path = format!("{}_close", prefix);

@ -252,7 +249,8 @@ impl BackupWriter {
stream,
&prefix,
known_chunks.clone(),
self.crypt_config.clone(),
if encrypt { self.crypt_config.clone() } else { None },
compress,
self.verbose,
)
.await?;
@ -276,7 +274,7 @@ impl BackupWriter {
})
}

fn response_queue() -> (
fn response_queue(verbose: bool) -> (
mpsc::Sender<h2::client::ResponseFuture>,
oneshot::Receiver<Result<(), Error>>
) {
@ -300,11 +298,11 @@ impl BackupWriter {
tokio::spawn(
verify_queue_rx
.map(Ok::<_, Error>)
.try_for_each(|response: h2::client::ResponseFuture| {
.try_for_each(move |response: h2::client::ResponseFuture| {
response
.map_err(Error::from)
.and_then(H2Client::h2api_response)
.map_ok(|result| println!("RESPONSE: {:?}", result))
.map_ok(move |result| if verbose { println!("RESPONSE: {:?}", result) })
.map_err(|err| format_err!("pipelined request failed: {}", err))
})
.map(|result| {
@ -455,8 +453,6 @@ impl BackupWriter {
/// Download backup manifest (index.json) of last backup
pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {

use std::convert::TryFrom;

let mut raw_data = Vec::with_capacity(64 * 1024);

let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
@ -465,8 +461,8 @@ impl BackupWriter {
let blob = DataBlob::from_raw(raw_data)?;
blob.verify_crc()?;
let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
let json: Value = serde_json::from_slice(&data[..])?;
let manifest = BackupManifest::try_from(json)?;

let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;

Ok(manifest)
}
@ -478,6 +474,7 @@ impl BackupWriter {
prefix: &str,
known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
crypt_config: Option<Arc<CryptConfig>>,
compress: bool,
verbose: bool,
) -> impl Future<Output = Result<(usize, usize, std::time::Duration, usize, [u8; 32]), Error>> {

@ -508,7 +505,7 @@ impl BackupWriter {
let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;

let mut chunk_builder = DataChunkBuilder::new(data.as_ref())
.compress(true);
.compress(compress);

if let Some(ref crypt_config) = crypt_config {
chunk_builder = chunk_builder.crypt_config(crypt_config);
@ -603,7 +600,8 @@ impl BackupWriter {
})
}

pub async fn upload_speedtest(&self) -> Result<usize, Error> {
/// Upload speed test - prints result to stderr
pub async fn upload_speedtest(&self, verbose: bool) -> Result<f64, Error> {

let mut data = vec![];
// generate pseudo random byte sequence
@ -618,7 +616,7 @@ impl BackupWriter {

let mut repeat = 0;

let (upload_queue, upload_result) = Self::response_queue();
let (upload_queue, upload_result) = Self::response_queue(verbose);

let start_time = std::time::Instant::now();

@ -630,7 +628,7 @@ impl BackupWriter {

let mut upload_queue = upload_queue.clone();

println!("send test data ({} bytes)", data.len());
if verbose { eprintln!("send test data ({} bytes)", data.len()); }
let request = H2Client::request_builder("localhost", "POST", "speedtest", None, None).unwrap();
let request_future = self.h2.send_request(request, Some(bytes::Bytes::from(data.clone()))).await?;

@ -641,9 +639,9 @@ impl BackupWriter {

let _ = upload_result.await?;

println!("Uploaded {} chunks in {} seconds.", repeat, start_time.elapsed().as_secs());
let speed = ((item_len*1_000_000*(repeat as usize))/(1024*1024))/(start_time.elapsed().as_micros() as usize);
println!("Time per request: {} microseconds.", (start_time.elapsed().as_micros())/(repeat as u128));
eprintln!("Uploaded {} chunks in {} seconds.", repeat, start_time.elapsed().as_secs());
let speed = ((item_len*(repeat as usize)) as f64)/start_time.elapsed().as_secs_f64();
eprintln!("Time per request: {} microseconds.", (start_time.elapsed().as_micros())/(repeat as u128));

Ok(speed)
}

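The speedtest arithmetic above switches from chained integer divisions, which truncate and quantize the result, to f64. A small example of the difference with made-up numbers (64-bit target assumed):

fn main() {
    let item_len: usize = 1024 * 1024 * 128; // bytes per request, invented
    let repeat: usize = 5;
    let elapsed_micros: u128 = 2_500_000; // 2.5 s

    // old style: chained integer divisions truncate
    let old = ((item_len * 1_000_000 * repeat) / (1024 * 1024)) / (elapsed_micros as usize);
    // new style: full precision, result in bytes per second
    let new = (item_len * repeat) as f64 / (elapsed_micros as f64 / 1_000_000.0);

    println!("old: {} MiB/s (truncated)", old);
    println!("new: {:.2} MB/s", new / 1_000_000.0);
}
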
@ -161,7 +161,7 @@ where

if skip_lost_and_found {
patterns.push(MatchEntry::parse_pattern(
"**/lost+found",
"lost+found",
PatternFlag::PATH_NAME,
MatchType::Exclude,
)?);
@ -452,10 +452,10 @@ impl<'a, 'b> Archiver<'a, 'b> {
use pxar::format::mode;

let file_mode = stat.st_mode & libc::S_IFMT;
let open_mode = if !(file_mode == libc::S_IFREG || file_mode == libc::S_IFDIR) {
OFlag::O_PATH
} else {
let open_mode = if file_mode == libc::S_IFREG || file_mode == libc::S_IFDIR {
OFlag::empty()
} else {
OFlag::O_PATH
};

let fd = self.open_file(

@ -493,12 +493,12 @@ pub async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<R
let (parts, body) = req.into_parts();

let method = parts.method.clone();
let (path, components) = tools::normalize_uri_path(parts.uri.path())?;
let (_path, components) = tools::normalize_uri_path(parts.uri.path())?;

let comp_len = components.len();

println!("REQUEST {} {}", method, path);
println!("COMPO {:?}", components);
//println!("REQUEST {} {}", method, path);
//println!("COMPO {:?}", components);

let env_type = api.env_type();
let mut rpcenv = RestEnvironment::new(env_type);

@ -270,28 +270,22 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<Vec<TaskListInfo>, E
let line = line?;
match parse_worker_status_line(&line) {
Err(err) => bail!("unable to parse active worker status '{}' - {}", line, err),
Ok((upid_str, upid, state)) => {

let running = worker_is_active_local(&upid);

if running {
Ok((upid_str, upid, state)) => match state {
None if worker_is_active_local(&upid) => {
active_list.push(TaskListInfo { upid, upid_str, state: None });
} else {
match state {
None => {
println!("Detected stopped UPID {}", upid_str);
let status = upid_read_status(&upid)
.unwrap_or_else(|_| String::from("unknown"));
finish_list.push(TaskListInfo {
upid, upid_str, state: Some((Local::now().timestamp(), status))
});
}
Some((endtime, status)) => {
finish_list.push(TaskListInfo {
upid, upid_str, state: Some((endtime, status))
})
}
}
},
None => {
println!("Detected stopped UPID {}", upid_str);
let status = upid_read_status(&upid)
.unwrap_or_else(|_| String::from("unknown"));
finish_list.push(TaskListInfo {
upid, upid_str, state: Some((Local::now().timestamp(), status))
});
},
Some((endtime, status)) => {
finish_list.push(TaskListInfo {
upid, upid_str, state: Some((endtime, status))
})
}
}
}

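The refactor above folds the outer if running/else plus inner match into one match on the parsed end state, with a guard for the still-running case. The same shape in a compact sketch with simplified stand-in types:

fn classify(state: Option<(i64, String)>, running: bool) -> &'static str {
    match state {
        None if running => "active",          // no end state yet, worker alive
        None => "stopped, status unknown",    // no end state, worker gone
        Some(_) => "finished",                // parsed (endtime, status)
    }
}

fn main() {
    assert_eq!(classify(None, true), "active");
    assert_eq!(classify(None, false), "stopped, status unknown");
    assert_eq!(classify(Some((1594000000, "OK".into())), false), "finished");
}
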
@ -23,6 +23,7 @@ pub use proxmox::tools::fd::Fd;
pub mod acl;
pub mod async_io;
pub mod borrow;
pub mod cert;
pub mod daemon;
pub mod disks;
pub mod fs;

67
src/tools/cert.rs
Normal file
@ -0,0 +1,67 @@
use std::path::PathBuf;

use anyhow::Error;
use openssl::x509::{X509, GeneralName};
use openssl::stack::Stack;
use openssl::pkey::{Public, PKey};

use crate::configdir;

pub struct CertInfo {
x509: X509,
}

fn x509name_to_string(name: &openssl::x509::X509NameRef) -> Result<String, Error> {
let mut parts = Vec::new();
for entry in name.entries() {
parts.push(format!("{} = {}", entry.object().nid().short_name()?, entry.data().as_utf8()?));
}
Ok(parts.join(", "))
}

impl CertInfo {
pub fn new() -> Result<Self, Error> {
Self::from_path(PathBuf::from(configdir!("/proxy.pem")))
}

pub fn from_path(path: PathBuf) -> Result<Self, Error> {
let cert_pem = proxmox::tools::fs::file_get_contents(&path)?;
let x509 = openssl::x509::X509::from_pem(&cert_pem)?;
Ok(Self{
x509
})
}

pub fn subject_alt_names(&self) -> Option<Stack<GeneralName>> {
self.x509.subject_alt_names()
}

pub fn subject_name(&self) -> Result<String, Error> {
Ok(x509name_to_string(self.x509.subject_name())?)
}

pub fn issuer_name(&self) -> Result<String, Error> {
Ok(x509name_to_string(self.x509.issuer_name())?)
}

pub fn fingerprint(&self) -> Result<String, Error> {
let fp = self.x509.digest(openssl::hash::MessageDigest::sha256())?;
let fp_string = proxmox::tools::digest_to_hex(&fp);
let fp_string = fp_string.as_bytes().chunks(2).map(|v| std::str::from_utf8(v).unwrap())
.collect::<Vec<&str>>().join(":");
Ok(fp_string)
}

pub fn public_key(&self) -> Result<PKey<Public>, Error> {
let pubkey = self.x509.public_key()?;
Ok(pubkey)
}

pub fn not_before(&self) -> &openssl::asn1::Asn1TimeRef {
self.x509.not_before()
}

pub fn not_after(&self) -> &openssl::asn1::Asn1TimeRef {
self.x509.not_after()
}
}
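
CertInfo::fingerprint formats the sha256 digest as colon-separated hex byte pairs. The chunking step in isolation, applied to a made-up digest string:

fn colon_hex(hex: &str) -> String {
    // split the lowercase hex string into two-character chunks and join with ':'
    hex.as_bytes()
        .chunks(2)
        .map(|pair| std::str::from_utf8(pair).unwrap())
        .collect::<Vec<_>>()
        .join(":")
}

fn main() {
    assert_eq!(colon_hex("deadbeef"), "de:ad:be:ef");
    println!("{}", colon_hex("0123456789abcdef")); // 01:23:45:67:89:ab:cd:ef
}
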
@ -743,7 +743,10 @@ pub fn get_disks(

let partition_type_map = get_partition_type_info()?;

let zfs_devices = zfs_devices(&partition_type_map, None)?;
let zfs_devices = zfs_devices(&partition_type_map, None).or_else(|err| -> Result<HashSet<u64>, Error> {
eprintln!("error getting zfs devices: {}", err);
Ok(HashSet::new())
})?;

let lvm_devices = get_lvm_devices(&partition_type_map)?;

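get_disks now treats a failing ZFS probe as non-fatal: log the error and continue with an empty device set. The same or_else shape in miniature, with a simulated failure:

use std::collections::HashSet;

fn zfs_devices_stub() -> Result<HashSet<u64>, String> {
    Err("zpool binary not found".into()) // simulated probe failure
}

fn main() -> Result<(), String> {
    let zfs_devices = zfs_devices_stub().or_else(|err| -> Result<HashSet<u64>, String> {
        eprintln!("error getting zfs devices: {}", err);
        Ok(HashSet::new()) // degrade gracefully instead of aborting
    })?;
    assert!(zfs_devices.is_empty());
    Ok(())
}
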
@ -64,7 +64,7 @@ fn parse_zpool_list_header(i: &str) -> IResult<&str, ZFSPoolInfo> {
let (i, (text, size, alloc, free, _, _,
frag, _, dedup, health,
_altroot, _eol)) = tuple((
take_while1(|c| char::is_alphanumeric(c)), // name
take_while1(|c| char::is_alphanumeric(c) || c == '-' || c == ':' || c == '_' || c == '.'), // name
preceded(multispace1, parse_optional_u64), // size
preceded(multispace1, parse_optional_u64), // allocated
preceded(multispace1, parse_optional_u64), // free
@ -221,7 +221,7 @@ logs
assert_eq!(data, expect);

let output = "\
btest 427349245952 761856 427348484096 - - 0 0 1.00 ONLINE -
b-test 427349245952 761856 427348484096 - - 0 0 1.00 ONLINE -
mirror 213674622976 438272 213674184704 - - 0 0 - ONLINE
/dev/sda1 - - - - - - - - ONLINE
/dev/sda2 - - - - - - - - ONLINE
@ -235,7 +235,7 @@ logs - - - - - - - - -
let data = parse_zpool_list(&output)?;
let expect = vec![
ZFSPoolInfo {
name: String::from("btest"),
name: String::from("b-test"),
health: String::from("ONLINE"),
usage: Some(ZFSPoolUsage {
size: 427349245952,
@ -261,5 +261,31 @@ logs - - - - - - - - -

assert_eq!(data, expect);

let output = "\
b.test 427349245952 761856 427348484096 - - 0 0 1.00 ONLINE -
mirror 213674622976 438272 213674184704 - - 0 0 - ONLINE
/dev/sda1 - - - - - - - - ONLINE
";

let data = parse_zpool_list(&output)?;
let expect = vec![
ZFSPoolInfo {
name: String::from("b.test"),
health: String::from("ONLINE"),
usage: Some(ZFSPoolUsage {
size: 427349245952,
alloc: 761856,
free: 427348484096,
dedup: 1.0,
frag: 0,
}),
devices: vec![
String::from("/dev/sda1"),
]
},
];

assert_eq!(data, expect);

Ok(())
}

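The widened name rule above additionally accepts '-', ':', '_' and '.' in pool names. A dependency-free scanner with the same predicate (the real parser uses nom's take_while1); like nom, it returns the remaining input together with the matched name:

fn take_pool_name(i: &str) -> Option<(&str, &str)> {
    let end = i
        .find(|c: char| !(c.is_alphanumeric() || matches!(c, '-' | ':' | '_' | '.')))
        .unwrap_or(i.len());
    if end == 0 { None } else { Some((&i[end..], &i[..end])) }
}

fn main() {
    assert_eq!(take_pool_name("b-test 42"), Some((" 42", "b-test")));
    assert_eq!(take_pool_name("b.test\t1"), Some(("\t1", "b.test")));
    assert_eq!(take_pool_name(" leading"), None); // must match at least one char
}
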
@ -430,3 +430,38 @@ errors: No known data errors

Ok(())
}

#[test]
fn test_zpool_status_parser3() -> Result<(), Error> {

let output = r###" pool: bt-est
state: ONLINE
scan: none requested
config:

NAME STATE READ WRITE CKSUM
bt-est ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
/dev/sda1 ONLINE 0 0 0
/dev/sda2 ONLINE 0 0 0
mirror-1 ONLINE 0 0 0
/dev/sda3 ONLINE 0 0 0
/dev/sda4 ONLINE 0 0 0
logs
/dev/sda5 ONLINE 0 0 0

errors: No known data errors
"###;

let key_value_list = parse_zpool_status(&output)?;
for (k, v) in key_value_list {
println!("{} => {}", k,v);
if k == "config" {
let vdev_list = parse_zpool_status_config_tree(&v)?;
let _tree = vdev_list_to_tree(&vdev_list);
//println!("TREE1 {}", serde_json::to_string_pretty(&tree)?);
}
}

Ok(())
}

@ -78,24 +78,6 @@ fn test_compressed_blob_writer() -> Result<(), Error> {
verify_test_blob(blob_writer.finish()?)
}

#[test]
fn test_signed_blob_writer() -> Result<(), Error> {
let tmp = Cursor::new(Vec::<u8>::new());
let mut blob_writer = DataBlobWriter::new_signed(tmp, CRYPT_CONFIG.clone())?;
blob_writer.write_all(&TEST_DATA)?;

verify_test_blob(blob_writer.finish()?)
}

#[test]
fn test_signed_compressed_blob_writer() -> Result<(), Error> {
let tmp = Cursor::new(Vec::<u8>::new());
let mut blob_writer = DataBlobWriter::new_signed_compressed(tmp, CRYPT_CONFIG.clone())?;
blob_writer.write_all(&TEST_DATA)?;

verify_test_blob(blob_writer.finish()?)
}

#[test]
fn test_encrypted_blob_writer() -> Result<(), Error> {
let tmp = Cursor::new(Vec::<u8>::new());

@ -76,6 +76,7 @@ Ext.define('PBS.Dashboard', {
let viewmodel = me.getViewModel();

let res = records[0].data;
viewmodel.set('fingerprint', res.info.fingerprint || Proxmox.Utils.unknownText);

let cpu = res.cpu,
mem = res.memory,
@ -91,6 +92,45 @@ Ext.define('PBS.Dashboard', {
hdPanel.updateValue(root.used / root.total);
},

showFingerPrint: function() {
let me = this;
let vm = me.getViewModel();
let fingerprint = vm.get('fingerprint');
Ext.create('Ext.window.Window', {
modal: true,
width: 600,
title: gettext('Fingerprint'),
layout: 'form',
bodyPadding: '10 0',
items: [
{
xtype: 'textfield',
inputId: 'fingerprintField',
value: fingerprint,
editable: false,
},
],
buttons: [
{
xtype: 'button',
iconCls: 'fa fa-clipboard',
handler: function(b) {
var el = document.getElementById('fingerprintField');
el.select();
document.execCommand("copy");
},
text: gettext('Copy')
},
{
text: gettext('Ok'),
handler: function() {
this.up('window').close();
},
},
],
}).show();
},

updateTasks: function(store, records, success) {
if (!success) return;
let me = this;
@ -134,11 +174,16 @@ Ext.define('PBS.Dashboard', {
timespan: 300, // in seconds
hours: 12, // in hours
error_shown: false,
fingerprint: "",
'bytes_in': 0,
'bytes_out': 0,
'avg_ptime': 0.0
},

formulas: {
disableFPButton: (get) => get('fingerprint') === "",
},

stores: {
usage: {
storeid: 'dash-usage',
@ -211,6 +256,16 @@ Ext.define('PBS.Dashboard', {
iconCls: 'fa fa-tasks',
title: gettext('Server Resources'),
bodyPadding: '0 20 0 20',
tools: [
{
xtype: 'button',
text: gettext('Show Fingerprint'),
handler: 'showFingerPrint',
bind: {
disabled: '{disableFPButton}',
},
},
],
layout: {
type: 'hbox',
align: 'center'

@ -10,28 +10,30 @@ Ext.define('pbs-data-store-snapshots', {
},
'files',
'owner',
{ name: 'size', type: 'int' },
{ name: 'size', type: 'int', allowNull: true, },
{
name: 'encrypted',
name: 'crypt-mode',
type: 'boolean',
calculate: function(data) {
let encrypted = 0;
let files = 0;
let crypt = {
none: 0,
mixed: 0,
'sign-only': 0,
encrypt: 0,
count: 0,
};
let signed = 0;
data.files.forEach(file => {
if (file.filename === 'index.json.blob') return; // is never encrypted
if (file.encrypted) {
encrypted++;
let mode = PBS.Utils.cryptmap.indexOf(file['crypt-mode']);
if (mode !== -1) {
crypt[file['crypt-mode']]++;
}
files++;
crypt.count++;
});

if (encrypted === 0) {
return 0;
} else if (encrypted < files) {
return 1;
} else {
return 2;
}
return PBS.Utils.calculateCryptMode(crypt);
}
}
]
@ -149,12 +151,15 @@ Ext.define('PBS.DataStoreContent', {
let children = [];
for (const [_key, group] of Object.entries(groups)) {
let last_backup = 0;
let encrypted = 0;
let crypt = {
none: 0,
mixed: 0,
'sign-only': 0,
encrypt: 0,
};
for (const item of group.children) {
if (item.encrypted > 0) {
encrypted++;
}
if (item["backup-time"] > last_backup) {
crypt[PBS.Utils.cryptmap[item['crypt-mode']]]++;
if (item["backup-time"] > last_backup && item.size !== null) {
last_backup = item["backup-time"];
group["backup-time"] = last_backup;
group.files = item.files;
@ -163,14 +168,9 @@ Ext.define('PBS.DataStoreContent', {
}

}
if (encrypted === 0) {
group.encrypted = 0;
} else if (encrypted < group.children.length) {
group.encrypted = 1;
} else {
group.encrypted = 2;
}
group.count = group.children.length;
crypt.count = group.count;
group['crypt-mode'] = PBS.Utils.calculateCryptMode(crypt);
children.push(group);
}

@ -296,7 +296,7 @@ Ext.define('PBS.DataStoreContent', {

let encrypted = false;
data.files.forEach(file => {
if (file.filename === 'catalog.pcat1.didx' && file.encrypted) {
if (file.filename === 'catalog.pcat1.didx' && file['crypt-mode'] === 'encrypt') {
encrypted = true;
}
});
@ -343,7 +343,13 @@ Ext.define('PBS.DataStoreContent', {
header: gettext("Size"),
sortable: true,
dataIndex: 'size',
renderer: Proxmox.Utils.format_size,
renderer: (v, meta, record) => {
if (v === undefined || v === null) {
meta.tdCls = "x-grid-row-loading";
return '';
}
return Proxmox.Utils.format_size(v);
},
},
{
xtype: 'numbercolumn',
@ -359,15 +365,8 @@ Ext.define('PBS.DataStoreContent', {
},
{
header: gettext('Encrypted'),
dataIndex: 'encrypted',
renderer: function(value) {
switch (value) {
case 0: return Proxmox.Utils.noText;
case 1: return gettext('Mixed');
case 2: return Proxmox.Utils.yesText;
default: Proxmox.Utils.unknownText;
}
}
dataIndex: 'crypt-mode',
renderer: value => PBS.Utils.cryptText[value] || Proxmox.Utils.unknownText,
},
{
header: gettext("Files"),
@ -377,8 +376,10 @@ Ext.define('PBS.DataStoreContent', {
return files.map((file) => {
let icon = '';
let size = '';
if (file.encrypted) {
icon = '<i class="fa fa-lock"></i> ';
let mode = PBS.Utils.cryptmap.indexOf(file['crypt-mode']);
let iconCls = PBS.Utils.cryptIconCls[mode] || '';
if (iconCls !== '') {
icon = `<i class="fa fa-${iconCls}"></i> `;
}
if (file.size) {
size = ` (${Proxmox.Utils.format_size(file.size)})`;
@ -396,12 +397,13 @@ Ext.define('PBS.DataStoreContent', {
iconCls: 'fa fa-refresh',
handler: 'reload',
},
'-',
{
xtype: 'proxmoxButton',
text: gettext('Verify'),
disabled: true,
parentXType: 'pbsDataStoreContent',
enableFn: function(record) { return !!record.data; },
enableFn: (rec) => !!rec.data && rec.data.size !== null,
handler: 'onVerify',
},
{
@ -409,7 +411,7 @@ Ext.define('PBS.DataStoreContent', {
text: gettext('Prune'),
disabled: true,
parentXType: 'pbsDataStoreContent',
enableFn: function(record) { return !record.data.leaf; },
enableFn: (rec) => !rec.data.leaf,
handler: 'onPrune',
},
{
@ -418,24 +420,22 @@ Ext.define('PBS.DataStoreContent', {
disabled: true,
parentXType: 'pbsDataStoreContent',
handler: 'onForget',
dangerous: true,
confirmMsg: function(record) {
console.log(record);
//console.log(record);
let name = record.data.text;
return Ext.String.format(gettext('Are you sure you want to remove snapshot {0}'), `'${name}'`);
},
enableFn: function(record) {
return !!record.data.leaf;
},
enableFn: (rec) => !!rec.data.leaf && rec.data.size !== null,
},
'-',
{
xtype: 'proxmoxButton',
text: gettext('Download Files'),
disabled: true,
parentXType: 'pbsDataStoreContent',
handler: 'openBackupFileDownloader',
enableFn: function(record) {
return !!record.data.leaf;
},
enableFn: (rec) => !!rec.data.leaf && rec.data.size !== null,
},
{
xtype: "proxmoxButton",
@ -444,7 +444,7 @@ Ext.define('PBS.DataStoreContent', {
handler: 'openPxarBrowser',
parentXType: 'pbsDataStoreContent',
enableFn: function(record) {
return !!record.data.leaf && record.data.files.some(el => el.filename.endsWith('pxar.didx'));
return !!record.data.leaf && record.size !== null && record.data.files.some(el => el.filename.endsWith('pxar.didx'));
},
}
],

@ -125,7 +125,7 @@ Ext.define('PBS.MainView', {
},

control: {
'button[reference=logoutButton]': {
'[reference=logoutButton]': {
click: 'logout'
}
},
@ -133,7 +133,8 @@ Ext.define('PBS.MainView', {
init: function(view) {
var me = this;

me.lookupReference('usernameinfo').update({username:Proxmox.UserName});
PBS.data.RunningTasksStore.startUpdate();
me.lookupReference('usernameinfo').setText(Proxmox.UserName);

// show login on requestexception
// fixme: what about other errors
@ -189,7 +190,7 @@ Ext.define('PBS.MainView', {
type: 'hbox',
align: 'middle'
},
margin: '2 5 2 5',
margin: '2 0 2 5',
height: 38,
items: [
{
@ -197,7 +198,8 @@ Ext.define('PBS.MainView', {
prefix: '',
},
{
xtype: 'versioninfo'
padding: '0 0 0 5',
xtype: 'versioninfo',
},
{
padding: 5,
@ -208,12 +210,6 @@ Ext.define('PBS.MainView', {
flex: 1,
baseCls: 'x-plain',
},
{
baseCls: 'x-plain',
reference: 'usernameinfo',
padding: '0 5',
tpl: Ext.String.format(gettext("You are logged in as {0}"), "'{username}'")
},
{
xtype: 'button',
baseCls: 'x-btn',
@ -224,11 +220,27 @@ Ext.define('PBS.MainView', {
margin: '0 5 0 0',
},
{
reference: 'logoutButton',
xtype: 'pbsTaskButton',
margin: '0 5 0 0',
},
{
xtype: 'button',
iconCls: 'fa fa-sign-out',
text: gettext('Logout')
}
reference: 'usernameinfo',
style: {
// proxmox dark grey p light grey as border
backgroundColor: '#464d4d',
borderColor: '#ABBABA'
},
margin: '0 5 0 0',
iconCls: 'fa fa-user',
menu: [
{
reference: 'logoutButton',
iconCls: 'fa fa-sign-out',
text: gettext('Logout'),
},
],
},
]
},
{

@ -8,6 +8,8 @@ JSSRC= \
form/UserSelector.js \
form/RemoteSelector.js \
form/DataStoreSelector.js \
data/RunningTasksStore.js \
button/TaskButton.js \
config/UserView.js \
config/RemoteView.js \
config/ACLView.js \

39
www/Utils.js
@ -13,6 +13,45 @@ Ext.define('PBS.Utils', {

dataStorePrefix: 'DataStore-',

cryptmap: [
'none',
'mixed',
'sign-only',
'encrypt',
],

cryptText: [
Proxmox.Utils.noText,
gettext('Mixed'),
gettext('Signed'),
gettext('Encrypted'),
],

cryptIconCls: [
'',
'',
'certificate',
'lock',
],

calculateCryptMode: function(data) {
let mixed = data.mixed;
let encrypted = data.encrypt;
let signed = data['sign-only'];
let files = data.count;
if (mixed > 0) {
return PBS.Utils.cryptmap.indexOf('mixed');
} else if (files === encrypted) {
return PBS.Utils.cryptmap.indexOf('encrypt');
} else if (files === signed) {
return PBS.Utils.cryptmap.indexOf('sign-only');
} else if ((signed+encrypted) === 0) {
return PBS.Utils.cryptmap.indexOf('none');
} else {
return PBS.Utils.cryptmap.indexOf('mixed');
}
},

getDataStoreFromPath: function(path) {
return path.slice(PBS.Utils.dataStorePrefix.length);
},
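
calculateCryptMode above reduces per-file counts to a single display mode for a snapshot group. The same decision table as a Rust sketch; the field names mirror the JS object, not any real API:

#[derive(Debug, PartialEq)]
enum CryptMode { None, Mixed, SignOnly, Encrypt }

struct Counts { count: u32, mixed: u32, sign_only: u32, encrypt: u32 }

fn calculate_crypt_mode(c: &Counts) -> CryptMode {
    if c.mixed > 0 {
        CryptMode::Mixed
    } else if c.encrypt == c.count {
        CryptMode::Encrypt
    } else if c.sign_only == c.count {
        CryptMode::SignOnly
    } else if c.sign_only + c.encrypt == 0 {
        CryptMode::None
    } else {
        CryptMode::Mixed // some files signed or encrypted, some not
    }
}

fn main() {
    let all_enc = Counts { count: 3, mixed: 0, sign_only: 0, encrypt: 3 };
    assert_eq!(calculate_crypt_mode(&all_enc), CryptMode::Encrypt);
    let half = Counts { count: 2, mixed: 0, sign_only: 0, encrypt: 1 };
    assert_eq!(calculate_crypt_mode(&half), CryptMode::Mixed);
}
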
92	www/button/TaskButton.js (new file)
@ -0,0 +1,92 @@
Ext.define('PBS.TaskButton', {
    extend: 'Ext.button.Button',
    alias: 'widget.pbsTaskButton',

    config: {
	badgeText: '0',
	badgeCls: '',
    },

    iconCls: 'fa fa-list',
    userCls: 'pmx-has-badge',
    text: gettext('Tasks'),

    setText: function(value) {
	let me = this;
	me.realText = value;
	let badgeText = me.getBadgeText();
	let badgeCls = me.getBadgeCls();
	let text = `${value} <span class="pmx-button-badge ${badgeCls}">${badgeText}</span>`;
	return me.callParent([text]);
    },

    getText: function() {
	let me = this;
	return me.realText;
    },

    setBadgeText: function(value) {
	let me = this;
	me.badgeText = value.toString();
	return me.setText(me.getText());
    },

    setBadgeCls: function(value) {
	let me = this;
	let res = me.callParent([value]);
	let badgeText = me.getBadgeText();
	me.setBadgeText(badgeText);
	return res;
    },

    handler: function() {
	let me = this;
	if (me.grid.isVisible()) {
	    me.grid.setVisible(false);
	} else {
	    me.grid.showBy(me, 'tr-br');
	}
    },

    initComponent: function() {
	let me = this;

	me.grid = Ext.create({
	    xtype: 'pbsRunningTasks',
	    title: '',
	    hideHeaders: false,
	    floating: true,

	    width: 600,

	    bbar: [
		'->',
		{
		    xtype: 'button',
		    text: gettext('Show All Tasks'),
		    handler: function() {
			var mainview = me.up('mainview');
			mainview.getController().redirectTo('pbsServerAdministration:tasks');
			me.grid.hide();
		    },
		},
	    ],

	    listeners: {
		'taskopened': function() {
		    me.grid.hide();
		},
	    },
	});
	me.callParent();
	me.mon(me.grid.getStore().rstore, 'load', function(store, records, success) {
	    if (!success) return;

	    let count = records.length;
	    let text = count > 99 ? '99+' : count.toString();
	    let cls = count > 0 ? 'active' : '';
	    me.setBadgeText(text);
	    me.setBadgeCls(cls);
	});
    },
});
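A hedged stand-alone sketch of the new button (in this changeset it is only instantiated via its pbsTaskButton xtype in MainView's toolbar, and it needs the pbsRunningTasks grid class to be loaded). The overridden setText() is what injects the badge markup that the CSS rules below style:

    // hypothetical direct instantiation, for illustration only
    let btn = Ext.create('PBS.TaskButton', {
	renderTo: Ext.getBody(),
    });
    btn.setBadgeText('7');     // inner markup becomes:
    btn.setBadgeCls('active'); //   Tasks <span class="pmx-button-badge active">7</span>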
@ -190,3 +190,21 @@ p.logs {
    visibility: hidden;
    width: 5px;
}

.pmx-has-badge .x-btn-inner {
    padding: 0 0 0 5px;
    min-width: 24px;
}

.pmx-button-badge {
    display: inline-block;
    font-weight: bold;
    border-radius: 4px;
    padding: 2px 3px;
    min-width: 24px;
    line-height: 1em;
}

.pmx-button-badge.active {
    background-color: #464d4d;
}
@ -18,6 +18,8 @@ Ext.define('PBS.RunningTasks', {
	    upid: record.data.upid,
	    endtime: record.data.endtime,
	}).show();

	view.fireEvent('taskopened', view, record.data.upid);
    },

    openTaskItemDblClick: function(grid, record) {
@ -54,20 +56,8 @@ Ext.define('PBS.RunningTasks', {
    store: {
	type: 'diff',
	autoDestroy: true,
	autoDestroyRstore: true,
	sorters: 'starttime',
	rstore: {
	    type: 'update',
	    autoStart: true,
	    interval: 3000,
	    storeid: 'pbs-running-tasks-dash',
	    model: 'proxmox-tasks',
	    proxy: {
		type: 'proxmox',
		// maybe separate api call?
		url: '/api2/json/nodes/localhost/tasks?running=1'
	    },
	},
	rstore: PBS.data.RunningTasksStore,
    },

    columns: [
@ -9,12 +9,27 @@ Ext.define('PBS.TaskSummary', {

    render_count: function(value, md, record, rowindex, colindex) {
	let cls = 'question';
	let color = 'faded';
	switch (colindex) {
	    case 1: cls = "times-circle critical"; break;
	    case 2: cls = "exclamation-circle warning"; break;
	    case 3: cls = "check-circle good"; break;
	    case 1:
		cls = "times-circle";
		color = "critical";
		break;
	    case 2:
		cls = "exclamation-circle";
		color = "warning";
		break;
	    case 3:
		cls = "check-circle";
		color = "good";
		break;
	    default: break;
	}

	if (value < 1) {
	    color = "faded";
	}
	cls += " " + color;
	return `<i class="fa fa-${cls}"></i> ${value}`;
    },
},
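The expanded switch separates the icon class from the color so that a zero count can render faded while keeping its column-specific icon. Expected renderer output, with hypothetical values:

    // render_count(2, md, record, row, 1) -> '<i class="fa fa-times-circle critical"></i> 2'
    // render_count(0, md, record, row, 1) -> '<i class="fa fa-times-circle faded"></i> 0'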
21	www/data/RunningTasksStore.js (new file)
@ -0,0 +1,21 @@
Ext.define('PBS.data.RunningTasksStore', {
    extend: 'Proxmox.data.UpdateStore',

    singleton: true,

    constructor: function(config) {
	let me = this;
	config = config || {};
	Ext.apply(config, {
	    interval: 3000,
	    storeid: 'pbs-running-tasks-dash',
	    model: 'proxmox-tasks',
	    proxy: {
		type: 'proxmox',
		// maybe separate api call?
		url: '/api2/json/nodes/localhost/tasks?running=1',
	    },
	});
	me.callParent([config]);
    },
});
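Hoisting the update store into a singleton lets the dashboard grid and the task button poll through one shared rstore instead of running two 3-second timers against the API. A sketch of the sharing, assuming both consumers are alive:

    // both consumers reference the same object, e.g. in RunningTasks.js above:
    //   store: { type: 'diff', ..., rstore: PBS.data.RunningTasksStore },
    let shared = PBS.data.RunningTasksStore;
    shared.startUpdate(); // begin polling (Proxmox.data.UpdateStore API)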
@ -46,8 +46,9 @@ Ext.define('PBS.window.BackupFileDownloader', {
	let me = this;
	let combo = me.lookup('file');
	let rec = combo.getStore().findRecord('filename', value, 0, false, true, true);
	let canDownload = !rec.data.encrypted;
	let canDownload = rec.data['crypt-mode'] !== 'encrypt';
	me.lookup('encryptedHint').setVisible(!canDownload);
	me.lookup('signedHint').setVisible(rec.data['crypt-mode'] === 'sign-only');
	me.lookup('downloadBtn').setDisabled(!canDownload);
    },

@ -88,7 +89,7 @@ Ext.define('PBS.window.BackupFileDownloader', {
	emptyText: gettext('No file selected'),
	fieldLabel: gettext('File'),
	store: {
	    fields: ['filename', 'size', 'encrypted',],
	    fields: ['filename', 'size', 'crypt-mode',],
	    idProperty: ['filename'],
	},
	listConfig: {
@ -107,12 +108,25 @@ Ext.define('PBS.window.BackupFileDownloader', {
	},
	{
	    text: gettext('Encrypted'),
	    dataIndex: 'encrypted',
	    renderer: Proxmox.Utils.format_boolean,
	    dataIndex: 'crypt-mode',
	    renderer: function(value) {
		let mode = -1;
		if (value !== undefined) {
		    mode = PBS.Utils.cryptmap.indexOf(value);
		}
		return PBS.Utils.cryptText[mode] || Proxmox.Utils.unknownText;
	    }
	},
    ],
},
},
{
    xtype: 'displayfield',
    userCls: 'pmx-hint',
    reference: 'signedHint',
    hidden: true,
    value: gettext('Note: Signatures of signed files will not be verified on the server. Please use the client to do this.'),
},
{
    xtype: 'displayfield',
    userCls: 'pmx-hint',
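Taken together, the download window now keys its hints and the download button off the richer crypt-mode field rather than the old encrypted boolean. A sketch of the gating with a hypothetical record:

    // hypothetical record, shaped like the file combo's store entries
    let rec = { data: { filename: 'catalog.pcat1.didx', 'crypt-mode': 'sign-only' } };
    let canDownload = rec.data['crypt-mode'] !== 'encrypt'; // true
    // encryptedHint stays hidden, signedHint becomes visible, and the
    // grid column renders PBS.Utils.cryptText[2], i.e. "Signed"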