Compare commits
90 Commits
Author | SHA1 | Date | |
---|---|---|---|
edc876c58e | |||
ac383beb0a | |||
716753f1a8 | |||
890b88cbef | |||
27709b49d5 | |||
7ccbce03d3 | |||
5fb852afed | |||
60589e6066 | |||
717ce40612 | |||
75442e813e | |||
853c55a049 | |||
6ef1b649d9 | |||
e3f3359c86 | |||
0e1edf19b1 | |||
de55fff226 | |||
b3a67f1f14 | |||
3cc23ca6cc | |||
3def6bfc64 | |||
18e8bc17e4 | |||
f66d66aafe | |||
7380c48dff | |||
0191759316 | |||
dbc42e6f75 | |||
d1c3bc5350 | |||
a97301350f | |||
09340f28f5 | |||
20497c6346 | |||
d0f7d0d9c1 | |||
608806e884 | |||
48176b0a77 | |||
3483a3b3a1 | |||
347e0d4c57 | |||
ae9b5c077a | |||
747446eb50 | |||
e1c8c27f47 | |||
63cec1622a | |||
31142ef291 | |||
058b4b9708 | |||
9a1330c72e | |||
0a6df20986 | |||
6680878b5c | |||
593043ed53 | |||
038f385089 | |||
b914b94773 | |||
2194bc59c8 | |||
a98a288e2d | |||
49e25688f1 | |||
d7eedbd24b | |||
5b17a02da4 | |||
8735247f29 | |||
0d5d15c9d1 | |||
2e44983a37 | |||
c76ff4b472 | |||
aaf4f40285 | |||
e64f77b716 | |||
fd1b65cc3c | |||
11148dce43 | |||
38da8ca1bc | |||
a0ffd4a413 | |||
450105b0c3 | |||
b62edce929 | |||
67678ec39c | |||
bf95fba72e | |||
d265420025 | |||
01a080215d | |||
8cf445ecc4 | |||
20def38e96 | |||
be5b43cb87 | |||
6f0565fa60 | |||
99940358e3 | |||
53daae8e89 | |||
8a23ea4656 | |||
c95c1c83b0 | |||
b446fa14c5 | |||
6d5d305d9d | |||
af2eb422d5 | |||
bbd57396d7 | |||
0fd55b08d9 | |||
619cd5cbcb | |||
1ec0d70d09 | |||
c8449217dc | |||
f7348a23cd | |||
ae18c436dd | |||
b0e20a71e2 | |||
b9700a9fe5 | |||
81867f0539 | |||
0a33fba49c | |||
049a22a3a3 | |||
4d4f94dedf | |||
a844fa0ba0 |
24
Cargo.toml
24
Cargo.toml
@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "proxmox-backup"
|
name = "proxmox-backup"
|
||||||
version = "2.0.10"
|
version = "2.0.12"
|
||||||
authors = [
|
authors = [
|
||||||
"Dietmar Maurer <dietmar@proxmox.com>",
|
"Dietmar Maurer <dietmar@proxmox.com>",
|
||||||
"Dominik Csapak <d.csapak@proxmox.com>",
|
"Dominik Csapak <d.csapak@proxmox.com>",
|
||||||
@ -27,6 +27,8 @@ members = [
|
|||||||
"pbs-fuse-loop",
|
"pbs-fuse-loop",
|
||||||
"pbs-runtime",
|
"pbs-runtime",
|
||||||
"proxmox-rest-server",
|
"proxmox-rest-server",
|
||||||
|
"proxmox-rrd-api-types",
|
||||||
|
"proxmox-rrd",
|
||||||
"proxmox-systemd",
|
"proxmox-systemd",
|
||||||
"pbs-tape",
|
"pbs-tape",
|
||||||
"pbs-tools",
|
"pbs-tools",
|
||||||
@ -56,6 +58,7 @@ thiserror = "1.0"
|
|||||||
futures = "0.3"
|
futures = "0.3"
|
||||||
h2 = { version = "0.3", features = [ "stream" ] }
|
h2 = { version = "0.3", features = [ "stream" ] }
|
||||||
handlebars = "3.0"
|
handlebars = "3.0"
|
||||||
|
hex = "0.4.3"
|
||||||
http = "0.2"
|
http = "0.2"
|
||||||
hyper = { version = "0.14", features = [ "full" ] }
|
hyper = { version = "0.14", features = [ "full" ] }
|
||||||
lazy_static = "1.4"
|
lazy_static = "1.4"
|
||||||
@ -94,11 +97,20 @@ zstd = { version = "0.6", features = [ "bindgen" ] }
|
|||||||
pathpatterns = "0.1.2"
|
pathpatterns = "0.1.2"
|
||||||
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
|
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
|
||||||
|
|
||||||
proxmox = { version = "0.13.3", features = [ "sortable-macro", "api-macro", "cli", "router", "tfa" ] }
|
proxmox = { version = "0.14.0", features = [ "sortable-macro" ] }
|
||||||
|
proxmox-http = { version = "0.5.0", features = [ "client", "http-helpers", "websocket" ] }
|
||||||
|
proxmox-io = "1"
|
||||||
|
proxmox-lang = "1"
|
||||||
|
proxmox-router = { version = "1", features = [ "cli" ] }
|
||||||
|
proxmox-schema = { version = "1", features = [ "api-macro" ] }
|
||||||
|
proxmox-section-config = "1"
|
||||||
|
proxmox-tfa = { version = "1", features = [ "u2f" ] }
|
||||||
|
proxmox-time = "1"
|
||||||
|
proxmox-uuid = "1"
|
||||||
|
|
||||||
proxmox-acme-rs = "0.2.1"
|
proxmox-acme-rs = "0.2.1"
|
||||||
proxmox-apt = "0.7.0"
|
proxmox-apt = "0.8.0"
|
||||||
proxmox-http = { version = "0.4.0", features = [ "client", "http-helpers", "websocket" ] }
|
proxmox-openid = "0.8.0"
|
||||||
proxmox-openid = "0.7.0"
|
|
||||||
|
|
||||||
pbs-api-types = { path = "pbs-api-types" }
|
pbs-api-types = { path = "pbs-api-types" }
|
||||||
pbs-buildcfg = { path = "pbs-buildcfg" }
|
pbs-buildcfg = { path = "pbs-buildcfg" }
|
||||||
@ -107,6 +119,8 @@ pbs-config = { path = "pbs-config" }
|
|||||||
pbs-datastore = { path = "pbs-datastore" }
|
pbs-datastore = { path = "pbs-datastore" }
|
||||||
pbs-runtime = { path = "pbs-runtime" }
|
pbs-runtime = { path = "pbs-runtime" }
|
||||||
proxmox-rest-server = { path = "proxmox-rest-server" }
|
proxmox-rest-server = { path = "proxmox-rest-server" }
|
||||||
|
proxmox-rrd-api-types = { path = "proxmox-rrd-api-types" }
|
||||||
|
proxmox-rrd = { path = "proxmox-rrd" }
|
||||||
proxmox-systemd = { path = "proxmox-systemd" }
|
proxmox-systemd = { path = "proxmox-systemd" }
|
||||||
pbs-tools = { path = "pbs-tools" }
|
pbs-tools = { path = "pbs-tools" }
|
||||||
pbs-tape = { path = "pbs-tape" }
|
pbs-tape = { path = "pbs-tape" }
|
||||||
|
12
Makefile
12
Makefile
@ -40,6 +40,8 @@ SUBCRATES := \
|
|||||||
pbs-fuse-loop \
|
pbs-fuse-loop \
|
||||||
pbs-runtime \
|
pbs-runtime \
|
||||||
proxmox-rest-server \
|
proxmox-rest-server \
|
||||||
|
proxmox-rrd-api-types \
|
||||||
|
proxmox-rrd \
|
||||||
proxmox-systemd \
|
proxmox-systemd \
|
||||||
pbs-tape \
|
pbs-tape \
|
||||||
pbs-tools \
|
pbs-tools \
|
||||||
@ -171,14 +173,11 @@ cargo-build:
|
|||||||
$(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-cargo-build
|
$(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-cargo-build
|
||||||
.do-cargo-build:
|
.do-cargo-build:
|
||||||
$(CARGO) build $(CARGO_BUILD_ARGS) \
|
$(CARGO) build $(CARGO_BUILD_ARGS) \
|
||||||
--bin proxmox-backup-api \
|
|
||||||
--bin proxmox-backup-proxy \
|
|
||||||
--bin proxmox-backup-manager \
|
|
||||||
--bin docgen \
|
|
||||||
--package proxmox-backup-banner \
|
--package proxmox-backup-banner \
|
||||||
--bin proxmox-backup-banner \
|
--bin proxmox-backup-banner \
|
||||||
--package proxmox-backup-client \
|
--package proxmox-backup-client \
|
||||||
--bin proxmox-backup-client \
|
--bin proxmox-backup-client \
|
||||||
|
--bin dump-catalog-shell-cli \
|
||||||
--bin proxmox-backup-debug \
|
--bin proxmox-backup-debug \
|
||||||
--package proxmox-file-restore \
|
--package proxmox-file-restore \
|
||||||
--bin proxmox-file-restore \
|
--bin proxmox-file-restore \
|
||||||
@ -190,7 +189,10 @@ $(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-
|
|||||||
--package proxmox-restore-daemon \
|
--package proxmox-restore-daemon \
|
||||||
--bin proxmox-restore-daemon \
|
--bin proxmox-restore-daemon \
|
||||||
--package proxmox-backup \
|
--package proxmox-backup \
|
||||||
--bin dump-catalog-shell-cli \
|
--bin docgen \
|
||||||
|
--bin proxmox-backup-api \
|
||||||
|
--bin proxmox-backup-manager \
|
||||||
|
--bin proxmox-backup-proxy \
|
||||||
--bin proxmox-daily-update \
|
--bin proxmox-daily-update \
|
||||||
--bin proxmox-file-restore \
|
--bin proxmox-file-restore \
|
||||||
--bin proxmox-tape \
|
--bin proxmox-tape \
|
||||||
|
33
debian/changelog
vendored
33
debian/changelog
vendored
@ -1,4 +1,35 @@
|
|||||||
rust-proxmox-backup (2.0.10-1) UNRELEASED; urgency=medium
|
rust-proxmox-backup (2.0.12-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* proxmox-backup-proxy: clean up old tasks when their reference was rotated
|
||||||
|
out of the task-log index
|
||||||
|
|
||||||
|
* api daemons: fix sending log-reopen command
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 19 Oct 2021 10:48:28 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.11-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* drop aritifical limits for task-UPID length
|
||||||
|
|
||||||
|
* tools: smart: only throw error for the fatal usage errors of smartctl
|
||||||
|
|
||||||
|
* api: improve returning errors for extjs formatter
|
||||||
|
|
||||||
|
* proxmox-rest-server: improve logging
|
||||||
|
|
||||||
|
* subscription: switch verification domain over to shop.proxmox.com
|
||||||
|
|
||||||
|
* rest-server/daemon: use new sd_notify_barrier helper for handling
|
||||||
|
synchronization with systemd on service reloading
|
||||||
|
|
||||||
|
* ui: datastore/Content: add empty text for no snapshots
|
||||||
|
|
||||||
|
* ui: datastore/Content: move first store-load into activate listener to
|
||||||
|
ensure we've a proper loading mask for better UX
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 05 Oct 2021 16:34:14 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.10-1) bullseye; urgency=medium
|
||||||
|
|
||||||
* ui: fix order of prune keep reasons
|
* ui: fix order of prune keep reasons
|
||||||
|
|
||||||
|
31
debian/control
vendored
31
debian/control
vendored
@ -22,6 +22,7 @@ Build-Depends: debhelper (>= 12),
|
|||||||
librust-h2-0.3+default-dev,
|
librust-h2-0.3+default-dev,
|
||||||
librust-h2-0.3+stream-dev,
|
librust-h2-0.3+stream-dev,
|
||||||
librust-handlebars-3+default-dev,
|
librust-handlebars-3+default-dev,
|
||||||
|
librust-hex-0.4+default-dev (>= 0.4.3-~~),
|
||||||
librust-http-0.2+default-dev,
|
librust-http-0.2+default-dev,
|
||||||
librust-hyper-0.14+default-dev,
|
librust-hyper-0.14+default-dev,
|
||||||
librust-hyper-0.14+full-dev,
|
librust-hyper-0.14+full-dev,
|
||||||
@ -38,20 +39,24 @@ Build-Depends: debhelper (>= 12),
|
|||||||
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
|
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
|
||||||
librust-percent-encoding-2+default-dev (>= 2.1-~~),
|
librust-percent-encoding-2+default-dev (>= 2.1-~~),
|
||||||
librust-pin-project-lite-0.2+default-dev,
|
librust-pin-project-lite-0.2+default-dev,
|
||||||
librust-proxmox-0.13+api-macro-dev,
|
librust-proxmox-0.14+sortable-macro-dev,
|
||||||
librust-proxmox-0.13+cli-dev,
|
|
||||||
librust-proxmox-0.13+default-dev,
|
|
||||||
librust-proxmox-0.13+router-dev,
|
|
||||||
librust-proxmox-0.13+sortable-macro-dev,
|
|
||||||
librust-proxmox-0.13+tfa-dev,
|
|
||||||
librust-proxmox-acme-rs-0.2+default-dev (>= 0.2.1-~~),
|
librust-proxmox-acme-rs-0.2+default-dev (>= 0.2.1-~~),
|
||||||
librust-proxmox-apt-0.7+default-dev,
|
librust-proxmox-apt-0.8+default-dev,
|
||||||
|
librust-proxmox-borrow-1+default-dev,
|
||||||
librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
|
librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
|
||||||
librust-proxmox-http-0.4+client-dev,
|
librust-proxmox-http-0.5+client-dev,
|
||||||
librust-proxmox-http-0.4+default-dev ,
|
librust-proxmox-http-0.5+default-dev ,
|
||||||
librust-proxmox-http-0.4+http-helpers-dev,
|
librust-proxmox-http-0.5+http-helpers-dev,
|
||||||
librust-proxmox-http-0.4+websocket-dev,
|
librust-proxmox-http-0.5+websocket-dev,
|
||||||
librust-proxmox-openid-0.7+default-dev,
|
librust-proxmox-io-1+tokio-dev,
|
||||||
|
librust-proxmox-lang-1+default-dev,
|
||||||
|
librust-proxmox-openid-0.8+default-dev,
|
||||||
|
librust-proxmox-router-1+cli-dev (>= 1.1.0-~~),
|
||||||
|
librust-proxmox-schema-1+api-macro-dev,
|
||||||
|
librust-proxmox-section-config-1+default-dev,
|
||||||
|
librust-proxmox-tfa-1+u2f-dev,
|
||||||
|
librust-proxmox-time-1+default-dev,
|
||||||
|
librust-proxmox-uuid-1+default-dev,
|
||||||
librust-pxar-0.10+default-dev (>= 0.10.1-~~),
|
librust-pxar-0.10+default-dev (>= 0.10.1-~~),
|
||||||
librust-pxar-0.10+tokio-io-dev (>= 0.10.1-~~),
|
librust-pxar-0.10+tokio-io-dev (>= 0.10.1-~~),
|
||||||
librust-regex-1+default-dev (>= 1.2-~~),
|
librust-regex-1+default-dev (>= 1.2-~~),
|
||||||
@ -89,7 +94,7 @@ Build-Depends: debhelper (>= 12),
|
|||||||
librust-zstd-0.6+default-dev,
|
librust-zstd-0.6+default-dev,
|
||||||
libacl1-dev,
|
libacl1-dev,
|
||||||
libfuse3-dev,
|
libfuse3-dev,
|
||||||
libsystemd-dev,
|
libsystemd-dev (>= 246-~~),
|
||||||
uuid-dev,
|
uuid-dev,
|
||||||
libsgutils2-dev,
|
libsgutils2-dev,
|
||||||
bash-completion,
|
bash-completion,
|
||||||
|
@ -1,31 +1,33 @@
|
|||||||
Backup Client Usage
|
Backup Client Usage
|
||||||
===================
|
===================
|
||||||
|
|
||||||
The command line client is called :command:`proxmox-backup-client`.
|
The command line client for Proxmox Backup Server is called
|
||||||
|
:command:`proxmox-backup-client`.
|
||||||
|
|
||||||
.. _client_repository:
|
.. _client_repository:
|
||||||
|
|
||||||
Backup Repository Locations
|
Backup Repository Locations
|
||||||
---------------------------
|
---------------------------
|
||||||
|
|
||||||
The client uses the following notation to specify a datastore repository
|
The client uses the following format to specify a datastore repository
|
||||||
on the backup server.
|
on the backup server (where username is specified in the form of user@realm):
|
||||||
|
|
||||||
[[username@]server[:port]:]datastore
|
[[username@]server[:port]:]datastore
|
||||||
|
|
||||||
The default value for ``username`` is ``root@pam``. If no server is specified,
|
The default value for ``username`` is ``root@pam``. If no server is specified,
|
||||||
the default is the local host (``localhost``).
|
the default is the local host (``localhost``).
|
||||||
|
|
||||||
You can specify a port if your backup server is only reachable on a different
|
You can specify a port if your backup server is only reachable on a non-default
|
||||||
port (e.g. with NAT and port forwarding).
|
port (for example, with NAT and port forwarding configurations).
|
||||||
|
|
||||||
Note that if the server is an IPv6 address, you have to write it with square
|
Note that if the server uses an IPv6 address, you have to write it with square
|
||||||
brackets (for example, `[fe80::01]`).
|
brackets (for example, `[fe80::01]`).
|
||||||
|
|
||||||
You can pass the repository with the ``--repository`` command line option, or
|
You can pass the repository with the ``--repository`` command line option, or
|
||||||
by setting the ``PBS_REPOSITORY`` environment variable.
|
by setting the ``PBS_REPOSITORY`` environment variable.
|
||||||
|
|
||||||
Here some examples of valid repositories and the real values
|
Below are some examples of valid repositories and their corresponding real
|
||||||
|
values:
|
||||||
|
|
||||||
================================ ================== ================== ===========
|
================================ ================== ================== ===========
|
||||||
Example User Host:Port Datastore
|
Example User Host:Port Datastore
|
||||||
@ -46,8 +48,8 @@ Environment Variables
|
|||||||
The default backup repository.
|
The default backup repository.
|
||||||
|
|
||||||
``PBS_PASSWORD``
|
``PBS_PASSWORD``
|
||||||
When set, this value is used for the password required for the backup server.
|
When set, this value is used as the password for the backup server.
|
||||||
You can also set this to a API token secret.
|
You can also set this to an API token secret.
|
||||||
|
|
||||||
``PBS_PASSWORD_FD``, ``PBS_PASSWORD_FILE``, ``PBS_PASSWORD_CMD``
|
``PBS_PASSWORD_FD``, ``PBS_PASSWORD_FILE``, ``PBS_PASSWORD_CMD``
|
||||||
Like ``PBS_PASSWORD``, but read data from an open file descriptor, a file
|
Like ``PBS_PASSWORD``, but read data from an open file descriptor, a file
|
||||||
@ -63,15 +65,14 @@ Environment Variables
|
|||||||
a file name or from the `stdout` of a command, respectively. The first
|
a file name or from the `stdout` of a command, respectively. The first
|
||||||
defined environment variable from the order above is preferred.
|
defined environment variable from the order above is preferred.
|
||||||
|
|
||||||
``PBS_FINGERPRINT`` When set, this value is used to verify the server
|
``PBS_FINGERPRINT``
|
||||||
certificate (only used if the system CA certificates cannot validate the
|
When set, this value is used to verify the server certificate (only used if
|
||||||
certificate).
|
the system CA certificates cannot validate the certificate).
|
||||||
|
|
||||||
|
|
||||||
.. Note:: Passwords must be valid UTF8 an may not contain
|
.. Note:: Passwords must be valid UTF-8 and may not contain newlines. For your
|
||||||
newlines. For your convienience, we just use the first line as
|
convienience, Proxmox Backup Server only uses the first line as password, so
|
||||||
password, so you can add arbitrary comments after the
|
you can add arbitrary comments after the first newline.
|
||||||
first newline.
|
|
||||||
|
|
||||||
|
|
||||||
Output Format
|
Output Format
|
||||||
@ -86,14 +87,15 @@ Creating Backups
|
|||||||
----------------
|
----------------
|
||||||
|
|
||||||
This section explains how to create a backup from within the machine. This can
|
This section explains how to create a backup from within the machine. This can
|
||||||
be a physical host, a virtual machine, or a container. Such backups may contain file
|
be a physical host, a virtual machine, or a container. Such backups may contain
|
||||||
and image archives. There are no restrictions in this case.
|
file and image archives. There are no restrictions in this case.
|
||||||
|
|
||||||
.. note:: If you want to backup virtual machines or containers on Proxmox VE, see :ref:`pve-integration`.
|
.. Note:: If you want to backup virtual machines or containers on Proxmox VE,
|
||||||
|
see :ref:`pve-integration`.
|
||||||
|
|
||||||
For the following example you need to have a backup server set up, working
|
For the following example, you need to have a backup server set up, have working
|
||||||
credentials and need to know the repository name.
|
credentials, and know the repository name.
|
||||||
In the following examples we use ``backup-server:store1``.
|
In the following examples, we use ``backup-server:store1``.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -107,12 +109,12 @@ In the following examples we use ``backup-server:store1``.
|
|||||||
Uploaded 12129 chunks in 87 seconds (564 MB/s).
|
Uploaded 12129 chunks in 87 seconds (564 MB/s).
|
||||||
End Time: 2019-12-03T10:36:29+01:00
|
End Time: 2019-12-03T10:36:29+01:00
|
||||||
|
|
||||||
This will prompt you for a password and then uploads a file archive named
|
This will prompt you for a password, then upload a file archive named
|
||||||
``root.pxar`` containing all the files in the ``/`` directory.
|
``root.pxar`` containing all the files in the ``/`` directory.
|
||||||
|
|
||||||
.. Caution:: Please note that the proxmox-backup-client does not
|
.. Caution:: Please note that proxmox-backup-client does not
|
||||||
automatically include mount points. Instead, you will see a short
|
automatically include mount points. Instead, you will see a short
|
||||||
``skip mount point`` notice for each of them. The idea is to
|
``skip mount point`` message for each of them. The idea is to
|
||||||
create a separate file archive for each mounted disk. You can
|
create a separate file archive for each mounted disk. You can
|
||||||
explicitly include them using the ``--include-dev`` option
|
explicitly include them using the ``--include-dev`` option
|
||||||
(i.e. ``--include-dev /boot/efi``). You can use this option
|
(i.e. ``--include-dev /boot/efi``). You can use this option
|
||||||
@ -120,19 +122,19 @@ This will prompt you for a password and then uploads a file archive named
|
|||||||
|
|
||||||
The ``--repository`` option can get quite long and is used by all
|
The ``--repository`` option can get quite long and is used by all
|
||||||
commands. You can avoid having to enter this value by setting the
|
commands. You can avoid having to enter this value by setting the
|
||||||
environment variable ``PBS_REPOSITORY``. Note that if you would like this to remain set
|
environment variable ``PBS_REPOSITORY``. Note that if you would like this to
|
||||||
over multiple sessions, you should instead add the below line to your
|
remain set over multiple sessions, you should instead add the below line to your
|
||||||
``.bashrc`` file.
|
``.bashrc`` file.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# export PBS_REPOSITORY=backup-server:store1
|
# export PBS_REPOSITORY=backup-server:store1
|
||||||
|
|
||||||
After this you can execute all commands without specifying the ``--repository``
|
After this, you can execute all commands without having to specify the
|
||||||
option.
|
``--repository`` option.
|
||||||
|
|
||||||
One single backup is allowed to contain more than one archive. For example, if
|
A single backup is allowed to contain more than one archive. For example, if
|
||||||
you want to backup two disks mounted at ``/mnt/disk1`` and ``/mnt/disk2``:
|
you want to back up two disks mounted at ``/mnt/disk1`` and ``/mnt/disk2``:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -146,26 +148,26 @@ archive source at the client. The format is:
|
|||||||
|
|
||||||
<archive-name>.<type>:<source-path>
|
<archive-name>.<type>:<source-path>
|
||||||
|
|
||||||
Common types are ``.pxar`` for file archives, and ``.img`` for block
|
Common types are ``.pxar`` for file archives and ``.img`` for block
|
||||||
device images. To create a backup of a block device run the following command:
|
device images. To create a backup of a block device, run the following command:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-client backup mydata.img:/dev/mylvm/mydata
|
# proxmox-backup-client backup mydata.img:/dev/mylvm/mydata
|
||||||
|
|
||||||
|
|
||||||
Excluding files/folders from a backup
|
Excluding Files/Directories from a Backup
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Sometimes it is desired to exclude certain files or folders from a backup archive.
|
Sometimes it is desired to exclude certain files or directories from a backup archive.
|
||||||
To tell the Proxmox Backup client when and how to ignore files and directories,
|
To tell the Proxmox Backup client when and how to ignore files and directories,
|
||||||
place a text file called ``.pxarexclude`` in the filesystem hierarchy.
|
place a text file named ``.pxarexclude`` in the filesystem hierarchy.
|
||||||
Whenever the backup client encounters such a file in a directory, it interprets
|
Whenever the backup client encounters such a file in a directory, it interprets
|
||||||
each line as glob match patterns for files and directories that are to be excluded
|
each line as a glob match pattern for files and directories that are to be excluded
|
||||||
from the backup.
|
from the backup.
|
||||||
|
|
||||||
The file must contain a single glob pattern per line. Empty lines are ignored.
|
The file must contain a single glob pattern per line. Empty lines and lines
|
||||||
The same is true for lines starting with ``#``, which indicates a comment.
|
starting with ``#`` (indicating a comment) are ignored.
|
||||||
A ``!`` at the beginning of a line reverses the glob match pattern from an exclusion
|
A ``!`` at the beginning of a line reverses the glob match pattern from an exclusion
|
||||||
to an explicit inclusion. This makes it possible to exclude all entries in a
|
to an explicit inclusion. This makes it possible to exclude all entries in a
|
||||||
directory except for a few single files/subdirectories.
|
directory except for a few single files/subdirectories.
|
||||||
@ -176,23 +178,24 @@ the given patterns. It is only possible to match files in this directory and its
|
|||||||
``\`` is used to escape special glob characters.
|
``\`` is used to escape special glob characters.
|
||||||
``?`` matches any single character.
|
``?`` matches any single character.
|
||||||
``*`` matches any character, including an empty string.
|
``*`` matches any character, including an empty string.
|
||||||
``**`` is used to match subdirectories. It can be used to, for example, exclude
|
``**`` is used to match current directory and subdirectories. For example, with
|
||||||
all files ending in ``.tmp`` within the directory or subdirectories with the
|
the pattern ``**/*.tmp``, it would exclude all files ending in ``.tmp`` within
|
||||||
following pattern ``**/*.tmp``.
|
a directory and its subdirectories.
|
||||||
``[...]`` matches a single character from any of the provided characters within
|
``[...]`` matches a single character from any of the provided characters within
|
||||||
the brackets. ``[!...]`` does the complementary and matches any single character
|
the brackets. ``[!...]`` does the complementary and matches any single character
|
||||||
not contained within the brackets. It is also possible to specify ranges with two
|
not contained within the brackets. It is also possible to specify ranges with two
|
||||||
characters separated by ``-``. For example, ``[a-z]`` matches any lowercase
|
characters separated by ``-``. For example, ``[a-z]`` matches any lowercase
|
||||||
alphabetic character and ``[0-9]`` matches any one single digit.
|
alphabetic character, and ``[0-9]`` matches any single digit.
|
||||||
|
|
||||||
The order of the glob match patterns defines whether a file is included or
|
The order of the glob match patterns defines whether a file is included or
|
||||||
excluded, that is to say later entries override previous ones.
|
excluded, that is to say, later entries override earlier ones.
|
||||||
This is also true for match patterns encountered deeper down the directory tree,
|
This is also true for match patterns encountered deeper down the directory tree,
|
||||||
which can override a previous exclusion.
|
which can override a previous exclusion.
|
||||||
Be aware that excluded directories will **not** be read by the backup client.
|
|
||||||
Thus, a ``.pxarexclude`` file in an excluded subdirectory will have no effect.
|
.. Note:: Excluded directories will **not** be read by the backup client. Thus,
|
||||||
``.pxarexclude`` files are treated as regular files and will be included in the
|
a ``.pxarexclude`` file in an excluded subdirectory will have no effect.
|
||||||
backup archive.
|
``.pxarexclude`` files are treated as regular files and will be included in
|
||||||
|
the backup archive.
|
||||||
|
|
||||||
For example, consider the following directory structure:
|
For example, consider the following directory structure:
|
||||||
|
|
||||||
@ -280,7 +283,7 @@ You can avoid entering the passwords by setting the environment
|
|||||||
variables ``PBS_PASSWORD`` and ``PBS_ENCRYPTION_PASSWORD``.
|
variables ``PBS_PASSWORD`` and ``PBS_ENCRYPTION_PASSWORD``.
|
||||||
|
|
||||||
|
|
||||||
Using a master key to store and recover encryption keys
|
Using a Master Key to Store and Recover Encryption Keys
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
You can also use ``proxmox-backup-client key`` to create an RSA public/private
|
You can also use ``proxmox-backup-client key`` to create an RSA public/private
|
||||||
@ -360,7 +363,7 @@ To set up a master key:
|
|||||||
keep keys ordered and in a place that is separate from the contents being
|
keep keys ordered and in a place that is separate from the contents being
|
||||||
backed up. It can happen, for example, that you back up an entire system, using
|
backed up. It can happen, for example, that you back up an entire system, using
|
||||||
a key on that system. If the system then becomes inaccessible for any reason
|
a key on that system. If the system then becomes inaccessible for any reason
|
||||||
and needs to be restored, this will not be possible as the encryption key will be
|
and needs to be restored, this will not be possible, as the encryption key will be
|
||||||
lost along with the broken system.
|
lost along with the broken system.
|
||||||
|
|
||||||
It is recommended that you keep your master key safe, but easily accessible, in
|
It is recommended that you keep your master key safe, but easily accessible, in
|
||||||
@ -382,10 +385,10 @@ version of your master key. The following command sends the output of the
|
|||||||
Restoring Data
|
Restoring Data
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
The regular creation of backups is a necessary step to avoiding data
|
The regular creation of backups is a necessary step in avoiding data loss. More
|
||||||
loss. More importantly, however, is the restoration. It is good practice to perform
|
importantly, however, is the restoration. It is good practice to perform
|
||||||
periodic recovery tests to ensure that you can access the data in
|
periodic recovery tests to ensure that you can access the data in case of
|
||||||
case of problems.
|
disaster.
|
||||||
|
|
||||||
First, you need to find the snapshot which you want to restore. The snapshot
|
First, you need to find the snapshot which you want to restore. The snapshot
|
||||||
list command provides a list of all the snapshots on the server:
|
list command provides a list of all the snapshots on the server:
|
||||||
@ -444,23 +447,22 @@ to use the interactive recovery shell.
|
|||||||
|
|
||||||
The interactive recovery shell is a minimal command line interface that
|
The interactive recovery shell is a minimal command line interface that
|
||||||
utilizes the metadata stored in the catalog to quickly list, navigate and
|
utilizes the metadata stored in the catalog to quickly list, navigate and
|
||||||
search files in a file archive.
|
search for files in a file archive.
|
||||||
To restore files, you can select them individually or match them with a glob
|
To restore files, you can select them individually or match them with a glob
|
||||||
pattern.
|
pattern.
|
||||||
|
|
||||||
Using the catalog for navigation reduces the overhead considerably because only
|
Using the catalog for navigation reduces the overhead considerably because only
|
||||||
the catalog needs to be downloaded and, optionally, decrypted.
|
the catalog needs to be downloaded and, optionally, decrypted.
|
||||||
The actual chunks are only accessed if the metadata in the catalog is not enough
|
The actual chunks are only accessed if the metadata in the catalog is
|
||||||
or for the actual restore.
|
insufficient or for the actual restore.
|
||||||
|
|
||||||
Similar to common UNIX shells ``cd`` and ``ls`` are the commands used to change
|
Similar to common UNIX shells, ``cd`` and ``ls`` are the commands used to change
|
||||||
working directory and list directory contents in the archive.
|
working directory and list directory contents in the archive.
|
||||||
``pwd`` shows the full path of the current working directory with respect to the
|
``pwd`` shows the full path of the current working directory with respect to the
|
||||||
archive root.
|
archive root.
|
||||||
|
|
||||||
Being able to quickly search the contents of the archive is a commonly needed feature.
|
The ability to quickly search the contents of the archive is a commonly required
|
||||||
That's where the catalog is most valuable.
|
feature. That's where the catalog is most valuable. For example:
|
||||||
For example:
|
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -471,8 +473,8 @@ For example:
|
|||||||
pxar:/ > restore-selected /target/path
|
pxar:/ > restore-selected /target/path
|
||||||
...
|
...
|
||||||
|
|
||||||
This will find and print all files ending in ``.txt`` located in ``etc/`` or a
|
This will find and print all files ending in ``.txt`` located in ``etc/`` or its
|
||||||
subdirectory and add the corresponding pattern to the list for subsequent restores.
|
subdirectories, and add the corresponding pattern to the list for subsequent restores.
|
||||||
``list-selected`` shows these patterns and ``restore-selected`` finally restores
|
``list-selected`` shows these patterns and ``restore-selected`` finally restores
|
||||||
all files in the archive matching the patterns to ``/target/path`` on the local
|
all files in the archive matching the patterns to ``/target/path`` on the local
|
||||||
host. This will scan the whole archive.
|
host. This will scan the whole archive.
|
||||||
@ -497,7 +499,7 @@ Mounting of Archives via FUSE
|
|||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
The :term:`FUSE` implementation for the pxar archive allows you to mount a
|
The :term:`FUSE` implementation for the pxar archive allows you to mount a
|
||||||
file archive as a read-only filesystem to a mountpoint on your host.
|
file archive as a read-only filesystem to a mount point on your host.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -513,7 +515,7 @@ This allows you to access the full contents of the archive in a seamless manner.
|
|||||||
load on your host, depending on the operations you perform on the mounted
|
load on your host, depending on the operations you perform on the mounted
|
||||||
filesystem.
|
filesystem.
|
||||||
|
|
||||||
To unmount the filesystem use the ``umount`` command on the mountpoint:
|
To unmount the filesystem, use the ``umount`` command on the mount point:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -522,7 +524,7 @@ To unmount the filesystem use the ``umount`` command on the mountpoint:
|
|||||||
Login and Logout
|
Login and Logout
|
||||||
----------------
|
----------------
|
||||||
|
|
||||||
The client tool prompts you to enter the logon password as soon as you
|
The client tool prompts you to enter the login password as soon as you
|
||||||
want to access the backup server. The server checks your credentials
|
want to access the backup server. The server checks your credentials
|
||||||
and responds with a ticket that is valid for two hours. The client
|
and responds with a ticket that is valid for two hours. The client
|
||||||
tool automatically stores that ticket and uses it for further requests
|
tool automatically stores that ticket and uses it for further requests
|
||||||
@ -551,7 +553,7 @@ Changing the Owner of a Backup Group
|
|||||||
By default, the owner of a backup group is the user which was used to originally
|
By default, the owner of a backup group is the user which was used to originally
|
||||||
create that backup group (or in the case of sync jobs, ``root@pam``). This
|
create that backup group (or in the case of sync jobs, ``root@pam``). This
|
||||||
means that if a user ``mike@pbs`` created a backup, another user ``john@pbs``
|
means that if a user ``mike@pbs`` created a backup, another user ``john@pbs``
|
||||||
can not be used to create backups in that same backup group. In case you want
|
can not be used to create backups in that same backup group. In case you want
|
||||||
to change the owner of a backup, you can do so with the below command, using a
|
to change the owner of a backup, you can do so with the below command, using a
|
||||||
user that has ``Datastore.Modify`` privileges on the datastore.
|
user that has ``Datastore.Modify`` privileges on the datastore.
|
||||||
|
|
||||||
@ -677,7 +679,7 @@ unused data blocks are removed.
|
|||||||
(access time) property. Filesystems are mounted with the ``relatime`` option
|
(access time) property. Filesystems are mounted with the ``relatime`` option
|
||||||
by default. This results in a better performance by only updating the
|
by default. This results in a better performance by only updating the
|
||||||
``atime`` property if the last access has been at least 24 hours ago. The
|
``atime`` property if the last access has been at least 24 hours ago. The
|
||||||
downside is, that touching a chunk within these 24 hours will not always
|
downside is that touching a chunk within these 24 hours will not always
|
||||||
update its ``atime`` property.
|
update its ``atime`` property.
|
||||||
|
|
||||||
Chunks in the grace period will be logged at the end of the garbage
|
Chunks in the grace period will be logged at the end of the garbage
|
||||||
@ -701,8 +703,8 @@ unused data blocks are removed.
|
|||||||
Average chunk size: 2486565
|
Average chunk size: 2486565
|
||||||
TASK OK
|
TASK OK
|
||||||
|
|
||||||
|
Garbage collection can also be scheduled using ``proxmox-backup-manager`` or
|
||||||
.. todo:: howto run garbage-collection at regular intervals (cron)
|
from the Proxmox Backup Server's web interface.
|
||||||
|
|
||||||
Benchmarking
|
Benchmarking
|
||||||
------------
|
------------
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
.. Epilog (included at top of each file)
|
.. Epilog (included at top of each file)
|
||||||
|
|
||||||
We use this file to define external links and common replacement
|
We use this file to define external links and common replacement
|
||||||
patterns.
|
patterns.
|
||||||
|
|
||||||
@ -13,7 +13,6 @@
|
|||||||
.. _Proxmox: https://www.proxmox.com
|
.. _Proxmox: https://www.proxmox.com
|
||||||
.. _Proxmox Community Forum: https://forum.proxmox.com
|
.. _Proxmox Community Forum: https://forum.proxmox.com
|
||||||
.. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
|
.. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
|
||||||
.. FIXME
|
|
||||||
.. _Proxmox Backup: https://pbs.proxmox.com/wiki/index.php/Main_Page
|
.. _Proxmox Backup: https://pbs.proxmox.com/wiki/index.php/Main_Page
|
||||||
.. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
|
.. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
|
||||||
.. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
|
.. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
|
||||||
@ -23,6 +22,7 @@
|
|||||||
.. _Virtual machine: https://en.wikipedia.org/wiki/Virtual_machine
|
.. _Virtual machine: https://en.wikipedia.org/wiki/Virtual_machine
|
||||||
.. _APT: http://en.wikipedia.org/wiki/Advanced_Packaging_Tool
|
.. _APT: http://en.wikipedia.org/wiki/Advanced_Packaging_Tool
|
||||||
.. _QEMU: https://www.qemu.org/
|
.. _QEMU: https://www.qemu.org/
|
||||||
|
.. _LXC: https://linuxcontainers.org/lxc/introduction/
|
||||||
|
|
||||||
.. _Client-server model: https://en.wikipedia.org/wiki/Client-server_model
|
.. _Client-server model: https://en.wikipedia.org/wiki/Client-server_model
|
||||||
.. _AE: https://en.wikipedia.org/wiki/Authenticated_encryption
|
.. _AE: https://en.wikipedia.org/wiki/Authenticated_encryption
|
||||||
|
54
docs/gui.rst
54
docs/gui.rst
@ -8,8 +8,9 @@ tools. The web interface also provides a built-in console, so if you prefer the
|
|||||||
command line or need some extra control, you have this option.
|
command line or need some extra control, you have this option.
|
||||||
|
|
||||||
The web interface can be accessed via https://youripaddress:8007. The default
|
The web interface can be accessed via https://youripaddress:8007. The default
|
||||||
login is `root`, and the password is the one specified during the installation
|
login is `root`, and the password is either the one specified during the
|
||||||
process.
|
installation process or the password of the root user, in case of installation
|
||||||
|
on top of Debian.
|
||||||
|
|
||||||
|
|
||||||
Features
|
Features
|
||||||
@ -48,12 +49,13 @@ GUI Overview
|
|||||||
|
|
||||||
The Proxmox Backup Server web interface consists of 3 main sections:
|
The Proxmox Backup Server web interface consists of 3 main sections:
|
||||||
|
|
||||||
* **Header**: At the top. This shows version information, and contains buttons to view
|
* **Header**: At the top. This shows version information and contains buttons to
|
||||||
documentation, monitor running tasks, set the language and logout.
|
view documentation, monitor running tasks, set the language, configure various
|
||||||
* **Sidebar**: On the left. This contains the configuration options for
|
display settings, and logout.
|
||||||
|
* **Sidebar**: On the left. This contains the administration options for
|
||||||
the server.
|
the server.
|
||||||
* **Configuration Panel**: In the center. This contains the control interface for the
|
* **Configuration Panel**: In the center. This contains the respective control
|
||||||
configuration options in the *Sidebar*.
|
interfaces for the administration options in the *Sidebar*.
|
||||||
|
|
||||||
|
|
||||||
Sidebar
|
Sidebar
|
||||||
@ -74,12 +76,14 @@ previous and currently running tasks, and subscription information.
|
|||||||
Configuration
|
Configuration
|
||||||
^^^^^^^^^^^^^
|
^^^^^^^^^^^^^
|
||||||
|
|
||||||
The Configuration section contains some system configuration options, such as
|
The Configuration section contains some system options, such as time, network,
|
||||||
time and network configuration. It also contains the following subsections:
|
WebAuthn, and HTTP proxy configuration. It also contains the following
|
||||||
|
subsections:
|
||||||
|
|
||||||
* **Access Control**: Add and manage users, API tokens, and the permissions
|
* **Access Control**: Add and manage users, API tokens, and the permissions
|
||||||
associated with these items
|
associated with these items
|
||||||
* **Remotes**: Add, edit and remove remotes (see :term:`Remote`)
|
* **Remotes**: Add, edit and remove remotes (see :term:`Remote`)
|
||||||
|
* **Certificates**: Manage ACME accounts and create SSL certificates.
|
||||||
* **Subscription**: Upload a subscription key, view subscription status and
|
* **Subscription**: Upload a subscription key, view subscription status and
|
||||||
access a text-based system report.
|
access a text-based system report.
|
||||||
|
|
||||||
@ -98,6 +102,7 @@ tasks and information. These are:
|
|||||||
resource usage statistics
|
resource usage statistics
|
||||||
* **Services**: Manage and monitor system services
|
* **Services**: Manage and monitor system services
|
||||||
* **Updates**: An interface for upgrading packages
|
* **Updates**: An interface for upgrading packages
|
||||||
|
* **Repositories**: An interface for configuring APT repositories
|
||||||
* **Syslog**: View log messages from the server
|
* **Syslog**: View log messages from the server
|
||||||
* **Tasks**: Task history with multiple filter options
|
* **Tasks**: Task history with multiple filter options
|
||||||
|
|
||||||
@ -110,7 +115,7 @@ The administration menu item also contains a disk management subsection:
|
|||||||
* **Disks**: View information on available disks
|
* **Disks**: View information on available disks
|
||||||
|
|
||||||
* **Directory**: Create and view information on *ext4* and *xfs* disks
|
* **Directory**: Create and view information on *ext4* and *xfs* disks
|
||||||
* **ZFS**: Create and view information on *ZFS* disks
|
* **ZFS**: Create and view information on *ZFS* disks
|
||||||
|
|
||||||
Tape Backup
|
Tape Backup
|
||||||
^^^^^^^^^^^
|
^^^^^^^^^^^
|
||||||
@ -119,11 +124,20 @@ Tape Backup
|
|||||||
:align: right
|
:align: right
|
||||||
:alt: Tape Backup: Tape changer overview
|
:alt: Tape Backup: Tape changer overview
|
||||||
|
|
||||||
The `Tape Backup`_ section contains a top panel, managing tape media sets,
|
The `Tape Backup`_ section contains a top panel, with options for managing tape
|
||||||
inventories, drives, changers and the tape backup jobs itself.
|
media sets, inventories, drives, changers, encryption keys, and the tape backup
|
||||||
|
jobs themselves. The tabs are as follows:
|
||||||
|
|
||||||
It also contains a subsection per standalone drive and per changer, with a
|
* **Content**: Information on the contents of the tape backup
|
||||||
status and management view for those devices.
|
* **Inventory**: Manage the tapes attached to the system
|
||||||
|
* **Changers**: Manage tape loading devices
|
||||||
|
* **Drives**: Manage drives used for reading and writing to tapes
|
||||||
|
* **Media Pools**: Manage logical pools of tapes
|
||||||
|
* **Encryption Keys**: Manage tape backup encryption keys
|
||||||
|
* **Backup Jobs**: Manage tape backup jobs
|
||||||
|
|
||||||
|
The section also contains a subsection per standalone drive and per changer,
|
||||||
|
with a status and management view for those devices.
|
||||||
|
|
||||||
Datastore
|
Datastore
|
||||||
^^^^^^^^^
|
^^^^^^^^^
|
||||||
@ -133,9 +147,9 @@ Datastore
|
|||||||
:alt: Datastore Configuration
|
:alt: Datastore Configuration
|
||||||
|
|
||||||
The Datastore section contains interfaces for creating and managing
|
The Datastore section contains interfaces for creating and managing
|
||||||
datastores. It contains a button to create a new datastore on the server, as
|
datastores. It also contains a button for creating a new datastore on the
|
||||||
well as a subsection for each datastore on the system, in which you can use the
|
server, as well as a subsection for each datastore on the system, in which you
|
||||||
top panel to view:
|
can use the top panel to view:
|
||||||
|
|
||||||
* **Summary**: Access a range of datastore usage statistics
|
* **Summary**: Access a range of datastore usage statistics
|
||||||
* **Content**: Information on the datastore's backup groups and their respective
|
* **Content**: Information on the datastore's backup groups and their respective
|
||||||
@ -144,5 +158,7 @@ top panel to view:
|
|||||||
collection <client_garbage-collection>` operations, and run garbage collection
|
collection <client_garbage-collection>` operations, and run garbage collection
|
||||||
manually
|
manually
|
||||||
* **Sync Jobs**: Create, manage and run :ref:`syncjobs` from remote servers
|
* **Sync Jobs**: Create, manage and run :ref:`syncjobs` from remote servers
|
||||||
* **Verify Jobs**: Create, manage and run :ref:`maintenance_verification` jobs on the
|
* **Verify Jobs**: Create, manage and run :ref:`maintenance_verification` jobs
|
||||||
datastore
|
on the datastore
|
||||||
|
* **Options**: Configure notification and verification settings
|
||||||
|
* **Permissions**: Manage permissions on the datastore
|
||||||
|
@ -19,24 +19,24 @@ for various management tasks such as disk management.
|
|||||||
`Proxmox Backup`_ without the server part.
|
`Proxmox Backup`_ without the server part.
|
||||||
|
|
||||||
The disk image (ISO file) provided by Proxmox includes a complete Debian system
|
The disk image (ISO file) provided by Proxmox includes a complete Debian system
|
||||||
as well as all necessary packages for the `Proxmox Backup`_ server.
|
as well as all necessary packages for the `Proxmox Backup`_ Server.
|
||||||
|
|
||||||
The installer will guide you through the setup process and allow
|
The installer will guide you through the setup process and allow
|
||||||
you to partition the local disk(s), apply basic system configurations
|
you to partition the local disk(s), apply basic system configuration
|
||||||
(e.g. timezone, language, network), and install all required packages.
|
(for example timezone, language, network), and install all required packages.
|
||||||
The provided ISO will get you started in just a few minutes, and is the
|
The provided ISO will get you started in just a few minutes, and is the
|
||||||
recommended method for new and existing users.
|
recommended method for new and existing users.
|
||||||
|
|
||||||
Alternatively, `Proxmox Backup`_ server can be installed on top of an
|
Alternatively, `Proxmox Backup`_ Server can be installed on top of an
|
||||||
existing Debian system.
|
existing Debian system.
|
||||||
|
|
||||||
Install `Proxmox Backup`_ with the Installer
|
Install `Proxmox Backup`_ Server using the Installer
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Download the ISO from |DOWNLOADS|.
|
Download the ISO from |DOWNLOADS|.
|
||||||
It includes the following:
|
It includes the following:
|
||||||
|
|
||||||
* The `Proxmox Backup`_ server installer, which partitions the local
|
* The `Proxmox Backup`_ Server installer, which partitions the local
|
||||||
disk(s) with ext4, xfs or ZFS, and installs the operating system
|
disk(s) with ext4, xfs or ZFS, and installs the operating system
|
||||||
|
|
||||||
* Complete operating system (Debian Linux, 64-bit)
|
* Complete operating system (Debian Linux, 64-bit)
|
||||||
@ -63,7 +63,7 @@ standard Debian installation. After configuring the
|
|||||||
# apt-get update
|
# apt-get update
|
||||||
# apt-get install proxmox-backup-server
|
# apt-get install proxmox-backup-server
|
||||||
|
|
||||||
The commands above keep the current (Debian) kernel and install a minimal
|
The above commands keep the current (Debian) kernel and install a minimal
|
||||||
set of required packages.
|
set of required packages.
|
||||||
|
|
||||||
If you want to install the same set of packages as the installer
|
If you want to install the same set of packages as the installer
|
||||||
|
@ -4,15 +4,15 @@ Introduction
|
|||||||
What is Proxmox Backup Server?
|
What is Proxmox Backup Server?
|
||||||
------------------------------
|
------------------------------
|
||||||
|
|
||||||
Proxmox Backup Server is an enterprise-class, client-server backup software
|
Proxmox Backup Server is an enterprise-class, client-server backup solution that
|
||||||
package that backs up :term:`virtual machine`\ s, :term:`container`\ s, and
|
is capable of backing up :term:`virtual machine`\ s, :term:`container`\ s, and
|
||||||
physical hosts. It is specially optimized for the `Proxmox Virtual Environment`_
|
physical hosts. It is specially optimized for the `Proxmox Virtual Environment`_
|
||||||
platform and allows you to back up your data securely, even between remote
|
platform and allows you to back up your data securely, even between remote
|
||||||
sites, providing easy management with a web-based user interface.
|
sites, providing easy management through a web-based user interface.
|
||||||
|
|
||||||
It supports deduplication, compression, and authenticated
|
It supports deduplication, compression, and authenticated
|
||||||
encryption (AE_). Using :term:`Rust` as the implementation language guarantees high
|
encryption (AE_). Using :term:`Rust` as the implementation language guarantees
|
||||||
performance, low resource usage, and a safe, high-quality codebase.
|
high performance, low resource usage, and a safe, high-quality codebase.
|
||||||
|
|
||||||
Proxmox Backup uses state of the art cryptography for both client-server
|
Proxmox Backup uses state of the art cryptography for both client-server
|
||||||
communication and backup content :ref:`encryption <client_encryption>`. All
|
communication and backup content :ref:`encryption <client_encryption>`. All
|
||||||
@ -28,22 +28,23 @@ Proxmox Backup Server uses a `client-server model`_. The server stores the
|
|||||||
backup data and provides an API to create and manage datastores. With the
|
backup data and provides an API to create and manage datastores. With the
|
||||||
API, it's also possible to manage disks and other server-side resources.
|
API, it's also possible to manage disks and other server-side resources.
|
||||||
|
|
||||||
The backup client uses this API to access the backed up data. With the command
|
The backup client uses this API to access the backed up data. You can use the
|
||||||
line tool ``proxmox-backup-client`` you can create backups and restore data.
|
``proxmox-backup-client`` command line tool to create and restore file backups.
|
||||||
For QEMU_ with `Proxmox Virtual Environment`_ we deliver an integrated client.
|
For QEMU_ and LXC_ within `Proxmox Virtual Environment`_, we deliver an
|
||||||
|
integrated client.
|
||||||
|
|
||||||
A single backup is allowed to contain several archives. For example, when you
|
A single backup is allowed to contain several archives. For example, when you
|
||||||
backup a :term:`virtual machine`, each disk is stored as a separate archive
|
backup a :term:`virtual machine`, each disk is stored as a separate archive
|
||||||
inside that backup. The VM configuration itself is stored as an extra file.
|
inside that backup. The VM configuration itself is stored as an extra file.
|
||||||
This way, it's easy to access and restore only important parts of the backup,
|
This way, it's easy to access and restore only the important parts of the
|
||||||
without the need to scan the whole backup.
|
backup, without the need to scan the whole backup.
|
||||||
|
|
||||||
|
|
||||||
Main Features
|
Main Features
|
||||||
-------------
|
-------------
|
||||||
|
|
||||||
:Support for Proxmox VE: The `Proxmox Virtual Environment`_ is fully
|
:Support for Proxmox VE: The `Proxmox Virtual Environment`_ is fully
|
||||||
supported and you can easily backup :term:`virtual machine`\ s and
|
supported, and you can easily backup :term:`virtual machine`\ s and
|
||||||
:term:`container`\ s.
|
:term:`container`\ s.
|
||||||
|
|
||||||
:Performance: The whole software stack is written in :term:`Rust`,
|
:Performance: The whole software stack is written in :term:`Rust`,
|
||||||
@ -70,6 +71,10 @@ Main Features
|
|||||||
modern hardware. In addition to client-side encryption, all data is
|
modern hardware. In addition to client-side encryption, all data is
|
||||||
transferred via a secure TLS connection.
|
transferred via a secure TLS connection.
|
||||||
|
|
||||||
|
:Tape backup: For long-term archiving of data, Proxmox Backup Server also
|
||||||
|
provides extensive support for backing up to tape and managing tape
|
||||||
|
libraries.
|
||||||
|
|
||||||
:Web interface: Manage the Proxmox Backup Server with the integrated, web-based
|
:Web interface: Manage the Proxmox Backup Server with the integrated, web-based
|
||||||
user interface.
|
user interface.
|
||||||
|
|
||||||
@ -80,7 +85,7 @@ Main Features
|
|||||||
backup-clients.
|
backup-clients.
|
||||||
|
|
||||||
:Enterprise Support: Proxmox Server Solutions GmbH offers enterprise support in
|
:Enterprise Support: Proxmox Server Solutions GmbH offers enterprise support in
|
||||||
form of `Proxmox Backup Server Subscription Plans
|
the form of `Proxmox Backup Server Subscription Plans
|
||||||
<https://www.proxmox.com/en/proxmox-backup-server/pricing>`_. Users at every
|
<https://www.proxmox.com/en/proxmox-backup-server/pricing>`_. Users at every
|
||||||
subscription level get access to the Proxmox Backup :ref:`Enterprise
|
subscription level get access to the Proxmox Backup :ref:`Enterprise
|
||||||
Repository <sysadmin_package_repos_enterprise>`. In addition, with a Basic,
|
Repository <sysadmin_package_repos_enterprise>`. In addition, with a Basic,
|
||||||
@ -173,7 +178,7 @@ Bug Tracker
|
|||||||
~~~~~~~~~~~
|
~~~~~~~~~~~
|
||||||
|
|
||||||
Proxmox runs a public bug tracker at `<https://bugzilla.proxmox.com>`_. If an
|
Proxmox runs a public bug tracker at `<https://bugzilla.proxmox.com>`_. If an
|
||||||
issue appears, file your report there. An issue can be a bug as well as a
|
issue appears, file your report there. An issue can be a bug, as well as a
|
||||||
request for a new feature or enhancement. The bug tracker helps to keep track
|
request for a new feature or enhancement. The bug tracker helps to keep track
|
||||||
of the issue and will send a notification once it has been solved.
|
of the issue and will send a notification once it has been solved.
|
||||||
|
|
||||||
@ -224,5 +229,6 @@ requirements.
|
|||||||
|
|
||||||
In July 2020, we released the first beta version of Proxmox Backup
|
In July 2020, we released the first beta version of Proxmox Backup
|
||||||
Server, followed by the first stable version in November 2020. With support for
|
Server, followed by the first stable version in November 2020. With support for
|
||||||
incremental, fully deduplicated backups, Proxmox Backup significantly reduces
|
encryption and incremental, fully deduplicated backups, Proxmox Backup offers a
|
||||||
network load and saves valuable storage space.
|
secure environment, which significantly reduces network load and saves valuable
|
||||||
|
storage space.
|
||||||
|
@ -4,17 +4,17 @@
|
|||||||
ZFS on Linux
|
ZFS on Linux
|
||||||
------------
|
------------
|
||||||
|
|
||||||
ZFS is a combined file system and logical volume manager designed by
|
ZFS is a combined file system and logical volume manager, designed by
|
||||||
Sun Microsystems. There is no need to manually compile ZFS modules - all
|
Sun Microsystems. There is no need to manually compile ZFS modules - all
|
||||||
packages are included.
|
packages are included.
|
||||||
|
|
||||||
By using ZFS, it's possible to achieve maximum enterprise features with
|
By using ZFS, it's possible to achieve maximum enterprise features with
|
||||||
low budget hardware, but also high performance systems by leveraging
|
low budget hardware, and also high performance systems by leveraging
|
||||||
SSD caching or even SSD only setups. ZFS can replace cost intense
|
SSD caching or even SSD only setups. ZFS can replace expensive
|
||||||
hardware raid cards by moderate CPU and memory load combined with easy
|
hardware raid cards with moderate CPU and memory load, combined with easy
|
||||||
management.
|
management.
|
||||||
|
|
||||||
General ZFS advantages
|
General advantages of ZFS:
|
||||||
|
|
||||||
* Easy configuration and management with GUI and CLI.
|
* Easy configuration and management with GUI and CLI.
|
||||||
* Reliable
|
* Reliable
|
||||||
@ -34,18 +34,18 @@ General ZFS advantages
|
|||||||
Hardware
|
Hardware
|
||||||
~~~~~~~~~
|
~~~~~~~~~
|
||||||
|
|
||||||
ZFS depends heavily on memory, so you need at least 8GB to start. In
|
ZFS depends heavily on memory, so it's recommended to have at least 8GB to
|
||||||
practice, use as much you can get for your hardware/budget. To prevent
|
start. In practice, use as much you can get for your hardware/budget. To prevent
|
||||||
data corruption, we recommend the use of high quality ECC RAM.
|
data corruption, we recommend the use of high quality ECC RAM.
|
||||||
|
|
||||||
If you use a dedicated cache and/or log disk, you should use an
|
If you use a dedicated cache and/or log disk, you should use an
|
||||||
enterprise class SSD (e.g. Intel SSD DC S3700 Series). This can
|
enterprise class SSD (for example, Intel SSD DC S3700 Series). This can
|
||||||
increase the overall performance significantly.
|
increase the overall performance significantly.
|
||||||
|
|
||||||
IMPORTANT: Do not use ZFS on top of hardware controller which has its
|
IMPORTANT: Do not use ZFS on top of a hardware controller which has its
|
||||||
own cache management. ZFS needs to directly communicate with disks. An
|
own cache management. ZFS needs to directly communicate with disks. An
|
||||||
HBA adapter is the way to go, or something like LSI controller flashed
|
HBA adapter or something like an LSI controller flashed in ``IT`` mode is
|
||||||
in ``IT`` mode.
|
recommended.
|
||||||
|
|
||||||
|
|
||||||
ZFS Administration
|
ZFS Administration
|
||||||
@ -53,7 +53,7 @@ ZFS Administration
|
|||||||
|
|
||||||
This section gives you some usage examples for common tasks. ZFS
|
This section gives you some usage examples for common tasks. ZFS
|
||||||
itself is really powerful and provides many options. The main commands
|
itself is really powerful and provides many options. The main commands
|
||||||
to manage ZFS are `zfs` and `zpool`. Both commands come with great
|
to manage ZFS are `zfs` and `zpool`. Both commands come with extensive
|
||||||
manual pages, which can be read with:
|
manual pages, which can be read with:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -123,7 +123,7 @@ Create a new pool with cache (L2ARC)
|
|||||||
It is possible to use a dedicated cache drive partition to increase
|
It is possible to use a dedicated cache drive partition to increase
|
||||||
the performance (use SSD).
|
the performance (use SSD).
|
||||||
|
|
||||||
As `<device>` it is possible to use more devices, like it's shown in
|
For `<device>`, you can use multiple devices, as is shown in
|
||||||
"Create a new pool with RAID*".
|
"Create a new pool with RAID*".
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -136,7 +136,7 @@ Create a new pool with log (ZIL)
|
|||||||
It is possible to use a dedicated cache drive partition to increase
|
It is possible to use a dedicated cache drive partition to increase
|
||||||
the performance (SSD).
|
the performance (SSD).
|
||||||
|
|
||||||
As `<device>` it is possible to use more devices, like it's shown in
|
For `<device>`, you can use multiple devices, as is shown in
|
||||||
"Create a new pool with RAID*".
|
"Create a new pool with RAID*".
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -146,8 +146,9 @@ As `<device>` it is possible to use more devices, like it's shown in
|
|||||||
Add cache and log to an existing pool
|
Add cache and log to an existing pool
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
If you have a pool without cache and log. First partition the SSD in
|
You can add cache and log devices to a pool after its creation. In this example,
|
||||||
2 partition with `parted` or `gdisk`
|
we will use a single drive for both cache and log. First, you need to create
|
||||||
|
2 partitions on the SSD with `parted` or `gdisk`
|
||||||
|
|
||||||
.. important:: Always use GPT partition tables.
|
.. important:: Always use GPT partition tables.
|
||||||
|
|
||||||
@ -171,12 +172,12 @@ Changing a failed device
|
|||||||
Changing a failed bootable device
|
Changing a failed bootable device
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
Depending on how Proxmox Backup was installed it is either using `grub` or `systemd-boot`
|
Depending on how Proxmox Backup was installed, it is either using `grub` or
|
||||||
as bootloader.
|
`systemd-boot` as a bootloader.
|
||||||
|
|
||||||
The first steps of copying the partition table, reissuing GUIDs and replacing
|
In either case, the first steps of copying the partition table, reissuing GUIDs
|
||||||
the ZFS partition are the same. To make the system bootable from the new disk,
|
and replacing the ZFS partition are the same. To make the system bootable from
|
||||||
different steps are needed which depend on the bootloader in use.
|
the new disk, different steps are needed which depend on the bootloader in use.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -207,7 +208,7 @@ Usually `grub.cfg` is located in `/boot/grub/grub.cfg`
|
|||||||
# grub-mkconfig -o /path/to/grub.cfg
|
# grub-mkconfig -o /path/to/grub.cfg
|
||||||
|
|
||||||
|
|
||||||
Activate E-Mail Notification
|
Activate e-mail notification
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
ZFS comes with an event daemon, which monitors events generated by the
|
ZFS comes with an event daemon, which monitors events generated by the
|
||||||
@ -219,24 +220,24 @@ and you can install it using `apt-get`:
|
|||||||
|
|
||||||
# apt-get install zfs-zed
|
# apt-get install zfs-zed
|
||||||
|
|
||||||
To activate the daemon it is necessary to edit `/etc/zfs/zed.d/zed.rc` with your
|
To activate the daemon, it is necessary to uncomment the ZED_EMAIL_ADDR
|
||||||
favorite editor, and uncomment the `ZED_EMAIL_ADDR` setting:
|
setting in the file `/etc/zfs/zed.d/zed.rc`.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
ZED_EMAIL_ADDR="root"
|
ZED_EMAIL_ADDR="root"
|
||||||
|
|
||||||
Please note Proxmox Backup forwards mails to `root` to the email address
|
Please note that Proxmox Backup forwards mails to `root` to the email address
|
||||||
configured for the root user.
|
configured for the root user.
|
||||||
|
|
||||||
IMPORTANT: The only setting that is required is `ZED_EMAIL_ADDR`. All
|
IMPORTANT: The only setting that is required is `ZED_EMAIL_ADDR`. All
|
||||||
other settings are optional.
|
other settings are optional.
|
||||||
|
|
||||||
Limit ZFS Memory Usage
|
Limit ZFS memory usage
|
||||||
^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
It is good to use at most 50 percent (which is the default) of the
|
It is good to use at most 50 percent (which is the default) of the
|
||||||
system memory for ZFS ARC to prevent performance shortage of the
|
system memory for ZFS ARC, to prevent performance degradation of the
|
||||||
host. Use your preferred editor to change the configuration in
|
host. Use your preferred editor to change the configuration in
|
||||||
`/etc/modprobe.d/zfs.conf` and insert:
|
`/etc/modprobe.d/zfs.conf` and insert:
|
||||||
|
|
||||||
@ -244,27 +245,42 @@ host. Use your preferred editor to change the configuration in
|
|||||||
|
|
||||||
options zfs zfs_arc_max=8589934592
|
options zfs zfs_arc_max=8589934592
|
||||||
|
|
||||||
This example setting limits the usage to 8GB.
|
The above example limits the usage to 8 GiB ('8 * 2^30^').
|
||||||
|
|
||||||
.. IMPORTANT:: If your root file system is ZFS you must update your initramfs every time this value changes:
|
.. IMPORTANT:: In case your desired `zfs_arc_max` value is lower than or equal
|
||||||
|
to `zfs_arc_min` (which defaults to 1/32 of the system memory), `zfs_arc_max`
|
||||||
|
will be ignored. Thus, for it to work in this case, you must set
|
||||||
|
`zfs_arc_min` to at most `zfs_arc_max - 1`. This would require updating the
|
||||||
|
configuration in `/etc/modprobe.d/zfs.conf`, with:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
options zfs zfs_arc_min=8589934591
|
||||||
|
options zfs zfs_arc_max=8589934592
|
||||||
|
|
||||||
|
This example setting limits the usage to 8 GiB ('8 * 2^30^') on
|
||||||
|
systems with more than 256 GiB of total memory, where simply setting
|
||||||
|
`zfs_arc_max` alone would not work.
|
||||||
|
|
||||||
|
.. IMPORTANT:: If your root file system is ZFS, you must update your initramfs
|
||||||
|
every time this value changes.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# update-initramfs -u
|
# update-initramfs -u
|
||||||
|
|
||||||
|
|
||||||
SWAP on ZFS
|
Swap on ZFS
|
||||||
^^^^^^^^^^^
|
^^^^^^^^^^^
|
||||||
|
|
||||||
Swap-space created on a zvol may generate some troubles, like blocking the
|
Swap-space created on a zvol may cause some issues, such as blocking the
|
||||||
server or generating a high IO load, often seen when starting a Backup
|
server or generating a high IO load, often seen when starting a Backup
|
||||||
to an external Storage.
|
to an external storage.
|
||||||
|
|
||||||
We strongly recommend to use enough memory, so that you normally do not
|
We strongly recommend using enough memory, so that you normally do not
|
||||||
run into low memory situations. Should you need or want to add swap, it is
|
run into low memory situations. Should you need or want to add swap, it is
|
||||||
preferred to create a partition on a physical disk and use it as swap device.
|
preferred to create a partition on a physical disk and use it as a swap device.
|
||||||
You can leave some space free for this purpose in the advanced options of the
|
You can leave some space free for this purpose in the advanced options of the
|
||||||
installer. Additionally, you can lower the `swappiness` value.
|
installer. Additionally, you can lower the `swappiness` value.
|
||||||
A good value for servers is 10:
|
A good value for servers is 10:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -291,7 +307,7 @@ an editor of your choice and add the following line:
|
|||||||
vm.swappiness = 100 The kernel will swap aggressively.
|
vm.swappiness = 100 The kernel will swap aggressively.
|
||||||
==================== ===============================================================
|
==================== ===============================================================
|
||||||
|
|
||||||
ZFS Compression
|
ZFS compression
|
||||||
^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
To activate compression:
|
To activate compression:
|
||||||
@ -300,10 +316,11 @@ To activate compression:
|
|||||||
# zpool set compression=lz4 <pool>
|
# zpool set compression=lz4 <pool>
|
||||||
|
|
||||||
We recommend using the `lz4` algorithm, since it adds very little CPU overhead.
|
We recommend using the `lz4` algorithm, since it adds very little CPU overhead.
|
||||||
Other algorithms such as `lzjb` and `gzip-N` (where `N` is an integer `1-9` representing
|
Other algorithms such as `lzjb` and `gzip-N` (where `N` is an integer from `1-9`
|
||||||
the compression ratio, 1 is fastest and 9 is best compression) are also available.
|
representing the compression ratio, where 1 is fastest and 9 is best
|
||||||
Depending on the algorithm and how compressible the data is, having compression enabled can even increase
|
compression) are also available. Depending on the algorithm and how
|
||||||
I/O performance.
|
compressible the data is, having compression enabled can even increase I/O
|
||||||
|
performance.
|
||||||
|
|
||||||
You can disable compression at any time with:
|
You can disable compression at any time with:
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -314,26 +331,26 @@ Only new blocks will be affected by this change.
|
|||||||
|
|
||||||
.. _local_zfs_special_device:
|
.. _local_zfs_special_device:
|
||||||
|
|
||||||
ZFS Special Device
|
ZFS special device
|
||||||
^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
Since version 0.8.0 ZFS supports `special` devices. A `special` device in a
|
Since version 0.8.0, ZFS supports `special` devices. A `special` device in a
|
||||||
pool is used to store metadata, deduplication tables, and optionally small
|
pool is used to store metadata, deduplication tables, and optionally small
|
||||||
file blocks.
|
file blocks.
|
||||||
|
|
||||||
A `special` device can improve the speed of a pool consisting of slow spinning
|
A `special` device can improve the speed of a pool consisting of slow spinning
|
||||||
hard disks with a lot of metadata changes. For example workloads that involve
|
hard disks with a lot of metadata changes. For example, workloads that involve
|
||||||
creating, updating or deleting a large number of files will benefit from the
|
creating, updating or deleting a large number of files will benefit from the
|
||||||
presence of a `special` device. ZFS datasets can also be configured to store
|
presence of a `special` device. ZFS datasets can also be configured to store
|
||||||
whole small files on the `special` device which can further improve the
|
small files on the `special` device, which can further improve the
|
||||||
performance. Use fast SSDs for the `special` device.
|
performance. Use fast SSDs for the `special` device.
|
||||||
|
|
||||||
.. IMPORTANT:: The redundancy of the `special` device should match the one of the
|
.. IMPORTANT:: The redundancy of the `special` device should match the one of the
|
||||||
pool, since the `special` device is a point of failure for the whole pool.
|
pool, since the `special` device is a point of failure for the entire pool.
|
||||||
|
|
||||||
.. WARNING:: Adding a `special` device to a pool cannot be undone!
|
.. WARNING:: Adding a `special` device to a pool cannot be undone!
|
||||||
|
|
||||||
Create a pool with `special` device and RAID-1:
|
To create a pool with `special` device and RAID-1:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -346,8 +363,8 @@ Adding a `special` device to an existing pool with RAID-1:
|
|||||||
# zpool add <pool> special mirror <device1> <device2>
|
# zpool add <pool> special mirror <device1> <device2>
|
||||||
|
|
||||||
ZFS datasets expose the `special_small_blocks=<size>` property. `size` can be
|
ZFS datasets expose the `special_small_blocks=<size>` property. `size` can be
|
||||||
`0` to disable storing small file blocks on the `special` device or a power of
|
`0` to disable storing small file blocks on the `special` device, or a power of
|
||||||
two in the range between `512B` to `128K`. After setting the property new file
|
two in the range between `512B` and `128K`. After setting this property, new file
|
||||||
blocks smaller than `size` will be allocated on the `special` device.
|
blocks smaller than `size` will be allocated on the `special` device.
|
||||||
|
|
||||||
.. IMPORTANT:: If the value for `special_small_blocks` is greater than or equal to
|
.. IMPORTANT:: If the value for `special_small_blocks` is greater than or equal to
|
||||||
@ -355,10 +372,10 @@ blocks smaller than `size` will be allocated on the `special` device.
|
|||||||
the `special` device, so be careful!
|
the `special` device, so be careful!
|
||||||
|
|
||||||
Setting the `special_small_blocks` property on a pool will change the default
|
Setting the `special_small_blocks` property on a pool will change the default
|
||||||
value of that property for all child ZFS datasets (for example all containers
|
value of that property for all child ZFS datasets (for example, all containers
|
||||||
in the pool will opt in for small file blocks).
|
in the pool will opt in for small file blocks).
|
||||||
|
|
||||||
Opt in for all file smaller than 4K-blocks pool-wide:
|
Opt in for all files smaller than 4K-blocks pool-wide:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -379,10 +396,15 @@ Opt out from small file blocks for a single dataset:
|
|||||||
Troubleshooting
|
Troubleshooting
|
||||||
^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
Corrupted cachefile
|
Corrupt cache file
|
||||||
|
""""""""""""""""""
|
||||||
|
|
||||||
In case of a corrupted ZFS cachefile, some volumes may not be mounted during
|
`zfs-import-cache.service` imports ZFS pools using the ZFS cache file. If this
|
||||||
boot until mounted manually later.
|
file becomes corrupted, the service won't be able to import the pools that it's
|
||||||
|
unable to read.
|
||||||
|
|
||||||
|
As a result, in case of a corrupted ZFS cache file, some volumes may not be
|
||||||
|
mounted during boot and must be mounted manually later.
|
||||||
|
|
||||||
For each pool, run:
|
For each pool, run:
|
||||||
|
|
||||||
@ -390,16 +412,13 @@ For each pool, run:
|
|||||||
|
|
||||||
# zpool set cachefile=/etc/zfs/zpool.cache POOLNAME
|
# zpool set cachefile=/etc/zfs/zpool.cache POOLNAME
|
||||||
|
|
||||||
and afterwards update the `initramfs` by running:
|
then, update the `initramfs` by running:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# update-initramfs -u -k all
|
# update-initramfs -u -k all
|
||||||
|
|
||||||
and finally reboot your node.
|
and finally, reboot the node.
|
||||||
|
|
||||||
Sometimes the ZFS cachefile can get corrupted, and `zfs-import-cache.service`
|
|
||||||
doesn't import the pools that aren't present in the cachefile.
|
|
||||||
|
|
||||||
Another workaround to this problem is enabling the `zfs-import-scan.service`,
|
Another workaround to this problem is enabling the `zfs-import-scan.service`,
|
||||||
which searches and imports pools via device scanning (usually slower).
|
which searches and imports pools via device scanning (usually slower).
|
||||||
|
@ -14,15 +14,15 @@ following retention options are available:
|
|||||||
|
|
||||||
``keep-hourly <N>``
|
``keep-hourly <N>``
|
||||||
Keep backups for the last ``<N>`` hours. If there is more than one
|
Keep backups for the last ``<N>`` hours. If there is more than one
|
||||||
backup for a single hour, only the latest is kept.
|
backup for a single hour, only the latest is retained.
|
||||||
|
|
||||||
``keep-daily <N>``
|
``keep-daily <N>``
|
||||||
Keep backups for the last ``<N>`` days. If there is more than one
|
Keep backups for the last ``<N>`` days. If there is more than one
|
||||||
backup for a single day, only the latest is kept.
|
backup for a single day, only the latest is retained.
|
||||||
|
|
||||||
``keep-weekly <N>``
|
``keep-weekly <N>``
|
||||||
Keep backups for the last ``<N>`` weeks. If there is more than one
|
Keep backups for the last ``<N>`` weeks. If there is more than one
|
||||||
backup for a single week, only the latest is kept.
|
backup for a single week, only the latest is retained.
|
||||||
|
|
||||||
.. note:: Weeks start on Monday and end on Sunday. The software
|
.. note:: Weeks start on Monday and end on Sunday. The software
|
||||||
uses the `ISO week date`_ system and handles weeks at
|
uses the `ISO week date`_ system and handles weeks at
|
||||||
@ -30,17 +30,17 @@ following retention options are available:
|
|||||||
|
|
||||||
``keep-monthly <N>``
|
``keep-monthly <N>``
|
||||||
Keep backups for the last ``<N>`` months. If there is more than one
|
Keep backups for the last ``<N>`` months. If there is more than one
|
||||||
backup for a single month, only the latest is kept.
|
backup for a single month, only the latest is retained.
|
||||||
|
|
||||||
``keep-yearly <N>``
|
``keep-yearly <N>``
|
||||||
Keep backups for the last ``<N>`` years. If there is more than one
|
Keep backups for the last ``<N>`` years. If there is more than one
|
||||||
backup for a single year, only the latest is kept.
|
backup for a single year, only the latest is retained.
|
||||||
|
|
||||||
The retention options are processed in the order given above. Each option
|
The retention options are processed in the order given above. Each option
|
||||||
only covers backups within its time period. The next option does not take care
|
only covers backups within its time period. The next option does not take care
|
||||||
of already covered backups. It will only consider older backups.
|
of already covered backups. It will only consider older backups.
|
||||||
|
|
||||||
Unfinished and incomplete backups will be removed by the prune command unless
|
Unfinished and incomplete backups will be removed by the prune command, unless
|
||||||
they are newer than the last successful backup. In this case, the last failed
|
they are newer than the last successful backup. In this case, the last failed
|
||||||
backup is retained.
|
backup is retained.
|
||||||
|
|
||||||
@ -48,7 +48,7 @@ Prune Simulator
|
|||||||
^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
You can use the built-in `prune simulator <prune-simulator/index.html>`_
|
You can use the built-in `prune simulator <prune-simulator/index.html>`_
|
||||||
to explore the effect of different retetion options with various backup
|
to explore the effect of different retention options with various backup
|
||||||
schedules.
|
schedules.
|
||||||
|
|
||||||
Manual Pruning
|
Manual Pruning
|
||||||
@ -59,10 +59,10 @@ Manual Pruning
|
|||||||
:align: right
|
:align: right
|
||||||
:alt: Prune and garbage collection options
|
:alt: Prune and garbage collection options
|
||||||
|
|
||||||
To access pruning functionality for a specific backup group, you can use the
|
To manually prune a specific backup group, you can use
|
||||||
prune command line option discussed in :ref:`backup-pruning`, or navigate to
|
``proxmox-backup-client``'s ``prune`` subcommand, discussed in
|
||||||
the **Content** tab of the datastore and click the scissors icon in the
|
:ref:`backup-pruning`, or navigate to the **Content** tab of the datastore and
|
||||||
**Actions** column of the relevant backup group.
|
click the scissors icon in the **Actions** column of the relevant backup group.
|
||||||
|
|
||||||
Prune Schedules
|
Prune Schedules
|
||||||
^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^
|
||||||
@ -81,7 +81,7 @@ Retention Settings Example
|
|||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
The backup frequency and retention of old backups may depend on how often data
|
The backup frequency and retention of old backups may depend on how often data
|
||||||
changes, and how important an older state may be, in a specific work load.
|
changes and how important an older state may be in a specific workload.
|
||||||
When backups act as a company's document archive, there may also be legal
|
When backups act as a company's document archive, there may also be legal
|
||||||
requirements for how long backup snapshots must be kept.
|
requirements for how long backup snapshots must be kept.
|
||||||
|
|
||||||
@ -125,8 +125,8 @@ start garbage collection on an entire datastore and the ``status`` subcommand to
|
|||||||
see attributes relating to the :ref:`garbage collection <client_garbage-collection>`.
|
see attributes relating to the :ref:`garbage collection <client_garbage-collection>`.
|
||||||
|
|
||||||
This functionality can also be accessed in the GUI, by navigating to **Prune &
|
This functionality can also be accessed in the GUI, by navigating to **Prune &
|
||||||
GC** from the top panel. From here, you can edit the schedule at which garbage
|
GC** from the top panel of a datastore. From here, you can edit the schedule at
|
||||||
collection runs and manually start the operation.
|
which garbage collection runs and manually start the operation.
|
||||||
|
|
||||||
|
|
||||||
.. _maintenance_verification:
|
.. _maintenance_verification:
|
||||||
@ -139,13 +139,13 @@ Verification
|
|||||||
:align: right
|
:align: right
|
||||||
:alt: Adding a verify job
|
:alt: Adding a verify job
|
||||||
|
|
||||||
Proxmox Backup offers various verification options to ensure that backup data is
|
Proxmox Backup Server offers various verification options to ensure that backup
|
||||||
intact. Verification is generally carried out through the creation of verify
|
data is intact. Verification is generally carried out through the creation of
|
||||||
jobs. These are scheduled tasks that run verification at a given interval (see
|
verify jobs. These are scheduled tasks that run verification at a given interval
|
||||||
:ref:`calendar-event-scheduling`). With these, you can set whether already verified
|
(see :ref:`calendar-event-scheduling`). With these, you can also set whether
|
||||||
snapshots are ignored, as well as set a time period, after which verified jobs
|
already verified snapshots are ignored, as well as set a time period, after
|
||||||
are checked again. The interface for creating verify jobs can be found under the
|
which snapshots are checked again. The interface for creating verify jobs can be
|
||||||
**Verify Jobs** tab of the datastore.
|
found under the **Verify Jobs** tab of the datastore.
|
||||||
|
|
||||||
.. Note:: It is recommended that you reverify all backups at least monthly, even
|
.. Note:: It is recommended that you reverify all backups at least monthly, even
|
||||||
if a previous verification was successful. This is because physical drives
|
if a previous verification was successful. This is because physical drives
|
||||||
@ -158,9 +158,9 @@ are checked again. The interface for creating verify jobs can be found under the
|
|||||||
data.
|
data.
|
||||||
|
|
||||||
Aside from using verify jobs, you can also run verification manually on entire
|
Aside from using verify jobs, you can also run verification manually on entire
|
||||||
datastores, backup groups, or snapshots. To do this, navigate to the **Content**
|
datastores, backup groups or snapshots. To do this, navigate to the **Content**
|
||||||
tab of the datastore and either click *Verify All*, or select the *V.* icon from
|
tab of the datastore and either click *Verify All* or select the *V.* icon from
|
||||||
the *Actions* column in the table.
|
the **Actions** column in the table.
|
||||||
|
|
||||||
.. _maintenance_notification:
|
.. _maintenance_notification:
|
||||||
|
|
||||||
@ -170,8 +170,8 @@ Notifications
|
|||||||
Proxmox Backup Server can send you notification emails about automatically
|
Proxmox Backup Server can send you notification emails about automatically
|
||||||
scheduled verification, garbage-collection and synchronization tasks results.
|
scheduled verification, garbage-collection and synchronization task results.
|
||||||
|
|
||||||
By default, notifications are send to the email address configured for the
|
By default, notifications are sent to the email address configured for the
|
||||||
`root@pam` user. You can set that user for each datastore.
|
`root@pam` user. You can instead set this user for each datastore.
|
||||||
|
|
||||||
You can also change the level of notification received per task type, the
|
You can also change the level of notification received per task type, the
|
||||||
following options are available:
|
following options are available:
|
||||||
@ -179,6 +179,6 @@ following options are available:
|
|||||||
* Always: send a notification for any scheduled task, independent of the
|
* Always: send a notification for any scheduled task, independent of the
|
||||||
outcome
|
outcome
|
||||||
|
|
||||||
* Errors: send a notification for any scheduled task resulting in an error
|
* Errors: send a notification for any scheduled task that results in an error
|
||||||
|
|
||||||
* Never: do not send any notification at all
|
* Never: do not send any notification at all
|
||||||
|
@ -17,8 +17,8 @@ configuration information for remotes is stored in the file
|
|||||||
:align: right
|
:align: right
|
||||||
:alt: Add a remote
|
:alt: Add a remote
|
||||||
|
|
||||||
To add a remote, you need its hostname or IP, a userid and password on the
|
To add a remote, you need its hostname or IP address, a userid and password on
|
||||||
remote, and its certificate fingerprint. To get the fingerprint, use the
|
the remote, and its certificate fingerprint. To get the fingerprint, use the
|
||||||
``proxmox-backup-manager cert info`` command on the remote, or navigate to
|
``proxmox-backup-manager cert info`` command on the remote, or navigate to
|
||||||
**Dashboard** in the remote's web interface and select **Show Fingerprint**.
|
**Dashboard** in the remote's web interface and select **Show Fingerprint**.
|
||||||
|
|
||||||
@ -60,12 +60,13 @@ Sync Jobs
|
|||||||
|
|
||||||
Sync jobs are configured to pull the contents of a datastore on a **Remote** to
|
Sync jobs are configured to pull the contents of a datastore on a **Remote** to
|
||||||
a local datastore. You can manage sync jobs in the web interface, from the
|
a local datastore. You can manage sync jobs in the web interface, from the
|
||||||
**Sync Jobs** tab of the datastore which you'd like to set one up for, or using
|
**Sync Jobs** tab of the **Datastore** panel or from that of the Datastore
|
||||||
the ``proxmox-backup-manager sync-job`` command. The configuration information
|
itself. Alternatively, you can manage them with the ``proxmox-backup-manager
|
||||||
for sync jobs is stored at ``/etc/proxmox-backup/sync.cfg``. To create a new
|
sync-job`` command. The configuration information for sync jobs is stored at
|
||||||
sync job, click the add button in the GUI, or use the ``create`` subcommand.
|
``/etc/proxmox-backup/sync.cfg``. To create a new sync job, click the add button
|
||||||
After creating a sync job, you can either start it manually from the GUI or
|
in the GUI, or use the ``create`` subcommand. After creating a sync job, you can
|
||||||
provide it with a schedule (see :ref:`calendar-event-scheduling`) to run regularly.
|
either start it manually from the GUI or provide it with a schedule (see
|
||||||
|
:ref:`calendar-event-scheduling`) to run regularly.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -79,14 +80,14 @@ provide it with a schedule (see :ref:`calendar-event-scheduling`) to run regular
|
|||||||
└────────────┴───────┴────────┴──────────────┴───────────┴─────────┘
|
└────────────┴───────┴────────┴──────────────┴───────────┴─────────┘
|
||||||
# proxmox-backup-manager sync-job remove pbs2-local
|
# proxmox-backup-manager sync-job remove pbs2-local
|
||||||
|
|
||||||
For setting up sync jobs, the configuring user needs the following permissions:
|
To set up sync jobs, the configuring user needs the following permissions:
|
||||||
|
|
||||||
#. ``Remote.Read`` on the ``/remote/{remote}/{remote-store}`` path
|
#. ``Remote.Read`` on the ``/remote/{remote}/{remote-store}`` path
|
||||||
#. at least ``Datastore.Backup`` on the local target datastore (``/datastore/{store}``)
|
#. At least ``Datastore.Backup`` on the local target datastore (``/datastore/{store}``)
|
||||||
|
|
||||||
If the ``remove-vanished`` option is set, ``Datastore.Prune`` is required on
|
If the ``remove-vanished`` option is set, ``Datastore.Prune`` is required on
|
||||||
the local datastore as well. If the ``owner`` option is not set (defaulting to
|
the local datastore as well. If the ``owner`` option is not set (defaulting to
|
||||||
``root@pam``) or set to something other than the configuring user,
|
``root@pam``) or is set to something other than the configuring user,
|
||||||
``Datastore.Modify`` is required as well.
|
``Datastore.Modify`` is required as well.
|
||||||
|
|
||||||
.. note:: A sync job can only sync backup groups that the configured remote's
|
.. note:: A sync job can only sync backup groups that the configured remote's
|
||||||
|
@ -82,7 +82,8 @@ is:
|
|||||||
.. note:: This command and corresponding GUI button rely on the ``ifreload``
|
.. note:: This command and corresponding GUI button rely on the ``ifreload``
|
||||||
command, from the package ``ifupdown2``. This package is included within the
|
command, from the package ``ifupdown2``. This package is included within the
|
||||||
Proxmox Backup Server installation, however, you may have to install it yourself,
|
Proxmox Backup Server installation, however, you may have to install it yourself,
|
||||||
if you have installed Proxmox Backup Server on top of Debian or Proxmox VE.
|
if you have installed Proxmox Backup Server on top of Debian or a Proxmox VE
|
||||||
|
version prior to 7.
|
||||||
|
|
||||||
You can also configure DNS settings, from the **DNS** section
|
You can also configure DNS settings, from the **DNS** section
|
||||||
of **Configuration** or by using the ``dns`` subcommand of
|
of **Configuration** or by using the ``dns`` subcommand of
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
This daemon exposes the whole Proxmox Backup Server API on TCP port
|
This daemon exposes the whole Proxmox Backup Server API on TCP port
|
||||||
8007 using HTTPS. It runs as user ``backup`` and has very limited
|
8007 using HTTPS. It runs as user ``backup`` and has very limited
|
||||||
permissions. Operation requiring more permissions are forwarded to
|
permissions. Operations requiring more permissions are forwarded to
|
||||||
the local ``proxmox-backup`` service.
|
the local ``proxmox-backup`` service.
|
||||||
|
|
||||||
|
@ -3,8 +3,8 @@
|
|||||||
`Proxmox VE`_ Integration
|
`Proxmox VE`_ Integration
|
||||||
-------------------------
|
-------------------------
|
||||||
|
|
||||||
A Proxmox Backup Server can be integrated into a Proxmox VE setup by adding the
|
Proxmox Backup Server can be integrated into a Proxmox VE standalone or cluster
|
||||||
former as a storage in a Proxmox VE standalone or cluster setup.
|
setup, by adding it as a storage in Proxmox VE.
|
||||||
|
|
||||||
See also the `Proxmox VE Storage - Proxmox Backup Server
|
See also the `Proxmox VE Storage - Proxmox Backup Server
|
||||||
<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#storage_pbs>`_ section
|
<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#storage_pbs>`_ section
|
||||||
@ -14,8 +14,8 @@ of the Proxmox VE Administration Guide for Proxmox VE specific documentation.
|
|||||||
Using the Proxmox VE Web-Interface
|
Using the Proxmox VE Web-Interface
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Proxmox VE has native API and web-interface integration of Proxmox Backup
|
Proxmox VE has native API and web interface integration of Proxmox Backup
|
||||||
Server since the `Proxmox VE 6.3 release
|
Server as of `Proxmox VE 6.3
|
||||||
<https://pve.proxmox.com/wiki/Roadmap#Proxmox_VE_6.3>`_.
|
<https://pve.proxmox.com/wiki/Roadmap#Proxmox_VE_6.3>`_.
|
||||||
|
|
||||||
A Proxmox Backup Server can be added under ``Datacenter -> Storage``.
|
A Proxmox Backup Server can be added under ``Datacenter -> Storage``.
|
||||||
@ -24,8 +24,8 @@ Using the Proxmox VE Command-Line
|
|||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
You need to define a new storage with type 'pbs' on your `Proxmox VE`_
|
You need to define a new storage with type 'pbs' on your `Proxmox VE`_
|
||||||
node. The following example uses ``store2`` as storage name, and
|
node. The following example uses ``store2`` as the storage's name, and
|
||||||
assumes the server address is ``localhost``, and you want to connect
|
assumes the server address is ``localhost`` and you want to connect
|
||||||
as ``user1@pbs``.
|
as ``user1@pbs``.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -33,7 +33,7 @@ as ``user1@pbs``.
|
|||||||
# pvesm add pbs store2 --server localhost --datastore store2
|
# pvesm add pbs store2 --server localhost --datastore store2
|
||||||
# pvesm set store2 --username user1@pbs --password <secret>
|
# pvesm set store2 --username user1@pbs --password <secret>
|
||||||
|
|
||||||
.. note:: If you would rather not pass your password as plain text, you can pass
|
.. note:: If you would rather not enter your password as plain text, you can pass
|
||||||
the ``--password`` parameter, without any arguments. This will cause the
|
the ``--password`` parameter, without any arguments. This will cause the
|
||||||
program to prompt you for a password upon entering the command.
|
program to prompt you for a password upon entering the command.
|
||||||
|
|
||||||
@ -53,7 +53,7 @@ relationship:
|
|||||||
|
|
||||||
# pvesm set store2 --fingerprint 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
|
# pvesm set store2 --fingerprint 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
|
||||||
|
|
||||||
After that you should be able to see storage status with:
|
After that, you should be able to view storage status with:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
|
@ -1,12 +1,12 @@
|
|||||||
``pxar`` is a command line utility to create and manipulate archives in the
|
``pxar`` is a command line utility for creating and manipulating archives in the
|
||||||
:ref:`pxar-format`.
|
:ref:`pxar-format`.
|
||||||
It is inspired by `casync file archive format
|
It is inspired by `casync file archive format
|
||||||
<http://0pointer.net/blog/casync-a-tool-for-distributing-file-system-images.html>`_,
|
<http://0pointer.net/blog/casync-a-tool-for-distributing-file-system-images.html>`_,
|
||||||
which caters to a similar use-case.
|
which caters to a similar use-case.
|
||||||
The ``.pxar`` format is adapted to fulfill the specific needs of the Proxmox
|
The ``.pxar`` format is adapted to fulfill the specific needs of the Proxmox
|
||||||
Backup Server, for example, efficient storage of hardlinks.
|
Backup Server, for example, efficient storage of hard links.
|
||||||
The format is designed to reduce storage space needed on the server by achieving
|
The format is designed to reduce the required storage on the server by
|
||||||
a high level of deduplication.
|
achieving a high level of deduplication.
|
||||||
|
|
||||||
Creating an Archive
|
Creating an Archive
|
||||||
^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^
|
||||||
@ -24,10 +24,10 @@ This will create a new archive called ``archive.pxar`` with the contents of the
|
|||||||
the same name is already present in the target folder, the creation will
|
the same name is already present in the target folder, the creation will
|
||||||
fail.
|
fail.
|
||||||
|
|
||||||
By default, ``pxar`` will skip certain mountpoints and will not follow device
|
By default, ``pxar`` will skip certain mount points and will not follow device
|
||||||
boundaries. This design decision is based on the primary use case of creating
|
boundaries. This design decision is based on the primary use case of creating
|
||||||
archives for backups. It makes sense to not back up the contents of certain
|
archives for backups. It makes sense to ignore the contents of certain
|
||||||
temporary or system specific files.
|
temporary or system specific files in a backup.
|
||||||
To alter this behavior and follow device boundaries, use the
|
To alter this behavior and follow device boundaries, use the
|
||||||
``--all-file-systems`` flag.
|
``--all-file-systems`` flag.
|
||||||
|
|
||||||
@ -41,40 +41,38 @@ by running:
|
|||||||
|
|
||||||
# pxar create archive.pxar /path/to/source --exclude '**/*.txt'
|
# pxar create archive.pxar /path/to/source --exclude '**/*.txt'
|
||||||
|
|
||||||
Be aware that the shell itself will try to expand all of the glob patterns before
|
Be aware that the shell itself will try to expand glob patterns before invoking
|
||||||
invoking ``pxar``.
|
``pxar``. In order to avoid this, all globs have to be quoted correctly.
|
||||||
In order to avoid this, all globs have to be quoted correctly.
|
|
||||||
|
|
||||||
It is possible to pass the ``--exclude`` parameter multiple times, in order to
|
It is possible to pass the ``--exclude`` parameter multiple times, in order to
|
||||||
match more than one pattern. This allows you to use more complex
|
match more than one pattern. This allows you to use more complex
|
||||||
file exclusion/inclusion behavior. However, it is recommended to use
|
file inclusion/exclusion behavior. However, it is recommended to use
|
||||||
``.pxarexclude`` files instead for such cases.
|
``.pxarexclude`` files instead for such cases.
|
||||||
|
|
||||||
For example you might want to exclude all ``.txt`` files except for a specific
|
For example you might want to exclude all ``.txt`` files except a specific
|
||||||
one from the archive. This is achieved via the negated match pattern, prefixed
|
one from the archive. This would be achieved via the negated match pattern,
|
||||||
by ``!``.
|
prefixed by ``!``. All the glob patterns are relative to the ``source``
|
||||||
All the glob patterns are relative to the ``source`` directory.
|
directory.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# pxar create archive.pxar /path/to/source --exclude '**/*.txt' --exclude '!/folder/file.txt'
|
# pxar create archive.pxar /path/to/source --exclude '**/*.txt' --exclude '!/folder/file.txt'
|
||||||
|
|
||||||
.. NOTE:: The order of the glob match patterns matters as later ones override
|
.. NOTE:: The order of the glob match patterns matters, as later ones override
|
||||||
previous ones. Permutations of the same patterns lead to different results.
|
earlier ones. Permutations of the same patterns lead to different results.
|
||||||
|
|
||||||
``pxar`` will store the list of glob match patterns passed as parameters via the
|
``pxar`` will store the list of glob match patterns passed as parameters via the
|
||||||
command line, in a file called ``.pxarexclude-cli`` at the root of
|
command line, in a file called ``.pxarexclude-cli``, at the root of the archive.
|
||||||
the archive.
|
|
||||||
If a file with this name is already present in the source folder during archive
|
If a file with this name is already present in the source folder during archive
|
||||||
creation, this file is not included in the archive and the file containing the
|
creation, this file is not included in the archive, and the file containing the
|
||||||
new patterns is added to the archive instead, the original file is not altered.
|
new patterns is added to the archive instead. The original file is not altered.
|
||||||
|
|
||||||
A more convenient and persistent way to exclude files from the archive is by
|
A more convenient and persistent way to exclude files from the archive is by
|
||||||
placing the glob match patterns in ``.pxarexclude`` files.
|
placing the glob match patterns in ``.pxarexclude`` files.
|
||||||
It is possible to create and place these files in any directory of the filesystem
|
It is possible to create and place these files in any directory of the filesystem
|
||||||
tree.
|
tree.
|
||||||
These files must contain one pattern per line, again later patterns win over
|
These files must contain one pattern per line, and later patterns override
|
||||||
previous ones.
|
earlier ones.
|
||||||
The patterns control file exclusions of files present within the given directory
|
The patterns control file exclusions of files present within the given directory
|
||||||
or further below it in the tree.
|
or further below it in the tree.
|
||||||
The behavior is the same as described in :ref:`client_creating_backups`.
|
The behavior is the same as described in :ref:`client_creating_backups`.
|
||||||
@ -89,7 +87,7 @@ with the following command:
|
|||||||
|
|
||||||
# pxar extract archive.pxar /path/to/target
|
# pxar extract archive.pxar /path/to/target
|
||||||
|
|
||||||
If no target is provided, the content of the archive is extracted to the current
|
If no target is provided, the contents of the archive are extracted to the current
|
||||||
working directory.
|
working directory.
|
||||||
|
|
||||||
In order to restore only parts of an archive, single files, and/or folders,
|
In order to restore only parts of an archive, single files, and/or folders,
|
||||||
@ -116,13 +114,13 @@ run the following command:
|
|||||||
# pxar list archive.pxar
|
# pxar list archive.pxar
|
||||||
|
|
||||||
This displays the full path of each file or directory with respect to the
|
This displays the full path of each file or directory with respect to the
|
||||||
archives root.
|
archive's root.
|
||||||
|
|
||||||
Mounting an Archive
|
Mounting an Archive
|
||||||
^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
``pxar`` allows you to mount and inspect the contents of an archive via _`FUSE`.
|
``pxar`` allows you to mount and inspect the contents of an archive via _`FUSE`.
|
||||||
In order to mount an archive named ``archive.pxar`` to the mountpoint ``/mnt``,
|
In order to mount an archive named ``archive.pxar`` to the mount point ``/mnt``,
|
||||||
run the command:
|
run the command:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -130,7 +128,7 @@ run the command:
|
|||||||
# pxar mount archive.pxar /mnt
|
# pxar mount archive.pxar /mnt
|
||||||
|
|
||||||
Once the archive is mounted, you can access its content under the given
|
Once the archive is mounted, you can access its content under the given
|
||||||
mountpoint.
|
mount point.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
|
@ -15,7 +15,7 @@ accessed using the ``disk`` subcommand. This subcommand allows you to initialize
|
|||||||
disks, create various filesystems, and get information about the disks.
|
disks, create various filesystems, and get information about the disks.
|
||||||
|
|
||||||
To view the disks connected to the system, navigate to **Administration ->
|
To view the disks connected to the system, navigate to **Administration ->
|
||||||
Disks** in the web interface or use the ``list`` subcommand of
|
Storage/Disks** in the web interface or use the ``list`` subcommand of
|
||||||
``disk``:
|
``disk``:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -42,9 +42,9 @@ To initialize a disk with a new GPT, use the ``initialize`` subcommand:
|
|||||||
:alt: Create a directory
|
:alt: Create a directory
|
||||||
|
|
||||||
You can create an ``ext4`` or ``xfs`` filesystem on a disk using ``fs
|
You can create an ``ext4`` or ``xfs`` filesystem on a disk using ``fs
|
||||||
create``, or by navigating to **Administration -> Disks -> Directory** in the
|
create``, or by navigating to **Administration -> Storage/Disks -> Directory**
|
||||||
web interface and creating one from there. The following command creates an
|
in the web interface and creating one from there. The following command creates
|
||||||
``ext4`` filesystem and passes the ``--add-datastore`` parameter, in order to
|
an ``ext4`` filesystem and passes the ``--add-datastore`` parameter, in order to
|
||||||
automatically create a datastore on the disk (in this case ``sdd``). This will
|
automatically create a datastore on the disk (in this case ``sdd``). This will
|
||||||
create a datastore at the location ``/mnt/datastore/store1``:
|
create a datastore at the location ``/mnt/datastore/store1``:
|
||||||
|
|
||||||
@ -57,7 +57,7 @@ create a datastore at the location ``/mnt/datastore/store1``:
|
|||||||
:alt: Create ZFS
|
:alt: Create ZFS
|
||||||
|
|
||||||
You can also create a ``zpool`` with various raid levels from **Administration
|
You can also create a ``zpool`` with various raid levels from **Administration
|
||||||
-> Disks -> Zpool** in the web interface, or by using ``zpool create``. The command
|
-> Storage/Disks -> ZFS** in the web interface, or by using ``zpool create``. The command
|
||||||
below creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and
|
below creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and
|
||||||
mounts it under ``/mnt/datastore/zpool1``:
|
mounts it under ``/mnt/datastore/zpool1``:
|
||||||
|
|
||||||
@ -102,7 +102,7 @@ is stored in the file ``/etc/proxmox-backup/datastore.cfg``.
|
|||||||
subdirectories per directory. That number comes from the 2\ :sup:`16`
|
subdirectories per directory. That number comes from the 2\ :sup:`16`
|
||||||
pre-created chunk namespace directories, and the ``.`` and ``..`` default
|
pre-created chunk namespace directories, and the ``.`` and ``..`` default
|
||||||
directory entries. This requirement excludes certain filesystems and
|
directory entries. This requirement excludes certain filesystems and
|
||||||
filesystem configuration from being supported for a datastore. For example,
|
filesystem configurations from being supported for a datastore. For example,
|
||||||
``ext3`` as a whole or ``ext4`` with the ``dir_nlink`` feature manually disabled.
|
``ext3`` as a whole or ``ext4`` with the ``dir_nlink`` feature manually disabled.
|
||||||
|
|
||||||
|
|
||||||
@ -113,14 +113,15 @@ Datastore Configuration
|
|||||||
:align: right
|
:align: right
|
||||||
:alt: Datastore Overview
|
:alt: Datastore Overview
|
||||||
|
|
||||||
You can configure multiple datastores. Minimum one datastore needs to be
|
You can configure multiple datastores. A minimum of one datastore needs to be
|
||||||
configured. The datastore is identified by a simple *name* and points to a
|
configured. The datastore is identified by a simple *name* and points to a
|
||||||
directory on the filesystem. Each datastore also has associated retention
|
directory on the filesystem. Each datastore also has associated retention
|
||||||
settings of how many backup snapshots for each interval of ``hourly``,
|
settings of how many backup snapshots for each interval of ``hourly``,
|
||||||
``daily``, ``weekly``, ``monthly``, ``yearly`` as well as a time-independent
|
``daily``, ``weekly``, ``monthly``, ``yearly`` as well as a time-independent
|
||||||
number of backups to keep in that store. :ref:`backup-pruning` and
|
number of backups to keep in that store. :ref:`backup-pruning` and
|
||||||
:ref:`garbage collection <client_garbage-collection>` can also be configured to run
|
:ref:`garbage collection <client_garbage-collection>` can also be configured to
|
||||||
periodically based on a configured schedule (see :ref:`calendar-event-scheduling`) per datastore.
|
run periodically, based on a configured schedule (see
|
||||||
|
:ref:`calendar-event-scheduling`) per datastore.
|
||||||
|
|
||||||
|
|
||||||
.. _storage_datastore_create:
|
.. _storage_datastore_create:
|
||||||
@ -146,7 +147,8 @@ window:
|
|||||||
* *Comment* can be used to add some contextual information to the datastore.
|
* *Comment* can be used to add some contextual information to the datastore.
|
||||||
|
|
||||||
Alternatively you can create a new datastore from the command line. The
|
Alternatively you can create a new datastore from the command line. The
|
||||||
following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
|
following command creates a new datastore called ``store1`` on
|
||||||
|
:file:`/backup/disk1/store1`
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -156,7 +158,7 @@ following command creates a new datastore called ``store1`` on :file:`/backup/di
|
|||||||
Managing Datastores
|
Managing Datastores
|
||||||
^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
To list existing datastores from the command line run:
|
To list existing datastores from the command line, run:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -216,8 +218,9 @@ After creating a datastore, the following default layout will appear:
|
|||||||
|
|
||||||
`.lock` is an empty file used for process locking.
|
`.lock` is an empty file used for process locking.
|
||||||
|
|
||||||
The `.chunks` directory contains folders, starting from `0000` and taking hexadecimal values until `ffff`. These
|
The `.chunks` directory contains folders, starting from `0000` and increasing in
|
||||||
directories will store the chunked data after a backup operation has been executed.
|
hexadecimal values until `ffff`. These directories will store the chunked data,
|
||||||
|
categorized by checksum, after a backup operation has been executed.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
|
@ -4,8 +4,8 @@ Host System Administration
|
|||||||
==========================
|
==========================
|
||||||
|
|
||||||
`Proxmox Backup`_ is based on the famous Debian_ Linux
|
`Proxmox Backup`_ is based on the famous Debian_ Linux
|
||||||
distribution. That means that you have access to the whole world of
|
distribution. This means that you have access to the entire range of
|
||||||
Debian packages, and the base system is well documented. The `Debian
|
Debian packages, and that the base system is well documented. The `Debian
|
||||||
Administrator's Handbook`_ is available online, and provides a
|
Administrator's Handbook`_ is available online, and provides a
|
||||||
comprehensive introduction to the Debian operating system.
|
comprehensive introduction to the Debian operating system.
|
||||||
|
|
||||||
@ -17,11 +17,11 @@ updates to some Debian packages when necessary.
|
|||||||
|
|
||||||
We also deliver a specially optimized Linux kernel, where we enable
|
We also deliver a specially optimized Linux kernel, where we enable
|
||||||
all required virtualization and container features. That kernel
|
all required virtualization and container features. That kernel
|
||||||
includes drivers for ZFS_, and several hardware drivers. For example,
|
includes drivers for ZFS_, as well as several hardware drivers. For example,
|
||||||
we ship Intel network card drivers to support their newest hardware.
|
we ship Intel network card drivers to support their newest hardware.
|
||||||
|
|
||||||
The following sections will concentrate on backup related topics. They
|
The following sections will concentrate on backup related topics. They
|
||||||
either explain things which are different on `Proxmox Backup`_, or
|
will explain things which are different on `Proxmox Backup`_, or
|
||||||
tasks which are commonly used on `Proxmox Backup`_. For other topics,
|
tasks which are commonly used on `Proxmox Backup`_. For other topics,
|
||||||
please refer to the standard Debian documentation.
|
please refer to the standard Debian documentation.
|
||||||
|
|
||||||
|
@ -8,7 +8,7 @@ Datastores
|
|||||||
|
|
||||||
A Datastore is the logical place where :ref:`Backup Snapshots
|
A Datastore is the logical place where :ref:`Backup Snapshots
|
||||||
<term_backup_snapshot>` and their chunks are stored. Snapshots consist of a
|
<term_backup_snapshot>` and their chunks are stored. Snapshots consist of a
|
||||||
manifest, blobs, dynamic- and fixed-indexes (see :ref:`terms`), and are
|
manifest, blobs, and dynamic- and fixed-indexes (see :ref:`terms`), and are
|
||||||
stored in the following directory structure:
|
stored in the following directory structure:
|
||||||
|
|
||||||
<datastore-root>/<type>/<id>/<time>/
|
<datastore-root>/<type>/<id>/<time>/
|
||||||
@ -32,8 +32,8 @@ The chunks of a datastore are found in
|
|||||||
|
|
||||||
<datastore-root>/.chunks/
|
<datastore-root>/.chunks/
|
||||||
|
|
||||||
This chunk directory is further subdivided by the first four byte of the chunks
|
This chunk directory is further subdivided by the first four bytes of the
|
||||||
checksum, so the chunk with the checksum
|
chunk's checksum, so a chunk with the checksum
|
||||||
|
|
||||||
a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b
|
a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b
|
||||||
|
|
||||||
@ -47,7 +47,7 @@ per directory can be bad for file system performance.
|
|||||||
These chunk directories ('0000'-'ffff') will be preallocated when a datastore
|
These chunk directories ('0000'-'ffff') will be preallocated when a datastore
|
||||||
is created.
|
is created.
|
||||||
|
|
||||||
Fixed-sized Chunks
|
Fixed-Sized Chunks
|
||||||
^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
For block based backups (like VMs), fixed-sized chunks are used. The content
|
For block based backups (like VMs), fixed-sized chunks are used. The content
|
||||||
@ -58,10 +58,10 @@ often tries to allocate files in contiguous pieces, so new files get new
|
|||||||
blocks, and changing existing files changes only their own blocks.
|
blocks, and changing existing files changes only their own blocks.
|
||||||
|
|
||||||
As an optimization, VMs in `Proxmox VE`_ can make use of 'dirty bitmaps', which
|
As an optimization, VMs in `Proxmox VE`_ can make use of 'dirty bitmaps', which
|
||||||
can track the changed blocks of an image. Since these bitmap are also a
|
can track the changed blocks of an image. Since these bitmaps are also a
|
||||||
representation of the image split into chunks, there is a direct relation
|
representation of the image split into chunks, there is a direct relation
|
||||||
between dirty blocks of the image and chunks which need to get uploaded, so
|
between the dirty blocks of the image and chunks which need to be uploaded.
|
||||||
only modified chunks of the disk have to be uploaded for a backup.
|
Thus, only modified chunks of the disk need to be uploaded to a backup.
|
||||||
|
|
||||||
Since the image is always split into chunks of the same size, unchanged blocks
|
Since the image is always split into chunks of the same size, unchanged blocks
|
||||||
will result in identical checksums for those chunks, so such chunks do not need
|
will result in identical checksums for those chunks, so such chunks do not need
|
||||||
@ -71,24 +71,24 @@ changed blocks.
|
|||||||
For consistency, `Proxmox VE`_ uses a QEMU internal snapshot mechanism, that
|
For consistency, `Proxmox VE`_ uses a QEMU internal snapshot mechanism, that
|
||||||
does not rely on storage snapshots either.
|
does not rely on storage snapshots either.
|
||||||
|
|
||||||
Dynamically sized Chunks
|
Dynamically Sized Chunks
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
If one does not want to backup block-based systems but rather file-based
|
When working with file-based systems rather than block-based systems,
|
||||||
systems, using fixed-sized chunks is not a good idea, since every time a file
|
using fixed-sized chunks is not a good idea, since every time a file
|
||||||
would change in size, the remaining data gets shifted around and this would
|
would change in size, the remaining data would be shifted around,
|
||||||
result in many chunks changing, reducing the amount of deduplication.
|
resulting in many chunks changing and the amount of deduplication being reduced.
|
||||||
|
|
||||||
To improve this, `Proxmox Backup`_ Server uses dynamically sized chunks
|
To improve this, `Proxmox Backup`_ Server uses dynamically sized chunks
|
||||||
instead. Instead of splitting an image into fixed sizes, it first generates a
|
instead. Instead of splitting an image into fixed sizes, it first generates a
|
||||||
consistent file archive (:ref:`pxar <pxar-format>`) and uses a rolling hash
|
consistent file archive (:ref:`pxar <pxar-format>`) and uses a rolling hash
|
||||||
over this on-the-fly generated archive to calculate chunk boundaries.
|
over this on-the-fly generated archive to calculate chunk boundaries.
|
||||||
|
|
||||||
We use a variant of Buzhash which is a cyclic polynomial algorithm. It works
|
We use a variant of Buzhash which is a cyclic polynomial algorithm. It works
|
||||||
by continuously calculating a checksum while iterating over the data, and on
|
by continuously calculating a checksum while iterating over the data, and on
|
||||||
certain conditions it triggers a hash boundary.
|
certain conditions, it triggers a hash boundary.
|
||||||
|
|
||||||
Assuming that most files of the system that is to be backed up have not
|
Assuming that most files on the system that is to be backed up have not
|
||||||
changed, eventually the algorithm triggers the boundary on the same data as a
|
changed, eventually the algorithm triggers the boundary on the same data as a
|
||||||
previous backup, resulting in chunks that can be reused.
|
previous backup, resulting in chunks that can be reused.
|
||||||
|
|
||||||
@ -100,8 +100,8 @@ can be encrypted, and they are handled in a slightly different manner than
|
|||||||
normal chunks.
|
normal chunks.
|
||||||
|
|
||||||
The hashes of encrypted chunks are calculated not with the actual (encrypted)
|
The hashes of encrypted chunks are calculated not with the actual (encrypted)
|
||||||
chunk content, but with the plain-text content concatenated with the encryption
|
chunk content, but with the plain-text content, concatenated with the encryption
|
||||||
key. This way, two chunks of the same data encrypted with different keys
|
key. This way, two chunks with the same data but encrypted with different keys
|
||||||
generate two different checksums and no collisions occur for multiple
|
generate two different checksums and no collisions occur for multiple
|
||||||
encryption keys.
|
encryption keys.
|
||||||
|
|
||||||
@ -112,14 +112,14 @@ the previous backup, do not need to be encrypted and uploaded.
|
|||||||
Caveats and Limitations
|
Caveats and Limitations
|
||||||
-----------------------
|
-----------------------
|
||||||
|
|
||||||
Notes on hash collisions
|
Notes on Hash Collisions
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
Every hashing algorithm has a chance to produce collisions, meaning two (or
|
Every hashing algorithm has a chance to produce collisions, meaning two (or
|
||||||
more) inputs generate the same checksum. For SHA-256, this chance is
|
more) inputs generate the same checksum. For SHA-256, this chance is
|
||||||
negligible. To calculate such a collision, one can use the ideas of the
|
negligible. To calculate the chances of such a collision, one can use the ideas
|
||||||
'birthday problem' from probability theory. For big numbers, this is actually
|
of the 'birthday problem' from probability theory. For big numbers, this is
|
||||||
infeasible to calculate with regular computers, but there is a good
|
actually unfeasible to calculate with regular computers, but there is a good
|
||||||
approximation:
|
approximation:
|
||||||
|
|
||||||
.. math::
|
.. math::
|
||||||
@ -127,7 +127,7 @@ approximation:
|
|||||||
p(n, d) = 1 - e^{-n^2/(2d)}
|
p(n, d) = 1 - e^{-n^2/(2d)}
|
||||||
|
|
||||||
Where `n` is the number of tries, and `d` is the number of possibilities.
|
Where `n` is the number of tries, and `d` is the number of possibilities.
|
||||||
For a concrete example lets assume a large datastore of 1 PiB, and an average
|
For a concrete example, let's assume a large datastore of 1 PiB and an average
|
||||||
chunk size of 4 MiB. That means :math:`n = 268435456` tries, and :math:`d =
|
chunk size of 4 MiB. That means :math:`n = 268435456` tries, and :math:`d =
|
||||||
2^{256}` possibilities. Inserting those values in the formula from earlier you
|
2^{256}` possibilities. Inserting those values in the formula from earlier you
|
||||||
will see that the probability of a collision in that scenario is:
|
will see that the probability of a collision in that scenario is:
|
||||||
@ -136,94 +136,96 @@ will see that the probability of a collision in that scenario is:
|
|||||||
|
|
||||||
3.1115 * 10^{-61}
|
3.1115 * 10^{-61}
|
||||||
|
|
||||||
For context, in a lottery game of guessing 6 out of 45, the chance to correctly
|
For context, in a lottery game of guessing 6 numbers out of 45, the chance to
|
||||||
guess all 6 numbers is only :math:`1.2277 * 10^{-7}`, that means the chance of
|
correctly guess all 6 numbers is only :math:`1.2277 * 10^{-7}`. This means the
|
||||||
a collision is about the same as winning 13 such lotto games *in a row*.
|
chance of a collision is about the same as winning 13 such lottery games *in a
|
||||||
|
row*.
|
||||||
|
|
||||||
In conclusion, it is extremely unlikely that such a collision would occur by
|
In conclusion, it is extremely unlikely that such a collision would occur by
|
||||||
accident in a normal datastore.
|
accident in a normal datastore.
|
||||||
|
|
||||||
Additionally, SHA-256 is prone to length extension attacks, but since there is
|
Additionally, SHA-256 is prone to length extension attacks, but since there is
|
||||||
an upper limit for how big the chunk are, this is not a problem, since a
|
an upper limit for how big the chunks are, this is not a problem, because a
|
||||||
potential attacker cannot arbitrarily add content to the data beyond that
|
potential attacker cannot arbitrarily add content to the data beyond that
|
||||||
limit.
|
limit.
|
||||||
|
|
||||||
File-based Backup
|
File-Based Backup
|
||||||
^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
Since dynamically sized chunks (for file-based backups) are created on a custom
|
Since dynamically sized chunks (for file-based backups) are created on a custom
|
||||||
archive format (pxar) and not over the files directly, there is no relation
|
archive format (pxar) and not over the files directly, there is no relation
|
||||||
between files and the chunks. This means that the Proxmox Backup client has to
|
between the files and chunks. This means that the Proxmox Backup Client has to
|
||||||
read all files again for every backup, otherwise it would not be possible to
|
read all files again for every backup, otherwise it would not be possible to
|
||||||
generate a consistent independent pxar archive where the original chunks can be
|
generate a consistent, independent pxar archive where the original chunks can be
|
||||||
reused. Note that there will be still only new or change chunks be uploaded.
|
reused. Note that in spite of this, only new or changed chunks will be uploaded.
|
||||||
|
|
||||||
Verification of encrypted chunks
|
Verification of Encrypted Chunks
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
For encrypted chunks, only the checksum of the original (plaintext) data is
|
For encrypted chunks, only the checksum of the original (plaintext) data is
|
||||||
available, making it impossible for the server (without the encryption key), to
|
available, making it impossible for the server (without the encryption key) to
|
||||||
verify its content against it. Instead only the CRC-32 checksum gets checked.
|
verify its content against it. Instead only the CRC-32 checksum gets checked.
|
||||||
|
|
||||||
Troubleshooting
|
Troubleshooting
|
||||||
---------------
|
---------------
|
||||||
|
|
||||||
Index files(.fidx, .didx) contain information about how to rebuild a file, more
|
Index files(*.fidx*, *.didx*) contain information about how to rebuild a file.
|
||||||
precisely, they contain an ordered list of references to the chunks the original
|
More precisely, they contain an ordered list of references to the chunks that
|
||||||
file was split up in. If there is something wrong with a snapshot it might be
|
the original file was split into. If there is something wrong with a snapshot,
|
||||||
useful to find out which chunks are referenced in this specific snapshot, and
|
it might be useful to find out which chunks are referenced in it, and check
|
||||||
check wheather all of them are present and intact. The command for getting the
|
whether they are present and intact. The ``proxmox-backup-debug`` command line
|
||||||
list of referenced chunks could look something like this:
|
tool can be used to inspect such files and recover their contents. For example,
|
||||||
|
to get a list of the referenced chunks of a *.fidx* index:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-debug inspect file drive-scsi0.img.fidx
|
# proxmox-backup-debug inspect file drive-scsi0.img.fidx
|
||||||
|
|
||||||
The same command can be used to look at .blob file, without ``--decode`` just
|
The same command can be used to inspect *.blob* files. Without the ``--decode``
|
||||||
the size and the encryption type, if any, is printed. If ``--decode`` is set the
|
parameter, just the size and the encryption type, if any, are printed. If
|
||||||
blob file is decoded into the specified file('-' will decode it directly into
|
``--decode`` is set, the blob file is decoded into the specified file ('-' will
|
||||||
stdout).
|
decode it directly to stdout).
|
||||||
|
|
||||||
|
The following example would print the decoded contents of
|
||||||
|
`qemu-server.conf.blob`. If the file you're trying to inspect is encrypted, a
|
||||||
|
path to the key file must be provided using ``--keyfile``.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-debug inspect file qemu-server.conf.blob --decode -
|
# proxmox-backup-debug inspect file qemu-server.conf.blob --decode -
|
||||||
|
|
||||||
would print the decoded contents of `qemu-server.conf.blob`. If the file you're
|
You can also check in which index files a specific chunk file is referenced
|
||||||
trying to inspect is encrypted, a path to the keyfile has to be provided using
|
|
||||||
``--keyfile``.
|
|
||||||
|
|
||||||
Checking in which index files a specific chunk file is referenced can be done
|
|
||||||
with:
|
with:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-debug inspect chunk b531d3ffc9bd7c65748a61198c060678326a431db7eded874c327b7986e595e0 --reference-filter /path/in/a/datastore/directory
|
# proxmox-backup-debug inspect chunk b531d3ffc9bd7c65748a61198c060678326a431db7eded874c327b7986e595e0 --reference-filter /path/in/a/datastore/directory
|
||||||
|
|
||||||
Here ``--reference-filter`` specifies where index files should be searched, this
|
Here ``--reference-filter`` specifies where index files should be searched. This
|
||||||
can be an arbitrary path. If, for some reason, the filename of the chunk was
|
can be an arbitrary path. If, for some reason, the filename of the chunk was
|
||||||
changed you can explicitly specify the digest using ``--digest``, by default the
|
changed, you can explicitly specify the digest using ``--digest``. By default, the
|
||||||
chunk filename is used as the digest to look for. Specifying no
|
chunk filename is used as the digest to look for. If no ``--reference-filter``
|
||||||
``--reference-filter`` will just print the CRC and encryption status of the
|
is specified, it will only print the CRC and encryption status of the chunk. You
|
||||||
chunk. You can also decode chunks, to do so ``--decode`` has to be set. If the
|
can also decode chunks, by setting the ``--decode`` flag. If the chunk is
|
||||||
chunk is encrypted a ``--keyfile`` has to be provided for decoding.
|
encrypted, a ``--keyfile`` must be provided, in order to decode it.
|
||||||
|
|
||||||
Restore without a running PBS
|
Restore without a Running Proxmox Backup Server
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
It is possible to restore spefiic files of snapshots without a running PBS using
|
It's possible to restore specific files from a snapshot, without a running
|
||||||
the `recover` sub-command, provided you have access to the intact index and
|
Proxmox Backup Server instance, using the ``recover`` subcommand, provided you
|
||||||
chunk files. Note that you also need the corresponding key file if the backup
|
have access to the intact index and chunk files. Note that you also need the
|
||||||
was encrypted.
|
corresponding key file if the backup was encrypted.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-debug recover index drive-scsi0.img.fidx /path/to/.chunks
|
# proxmox-backup-debug recover index drive-scsi0.img.fidx /path/to/.chunks
|
||||||
|
|
||||||
In above example the `/path/to/.chunks` argument is the path to the directory
|
In the above example, the `/path/to/.chunks` argument is the path to the
|
||||||
that contains contains the chunks, and `drive-scsi0.img.fidx` is the index-file
|
directory that contains the chunks, and `drive-scsi0.img.fidx` is the index file
|
||||||
of the file you'd lile to restore. Both paths can be absolute or relative. With
|
of the file you'd like to restore. Both paths can be absolute or relative. With
|
||||||
``--skip-crc`` it is possible to disable the crc checks of the chunks, this will
|
``--skip-crc``, it's possible to disable the CRC checks of the chunks. This
|
||||||
speed up the process slightly and allows for trying to restore (partially)
|
will speed up the process slightly and allow for trying to restore (partially)
|
||||||
corrupt chunks. It's recommended to always try without the skip-CRC option
|
corrupt chunks. It's recommended to always try without the skip-CRC option
|
||||||
first.
|
first.
|
||||||
|
|
||||||
|
@ -41,23 +41,23 @@ Binary Data (BLOBs)
|
|||||||
~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
This type is used to store smaller (< 16MB) binary data such as
|
This type is used to store smaller (< 16MB) binary data such as
|
||||||
configuration files. Larger files should be stored as image archive.
|
configuration files. Larger files should be stored as image archives.
|
||||||
|
|
||||||
.. caution:: Please do not store all files as BLOBs. Instead, use the
|
.. caution:: Please do not store all files as BLOBs. Instead, use the
|
||||||
file archive to store whole directory trees.
|
file archive to store entire directory trees.
|
||||||
|
|
||||||
|
|
||||||
Catalog File: ``catalog.pcat1``
|
Catalog File: ``catalog.pcat1``
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
The catalog file is an index for file archives. It contains
|
The catalog file is an index for file archives. It contains
|
||||||
the list of files and is used to speed up search operations.
|
the list of included files and is used to speed up search operations.
|
||||||
|
|
||||||
|
|
||||||
The Manifest: ``index.json``
|
The Manifest: ``index.json``
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
The manifest contains the list of all backup files, their
|
The manifest contains a list of all backed up files, and their
|
||||||
sizes and checksums. It is used to verify the consistency of a
|
sizes and checksums. It is used to verify the consistency of a
|
||||||
backup.
|
backup.
|
||||||
|
|
||||||
@ -68,18 +68,19 @@ Backup Type
|
|||||||
The backup server groups backups by *type*, where *type* is one of:
|
The backup server groups backups by *type*, where *type* is one of:
|
||||||
|
|
||||||
``vm``
|
``vm``
|
||||||
This type is used for :term:`virtual machine`\ s. Typically
|
This type is used for :term:`virtual machine`\ s. It typically
|
||||||
consists of the virtual machine's configuration file and an image archive
|
consists of the virtual machine's configuration file and an image archive
|
||||||
for each disk.
|
for each disk.
|
||||||
|
|
||||||
``ct``
|
``ct``
|
||||||
This type is used for :term:`container`\ s. Consists of the container's
|
This type is used for :term:`container`\ s. It consists of the container's
|
||||||
configuration and a single file archive for the filesystem content.
|
configuration and a single file archive for the filesystem's contents.
|
||||||
|
|
||||||
``host``
|
``host``
|
||||||
This type is used for backups created from within the backed up machine.
|
This type is used for file/directory backups created from within a machine.
|
||||||
Typically this would be a physical host but could also be a virtual machine
|
Typically this would be a physical host, but could also be a virtual machine
|
||||||
or container. Such backups may contain file and image archives, there are no restrictions in this regard.
|
or container. Such backups may contain file and image archives; there are no
|
||||||
|
restrictions in this regard.
|
||||||
|
|
||||||
|
|
||||||
Backup ID
|
Backup ID
|
||||||
|
@ -15,17 +15,19 @@ Proxmox Backup Server supports several authentication realms, and you need to
|
|||||||
choose the realm when you add a new user. Possible realms are:
|
choose the realm when you add a new user. Possible realms are:
|
||||||
|
|
||||||
:pam: Linux PAM standard authentication. Use this if you want to
|
:pam: Linux PAM standard authentication. Use this if you want to
|
||||||
authenticate as Linux system user (Users need to exist on the
|
authenticate as a Linux system user (users need to exist on the
|
||||||
system).
|
system).
|
||||||
|
|
||||||
:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
|
:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
|
||||||
``/etc/proxmox-backup/shadow.json``.
|
``/etc/proxmox-backup/shadow.json``.
|
||||||
|
|
||||||
After installation, there is a single user ``root@pam``, which
|
:openid: OpenID Connect server. Users can authenticate against an external
|
||||||
corresponds to the Unix superuser. User configuration information is stored in the file
|
OpenID Connect server.
|
||||||
``/etc/proxmox-backup/user.cfg``. You can use the
|
|
||||||
``proxmox-backup-manager`` command line tool to list or manipulate
|
After installation, there is a single user, ``root@pam``, which corresponds to
|
||||||
users:
|
the Unix superuser. User configuration information is stored in the file
|
||||||
|
``/etc/proxmox-backup/user.cfg``. You can use the ``proxmox-backup-manager``
|
||||||
|
command line tool to list or manipulate users:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -40,13 +42,13 @@ users:
|
|||||||
:align: right
|
:align: right
|
||||||
:alt: Add a new user
|
:alt: Add a new user
|
||||||
|
|
||||||
The superuser has full administration rights on everything, so you
|
The superuser has full administration rights on everything, so it's recommended
|
||||||
normally want to add other users with less privileges. You can add a new
|
to add other users with less privileges. You can add a new
|
||||||
user with the ``user create`` subcommand or through the web
|
user with the ``user create`` subcommand or through the web
|
||||||
interface, under the **User Management** tab of **Configuration -> Access
|
interface, under the **User Management** tab of **Configuration -> Access
|
||||||
Control**. The ``create`` subcommand lets you specify many options like
|
Control**. The ``create`` subcommand lets you specify many options like
|
||||||
``--email`` or ``--password``. You can update or change any user properties
|
``--email`` or ``--password``. You can update or change any user properties
|
||||||
using the ``update`` subcommand later (**Edit** in the GUI):
|
using the ``user update`` subcommand later (**Edit** in the GUI):
|
||||||
|
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -71,16 +73,16 @@ The resulting user list looks like this:
|
|||||||
│ root@pam │ 1 │ │ │ │ │ Superuser │
|
│ root@pam │ 1 │ │ │ │ │ Superuser │
|
||||||
└──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘
|
└──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘
|
||||||
|
|
||||||
Newly created users do not have any permissions. Please read the Access Control
|
Newly created users do not have any permissions. Please read the :ref:`user_acl`
|
||||||
section to learn how to set access permissions.
|
section to learn how to set access permissions.
|
||||||
|
|
||||||
If you want to disable a user account, you can do that by setting ``--enable`` to ``0``
|
You can disable a user account by setting ``--enable`` to ``0``:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-manager user update john@pbs --enable 0
|
# proxmox-backup-manager user update john@pbs --enable 0
|
||||||
|
|
||||||
Or completely remove the user with:
|
Or completely remove a user with:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -95,7 +97,7 @@ API Tokens
|
|||||||
:align: right
|
:align: right
|
||||||
:alt: API Token Overview
|
:alt: API Token Overview
|
||||||
|
|
||||||
Any authenticated user can generate API tokens which can in turn be used to
|
Any authenticated user can generate API tokens, which can in turn be used to
|
||||||
configure various clients, instead of directly providing the username and
|
configure various clients, instead of directly providing the username and
|
||||||
password.
|
password.
|
||||||
|
|
||||||
@ -117,7 +119,7 @@ The API token is passed from the client to the server by setting the
|
|||||||
``Authorization`` HTTP header with method ``PBSAPIToken`` to the value
|
``Authorization`` HTTP header with method ``PBSAPIToken`` to the value
|
||||||
``TOKENID:TOKENSECRET``.
|
``TOKENID:TOKENSECRET``.
|
||||||
|
|
||||||
Generating new tokens can done using ``proxmox-backup-manager`` or the GUI:
|
You can generate tokens from the GUI or by using ``proxmox-backup-manager``:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -154,9 +156,9 @@ section to learn how to set access permissions.
|
|||||||
Access Control
|
Access Control
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
By default new users and API tokens do not have any permission. Instead you
|
By default, new users and API tokens do not have any permissions. Instead you
|
||||||
need to specify what is allowed and what is not. You can do this by assigning
|
need to specify what is allowed and what is not. You can do this by assigning
|
||||||
roles to users/tokens on specific objects like datastores or remotes. The
|
roles to users/tokens on specific objects, like datastores or remotes. The
|
||||||
following roles exist:
|
following roles exist:
|
||||||
|
|
||||||
**NoAccess**
|
**NoAccess**
|
||||||
@ -176,7 +178,7 @@ following roles exist:
|
|||||||
is not allowed to read the actual data.
|
is not allowed to read the actual data.
|
||||||
|
|
||||||
**DatastoreReader**
|
**DatastoreReader**
|
||||||
Can Inspect datastore content and can do restores.
|
Can Inspect datastore content and do restores.
|
||||||
|
|
||||||
**DatastoreBackup**
|
**DatastoreBackup**
|
||||||
Can backup and restore owned backups.
|
Can backup and restore owned backups.
|
||||||
@ -193,6 +195,18 @@ following roles exist:
|
|||||||
**RemoteSyncOperator**
|
**RemoteSyncOperator**
|
||||||
Is allowed to read data from a remote.
|
Is allowed to read data from a remote.
|
||||||
|
|
||||||
|
**TapeAudit**
|
||||||
|
Can view tape related configuration and status
|
||||||
|
|
||||||
|
**TapeAdministrat**
|
||||||
|
Can do anything related to tape backup
|
||||||
|
|
||||||
|
**TapeOperator**
|
||||||
|
Can do tape backup and restore (but no configuration changes)
|
||||||
|
|
||||||
|
**TapeReader**
|
||||||
|
Can read and inspect tape configuration and media content
|
||||||
|
|
||||||
.. image:: images/screenshots/pbs-gui-user-management-add-user.png
|
.. image:: images/screenshots/pbs-gui-user-management-add-user.png
|
||||||
:align: right
|
:align: right
|
||||||
:alt: Add permissions for user
|
:alt: Add permissions for user
|
||||||
@ -236,7 +250,8 @@ You can list the ACLs of each user/token using the following command:
|
|||||||
│ john@pbs │ /datastore/store1 │ 1 │ DatastoreAdmin │
|
│ john@pbs │ /datastore/store1 │ 1 │ DatastoreAdmin │
|
||||||
└──────────┴───────────────────┴───────────┴────────────────┘
|
└──────────┴───────────────────┴───────────┴────────────────┘
|
||||||
|
|
||||||
A single user/token can be assigned multiple permission sets for different datastores.
|
A single user/token can be assigned multiple permission sets for different
|
||||||
|
datastores.
|
||||||
|
|
||||||
.. Note::
|
.. Note::
|
||||||
Naming convention is important here. For datastores on the host,
|
Naming convention is important here. For datastores on the host,
|
||||||
@ -247,11 +262,11 @@ A single user/token can be assigned multiple permission sets for different datas
|
|||||||
remote (see `Remote` below) and ``{storename}`` is the name of the datastore on
|
remote (see `Remote` below) and ``{storename}`` is the name of the datastore on
|
||||||
the remote.
|
the remote.
|
||||||
|
|
||||||
API Token permissions
|
API Token Permissions
|
||||||
~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
API token permissions are calculated based on ACLs containing their ID
|
API token permissions are calculated based on ACLs containing their ID,
|
||||||
independent of those of their corresponding user. The resulting permission set
|
independently of those of their corresponding user. The resulting permission set
|
||||||
on a given path is then intersected with that of the corresponding user.
|
on a given path is then intersected with that of the corresponding user.
|
||||||
|
|
||||||
In practice this means:
|
In practice this means:
|
||||||
@ -259,17 +274,17 @@ In practice this means:
|
|||||||
#. API tokens require their own ACL entries
|
#. API tokens require their own ACL entries
|
||||||
#. API tokens can never do more than their corresponding user
|
#. API tokens can never do more than their corresponding user
|
||||||
|
|
||||||
Effective permissions
|
Effective Permissions
|
||||||
~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
To calculate and display the effective permission set of a user or API token
|
To calculate and display the effective permission set of a user or API token,
|
||||||
you can use the ``proxmox-backup-manager user permission`` command:
|
you can use the ``proxmox-backup-manager user permission`` command:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-manager user permissions john@pbs --path /datastore/store1
|
# proxmox-backup-manager user permissions john@pbs --path /datastore/store1
|
||||||
Privileges with (*) have the propagate flag set
|
Privileges with (*) have the propagate flag set
|
||||||
|
|
||||||
Path: /datastore/store1
|
Path: /datastore/store1
|
||||||
- Datastore.Audit (*)
|
- Datastore.Audit (*)
|
||||||
- Datastore.Backup (*)
|
- Datastore.Backup (*)
|
||||||
@ -277,17 +292,17 @@ you can use the ``proxmox-backup-manager user permission`` command:
|
|||||||
- Datastore.Prune (*)
|
- Datastore.Prune (*)
|
||||||
- Datastore.Read (*)
|
- Datastore.Read (*)
|
||||||
- Datastore.Verify (*)
|
- Datastore.Verify (*)
|
||||||
|
|
||||||
# proxmox-backup-manager acl update /datastore/store1 DatastoreBackup --auth-id 'john@pbs!client1'
|
# proxmox-backup-manager acl update /datastore/store1 DatastoreBackup --auth-id 'john@pbs!client1'
|
||||||
# proxmox-backup-manager user permissions 'john@pbs!client1' --path /datastore/store1
|
# proxmox-backup-manager user permissions 'john@pbs!client1' --path /datastore/store1
|
||||||
Privileges with (*) have the propagate flag set
|
Privileges with (*) have the propagate flag set
|
||||||
|
|
||||||
Path: /datastore/store1
|
Path: /datastore/store1
|
||||||
- Datastore.Backup (*)
|
- Datastore.Backup (*)
|
||||||
|
|
||||||
.. _user_tfa:
|
.. _user_tfa:
|
||||||
|
|
||||||
Two-factor authentication
|
Two-Factor Authentication
|
||||||
-------------------------
|
-------------------------
|
||||||
|
|
||||||
Introduction
|
Introduction
|
||||||
@ -296,7 +311,7 @@ Introduction
|
|||||||
With simple authentication, only a password (single factor) is required to
|
With simple authentication, only a password (single factor) is required to
|
||||||
successfully claim an identity (authenticate), for example, to be able to log in
|
successfully claim an identity (authenticate), for example, to be able to log in
|
||||||
as `root@pam` on a specific instance of Proxmox Backup Server. In this case, if
|
as `root@pam` on a specific instance of Proxmox Backup Server. In this case, if
|
||||||
the password gets stolen or leaked, anybody can use it to log in - even if they
|
the password gets leaked or stolen, anybody can use it to log in - even if they
|
||||||
should not be allowed to do so.
|
should not be allowed to do so.
|
||||||
|
|
||||||
With two-factor authentication (TFA), a user is asked for an additional factor
|
With two-factor authentication (TFA), a user is asked for an additional factor
|
||||||
@ -359,16 +374,18 @@ WebAuthn
|
|||||||
|
|
||||||
For WebAuthn to work, you need to have two things:
|
For WebAuthn to work, you need to have two things:
|
||||||
|
|
||||||
* a trusted HTTPS certificate (for example, by using `Let's Encrypt
|
* A trusted HTTPS certificate (for example, by using `Let's Encrypt
|
||||||
<https://pbs.proxmox.com/wiki/index.php/HTTPS_Certificate_Configuration>`_).
|
<https://pbs.proxmox.com/wiki/index.php/HTTPS_Certificate_Configuration>`_).
|
||||||
While it probably works with an untrusted certificate, some browsers may warn
|
While it probably works with an untrusted certificate, some browsers may warn
|
||||||
or refuse WebAuthn operations if it is not trusted.
|
or refuse WebAuthn operations if it is not trusted.
|
||||||
|
|
||||||
* setup the WebAuthn configuration (see *Configuration -> Authentication* in the
|
* Setup the WebAuthn configuration (see **Configuration -> Authentication** in
|
||||||
Proxmox Backup Server web-interface). This can be auto-filled in most setups.
|
the Proxmox Backup Server web interface). This can be auto-filled in most
|
||||||
|
setups.
|
||||||
|
|
||||||
Once you have fulfilled both of these requirements, you can add a WebAuthn
|
Once you have fulfilled both of these requirements, you can add a WebAuthn
|
||||||
configuration in the *Access Control* panel.
|
configuration in the **Two Factor Authentication** tab of the **Access Control**
|
||||||
|
panel.
|
||||||
|
|
||||||
.. _user_tfa_setup_recovery_keys:
|
.. _user_tfa_setup_recovery_keys:
|
||||||
|
|
||||||
@ -380,7 +397,8 @@ Recovery Keys
|
|||||||
:alt: Add a new user
|
:alt: Add a new user
|
||||||
|
|
||||||
Recovery key codes do not need any preparation; you can simply create a set of
|
Recovery key codes do not need any preparation; you can simply create a set of
|
||||||
recovery keys in the *Access Control* panel.
|
recovery keys in the **Two Factor Authentication** tab of the **Access Control**
|
||||||
|
panel.
|
||||||
|
|
||||||
.. note:: There can only be one set of single-use recovery keys per user at any
|
.. note:: There can only be one set of single-use recovery keys per user at any
|
||||||
time.
|
time.
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
use anyhow::{Error};
|
use anyhow::Error;
|
||||||
|
|
||||||
// chacha20-poly1305
|
// chacha20-poly1305
|
||||||
|
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
use anyhow::{Error};
|
use anyhow::{Error};
|
||||||
|
|
||||||
use proxmox::api::{*, cli::*};
|
use proxmox_schema::*;
|
||||||
|
use proxmox_router::cli::*;
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
input: {
|
input: {
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
|
|
||||||
use anyhow::{Error};
|
use anyhow::Error;
|
||||||
|
|
||||||
use pbs_api_types::Authid;
|
use pbs_api_types::Authid;
|
||||||
use pbs_client::{HttpClient, HttpClientOptions, BackupReader};
|
use pbs_client::{HttpClient, HttpClientOptions, BackupReader};
|
||||||
@ -34,7 +34,7 @@ async fn run() -> Result<(), Error> {
|
|||||||
|
|
||||||
let client = HttpClient::new(host, 8007, auth_id, options)?;
|
let client = HttpClient::new(host, 8007, auth_id, options)?;
|
||||||
|
|
||||||
let backup_time = proxmox::tools::time::parse_rfc3339("2019-06-28T10:49:48Z")?;
|
let backup_time = proxmox_time::parse_rfc3339("2019-06-28T10:49:48Z")?;
|
||||||
|
|
||||||
let client = BackupReader::start(client, None, "store2", "host", "elsa", backup_time, true)
|
let client = BackupReader::start(client, None, "store2", "host", "elsa", backup_time, true)
|
||||||
.await?;
|
.await?;
|
||||||
|
@ -1,9 +1,9 @@
|
|||||||
use anyhow::{bail, Error};
|
|
||||||
|
|
||||||
use std::thread;
|
use std::thread;
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
|
|
||||||
|
use anyhow::{bail, Error};
|
||||||
|
|
||||||
// tar handle files that shrink during backup, by simply padding with zeros.
|
// tar handle files that shrink during backup, by simply padding with zeros.
|
||||||
//
|
//
|
||||||
// this binary run multiple thread which writes some large files, then truncates
|
// this binary run multiple thread which writes some large files, then truncates
|
||||||
|
@ -16,7 +16,7 @@ async fn upload_speed() -> Result<f64, Error> {
|
|||||||
|
|
||||||
let client = HttpClient::new(host, 8007, auth_id, options)?;
|
let client = HttpClient::new(host, 8007, auth_id, options)?;
|
||||||
|
|
||||||
let backup_time = proxmox::tools::time::epoch_i64();
|
let backup_time = proxmox_time::epoch_i64();
|
||||||
|
|
||||||
let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false, true).await?;
|
let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false, true).await?;
|
||||||
|
|
||||||
|
@ -14,7 +14,12 @@ openssl = "0.10"
|
|||||||
regex = "1.2"
|
regex = "1.2"
|
||||||
serde = { version = "1.0", features = ["derive"] }
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
|
|
||||||
proxmox = { version = "0.13.3", default-features = false, features = [ "api-macro" ] }
|
proxmox = "0.14.0"
|
||||||
|
proxmox-lang = "1.0.0"
|
||||||
|
proxmox-schema = { version = "1.0.0", features = [ "api-macro" ] }
|
||||||
|
proxmox-time = "1.0.0"
|
||||||
|
proxmox-uuid = { version = "1.0.0", features = [ "serde" ] }
|
||||||
|
|
||||||
|
proxmox-rrd-api-types = { path = "../proxmox-rrd-api-types" }
|
||||||
proxmox-systemd = { path = "../proxmox-systemd" }
|
proxmox-systemd = { path = "../proxmox-systemd" }
|
||||||
pbs-tools = { path = "../pbs-tools" }
|
pbs-tools = { path = "../pbs-tools" }
|
||||||
|
@ -1,13 +1,12 @@
|
|||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use serde::de::{value, IntoDeserializer};
|
use serde::de::{value, IntoDeserializer};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::api;
|
use proxmox_lang::constnamedbitmap;
|
||||||
use proxmox::api::schema::{
|
use proxmox_schema::{
|
||||||
ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema,
|
api, const_regex, ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema,
|
||||||
};
|
};
|
||||||
use proxmox::{constnamedbitmap, const_regex};
|
|
||||||
|
|
||||||
const_regex! {
|
const_regex! {
|
||||||
pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
|
pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
|
||||||
@ -76,7 +75,7 @@ constnamedbitmap! {
|
|||||||
|
|
||||||
/// Admin always has all privileges. It can do everything except a few actions
|
/// Admin always has all privileges. It can do everything except a few actions
|
||||||
/// which are limited to the 'root@pam` superuser
|
/// which are limited to the 'root@pam` superuser
|
||||||
pub const ROLE_ADMIN: u64 = std::u64::MAX;
|
pub const ROLE_ADMIN: u64 = u64::MAX;
|
||||||
|
|
||||||
/// NoAccess can be used to remove privileges from specific (sub-)paths
|
/// NoAccess can be used to remove privileges from specific (sub-)paths
|
||||||
pub const ROLE_NO_ACCESS: u64 = 0;
|
pub const ROLE_NO_ACCESS: u64 = 0;
|
||||||
@ -222,7 +221,6 @@ pub enum Role {
|
|||||||
TapeReader = ROLE_TAPE_READER,
|
TapeReader = ROLE_TAPE_READER,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
impl FromStr for Role {
|
impl FromStr for Role {
|
||||||
type Err = value::Error;
|
type Err = value::Error;
|
||||||
|
|
||||||
@ -231,26 +229,24 @@ impl FromStr for Role {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub const ACL_PATH_FORMAT: ApiStringFormat =
|
pub const ACL_PATH_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&ACL_PATH_REGEX);
|
||||||
ApiStringFormat::Pattern(&ACL_PATH_REGEX);
|
|
||||||
|
|
||||||
pub const ACL_PATH_SCHEMA: Schema = StringSchema::new(
|
pub const ACL_PATH_SCHEMA: Schema = StringSchema::new("Access control path.")
|
||||||
"Access control path.")
|
|
||||||
.format(&ACL_PATH_FORMAT)
|
.format(&ACL_PATH_FORMAT)
|
||||||
.min_length(1)
|
.min_length(1)
|
||||||
.max_length(128)
|
.max_length(128)
|
||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
pub const ACL_PROPAGATE_SCHEMA: Schema = BooleanSchema::new(
|
pub const ACL_PROPAGATE_SCHEMA: Schema =
|
||||||
"Allow to propagate (inherit) permissions.")
|
BooleanSchema::new("Allow to propagate (inherit) permissions.")
|
||||||
.default(true)
|
.default(true)
|
||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new(
|
pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new("Type of 'ugid' property.")
|
||||||
"Type of 'ugid' property.")
|
|
||||||
.format(&ApiStringFormat::Enum(&[
|
.format(&ApiStringFormat::Enum(&[
|
||||||
EnumEntry::new("user", "User"),
|
EnumEntry::new("user", "User"),
|
||||||
EnumEntry::new("group", "Group")]))
|
EnumEntry::new("group", "Group"),
|
||||||
|
]))
|
||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
|
@ -3,7 +3,7 @@ use std::fmt::{self, Display};
|
|||||||
use anyhow::Error;
|
use anyhow::Error;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::api;
|
use proxmox_schema::api;
|
||||||
|
|
||||||
use pbs_tools::format::{as_fingerprint, bytes_as_fingerprint};
|
use pbs_tools::format::{as_fingerprint, bytes_as_fingerprint};
|
||||||
|
|
||||||
|
@ -1,13 +1,10 @@
|
|||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::api;
|
use proxmox_schema::{
|
||||||
use proxmox::api::schema::{
|
api, const_regex, ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType,
|
||||||
ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType, Schema,
|
Schema, StringSchema, Updater,
|
||||||
StringSchema, Updater,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
use proxmox::const_regex;
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
PROXMOX_SAFE_ID_FORMAT, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, CryptMode, UPID,
|
PROXMOX_SAFE_ID_FORMAT, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, CryptMode, UPID,
|
||||||
Fingerprint, Userid, Authid,
|
Fingerprint, Userid, Authid,
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::api;
|
use proxmox_schema::api;
|
||||||
|
|
||||||
#[api]
|
#[api]
|
||||||
#[derive(Serialize, Deserialize)]
|
#[derive(Serialize, Deserialize)]
|
||||||
|
@ -1,8 +1,6 @@
|
|||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::const_regex;
|
use proxmox_schema::*;
|
||||||
|
|
||||||
use proxmox::api::{api, schema::*};
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
Userid, Authid, REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
|
Userid, Authid, REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::api;
|
use proxmox_schema::api;
|
||||||
|
|
||||||
use crate::CERT_FINGERPRINT_SHA256_SCHEMA;
|
use crate::CERT_FINGERPRINT_SHA256_SCHEMA;
|
||||||
|
|
||||||
|
@ -3,9 +3,9 @@
|
|||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use anyhow::bail;
|
use anyhow::bail;
|
||||||
|
|
||||||
use proxmox::api::api;
|
use proxmox_schema::{
|
||||||
use proxmox::api::schema::{ApiStringFormat, ArraySchema, Schema, StringSchema};
|
api, const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, ReturnType,
|
||||||
use proxmox::const_regex;
|
};
|
||||||
use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE};
|
use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE};
|
||||||
|
|
||||||
#[rustfmt::skip]
|
#[rustfmt::skip]
|
||||||
@ -60,8 +60,7 @@ pub use userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN
|
|||||||
mod user;
|
mod user;
|
||||||
pub use user::*;
|
pub use user::*;
|
||||||
|
|
||||||
pub mod upid;
|
pub use proxmox_schema::upid::*;
|
||||||
pub use upid::*;
|
|
||||||
|
|
||||||
mod crypto;
|
mod crypto;
|
||||||
pub use crypto::{CryptMode, Fingerprint};
|
pub use crypto::{CryptMode, Fingerprint};
|
||||||
@ -359,33 +358,6 @@ pub struct APTUpdateInfo {
|
|||||||
pub extra_info: Option<String>,
|
pub extra_info: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[api()]
|
|
||||||
#[derive(Copy, Clone, Serialize, Deserialize)]
|
|
||||||
#[serde(rename_all = "UPPERCASE")]
|
|
||||||
pub enum RRDMode {
|
|
||||||
/// Maximum
|
|
||||||
Max,
|
|
||||||
/// Average
|
|
||||||
Average,
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
#[api()]
|
|
||||||
#[repr(u64)]
|
|
||||||
#[derive(Copy, Clone, Serialize, Deserialize)]
|
|
||||||
#[serde(rename_all = "lowercase")]
|
|
||||||
pub enum RRDTimeFrameResolution {
|
|
||||||
/// 1 min => last 70 minutes
|
|
||||||
Hour = 60,
|
|
||||||
/// 30 min => last 35 hours
|
|
||||||
Day = 60*30,
|
|
||||||
/// 3 hours => about 8 days
|
|
||||||
Week = 60*180,
|
|
||||||
/// 12 hours => last 35 days
|
|
||||||
Month = 60*720,
|
|
||||||
/// 1 week => last 490 days
|
|
||||||
Year = 60*10080,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[api()]
|
#[api()]
|
||||||
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||||
@ -397,3 +369,59 @@ pub enum NodePowerCommand {
|
|||||||
/// Shutdown the server
|
/// Shutdown the server
|
||||||
Shutdown,
|
Shutdown,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
pub enum TaskStateType {
|
||||||
|
/// Ok
|
||||||
|
OK,
|
||||||
|
/// Warning
|
||||||
|
Warning,
|
||||||
|
/// Error
|
||||||
|
Error,
|
||||||
|
/// Unknown
|
||||||
|
Unknown,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
upid: { schema: UPID::API_SCHEMA },
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
/// Task properties.
|
||||||
|
pub struct TaskListItem {
|
||||||
|
pub upid: String,
|
||||||
|
/// The node name where the task is running on.
|
||||||
|
pub node: String,
|
||||||
|
/// The Unix PID
|
||||||
|
pub pid: i64,
|
||||||
|
/// The task start time (Epoch)
|
||||||
|
pub pstart: u64,
|
||||||
|
/// The task start time (Epoch)
|
||||||
|
pub starttime: i64,
|
||||||
|
/// Worker type (arbitrary ASCII string)
|
||||||
|
pub worker_type: String,
|
||||||
|
/// Worker ID (arbitrary ASCII string)
|
||||||
|
pub worker_id: Option<String>,
|
||||||
|
/// The authenticated entity who started the task
|
||||||
|
pub user: String,
|
||||||
|
/// The task end time (Epoch)
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub endtime: Option<i64>,
|
||||||
|
/// Task end status
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub status: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType {
|
||||||
|
optional: false,
|
||||||
|
schema: &ArraySchema::new(
|
||||||
|
"A list of tasks.",
|
||||||
|
&TaskListItem::API_SCHEMA,
|
||||||
|
).schema(),
|
||||||
|
};
|
||||||
|
|
||||||
|
pub use proxmox_rrd_api_types::{RRDMode, RRDTimeFrameResolution};
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::{api, schema::*};
|
use proxmox_schema::*;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
PROXMOX_SAFE_ID_REGEX,
|
PROXMOX_SAFE_ID_REGEX,
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
use proxmox::api::{api, schema::*};
|
use proxmox_schema::*;
|
||||||
|
|
||||||
pub const REMOTE_PASSWORD_SCHEMA: Schema = StringSchema::new("Password or auth token for remote host.")
|
pub const REMOTE_PASSWORD_SCHEMA: Schema = StringSchema::new("Password or auth token for remote host.")
|
||||||
.format(&PASSWORD_FORMAT)
|
.format(&PASSWORD_FORMAT)
|
||||||
|
@ -2,22 +2,11 @@
|
|||||||
|
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::{
|
use proxmox_schema::{
|
||||||
api,
|
api, ApiStringFormat, ArraySchema, IntegerSchema, Schema, StringSchema, Updater,
|
||||||
schema::{
|
|
||||||
Schema,
|
|
||||||
ApiStringFormat,
|
|
||||||
ArraySchema,
|
|
||||||
IntegerSchema,
|
|
||||||
StringSchema,
|
|
||||||
Updater,
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{OptionalDeviceIdentification, PROXMOX_SAFE_ID_FORMAT};
|
||||||
PROXMOX_SAFE_ID_FORMAT,
|
|
||||||
OptionalDeviceIdentification,
|
|
||||||
};
|
|
||||||
|
|
||||||
pub const CHANGER_NAME_SCHEMA: Schema = StringSchema::new("Tape Changer Identifier.")
|
pub const CHANGER_NAME_SCHEMA: Schema = StringSchema::new("Tape Changer Identifier.")
|
||||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||||
@ -25,9 +14,8 @@ pub const CHANGER_NAME_SCHEMA: Schema = StringSchema::new("Tape Changer Identifi
|
|||||||
.max_length(32)
|
.max_length(32)
|
||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
pub const SCSI_CHANGER_PATH_SCHEMA: Schema = StringSchema::new(
|
pub const SCSI_CHANGER_PATH_SCHEMA: Schema =
|
||||||
"Path to Linux generic SCSI device (e.g. '/dev/sg4')")
|
StringSchema::new("Path to Linux generic SCSI device (e.g. '/dev/sg4')").schema();
|
||||||
.schema();
|
|
||||||
|
|
||||||
pub const MEDIA_LABEL_SCHEMA: Schema = StringSchema::new("Media Label/Barcode.")
|
pub const MEDIA_LABEL_SCHEMA: Schema = StringSchema::new("Media Label/Barcode.")
|
||||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||||
@ -36,16 +24,18 @@ pub const MEDIA_LABEL_SCHEMA: Schema = StringSchema::new("Media Label/Barcode.")
|
|||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
pub const SLOT_ARRAY_SCHEMA: Schema = ArraySchema::new(
|
pub const SLOT_ARRAY_SCHEMA: Schema = ArraySchema::new(
|
||||||
"Slot list.", &IntegerSchema::new("Slot number")
|
"Slot list.",
|
||||||
.minimum(1)
|
&IntegerSchema::new("Slot number").minimum(1).schema(),
|
||||||
.schema())
|
)
|
||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
pub const EXPORT_SLOT_LIST_SCHEMA: Schema = StringSchema::new("\
|
pub const EXPORT_SLOT_LIST_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"\
|
||||||
A list of slot numbers, comma separated. Those slots are reserved for
|
A list of slot numbers, comma separated. Those slots are reserved for
|
||||||
Import/Export, i.e. any media in those slots are considered to be
|
Import/Export, i.e. any media in those slots are considered to be
|
||||||
'offline'.
|
'offline'.
|
||||||
")
|
",
|
||||||
|
)
|
||||||
.format(&ApiStringFormat::PropertyString(&SLOT_ARRAY_SCHEMA))
|
.format(&ApiStringFormat::PropertyString(&SLOT_ARRAY_SCHEMA))
|
||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
@ -63,14 +53,14 @@ Import/Export, i.e. any media in those slots are considered to be
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
#[derive(Serialize,Deserialize,Updater)]
|
#[derive(Serialize, Deserialize, Updater)]
|
||||||
#[serde(rename_all = "kebab-case")]
|
#[serde(rename_all = "kebab-case")]
|
||||||
/// SCSI tape changer
|
/// SCSI tape changer
|
||||||
pub struct ScsiTapeChanger {
|
pub struct ScsiTapeChanger {
|
||||||
#[updater(skip)]
|
#[updater(skip)]
|
||||||
pub name: String,
|
pub name: String,
|
||||||
pub path: String,
|
pub path: String,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub export_slots: Option<String>,
|
pub export_slots: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -84,7 +74,7 @@ pub struct ScsiTapeChanger {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
#[derive(Serialize,Deserialize)]
|
#[derive(Serialize, Deserialize)]
|
||||||
#[serde(rename_all = "kebab-case")]
|
#[serde(rename_all = "kebab-case")]
|
||||||
/// Changer config with optional device identification attributes
|
/// Changer config with optional device identification attributes
|
||||||
pub struct ChangerListEntry {
|
pub struct ChangerListEntry {
|
||||||
@ -95,7 +85,7 @@ pub struct ChangerListEntry {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[api()]
|
#[api()]
|
||||||
#[derive(Serialize,Deserialize)]
|
#[derive(Serialize, Deserialize)]
|
||||||
#[serde(rename_all = "kebab-case")]
|
#[serde(rename_all = "kebab-case")]
|
||||||
/// Mtx Entry Kind
|
/// Mtx Entry Kind
|
||||||
pub enum MtxEntryKind {
|
pub enum MtxEntryKind {
|
||||||
@ -118,7 +108,7 @@ pub enum MtxEntryKind {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
#[derive(Serialize,Deserialize)]
|
#[derive(Serialize, Deserialize)]
|
||||||
#[serde(rename_all = "kebab-case")]
|
#[serde(rename_all = "kebab-case")]
|
||||||
/// Mtx Status Entry
|
/// Mtx Status Entry
|
||||||
pub struct MtxStatusEntry {
|
pub struct MtxStatusEntry {
|
||||||
@ -126,12 +116,12 @@ pub struct MtxStatusEntry {
|
|||||||
/// The ID of the slot or drive
|
/// The ID of the slot or drive
|
||||||
pub entry_id: u64,
|
pub entry_id: u64,
|
||||||
/// The media label (volume tag) if the slot/drive is full
|
/// The media label (volume tag) if the slot/drive is full
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub label_text: Option<String>,
|
pub label_text: Option<String>,
|
||||||
/// The slot the drive was loaded from
|
/// The slot the drive was loaded from
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub loaded_slot: Option<u64>,
|
pub loaded_slot: Option<u64>,
|
||||||
/// The current state of the drive
|
/// The current state of the drive
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub state: Option<String>,
|
pub state: Option<String>,
|
||||||
}
|
}
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
use ::serde::{Deserialize, Serialize};
|
use ::serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::api;
|
use proxmox_schema::api;
|
||||||
|
|
||||||
#[api()]
|
#[api()]
|
||||||
#[derive(Serialize,Deserialize)]
|
#[derive(Serialize,Deserialize)]
|
||||||
|
@ -4,10 +4,7 @@ use std::convert::TryFrom;
|
|||||||
use anyhow::{bail, Error};
|
use anyhow::{bail, Error};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::{
|
use proxmox_schema::{api, Schema, IntegerSchema, StringSchema, Updater};
|
||||||
api,
|
|
||||||
schema::{Schema, IntegerSchema, StringSchema, Updater},
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
PROXMOX_SAFE_ID_FORMAT,
|
PROXMOX_SAFE_ID_FORMAT,
|
||||||
|
@ -1,9 +1,7 @@
|
|||||||
use ::serde::{Deserialize, Serialize};
|
use ::serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::{
|
use proxmox_schema::*;
|
||||||
api::{api, schema::*},
|
use proxmox_uuid::Uuid;
|
||||||
tools::Uuid,
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
UUID_FORMAT,
|
UUID_FORMAT,
|
||||||
|
@ -1,18 +1,8 @@
|
|||||||
use anyhow::{bail, Error};
|
use anyhow::{bail, Error};
|
||||||
|
|
||||||
use proxmox::api::{
|
use proxmox_schema::{parse_simple_value, ApiStringFormat, Schema, StringSchema};
|
||||||
schema::{
|
|
||||||
Schema,
|
|
||||||
StringSchema,
|
|
||||||
ApiStringFormat,
|
|
||||||
parse_simple_value,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT};
|
||||||
PROXMOX_SAFE_ID_FORMAT,
|
|
||||||
CHANGER_NAME_SCHEMA,
|
|
||||||
};
|
|
||||||
|
|
||||||
pub const VAULT_NAME_SCHEMA: Schema = StringSchema::new("Vault name.")
|
pub const VAULT_NAME_SCHEMA: Schema = StringSchema::new("Vault name.")
|
||||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||||
@ -35,28 +25,27 @@ pub enum MediaLocation {
|
|||||||
proxmox::forward_deserialize_to_from_str!(MediaLocation);
|
proxmox::forward_deserialize_to_from_str!(MediaLocation);
|
||||||
proxmox::forward_serialize_to_display!(MediaLocation);
|
proxmox::forward_serialize_to_display!(MediaLocation);
|
||||||
|
|
||||||
impl proxmox::api::schema::ApiType for MediaLocation {
|
impl proxmox_schema::ApiType for MediaLocation {
|
||||||
const API_SCHEMA: Schema = StringSchema::new(
|
const API_SCHEMA: Schema = StringSchema::new(
|
||||||
"Media location (e.g. 'offline', 'online-<changer_name>', 'vault-<vault_name>')")
|
"Media location (e.g. 'offline', 'online-<changer_name>', 'vault-<vault_name>')",
|
||||||
.format(&ApiStringFormat::VerifyFn(|text| {
|
)
|
||||||
let location: MediaLocation = text.parse()?;
|
.format(&ApiStringFormat::VerifyFn(|text| {
|
||||||
match location {
|
let location: MediaLocation = text.parse()?;
|
||||||
MediaLocation::Online(ref changer) => {
|
match location {
|
||||||
parse_simple_value(changer, &CHANGER_NAME_SCHEMA)?;
|
MediaLocation::Online(ref changer) => {
|
||||||
}
|
parse_simple_value(changer, &CHANGER_NAME_SCHEMA)?;
|
||||||
MediaLocation::Vault(ref vault) => {
|
|
||||||
parse_simple_value(vault, &VAULT_NAME_SCHEMA)?;
|
|
||||||
}
|
|
||||||
MediaLocation::Offline => { /* OK */}
|
|
||||||
}
|
}
|
||||||
Ok(())
|
MediaLocation::Vault(ref vault) => {
|
||||||
}))
|
parse_simple_value(vault, &VAULT_NAME_SCHEMA)?;
|
||||||
.schema();
|
}
|
||||||
|
MediaLocation::Offline => { /* OK */ }
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}))
|
||||||
|
.schema();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
impl std::fmt::Display for MediaLocation {
|
impl std::fmt::Display for MediaLocation {
|
||||||
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
match self {
|
match self {
|
||||||
MediaLocation::Offline => {
|
MediaLocation::Offline => {
|
||||||
|
@ -9,10 +9,7 @@ use std::str::FromStr;
|
|||||||
use anyhow::Error;
|
use anyhow::Error;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::{
|
use proxmox_schema::{api, Schema, StringSchema, ApiStringFormat, Updater};
|
||||||
api,
|
|
||||||
schema::{Schema, StringSchema, ApiStringFormat, Updater},
|
|
||||||
};
|
|
||||||
|
|
||||||
use proxmox_systemd::time::{parse_calendar_event, parse_time_span, CalendarEvent, TimeSpan};
|
use proxmox_systemd::time::{parse_calendar_event, parse_time_span, CalendarEvent, TimeSpan};
|
||||||
|
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
use ::serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::api;
|
use proxmox_schema::api;
|
||||||
|
|
||||||
#[api()]
|
#[api()]
|
||||||
/// Media status
|
/// Media status
|
||||||
|
@ -22,13 +22,10 @@ pub use media_location::*;
|
|||||||
mod media;
|
mod media;
|
||||||
pub use media::*;
|
pub use media::*;
|
||||||
|
|
||||||
use ::serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::api;
|
use proxmox_schema::{api, const_regex, Schema, StringSchema, ApiStringFormat};
|
||||||
use proxmox::api::schema::{Schema, StringSchema, ApiStringFormat};
|
use proxmox_uuid::Uuid;
|
||||||
use proxmox::tools::Uuid;
|
|
||||||
|
|
||||||
use proxmox::const_regex;
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
FINGERPRINT_SHA256_FORMAT, BACKUP_ID_SCHEMA, BACKUP_TYPE_SCHEMA,
|
FINGERPRINT_SHA256_FORMAT, BACKUP_ID_SCHEMA, BACKUP_TYPE_SCHEMA,
|
||||||
|
@ -1,203 +0,0 @@
|
|||||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
|
||||||
|
|
||||||
use anyhow::{bail, Error};
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use proxmox::api::api;
|
|
||||||
use proxmox::api::schema::{ApiStringFormat, ApiType, Schema, StringSchema, ArraySchema, ReturnType};
|
|
||||||
use proxmox::const_regex;
|
|
||||||
use proxmox::sys::linux::procfs;
|
|
||||||
|
|
||||||
use crate::Authid;
|
|
||||||
|
|
||||||
/// Unique Process/Task Identifier
|
|
||||||
///
|
|
||||||
/// We use this to uniquely identify worker task. UPIDs have a short
|
|
||||||
/// string repesentaion, which gives additional information about the
|
|
||||||
/// type of the task. for example:
|
|
||||||
/// ```text
|
|
||||||
/// UPID:{node}:{pid}:{pstart}:{task_id}:{starttime}:{worker_type}:{worker_id}:{userid}:
|
|
||||||
/// UPID:elsa:00004F37:0039E469:00000000:5CA78B83:garbage_collection::root@pam:
|
|
||||||
/// ```
|
|
||||||
/// Please note that we use tokio, so a single thread can run multiple
|
|
||||||
/// tasks.
|
|
||||||
// #[api] - manually implemented API type
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub struct UPID {
|
|
||||||
/// The Unix PID
|
|
||||||
pub pid: libc::pid_t,
|
|
||||||
/// The Unix process start time from `/proc/pid/stat`
|
|
||||||
pub pstart: u64,
|
|
||||||
/// The task start time (Epoch)
|
|
||||||
pub starttime: i64,
|
|
||||||
/// The task ID (inside the process/thread)
|
|
||||||
pub task_id: usize,
|
|
||||||
/// Worker type (arbitrary ASCII string)
|
|
||||||
pub worker_type: String,
|
|
||||||
/// Worker ID (arbitrary ASCII string)
|
|
||||||
pub worker_id: Option<String>,
|
|
||||||
/// The authenticated entity who started the task
|
|
||||||
pub auth_id: Authid,
|
|
||||||
/// The node name.
|
|
||||||
pub node: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
proxmox::forward_serialize_to_display!(UPID);
|
|
||||||
proxmox::forward_deserialize_to_from_str!(UPID);
|
|
||||||
|
|
||||||
const_regex! {
|
|
||||||
pub PROXMOX_UPID_REGEX = concat!(
|
|
||||||
r"^UPID:(?P<node>[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?):(?P<pid>[0-9A-Fa-f]{8}):",
|
|
||||||
r"(?P<pstart>[0-9A-Fa-f]{8,9}):(?P<task_id>[0-9A-Fa-f]{8,16}):(?P<starttime>[0-9A-Fa-f]{8}):",
|
|
||||||
r"(?P<wtype>[^:\s]+):(?P<wid>[^:\s]*):(?P<authid>[^:\s]+):$"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub const PROXMOX_UPID_FORMAT: ApiStringFormat =
|
|
||||||
ApiStringFormat::Pattern(&PROXMOX_UPID_REGEX);
|
|
||||||
|
|
||||||
pub const UPID_SCHEMA: Schema = StringSchema::new("Unique Process/Task Identifier")
|
|
||||||
.min_length("UPID:N:12345678:12345678:12345678:::".len())
|
|
||||||
.max_length(128) // arbitrary
|
|
||||||
.format(&PROXMOX_UPID_FORMAT)
|
|
||||||
.schema();
|
|
||||||
|
|
||||||
impl ApiType for UPID {
|
|
||||||
const API_SCHEMA: Schema = UPID_SCHEMA;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl UPID {
|
|
||||||
/// Create a new UPID
|
|
||||||
pub fn new(
|
|
||||||
worker_type: &str,
|
|
||||||
worker_id: Option<String>,
|
|
||||||
auth_id: Authid,
|
|
||||||
) -> Result<Self, Error> {
|
|
||||||
|
|
||||||
let pid = unsafe { libc::getpid() };
|
|
||||||
|
|
||||||
let bad: &[_] = &['/', ':', ' '];
|
|
||||||
|
|
||||||
if worker_type.contains(bad) {
|
|
||||||
bail!("illegal characters in worker type '{}'", worker_type);
|
|
||||||
}
|
|
||||||
|
|
||||||
static WORKER_TASK_NEXT_ID: AtomicUsize = AtomicUsize::new(0);
|
|
||||||
|
|
||||||
let task_id = WORKER_TASK_NEXT_ID.fetch_add(1, Ordering::SeqCst);
|
|
||||||
|
|
||||||
Ok(UPID {
|
|
||||||
pid,
|
|
||||||
pstart: procfs::PidStat::read_from_pid(nix::unistd::Pid::from_raw(pid))?.starttime,
|
|
||||||
starttime: proxmox::tools::time::epoch_i64(),
|
|
||||||
task_id,
|
|
||||||
worker_type: worker_type.to_owned(),
|
|
||||||
worker_id,
|
|
||||||
auth_id,
|
|
||||||
node: proxmox::tools::nodename().to_owned(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
impl std::str::FromStr for UPID {
|
|
||||||
type Err = Error;
|
|
||||||
|
|
||||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
|
||||||
if let Some(cap) = PROXMOX_UPID_REGEX.captures(s) {
|
|
||||||
|
|
||||||
let worker_id = if cap["wid"].is_empty() {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
let wid = proxmox_systemd::unescape_unit(&cap["wid"])?;
|
|
||||||
Some(wid)
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(UPID {
|
|
||||||
pid: i32::from_str_radix(&cap["pid"], 16).unwrap(),
|
|
||||||
pstart: u64::from_str_radix(&cap["pstart"], 16).unwrap(),
|
|
||||||
starttime: i64::from_str_radix(&cap["starttime"], 16).unwrap(),
|
|
||||||
task_id: usize::from_str_radix(&cap["task_id"], 16).unwrap(),
|
|
||||||
worker_type: cap["wtype"].to_string(),
|
|
||||||
worker_id,
|
|
||||||
auth_id: cap["authid"].parse()?,
|
|
||||||
node: cap["node"].to_string(),
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
bail!("unable to parse UPID '{}'", s);
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl std::fmt::Display for UPID {
|
|
||||||
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
|
||||||
|
|
||||||
let wid = if let Some(ref id) = self.worker_id {
|
|
||||||
proxmox_systemd::escape_unit(id, false)
|
|
||||||
} else {
|
|
||||||
String::new()
|
|
||||||
};
|
|
||||||
|
|
||||||
// Note: pstart can be > 32bit if uptime > 497 days, so this can result in
|
|
||||||
// more that 8 characters for pstart
|
|
||||||
|
|
||||||
write!(f, "UPID:{}:{:08X}:{:08X}:{:08X}:{:08X}:{}:{}:{}:",
|
|
||||||
self.node, self.pid, self.pstart, self.task_id, self.starttime, self.worker_type, wid, self.auth_id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[api()]
|
|
||||||
#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
|
|
||||||
#[serde(rename_all = "lowercase")]
|
|
||||||
pub enum TaskStateType {
|
|
||||||
/// Ok
|
|
||||||
OK,
|
|
||||||
/// Warning
|
|
||||||
Warning,
|
|
||||||
/// Error
|
|
||||||
Error,
|
|
||||||
/// Unknown
|
|
||||||
Unknown,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[api(
|
|
||||||
properties: {
|
|
||||||
upid: { schema: UPID::API_SCHEMA },
|
|
||||||
},
|
|
||||||
)]
|
|
||||||
#[derive(Serialize, Deserialize)]
|
|
||||||
/// Task properties.
|
|
||||||
pub struct TaskListItem {
|
|
||||||
pub upid: String,
|
|
||||||
/// The node name where the task is running on.
|
|
||||||
pub node: String,
|
|
||||||
/// The Unix PID
|
|
||||||
pub pid: i64,
|
|
||||||
/// The task start time (Epoch)
|
|
||||||
pub pstart: u64,
|
|
||||||
/// The task start time (Epoch)
|
|
||||||
pub starttime: i64,
|
|
||||||
/// Worker type (arbitrary ASCII string)
|
|
||||||
pub worker_type: String,
|
|
||||||
/// Worker ID (arbitrary ASCII string)
|
|
||||||
pub worker_id: Option<String>,
|
|
||||||
/// The authenticated entity who started the task
|
|
||||||
pub user: Authid,
|
|
||||||
/// The task end time (Epoch)
|
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
|
||||||
pub endtime: Option<i64>,
|
|
||||||
/// Task end status
|
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
|
||||||
pub status: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType {
|
|
||||||
optional: false,
|
|
||||||
schema: &ArraySchema::new(
|
|
||||||
"A list of tasks.",
|
|
||||||
&TaskListItem::API_SCHEMA,
|
|
||||||
).schema(),
|
|
||||||
};
|
|
||||||
|
|
@ -1,8 +1,7 @@
|
|||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::api;
|
use proxmox_schema::{
|
||||||
use proxmox::api::schema::{
|
api, BooleanSchema, IntegerSchema, Schema, StringSchema, Updater,
|
||||||
BooleanSchema, IntegerSchema, Schema, StringSchema, Updater,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA};
|
use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA};
|
||||||
@ -133,7 +132,7 @@ impl ApiToken {
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
if let Some(expire) = self.expire {
|
if let Some(expire) = self.expire {
|
||||||
let now = proxmox::tools::time::epoch_i64();
|
let now = proxmox_time::epoch_i64();
|
||||||
if expire > 0 && expire <= now {
|
if expire > 0 && expire <= now {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
@ -198,7 +197,7 @@ impl User {
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
if let Some(expire) = self.expire {
|
if let Some(expire) = self.expire {
|
||||||
let now = proxmox::tools::time::epoch_i64();
|
let now = proxmox_time::epoch_i64();
|
||||||
if expire > 0 && expire <= now {
|
if expire > 0 && expire <= now {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
@ -29,9 +29,9 @@ use anyhow::{bail, format_err, Error};
|
|||||||
use lazy_static::lazy_static;
|
use lazy_static::lazy_static;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::api;
|
use proxmox_schema::{
|
||||||
use proxmox::api::schema::{ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType};
|
api, const_regex, ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType,
|
||||||
use proxmox::const_regex;
|
};
|
||||||
|
|
||||||
// we only allow a limited set of characters
|
// we only allow a limited set of characters
|
||||||
// colon is not allowed, because we store usernames in
|
// colon is not allowed, because we store usernames in
|
||||||
|
@ -1,8 +1,6 @@
|
|||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::{api, schema::*};
|
use proxmox_schema::*;
|
||||||
|
|
||||||
use proxmox::const_regex;
|
|
||||||
|
|
||||||
const_regex! {
|
const_regex! {
|
||||||
pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
|
pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "pbs-buildcfg"
|
name = "pbs-buildcfg"
|
||||||
version = "2.0.10"
|
version = "2.0.12"
|
||||||
authors = ["Proxmox Support Team <support@proxmox.com>"]
|
authors = ["Proxmox Support Team <support@proxmox.com>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
description = "macros used for pbs related paths such as configdir and rundir"
|
description = "macros used for pbs related paths such as configdir and rundir"
|
||||||
|
@ -28,9 +28,14 @@ tower-service = "0.3.0"
|
|||||||
xdg = "2.2"
|
xdg = "2.2"
|
||||||
|
|
||||||
pathpatterns = "0.1.2"
|
pathpatterns = "0.1.2"
|
||||||
proxmox = { version = "0.13.3", default-features = false, features = [ "cli" ] }
|
proxmox = "0.14.0"
|
||||||
proxmox-fuse = "0.1.1"
|
proxmox-fuse = "0.1.1"
|
||||||
proxmox-http = { version = "0.4.0", features = [ "client", "http-helpers", "websocket" ] }
|
proxmox-http = { version = "0.5.0", features = [ "client", "http-helpers", "websocket" ] }
|
||||||
|
proxmox-io = { version = "1", features = [ "tokio" ] }
|
||||||
|
proxmox-lang = "1"
|
||||||
|
proxmox-router = { version = "1", features = [ "cli" ] }
|
||||||
|
proxmox-schema = "1"
|
||||||
|
proxmox-time = "1"
|
||||||
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
|
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
|
||||||
|
|
||||||
pbs-api-types = { path = "../pbs-api-types" }
|
pbs-api-types = { path = "../pbs-api-types" }
|
||||||
|
@ -1,8 +1,8 @@
|
|||||||
use anyhow::{bail, Error};
|
use anyhow::{bail, Error};
|
||||||
|
|
||||||
use proxmox::api::schema::*;
|
use proxmox_schema::*;
|
||||||
|
|
||||||
proxmox::const_regex! {
|
const_regex! {
|
||||||
BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(pxar|img|conf|log)):(.+)$";
|
BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(pxar|img|conf|log)):(.+)$";
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -338,7 +338,7 @@ impl BackupWriter {
|
|||||||
let size_dirty = upload_stats.size - upload_stats.size_reused;
|
let size_dirty = upload_stats.size - upload_stats.size_reused;
|
||||||
let size: HumanByte = upload_stats.size.into();
|
let size: HumanByte = upload_stats.size.into();
|
||||||
let archive = if self.verbose {
|
let archive = if self.verbose {
|
||||||
archive_name.to_string()
|
archive_name
|
||||||
} else {
|
} else {
|
||||||
pbs_tools::format::strip_server_file_extension(archive_name)
|
pbs_tools::format::strip_server_file_extension(archive_name)
|
||||||
};
|
};
|
||||||
|
@ -3,6 +3,7 @@ use std::ffi::{CStr, CString, OsStr, OsString};
|
|||||||
use std::future::Future;
|
use std::future::Future;
|
||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
use std::mem;
|
use std::mem;
|
||||||
|
use std::ops::ControlFlow;
|
||||||
use std::os::unix::ffi::{OsStrExt, OsStringExt};
|
use std::os::unix::ffi::{OsStrExt, OsStringExt};
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
use std::pin::Pin;
|
use std::pin::Pin;
|
||||||
@ -13,14 +14,13 @@ use nix::fcntl::OFlag;
|
|||||||
use nix::sys::stat::Mode;
|
use nix::sys::stat::Mode;
|
||||||
|
|
||||||
use pathpatterns::{MatchEntry, MatchList, MatchPattern, MatchType, PatternFlag};
|
use pathpatterns::{MatchEntry, MatchList, MatchPattern, MatchType, PatternFlag};
|
||||||
use proxmox::api::api;
|
|
||||||
use proxmox::api::cli::{self, CliCommand, CliCommandMap, CliHelper, CommandLineInterface};
|
|
||||||
use proxmox::tools::fs::{create_path, CreateOptions};
|
use proxmox::tools::fs::{create_path, CreateOptions};
|
||||||
|
use proxmox_router::cli::{self, CliCommand, CliCommandMap, CliHelper, CommandLineInterface};
|
||||||
|
use proxmox_schema::api;
|
||||||
use pxar::{EntryKind, Metadata};
|
use pxar::{EntryKind, Metadata};
|
||||||
|
|
||||||
use pbs_runtime::block_in_place;
|
use pbs_runtime::block_in_place;
|
||||||
use pbs_datastore::catalog::{self, DirEntryAttribute};
|
use pbs_datastore::catalog::{self, DirEntryAttribute};
|
||||||
use pbs_tools::ops::ControlFlow;
|
|
||||||
|
|
||||||
use crate::pxar::Flags;
|
use crate::pxar::Flags;
|
||||||
use crate::pxar::fuse::{Accessor, FileEntry};
|
use crate::pxar::fuse::{Accessor, FileEntry};
|
||||||
@ -1100,7 +1100,7 @@ impl<'a> ExtractorState<'a> {
|
|||||||
|
|
||||||
self.extractor.leave_directory()?;
|
self.extractor.leave_directory()?;
|
||||||
|
|
||||||
Ok(ControlFlow::CONTINUE)
|
Ok(ControlFlow::Continue(()))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_new_directory(
|
async fn handle_new_directory(
|
||||||
|
@ -1,230 +0,0 @@
|
|||||||
use std::io::{self, Seek, SeekFrom};
|
|
||||||
use std::ops::Range;
|
|
||||||
use std::sync::{Arc, Mutex};
|
|
||||||
use std::task::Context;
|
|
||||||
use std::pin::Pin;
|
|
||||||
|
|
||||||
use anyhow::{bail, format_err, Error};
|
|
||||||
|
|
||||||
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
|
|
||||||
|
|
||||||
use pbs_datastore::dynamic_index::DynamicIndexReader;
|
|
||||||
use pbs_datastore::read_chunk::ReadChunk;
|
|
||||||
use pbs_datastore::index::IndexFile;
|
|
||||||
use pbs_tools::lru_cache::LruCache;
|
|
||||||
|
|
||||||
struct CachedChunk {
|
|
||||||
range: Range<u64>,
|
|
||||||
data: Vec<u8>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl CachedChunk {
|
|
||||||
/// Perform sanity checks on the range and data size:
|
|
||||||
pub fn new(range: Range<u64>, data: Vec<u8>) -> Result<Self, Error> {
|
|
||||||
if data.len() as u64 != range.end - range.start {
|
|
||||||
bail!(
|
|
||||||
"read chunk with wrong size ({} != {})",
|
|
||||||
data.len(),
|
|
||||||
range.end - range.start,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
Ok(Self { range, data })
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct BufferedDynamicReader<S> {
|
|
||||||
store: S,
|
|
||||||
index: DynamicIndexReader,
|
|
||||||
archive_size: u64,
|
|
||||||
read_buffer: Vec<u8>,
|
|
||||||
buffered_chunk_idx: usize,
|
|
||||||
buffered_chunk_start: u64,
|
|
||||||
read_offset: u64,
|
|
||||||
lru_cache: LruCache<usize, CachedChunk>,
|
|
||||||
}
|
|
||||||
|
|
||||||
struct ChunkCacher<'a, S> {
|
|
||||||
store: &'a mut S,
|
|
||||||
index: &'a DynamicIndexReader,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a, S: ReadChunk> pbs_tools::lru_cache::Cacher<usize, CachedChunk> for ChunkCacher<'a, S> {
|
|
||||||
fn fetch(&mut self, index: usize) -> Result<Option<CachedChunk>, Error> {
|
|
||||||
let info = match self.index.chunk_info(index) {
|
|
||||||
Some(info) => info,
|
|
||||||
None => bail!("chunk index out of range"),
|
|
||||||
};
|
|
||||||
let range = info.range;
|
|
||||||
let data = self.store.read_chunk(&info.digest)?;
|
|
||||||
CachedChunk::new(range, data).map(Some)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S: ReadChunk> BufferedDynamicReader<S> {
|
|
||||||
pub fn new(index: DynamicIndexReader, store: S) -> Self {
|
|
||||||
let archive_size = index.index_bytes();
|
|
||||||
Self {
|
|
||||||
store,
|
|
||||||
index,
|
|
||||||
archive_size,
|
|
||||||
read_buffer: Vec::with_capacity(1024 * 1024),
|
|
||||||
buffered_chunk_idx: 0,
|
|
||||||
buffered_chunk_start: 0,
|
|
||||||
read_offset: 0,
|
|
||||||
lru_cache: LruCache::new(32),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn archive_size(&self) -> u64 {
|
|
||||||
self.archive_size
|
|
||||||
}
|
|
||||||
|
|
||||||
fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
|
|
||||||
//let (start, end, data) = self.lru_cache.access(
|
|
||||||
let cached_chunk = self.lru_cache.access(
|
|
||||||
idx,
|
|
||||||
&mut ChunkCacher {
|
|
||||||
store: &mut self.store,
|
|
||||||
index: &self.index,
|
|
||||||
},
|
|
||||||
)?.ok_or_else(|| format_err!("chunk not found by cacher"))?;
|
|
||||||
|
|
||||||
// fixme: avoid copy
|
|
||||||
self.read_buffer.clear();
|
|
||||||
self.read_buffer.extend_from_slice(&cached_chunk.data);
|
|
||||||
|
|
||||||
self.buffered_chunk_idx = idx;
|
|
||||||
|
|
||||||
self.buffered_chunk_start = cached_chunk.range.start;
|
|
||||||
//println!("BUFFER {} {}", self.buffered_chunk_start, end);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S: ReadChunk> pbs_tools::io::BufferedRead for BufferedDynamicReader<S> {
|
|
||||||
fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error> {
|
|
||||||
if offset == self.archive_size {
|
|
||||||
return Ok(&self.read_buffer[0..0]);
|
|
||||||
}
|
|
||||||
|
|
||||||
let buffer_len = self.read_buffer.len();
|
|
||||||
let index = &self.index;
|
|
||||||
|
|
||||||
// optimization for sequential read
|
|
||||||
if buffer_len > 0
|
|
||||||
&& ((self.buffered_chunk_idx + 1) < index.index().len())
|
|
||||||
&& (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
|
|
||||||
{
|
|
||||||
let next_idx = self.buffered_chunk_idx + 1;
|
|
||||||
let next_end = index.chunk_end(next_idx);
|
|
||||||
if offset < next_end {
|
|
||||||
self.buffer_chunk(next_idx)?;
|
|
||||||
let buffer_offset = (offset - self.buffered_chunk_start) as usize;
|
|
||||||
return Ok(&self.read_buffer[buffer_offset..]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (buffer_len == 0)
|
|
||||||
|| (offset < self.buffered_chunk_start)
|
|
||||||
|| (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
|
|
||||||
{
|
|
||||||
let end_idx = index.index().len() - 1;
|
|
||||||
let end = index.chunk_end(end_idx);
|
|
||||||
let idx = index.binary_search(0, 0, end_idx, end, offset)?;
|
|
||||||
self.buffer_chunk(idx)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
let buffer_offset = (offset - self.buffered_chunk_start) as usize;
|
|
||||||
Ok(&self.read_buffer[buffer_offset..])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S: ReadChunk> std::io::Read for BufferedDynamicReader<S> {
|
|
||||||
fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
|
|
||||||
use pbs_tools::io::BufferedRead;
|
|
||||||
use std::io::{Error, ErrorKind};
|
|
||||||
|
|
||||||
let data = match self.buffered_read(self.read_offset) {
|
|
||||||
Ok(v) => v,
|
|
||||||
Err(err) => return Err(Error::new(ErrorKind::Other, err.to_string())),
|
|
||||||
};
|
|
||||||
|
|
||||||
let n = if data.len() > buf.len() {
|
|
||||||
buf.len()
|
|
||||||
} else {
|
|
||||||
data.len()
|
|
||||||
};
|
|
||||||
|
|
||||||
buf[0..n].copy_from_slice(&data[0..n]);
|
|
||||||
|
|
||||||
self.read_offset += n as u64;
|
|
||||||
|
|
||||||
Ok(n)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S: ReadChunk> std::io::Seek for BufferedDynamicReader<S> {
|
|
||||||
fn seek(&mut self, pos: SeekFrom) -> Result<u64, std::io::Error> {
|
|
||||||
let new_offset = match pos {
|
|
||||||
SeekFrom::Start(start_offset) => start_offset as i64,
|
|
||||||
SeekFrom::End(end_offset) => (self.archive_size as i64) + end_offset,
|
|
||||||
SeekFrom::Current(offset) => (self.read_offset as i64) + offset,
|
|
||||||
};
|
|
||||||
|
|
||||||
use std::io::{Error, ErrorKind};
|
|
||||||
if (new_offset < 0) || (new_offset > (self.archive_size as i64)) {
|
|
||||||
return Err(Error::new(
|
|
||||||
ErrorKind::Other,
|
|
||||||
format!(
|
|
||||||
"seek is out of range {} ([0..{}])",
|
|
||||||
new_offset, self.archive_size
|
|
||||||
),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
self.read_offset = new_offset as u64;
|
|
||||||
|
|
||||||
Ok(self.read_offset)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
|
|
||||||
/// async use!
|
|
||||||
///
|
|
||||||
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
|
|
||||||
/// so that we can properly access it from multiple threads simultaneously while not issuing
|
|
||||||
/// duplicate simultaneous reads over http.
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct LocalDynamicReadAt<R: ReadChunk> {
|
|
||||||
inner: Arc<Mutex<BufferedDynamicReader<R>>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<R: ReadChunk> LocalDynamicReadAt<R> {
|
|
||||||
pub fn new(inner: BufferedDynamicReader<R>) -> Self {
|
|
||||||
Self {
|
|
||||||
inner: Arc::new(Mutex::new(inner)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<R: ReadChunk> ReadAt for LocalDynamicReadAt<R> {
|
|
||||||
fn start_read_at<'a>(
|
|
||||||
self: Pin<&'a Self>,
|
|
||||||
_cx: &mut Context,
|
|
||||||
buf: &'a mut [u8],
|
|
||||||
offset: u64,
|
|
||||||
) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
|
|
||||||
use std::io::Read;
|
|
||||||
MaybeReady::Ready(tokio::task::block_in_place(move || {
|
|
||||||
let mut reader = self.inner.lock().unwrap();
|
|
||||||
reader.seek(SeekFrom::Start(offset))?;
|
|
||||||
Ok(reader.read(buf)?)
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn poll_complete<'a>(
|
|
||||||
self: Pin<&'a Self>,
|
|
||||||
_op: ReadAtOperation<'a>,
|
|
||||||
) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
|
|
||||||
panic!("LocalDynamicReadAt::start_read_at returned Pending");
|
|
||||||
}
|
|
||||||
}
|
|
@ -15,10 +15,10 @@ use percent_encoding::percent_encode;
|
|||||||
use xdg::BaseDirectories;
|
use xdg::BaseDirectories;
|
||||||
|
|
||||||
use proxmox::{
|
use proxmox::{
|
||||||
api::error::HttpError,
|
|
||||||
sys::linux::tty,
|
sys::linux::tty,
|
||||||
tools::fs::{file_get_json, replace_file, CreateOptions},
|
tools::fs::{file_get_json, replace_file, CreateOptions},
|
||||||
};
|
};
|
||||||
|
use proxmox_router::HttpError;
|
||||||
|
|
||||||
use proxmox_http::client::HttpsConnector;
|
use proxmox_http::client::HttpsConnector;
|
||||||
use proxmox_http::uri::build_authority;
|
use proxmox_http::uri::build_authority;
|
||||||
@ -230,7 +230,7 @@ fn store_ticket_info(prefix: &str, server: &str, username: &str, ticket: &str, t
|
|||||||
|
|
||||||
let mut data = file_get_json(&path, Some(json!({})))?;
|
let mut data = file_get_json(&path, Some(json!({})))?;
|
||||||
|
|
||||||
let now = proxmox::tools::time::epoch_i64();
|
let now = proxmox_time::epoch_i64();
|
||||||
|
|
||||||
data[server][username] = json!({ "timestamp": now, "ticket": ticket, "token": token});
|
data[server][username] = json!({ "timestamp": now, "ticket": ticket, "token": token});
|
||||||
|
|
||||||
@ -261,7 +261,7 @@ fn load_ticket_info(prefix: &str, server: &str, userid: &Userid) -> Option<(Stri
|
|||||||
// usually /run/user/<uid>/...
|
// usually /run/user/<uid>/...
|
||||||
let path = base.place_runtime_file("tickets").ok()?;
|
let path = base.place_runtime_file("tickets").ok()?;
|
||||||
let data = file_get_json(&path, None).ok()?;
|
let data = file_get_json(&path, None).ok()?;
|
||||||
let now = proxmox::tools::time::epoch_i64();
|
let now = proxmox_time::epoch_i64();
|
||||||
let ticket_lifetime = ticket::TICKET_LIFETIME - 60;
|
let ticket_lifetime = ticket::TICKET_LIFETIME - 60;
|
||||||
let uinfo = data[server][userid.as_str()].as_object()?;
|
let uinfo = data[server][userid.as_str()].as_object()?;
|
||||||
let timestamp = uinfo["timestamp"].as_i64()?;
|
let timestamp = uinfo["timestamp"].as_i64()?;
|
||||||
|
@ -3,15 +3,7 @@
|
|||||||
//! This library implements the client side to access the backups
|
//! This library implements the client side to access the backups
|
||||||
//! server using https.
|
//! server using https.
|
||||||
|
|
||||||
use anyhow::Error;
|
|
||||||
|
|
||||||
use pbs_api_types::{Authid, Userid};
|
|
||||||
use pbs_tools::ticket::Ticket;
|
|
||||||
use pbs_tools::cert::CertInfo;
|
|
||||||
use pbs_tools::auth::private_auth_key;
|
|
||||||
|
|
||||||
pub mod catalog_shell;
|
pub mod catalog_shell;
|
||||||
pub mod dynamic_index;
|
|
||||||
pub mod pxar;
|
pub mod pxar;
|
||||||
pub mod tools;
|
pub mod tools;
|
||||||
|
|
||||||
@ -49,26 +41,3 @@ mod chunk_stream;
|
|||||||
pub use chunk_stream::{ChunkStream, FixedChunkStream};
|
pub use chunk_stream::{ChunkStream, FixedChunkStream};
|
||||||
|
|
||||||
pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120;
|
pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120;
|
||||||
|
|
||||||
/// Connect to localhost:8007 as root@pam
|
|
||||||
///
|
|
||||||
/// This automatically creates a ticket if run as 'root' user.
|
|
||||||
pub fn connect_to_localhost() -> Result<HttpClient, Error> {
|
|
||||||
|
|
||||||
let uid = nix::unistd::Uid::current();
|
|
||||||
|
|
||||||
let client = if uid.is_root() {
|
|
||||||
let ticket = Ticket::new("PBS", Userid::root_userid())?
|
|
||||||
.sign(private_auth_key(), None)?;
|
|
||||||
let fingerprint = CertInfo::new()?.fingerprint()?;
|
|
||||||
let options = HttpClientOptions::new_non_interactive(ticket, Some(fingerprint));
|
|
||||||
|
|
||||||
HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)?
|
|
||||||
} else {
|
|
||||||
let options = HttpClientOptions::new_interactive(None, None);
|
|
||||||
|
|
||||||
HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)?
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(client)
|
|
||||||
}
|
|
||||||
|
@ -19,11 +19,11 @@ use pathpatterns::{MatchEntry, MatchFlag, MatchList, MatchType, PatternFlag};
|
|||||||
use pxar::Metadata;
|
use pxar::Metadata;
|
||||||
use pxar::encoder::{SeqWrite, LinkOffset};
|
use pxar::encoder::{SeqWrite, LinkOffset};
|
||||||
|
|
||||||
use proxmox::c_str;
|
|
||||||
use proxmox::sys::error::SysError;
|
use proxmox::sys::error::SysError;
|
||||||
use proxmox::tools::fd::RawFdNum;
|
use proxmox::tools::fd::RawFdNum;
|
||||||
use proxmox::tools::vec;
|
|
||||||
use proxmox::tools::fd::Fd;
|
use proxmox::tools::fd::Fd;
|
||||||
|
use proxmox_io::vec;
|
||||||
|
use proxmox_lang::c_str;
|
||||||
|
|
||||||
use pbs_datastore::catalog::BackupCatalogWriter;
|
use pbs_datastore::catalog::BackupCatalogWriter;
|
||||||
use pbs_tools::{acl, fs, xattr};
|
use pbs_tools::{acl, fs, xattr};
|
||||||
|
@ -22,10 +22,8 @@ use pxar::format::Device;
|
|||||||
use pxar::{Entry, EntryKind, Metadata};
|
use pxar::{Entry, EntryKind, Metadata};
|
||||||
|
|
||||||
use proxmox::c_result;
|
use proxmox::c_result;
|
||||||
use proxmox::tools::{
|
use proxmox::tools::fs::{create_path, CreateOptions};
|
||||||
fs::{create_path, CreateOptions},
|
use proxmox_io::{sparse_copy, sparse_copy_async};
|
||||||
io::{sparse_copy, sparse_copy_async},
|
|
||||||
};
|
|
||||||
|
|
||||||
use pbs_tools::zip::{ZipEncoder, ZipEntry};
|
use pbs_tools::zip::{ZipEncoder, ZipEntry};
|
||||||
|
|
||||||
|
@ -20,7 +20,7 @@ use futures::select;
|
|||||||
use futures::sink::SinkExt;
|
use futures::sink::SinkExt;
|
||||||
use futures::stream::{StreamExt, TryStreamExt};
|
use futures::stream::{StreamExt, TryStreamExt};
|
||||||
|
|
||||||
use proxmox::tools::vec;
|
use proxmox_io::vec;
|
||||||
use pxar::accessor::{self, EntryRangeInfo, ReadAt};
|
use pxar::accessor::{self, EntryRangeInfo, ReadAt};
|
||||||
|
|
||||||
use proxmox_fuse::requests::{self, FuseRequest};
|
use proxmox_fuse::requests::{self, FuseRequest};
|
||||||
@ -344,7 +344,7 @@ impl SessionImpl {
|
|||||||
Err(err) => return self.handle_err(request, err, err_sender).await,
|
Err(err) => return self.handle_err(request, err, err_sender).await,
|
||||||
},
|
},
|
||||||
Request::Getattr(request) => match self.getattr(request.inode).await {
|
Request::Getattr(request) => match self.getattr(request.inode).await {
|
||||||
Ok(stat) => request.reply(&stat, std::f64::MAX).map_err(Error::from),
|
Ok(stat) => request.reply(&stat, f64::MAX).map_err(Error::from),
|
||||||
Err(err) => return self.handle_err(request, err, err_sender).await,
|
Err(err) => return self.handle_err(request, err, err_sender).await,
|
||||||
},
|
},
|
||||||
Request::ReaddirPlus(mut request) => match self.readdirplus(&mut request).await {
|
Request::ReaddirPlus(mut request) => match self.readdirplus(&mut request).await {
|
||||||
@ -539,7 +539,7 @@ impl SessionImpl {
|
|||||||
let file = file?.decode_entry().await?;
|
let file = file?.decode_entry().await?;
|
||||||
let stat = to_stat(to_inode(&file), &file)?;
|
let stat = to_stat(to_inode(&file), &file)?;
|
||||||
let name = file.file_name();
|
let name = file.file_name();
|
||||||
match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
|
match request.add_entry(name, &stat, next, 1, f64::MAX, f64::MAX)? {
|
||||||
ReplyBufState::Ok => (),
|
ReplyBufState::Ok => (),
|
||||||
ReplyBufState::Full => return Ok(lookups),
|
ReplyBufState::Full => return Ok(lookups),
|
||||||
}
|
}
|
||||||
@ -551,7 +551,7 @@ impl SessionImpl {
|
|||||||
let file = dir.lookup_self().await?;
|
let file = dir.lookup_self().await?;
|
||||||
let stat = to_stat(to_inode(&file), &file)?;
|
let stat = to_stat(to_inode(&file), &file)?;
|
||||||
let name = OsStr::new(".");
|
let name = OsStr::new(".");
|
||||||
match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
|
match request.add_entry(name, &stat, next, 1, f64::MAX, f64::MAX)? {
|
||||||
ReplyBufState::Ok => (),
|
ReplyBufState::Ok => (),
|
||||||
ReplyBufState::Full => return Ok(lookups),
|
ReplyBufState::Full => return Ok(lookups),
|
||||||
}
|
}
|
||||||
@ -565,7 +565,7 @@ impl SessionImpl {
|
|||||||
let file = parent_dir.lookup_self().await?;
|
let file = parent_dir.lookup_self().await?;
|
||||||
let stat = to_stat(to_inode(&file), &file)?;
|
let stat = to_stat(to_inode(&file), &file)?;
|
||||||
let name = OsStr::new("..");
|
let name = OsStr::new("..");
|
||||||
match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
|
match request.add_entry(name, &stat, next, 1, f64::MAX, f64::MAX)? {
|
||||||
ReplyBufState::Ok => (),
|
ReplyBufState::Ok => (),
|
||||||
ReplyBufState::Full => return Ok(lookups),
|
ReplyBufState::Full => return Ok(lookups),
|
||||||
}
|
}
|
||||||
|
@ -115,7 +115,7 @@ fn mode_string(entry: &Entry) -> String {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn format_mtime(mtime: &StatxTimestamp) -> String {
|
fn format_mtime(mtime: &StatxTimestamp) -> String {
|
||||||
if let Ok(s) = proxmox::tools::time::strftime_local("%Y-%m-%d %H:%M:%S", mtime.secs) {
|
if let Ok(s) = proxmox_time::strftime_local("%Y-%m-%d %H:%M:%S", mtime.secs) {
|
||||||
return s;
|
return s;
|
||||||
}
|
}
|
||||||
format!("{}.{}", mtime.secs, mtime.nanos)
|
format!("{}.{}", mtime.secs, mtime.nanos)
|
||||||
|
@ -5,7 +5,7 @@ use serde_json::{json, Value};
|
|||||||
use tokio::signal::unix::{signal, SignalKind};
|
use tokio::signal::unix::{signal, SignalKind};
|
||||||
use futures::*;
|
use futures::*;
|
||||||
|
|
||||||
use proxmox::api::cli::format_and_print_result;
|
use proxmox_router::cli::format_and_print_result;
|
||||||
|
|
||||||
use pbs_tools::percent_encoding::percent_encode_component;
|
use pbs_tools::percent_encoding::percent_encode_component;
|
||||||
|
|
||||||
|
@ -6,9 +6,9 @@ use std::io::Read;
|
|||||||
use anyhow::{bail, format_err, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use proxmox::api::schema::*;
|
|
||||||
use proxmox::sys::linux::tty;
|
use proxmox::sys::linux::tty;
|
||||||
use proxmox::tools::fs::file_get_contents;
|
use proxmox::tools::fs::file_get_contents;
|
||||||
|
use proxmox_schema::*;
|
||||||
|
|
||||||
use pbs_api_types::CryptMode;
|
use pbs_api_types::CryptMode;
|
||||||
|
|
||||||
|
@ -10,11 +10,9 @@ use anyhow::{bail, format_err, Context, Error};
|
|||||||
use serde_json::{json, Value};
|
use serde_json::{json, Value};
|
||||||
use xdg::BaseDirectories;
|
use xdg::BaseDirectories;
|
||||||
|
|
||||||
use proxmox::{
|
use proxmox_schema::*;
|
||||||
api::schema::*,
|
use proxmox_router::cli::shellword_split;
|
||||||
api::cli::shellword_split,
|
use proxmox::tools::fs::file_get_json;
|
||||||
tools::fs::file_get_json,
|
|
||||||
};
|
|
||||||
|
|
||||||
use pbs_api_types::{BACKUP_REPO_URL, Authid, UserWithTokens};
|
use pbs_api_types::{BACKUP_REPO_URL, Authid, UserWithTokens};
|
||||||
use pbs_datastore::BackupDir;
|
use pbs_datastore::BackupDir;
|
||||||
@ -306,7 +304,7 @@ pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Ve
|
|||||||
pub fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
pub fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
||||||
complete_server_file_name(arg, param)
|
complete_server_file_name(arg, param)
|
||||||
.iter()
|
.iter()
|
||||||
.map(|v| pbs_tools::format::strip_server_file_extension(&v))
|
.map(|v| pbs_tools::format::strip_server_file_extension(&v).to_owned())
|
||||||
.collect()
|
.collect()
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -315,7 +313,7 @@ pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) ->
|
|||||||
.iter()
|
.iter()
|
||||||
.filter_map(|name| {
|
.filter_map(|name| {
|
||||||
if name.ends_with(".pxar.didx") {
|
if name.ends_with(".pxar.didx") {
|
||||||
Some(pbs_tools::format::strip_server_file_extension(name))
|
Some(pbs_tools::format::strip_server_file_extension(name).to_owned())
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
@ -328,7 +326,7 @@ pub fn complete_img_archive_name(arg: &str, param: &HashMap<String, String>) ->
|
|||||||
.iter()
|
.iter()
|
||||||
.filter_map(|name| {
|
.filter_map(|name| {
|
||||||
if name.ends_with(".img.fidx") {
|
if name.ends_with(".img.fidx") {
|
||||||
Some(pbs_tools::format::strip_server_file_extension(name))
|
Some(pbs_tools::format::strip_server_file_extension(name).to_owned())
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
|
@ -13,7 +13,7 @@ use serde_json::Value;
|
|||||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf};
|
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf};
|
||||||
use tokio::net::UnixStream;
|
use tokio::net::UnixStream;
|
||||||
|
|
||||||
use proxmox::api::error::HttpError;
|
use proxmox_router::HttpError;
|
||||||
|
|
||||||
pub const DEFAULT_VSOCK_PORT: u16 = 807;
|
pub const DEFAULT_VSOCK_PORT: u16 = 807;
|
||||||
|
|
||||||
|
@ -6,17 +6,23 @@ edition = "2018"
|
|||||||
description = "Configuration file management for PBS"
|
description = "Configuration file management for PBS"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
libc = "0.2"
|
|
||||||
anyhow = "1.0"
|
anyhow = "1.0"
|
||||||
|
hex = "0.4.3"
|
||||||
lazy_static = "1.4"
|
lazy_static = "1.4"
|
||||||
|
libc = "0.2"
|
||||||
|
nix = "0.19.1"
|
||||||
|
once_cell = "1.3.1"
|
||||||
|
openssl = "0.10"
|
||||||
|
regex = "1.2"
|
||||||
serde = { version = "1.0", features = ["derive"] }
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
serde_json = "1.0"
|
serde_json = "1.0"
|
||||||
openssl = "0.10"
|
|
||||||
nix = "0.19.1"
|
|
||||||
regex = "1.2"
|
|
||||||
once_cell = "1.3.1"
|
|
||||||
|
|
||||||
proxmox = { version = "0.13.3", default-features = false, features = [ "cli" ] }
|
proxmox = "0.14.0"
|
||||||
|
proxmox-lang = "1"
|
||||||
|
proxmox-router = { version = "1", default-features = false }
|
||||||
|
proxmox-schema = "1"
|
||||||
|
proxmox-section-config = "1"
|
||||||
|
proxmox-time = "1"
|
||||||
|
|
||||||
pbs-api-types = { path = "../pbs-api-types" }
|
pbs-api-types = { path = "../pbs-api-types" }
|
||||||
pbs-buildcfg = { path = "../pbs-buildcfg" }
|
pbs-buildcfg = { path = "../pbs-buildcfg" }
|
||||||
|
@ -8,7 +8,7 @@ use anyhow::{bail, Error};
|
|||||||
|
|
||||||
use lazy_static::lazy_static;
|
use lazy_static::lazy_static;
|
||||||
|
|
||||||
use proxmox::api::schema::{Schema, StringSchema, ApiStringFormat, ApiType};
|
use proxmox_schema::{ApiStringFormat, ApiType, Schema, StringSchema};
|
||||||
|
|
||||||
use pbs_api_types::{Authid, Userid, Role, ROLE_NAME_NO_ACCESS};
|
use pbs_api_types::{Authid, Userid, Role, ROLE_NAME_NO_ACCESS};
|
||||||
|
|
||||||
|
@ -3,11 +3,11 @@
|
|||||||
use std::sync::{RwLock, Arc};
|
use std::sync::{RwLock, Arc};
|
||||||
|
|
||||||
use anyhow::{Error, bail};
|
use anyhow::{Error, bail};
|
||||||
|
|
||||||
use proxmox::api::section_config::SectionConfigData;
|
|
||||||
use lazy_static::lazy_static;
|
use lazy_static::lazy_static;
|
||||||
use proxmox::api::UserInformation;
|
|
||||||
use proxmox::tools::time::epoch_i64;
|
use proxmox_router::UserInformation;
|
||||||
|
use proxmox_section_config::SectionConfigData;
|
||||||
|
use proxmox_time::epoch_i64;
|
||||||
|
|
||||||
use pbs_api_types::{Authid, Userid, User, ApiToken, ROLE_ADMIN};
|
use pbs_api_types::{Authid, Userid, User, ApiToken, ROLE_ADMIN};
|
||||||
|
|
||||||
|
@ -2,14 +2,8 @@ use anyhow::{Error};
|
|||||||
use lazy_static::lazy_static;
|
use lazy_static::lazy_static;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
|
||||||
use proxmox::api::{
|
use proxmox_schema::{ApiType, Schema};
|
||||||
schema::{ApiType, Schema},
|
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
|
||||||
section_config::{
|
|
||||||
SectionConfig,
|
|
||||||
SectionConfigData,
|
|
||||||
SectionConfigPlugin,
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
use pbs_api_types::{DataStoreConfig, DATASTORE_SCHEMA};
|
use pbs_api_types::{DataStoreConfig, DATASTORE_SCHEMA};
|
||||||
|
|
||||||
|
@ -1,17 +1,11 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
use anyhow::{Error};
|
use anyhow::{Error};
|
||||||
use lazy_static::lazy_static;
|
use lazy_static::lazy_static;
|
||||||
use std::collections::HashMap;
|
|
||||||
use serde::{Serialize, Deserialize};
|
use serde::{Serialize, Deserialize};
|
||||||
|
|
||||||
use proxmox::api::{
|
use proxmox_schema::{api, ApiType, Updater, Schema};
|
||||||
api,
|
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
|
||||||
schema::*,
|
|
||||||
section_config::{
|
|
||||||
SectionConfig,
|
|
||||||
SectionConfigData,
|
|
||||||
SectionConfigPlugin,
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
use pbs_api_types::{REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA};
|
use pbs_api_types::{REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA};
|
||||||
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
|
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
|
||||||
@ -59,7 +53,7 @@ pub enum OpenIdUserAttribute {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
#[derive(Serialize,Deserialize,Updater)]
|
#[derive(Serialize, Deserialize, Updater)]
|
||||||
#[serde(rename_all="kebab-case")]
|
#[serde(rename_all="kebab-case")]
|
||||||
/// OpenID configuration properties.
|
/// OpenID configuration properties.
|
||||||
pub struct OpenIdRealmConfig {
|
pub struct OpenIdRealmConfig {
|
||||||
|
@ -16,16 +16,8 @@ use std::collections::HashMap;
|
|||||||
use anyhow::{bail, Error};
|
use anyhow::{bail, Error};
|
||||||
use lazy_static::lazy_static;
|
use lazy_static::lazy_static;
|
||||||
|
|
||||||
use proxmox::{
|
use proxmox_schema::*;
|
||||||
api::{
|
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
|
||||||
schema::*,
|
|
||||||
section_config::{
|
|
||||||
SectionConfig,
|
|
||||||
SectionConfigData,
|
|
||||||
SectionConfigPlugin,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
|
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
|
||||||
|
|
||||||
|
@ -5,7 +5,7 @@ use anyhow::{bail, format_err, Context, Error};
|
|||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
|
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
|
||||||
use proxmox::try_block;
|
use proxmox_lang::try_block;
|
||||||
|
|
||||||
use pbs_api_types::{Kdf, KeyInfo, Fingerprint};
|
use pbs_api_types::{Kdf, KeyInfo, Fingerprint};
|
||||||
|
|
||||||
@ -122,7 +122,7 @@ impl KeyConfig {
|
|||||||
let crypt_config = CryptConfig::new(raw_key.clone())?;
|
let crypt_config = CryptConfig::new(raw_key.clone())?;
|
||||||
let fingerprint = Some(Fingerprint::new(crypt_config.fingerprint()));
|
let fingerprint = Some(Fingerprint::new(crypt_config.fingerprint()));
|
||||||
|
|
||||||
let created = proxmox::tools::time::epoch_i64();
|
let created = proxmox_time::epoch_i64();
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
kdf: None,
|
kdf: None,
|
||||||
created,
|
created,
|
||||||
@ -183,7 +183,7 @@ impl KeyConfig {
|
|||||||
enc_data.extend_from_slice(&tag);
|
enc_data.extend_from_slice(&tag);
|
||||||
enc_data.extend_from_slice(&encrypted_key);
|
enc_data.extend_from_slice(&encrypted_key);
|
||||||
|
|
||||||
let created = proxmox::tools::time::epoch_i64();
|
let created = proxmox_time::epoch_i64();
|
||||||
|
|
||||||
// always compute fingerprint
|
// always compute fingerprint
|
||||||
let crypt_config = CryptConfig::new(raw_key.clone())?;
|
let crypt_config = CryptConfig::new(raw_key.clone())?;
|
||||||
@ -370,8 +370,8 @@ fn encrypt_decrypt_test() -> Result<(), Error> {
|
|||||||
|
|
||||||
let key = KeyConfig {
|
let key = KeyConfig {
|
||||||
kdf: None,
|
kdf: None,
|
||||||
created: proxmox::tools::time::epoch_i64(),
|
created: proxmox_time::epoch_i64(),
|
||||||
modified: proxmox::tools::time::epoch_i64(),
|
modified: proxmox_time::epoch_i64(),
|
||||||
data: (0u8..32u8).collect(),
|
data: (0u8..32u8).collect(),
|
||||||
fingerprint: Some(Fingerprint::new([
|
fingerprint: Some(Fingerprint::new([
|
||||||
14, 171, 212, 70, 11, 110, 185, 202, 52, 80, 35, 222, 226, 183, 120, 199, 144, 229, 74,
|
14, 171, 212, 70, 11, 110, 185, 202, 52, 80, 35, 222, 226, 183, 120, 199, 144, 229, 74,
|
||||||
@ -396,8 +396,8 @@ fn encrypt_decrypt_test() -> Result<(), Error> {
|
|||||||
fn fingerprint_checks() -> Result<(), Error> {
|
fn fingerprint_checks() -> Result<(), Error> {
|
||||||
let key = KeyConfig {
|
let key = KeyConfig {
|
||||||
kdf: None,
|
kdf: None,
|
||||||
created: proxmox::tools::time::epoch_i64(),
|
created: proxmox_time::epoch_i64(),
|
||||||
modified: proxmox::tools::time::epoch_i64(),
|
modified: proxmox_time::epoch_i64(),
|
||||||
data: (0u8..32u8).collect(),
|
data: (0u8..32u8).collect(),
|
||||||
fingerprint: Some(Fingerprint::new([0u8; 32])), // wrong FP
|
fingerprint: Some(Fingerprint::new([0u8; 32])), // wrong FP
|
||||||
hint: None,
|
hint: None,
|
||||||
@ -413,8 +413,8 @@ fn fingerprint_checks() -> Result<(), Error> {
|
|||||||
|
|
||||||
let key = KeyConfig {
|
let key = KeyConfig {
|
||||||
kdf: None,
|
kdf: None,
|
||||||
created: proxmox::tools::time::epoch_i64(),
|
created: proxmox_time::epoch_i64(),
|
||||||
modified: proxmox::tools::time::epoch_i64(),
|
modified: proxmox_time::epoch_i64(),
|
||||||
data: (0u8..32u8).collect(),
|
data: (0u8..32u8).collect(),
|
||||||
fingerprint: None,
|
fingerprint: None,
|
||||||
hint: None,
|
hint: None,
|
||||||
|
@ -4,23 +4,15 @@
|
|||||||
//! provides a type safe interface to store [`MediaPoolConfig`],
|
//! provides a type safe interface to store [`MediaPoolConfig`],
|
||||||
//!
|
//!
|
||||||
//! [MediaPoolConfig]: crate::api2::types::MediaPoolConfig
|
//! [MediaPoolConfig]: crate::api2::types::MediaPoolConfig
|
||||||
//! [SectionConfig]: proxmox::api::section_config::SectionConfig
|
//! [SectionConfig]: proxmox_section_config::SectionConfig
|
||||||
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
|
||||||
use anyhow::Error;
|
use anyhow::Error;
|
||||||
use lazy_static::lazy_static;
|
use lazy_static::lazy_static;
|
||||||
|
|
||||||
use proxmox::{
|
use proxmox_schema::*;
|
||||||
api::{
|
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
|
||||||
schema::*,
|
|
||||||
section_config::{
|
|
||||||
SectionConfig,
|
|
||||||
SectionConfigData,
|
|
||||||
SectionConfigPlugin,
|
|
||||||
}
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
use pbs_api_types::{MEDIA_POOL_NAME_SCHEMA, MediaPoolConfig};
|
use pbs_api_types::{MEDIA_POOL_NAME_SCHEMA, MediaPoolConfig};
|
||||||
|
|
||||||
|
@ -1,15 +1,10 @@
|
|||||||
use anyhow::{Error};
|
|
||||||
use lazy_static::lazy_static;
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
|
||||||
use proxmox::api::{
|
use anyhow::Error;
|
||||||
schema::*,
|
use lazy_static::lazy_static;
|
||||||
section_config::{
|
|
||||||
SectionConfig,
|
use proxmox_schema::*;
|
||||||
SectionConfigData,
|
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
|
||||||
SectionConfigPlugin,
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
use pbs_api_types::{Remote, REMOTE_ID_SCHEMA};
|
use pbs_api_types::{Remote, REMOTE_ID_SCHEMA};
|
||||||
|
|
||||||
|
@ -1,15 +1,10 @@
|
|||||||
use anyhow::{Error};
|
|
||||||
use lazy_static::lazy_static;
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
|
||||||
use proxmox::api::{
|
use anyhow::Error;
|
||||||
schema::*,
|
use lazy_static::lazy_static;
|
||||||
section_config::{
|
|
||||||
SectionConfig,
|
use proxmox_schema::{ApiType, Schema};
|
||||||
SectionConfigData,
|
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
|
||||||
SectionConfigPlugin,
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
use pbs_api_types::{JOB_ID_SCHEMA, SyncJobConfig};
|
use pbs_api_types::{JOB_ID_SCHEMA, SyncJobConfig};
|
||||||
|
|
||||||
|
@ -2,14 +2,8 @@ use anyhow::{Error};
|
|||||||
use lazy_static::lazy_static;
|
use lazy_static::lazy_static;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
|
||||||
use proxmox::api::{
|
use proxmox_schema::{Schema, ApiType};
|
||||||
schema::{Schema, ApiType},
|
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
|
||||||
section_config::{
|
|
||||||
SectionConfig,
|
|
||||||
SectionConfigData,
|
|
||||||
SectionConfigPlugin,
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
use pbs_api_types::{TapeBackupJobConfig, JOB_ID_SCHEMA};
|
use pbs_api_types::{TapeBackupJobConfig, JOB_ID_SCHEMA};
|
||||||
|
|
||||||
|
@ -4,14 +4,8 @@ use std::sync::{Arc, RwLock};
|
|||||||
use anyhow::{bail, Error};
|
use anyhow::{bail, Error};
|
||||||
use lazy_static::lazy_static;
|
use lazy_static::lazy_static;
|
||||||
|
|
||||||
use proxmox::api::{
|
use proxmox_schema::*;
|
||||||
schema::*,
|
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
|
||||||
section_config::{
|
|
||||||
SectionConfig,
|
|
||||||
SectionConfigData,
|
|
||||||
SectionConfigPlugin,
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
use pbs_api_types::{
|
use pbs_api_types::{
|
||||||
Authid, Userid, ApiToken, User,
|
Authid, Userid, ApiToken, User,
|
||||||
|
@ -1,15 +1,10 @@
|
|||||||
use anyhow::{Error};
|
|
||||||
use lazy_static::lazy_static;
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
|
||||||
use proxmox::api::{
|
use anyhow::Error;
|
||||||
schema::*,
|
use lazy_static::lazy_static;
|
||||||
section_config::{
|
|
||||||
SectionConfig,
|
use proxmox_schema::*;
|
||||||
SectionConfigData,
|
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
|
||||||
SectionConfigPlugin,
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
use pbs_api_types::{JOB_ID_SCHEMA, VerificationJobConfig};
|
use pbs_api_types::{JOB_ID_SCHEMA, VerificationJobConfig};
|
||||||
|
|
||||||
|
@ -11,6 +11,7 @@ base64 = "0.12"
|
|||||||
crc32fast = "1"
|
crc32fast = "1"
|
||||||
endian_trait = { version = "0.6", features = [ "arrays" ] }
|
endian_trait = { version = "0.6", features = [ "arrays" ] }
|
||||||
futures = "0.3"
|
futures = "0.3"
|
||||||
|
lazy_static = "1.4"
|
||||||
libc = "0.2"
|
libc = "0.2"
|
||||||
log = "0.4"
|
log = "0.4"
|
||||||
nix = "0.19.1"
|
nix = "0.19.1"
|
||||||
@ -18,12 +19,19 @@ openssl = "0.10"
|
|||||||
serde = { version = "1.0", features = ["derive"] }
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
serde_json = "1.0"
|
serde_json = "1.0"
|
||||||
tokio = { version = "1.6", features = [] }
|
tokio = { version = "1.6", features = [] }
|
||||||
|
walkdir = "2"
|
||||||
zstd = { version = "0.6", features = [ "bindgen" ] }
|
zstd = { version = "0.6", features = [ "bindgen" ] }
|
||||||
|
|
||||||
pathpatterns = "0.1.2"
|
pathpatterns = "0.1.2"
|
||||||
pxar = "0.10.1"
|
pxar = "0.10.1"
|
||||||
|
|
||||||
proxmox = { version = "0.13.3", default-features = false, features = [ "api-macro" ] }
|
proxmox = "0.14.0"
|
||||||
|
proxmox-borrow = "1"
|
||||||
|
proxmox-io = "1"
|
||||||
|
proxmox-lang = "1"
|
||||||
|
proxmox-schema = { version = "1", features = [ "api-macro" ] }
|
||||||
|
proxmox-time = "1"
|
||||||
|
proxmox-uuid = "1"
|
||||||
|
|
||||||
pbs-api-types = { path = "../pbs-api-types" }
|
pbs-api-types = { path = "../pbs-api-types" }
|
||||||
pbs-tools = { path = "../pbs-tools" }
|
pbs-tools = { path = "../pbs-tools" }
|
||||||
|
@ -138,7 +138,7 @@ impl BackupGroup {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let timestamp = proxmox::tools::time::parse_rfc3339(backup_time)?;
|
let timestamp = proxmox_time::parse_rfc3339(backup_time)?;
|
||||||
if let Some(last_timestamp) = last {
|
if let Some(last_timestamp) = last {
|
||||||
if timestamp > last_timestamp {
|
if timestamp > last_timestamp {
|
||||||
last = Some(timestamp);
|
last = Some(timestamp);
|
||||||
@ -215,7 +215,7 @@ impl BackupDir {
|
|||||||
V: Into<String>,
|
V: Into<String>,
|
||||||
{
|
{
|
||||||
let backup_time_string = backup_time_string.into();
|
let backup_time_string = backup_time_string.into();
|
||||||
let backup_time = proxmox::tools::time::parse_rfc3339(&backup_time_string)?;
|
let backup_time = proxmox_time::parse_rfc3339(&backup_time_string)?;
|
||||||
let group = BackupGroup::new(backup_type.into(), backup_id.into());
|
let group = BackupGroup::new(backup_type.into(), backup_id.into());
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
group,
|
group,
|
||||||
@ -255,7 +255,7 @@ impl BackupDir {
|
|||||||
|
|
||||||
pub fn backup_time_to_string(backup_time: i64) -> Result<String, Error> {
|
pub fn backup_time_to_string(backup_time: i64) -> Result<String, Error> {
|
||||||
// fixme: can this fail? (avoid unwrap)
|
// fixme: can this fail? (avoid unwrap)
|
||||||
proxmox::tools::time::epoch_to_rfc3339_utc(backup_time)
|
Ok(proxmox_time::epoch_to_rfc3339_utc(backup_time)?)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -8,8 +8,9 @@ use anyhow::{bail, format_err, Error};
|
|||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use pathpatterns::{MatchList, MatchType};
|
use pathpatterns::{MatchList, MatchType};
|
||||||
use proxmox::api::api;
|
|
||||||
use proxmox::tools::io::ReadExt;
|
use proxmox_io::ReadExt;
|
||||||
|
use proxmox_schema::api;
|
||||||
|
|
||||||
use crate::file_formats::PROXMOX_CATALOG_FILE_MAGIC_1_0;
|
use crate::file_formats::PROXMOX_CATALOG_FILE_MAGIC_1_0;
|
||||||
|
|
||||||
@ -570,7 +571,7 @@ impl <R: Read + Seek> CatalogReader<R> {
|
|||||||
}
|
}
|
||||||
CatalogEntryType::File => {
|
CatalogEntryType::File => {
|
||||||
let mut mtime_string = mtime.to_string();
|
let mut mtime_string = mtime.to_string();
|
||||||
if let Ok(s) = proxmox::tools::time::strftime_local("%FT%TZ", mtime as i64) {
|
if let Ok(s) = proxmox_time::strftime_local("%FT%TZ", mtime as i64) {
|
||||||
mtime_string = s;
|
mtime_string = s;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2,7 +2,8 @@ use anyhow::{Error};
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::io::Read;
|
use std::io::Read;
|
||||||
|
|
||||||
use pbs_tools::borrow::Tied;
|
use proxmox_borrow::Tied;
|
||||||
|
|
||||||
use pbs_tools::crypt_config::CryptConfig;
|
use pbs_tools::crypt_config::CryptConfig;
|
||||||
|
|
||||||
pub struct ChecksumReader<R> {
|
pub struct ChecksumReader<R> {
|
||||||
|
@ -3,7 +3,8 @@ use std::io::Write;
|
|||||||
|
|
||||||
use anyhow::{Error};
|
use anyhow::{Error};
|
||||||
|
|
||||||
use pbs_tools::borrow::Tied;
|
use proxmox_borrow::Tied;
|
||||||
|
|
||||||
use pbs_tools::crypt_config::CryptConfig;
|
use pbs_tools::crypt_config::CryptConfig;
|
||||||
|
|
||||||
pub struct ChecksumWriter<W> {
|
pub struct ChecksumWriter<W> {
|
||||||
|
@ -9,10 +9,9 @@ use proxmox::tools::fs::{CreateOptions, create_path, create_dir};
|
|||||||
|
|
||||||
use pbs_api_types::GarbageCollectionStatus;
|
use pbs_api_types::GarbageCollectionStatus;
|
||||||
use pbs_tools::process_locker::{self, ProcessLocker};
|
use pbs_tools::process_locker::{self, ProcessLocker};
|
||||||
|
use pbs_tools::{task_log, task::WorkerTaskContext};
|
||||||
|
|
||||||
use crate::DataBlob;
|
use crate::DataBlob;
|
||||||
use crate::task_log;
|
|
||||||
use crate::task::TaskState;
|
|
||||||
|
|
||||||
/// File system based chunk store
|
/// File system based chunk store
|
||||||
pub struct ChunkStore {
|
pub struct ChunkStore {
|
||||||
@ -66,7 +65,7 @@ impl ChunkStore {
|
|||||||
&self.base
|
&self.base
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn create<P>(name: &str, path: P, uid: nix::unistd::Uid, gid: nix::unistd::Gid, worker: Option<&dyn TaskState>) -> Result<Self, Error>
|
pub fn create<P>(name: &str, path: P, uid: nix::unistd::Uid, gid: nix::unistd::Gid, worker: Option<&dyn WorkerTaskContext>) -> Result<Self, Error>
|
||||||
where
|
where
|
||||||
P: Into<PathBuf>,
|
P: Into<PathBuf>,
|
||||||
{
|
{
|
||||||
@ -281,13 +280,12 @@ impl ChunkStore {
|
|||||||
ProcessLocker::oldest_shared_lock(self.locker.clone())
|
ProcessLocker::oldest_shared_lock(self.locker.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn sweep_unused_chunks<F: Fn() -> Result<(), Error>>(
|
pub fn sweep_unused_chunks(
|
||||||
&self,
|
&self,
|
||||||
oldest_writer: i64,
|
oldest_writer: i64,
|
||||||
phase1_start_time: i64,
|
phase1_start_time: i64,
|
||||||
status: &mut GarbageCollectionStatus,
|
status: &mut GarbageCollectionStatus,
|
||||||
worker: &dyn TaskState,
|
worker: &dyn WorkerTaskContext,
|
||||||
fail_on_shutdown: F,
|
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
use nix::sys::stat::fstatat;
|
use nix::sys::stat::fstatat;
|
||||||
use nix::unistd::{unlinkat, UnlinkatFlags};
|
use nix::unistd::{unlinkat, UnlinkatFlags};
|
||||||
@ -306,7 +304,7 @@ impl ChunkStore {
|
|||||||
for (entry, percentage, bad) in self.get_chunk_iterator()? {
|
for (entry, percentage, bad) in self.get_chunk_iterator()? {
|
||||||
if last_percentage != percentage {
|
if last_percentage != percentage {
|
||||||
last_percentage = percentage;
|
last_percentage = percentage;
|
||||||
crate::task_log!(
|
task_log!(
|
||||||
worker,
|
worker,
|
||||||
"processed {}% ({} chunks)",
|
"processed {}% ({} chunks)",
|
||||||
percentage,
|
percentage,
|
||||||
@ -315,7 +313,7 @@ impl ChunkStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
worker.check_abort()?;
|
worker.check_abort()?;
|
||||||
fail_on_shutdown()?;
|
worker.fail_on_shutdown()?;
|
||||||
|
|
||||||
let (dirfd, entry) = match entry {
|
let (dirfd, entry) = match entry {
|
||||||
Ok(entry) => (entry.parent_fd(), entry),
|
Ok(entry) => (entry.parent_fd(), entry),
|
||||||
|
@ -4,7 +4,7 @@ use std::io::Write;
|
|||||||
use anyhow::{bail, Error};
|
use anyhow::{bail, Error};
|
||||||
use openssl::symm::{decrypt_aead, Mode};
|
use openssl::symm::{decrypt_aead, Mode};
|
||||||
|
|
||||||
use proxmox::tools::io::{ReadExt, WriteExt};
|
use proxmox_io::{ReadExt, WriteExt};
|
||||||
|
|
||||||
use pbs_tools::crypt_config::CryptConfig;
|
use pbs_tools::crypt_config::CryptConfig;
|
||||||
use pbs_api_types::CryptMode;
|
use pbs_api_types::CryptMode;
|
||||||
@ -58,13 +58,13 @@ impl DataBlob {
|
|||||||
|
|
||||||
/// accessor to crc32 checksum
|
/// accessor to crc32 checksum
|
||||||
pub fn crc(&self) -> u32 {
|
pub fn crc(&self) -> u32 {
|
||||||
let crc_o = proxmox::offsetof!(DataBlobHeader, crc);
|
let crc_o = proxmox_lang::offsetof!(DataBlobHeader, crc);
|
||||||
u32::from_le_bytes(self.raw_data[crc_o..crc_o+4].try_into().unwrap())
|
u32::from_le_bytes(self.raw_data[crc_o..crc_o+4].try_into().unwrap())
|
||||||
}
|
}
|
||||||
|
|
||||||
// set the CRC checksum field
|
// set the CRC checksum field
|
||||||
pub fn set_crc(&mut self, crc: u32) {
|
pub fn set_crc(&mut self, crc: u32) {
|
||||||
let crc_o = proxmox::offsetof!(DataBlobHeader, crc);
|
let crc_o = proxmox_lang::offsetof!(DataBlobHeader, crc);
|
||||||
self.raw_data[crc_o..crc_o+4].copy_from_slice(&crc.to_le_bytes());
|
self.raw_data[crc_o..crc_o+4].copy_from_slice(&crc.to_le_bytes());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2,7 +2,8 @@ use std::io::{BufReader, Read};
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use anyhow::{bail, format_err, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
use proxmox::tools::io::ReadExt;
|
|
||||||
|
use proxmox_io::ReadExt;
|
||||||
|
|
||||||
use pbs_tools::crypt_config::CryptConfig;
|
use pbs_tools::crypt_config::CryptConfig;
|
||||||
|
|
||||||
|
@ -1,8 +1,10 @@
|
|||||||
use anyhow::Error;
|
|
||||||
use proxmox::tools::io::WriteExt;
|
|
||||||
use std::io::{Seek, SeekFrom, Write};
|
use std::io::{Seek, SeekFrom, Write};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use anyhow::Error;
|
||||||
|
|
||||||
|
use proxmox_io::WriteExt;
|
||||||
|
|
||||||
use pbs_tools::crypt_config::CryptConfig;
|
use pbs_tools::crypt_config::CryptConfig;
|
||||||
|
|
||||||
use crate::checksum_writer::ChecksumWriter;
|
use crate::checksum_writer::ChecksumWriter;
|
||||||
|
@ -12,24 +12,23 @@ use lazy_static::lazy_static;
|
|||||||
use proxmox::tools::fs::{replace_file, file_read_optional_string, CreateOptions};
|
use proxmox::tools::fs::{replace_file, file_read_optional_string, CreateOptions};
|
||||||
|
|
||||||
use pbs_api_types::{UPID, DataStoreConfig, Authid, GarbageCollectionStatus};
|
use pbs_api_types::{UPID, DataStoreConfig, Authid, GarbageCollectionStatus};
|
||||||
use pbs_datastore::{task_log, task_warn};
|
use pbs_tools::format::HumanByte;
|
||||||
use pbs_datastore::DataBlob;
|
use pbs_tools::fs::{lock_dir_noblock, DirLockGuard};
|
||||||
use pbs_datastore::backup_info::{BackupGroup, BackupDir};
|
use pbs_tools::process_locker::ProcessLockSharedGuard;
|
||||||
use pbs_datastore::chunk_store::ChunkStore;
|
use pbs_tools::{task_log, task_warn, task::WorkerTaskContext};
|
||||||
use pbs_datastore::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
|
use pbs_config::{open_backup_lockfile, BackupLockGuard};
|
||||||
use pbs_datastore::fixed_index::{FixedIndexReader, FixedIndexWriter};
|
|
||||||
use pbs_datastore::index::IndexFile;
|
use crate::DataBlob;
|
||||||
use pbs_datastore::manifest::{
|
use crate::backup_info::{BackupGroup, BackupDir};
|
||||||
|
use crate::chunk_store::ChunkStore;
|
||||||
|
use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
|
||||||
|
use crate::fixed_index::{FixedIndexReader, FixedIndexWriter};
|
||||||
|
use crate::index::IndexFile;
|
||||||
|
use crate::manifest::{
|
||||||
MANIFEST_BLOB_NAME, MANIFEST_LOCK_NAME, CLIENT_LOG_BLOB_NAME,
|
MANIFEST_BLOB_NAME, MANIFEST_LOCK_NAME, CLIENT_LOG_BLOB_NAME,
|
||||||
ArchiveType, BackupManifest,
|
ArchiveType, BackupManifest,
|
||||||
archive_type,
|
archive_type,
|
||||||
};
|
};
|
||||||
use pbs_datastore::task::TaskState;
|
|
||||||
use pbs_tools::format::HumanByte;
|
|
||||||
use pbs_tools::fs::{lock_dir_noblock, DirLockGuard};
|
|
||||||
use pbs_tools::process_locker::ProcessLockSharedGuard;
|
|
||||||
use pbs_config::{open_backup_lockfile, BackupLockGuard};
|
|
||||||
use proxmox_rest_server::fail_on_shutdown;
|
|
||||||
|
|
||||||
lazy_static! {
|
lazy_static! {
|
||||||
static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
|
static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
|
||||||
@ -499,7 +498,7 @@ impl DataStore {
|
|||||||
index: I,
|
index: I,
|
||||||
file_name: &Path, // only used for error reporting
|
file_name: &Path, // only used for error reporting
|
||||||
status: &mut GarbageCollectionStatus,
|
status: &mut GarbageCollectionStatus,
|
||||||
worker: &dyn TaskState,
|
worker: &dyn WorkerTaskContext,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
status.index_file_count += 1;
|
status.index_file_count += 1;
|
||||||
@ -507,7 +506,7 @@ impl DataStore {
|
|||||||
|
|
||||||
for pos in 0..index.index_count() {
|
for pos in 0..index.index_count() {
|
||||||
worker.check_abort()?;
|
worker.check_abort()?;
|
||||||
fail_on_shutdown()?;
|
worker.fail_on_shutdown()?;
|
||||||
let digest = index.index_digest(pos).unwrap();
|
let digest = index.index_digest(pos).unwrap();
|
||||||
if !self.chunk_store.cond_touch_chunk(digest, false)? {
|
if !self.chunk_store.cond_touch_chunk(digest, false)? {
|
||||||
task_warn!(
|
task_warn!(
|
||||||
@ -535,7 +534,7 @@ impl DataStore {
|
|||||||
fn mark_used_chunks(
|
fn mark_used_chunks(
|
||||||
&self,
|
&self,
|
||||||
status: &mut GarbageCollectionStatus,
|
status: &mut GarbageCollectionStatus,
|
||||||
worker: &dyn TaskState,
|
worker: &dyn WorkerTaskContext,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
let image_list = self.list_images()?;
|
let image_list = self.list_images()?;
|
||||||
@ -548,7 +547,7 @@ impl DataStore {
|
|||||||
for (i, img) in image_list.into_iter().enumerate() {
|
for (i, img) in image_list.into_iter().enumerate() {
|
||||||
|
|
||||||
worker.check_abort()?;
|
worker.check_abort()?;
|
||||||
fail_on_shutdown()?;
|
worker.fail_on_shutdown()?;
|
||||||
|
|
||||||
if let Some(backup_dir_path) = img.parent() {
|
if let Some(backup_dir_path) = img.parent() {
|
||||||
let backup_dir_path = backup_dir_path.strip_prefix(self.base_path())?;
|
let backup_dir_path = backup_dir_path.strip_prefix(self.base_path())?;
|
||||||
@ -612,7 +611,7 @@ impl DataStore {
|
|||||||
!matches!(self.gc_mutex.try_lock(), Ok(_))
|
!matches!(self.gc_mutex.try_lock(), Ok(_))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn garbage_collection(&self, worker: &dyn TaskState, upid: &UPID) -> Result<(), Error> {
|
pub fn garbage_collection(&self, worker: &dyn WorkerTaskContext, upid: &UPID) -> Result<(), Error> {
|
||||||
|
|
||||||
if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {
|
if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {
|
||||||
|
|
||||||
@ -621,7 +620,7 @@ impl DataStore {
|
|||||||
// writer" information and thus no safe atime cutoff
|
// writer" information and thus no safe atime cutoff
|
||||||
let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;
|
let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;
|
||||||
|
|
||||||
let phase1_start_time = proxmox::tools::time::epoch_i64();
|
let phase1_start_time = proxmox_time::epoch_i64();
|
||||||
let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(phase1_start_time);
|
let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(phase1_start_time);
|
||||||
|
|
||||||
let mut gc_status = GarbageCollectionStatus::default();
|
let mut gc_status = GarbageCollectionStatus::default();
|
||||||
@ -637,7 +636,6 @@ impl DataStore {
|
|||||||
phase1_start_time,
|
phase1_start_time,
|
||||||
&mut gc_status,
|
&mut gc_status,
|
||||||
worker,
|
worker,
|
||||||
fail_on_shutdown,
|
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
task_log!(
|
task_log!(
|
||||||
@ -744,7 +742,7 @@ impl DataStore {
|
|||||||
path.push(backup_dir.relative_path());
|
path.push(backup_dir.relative_path());
|
||||||
path.push(filename);
|
path.push(filename);
|
||||||
|
|
||||||
proxmox::try_block!({
|
proxmox_lang::try_block!({
|
||||||
let mut file = std::fs::File::open(&path)?;
|
let mut file = std::fs::File::open(&path)?;
|
||||||
DataBlob::load_from_reader(&mut file)
|
DataBlob::load_from_reader(&mut file)
|
||||||
}).map_err(|err| format_err!("unable to load blob '{:?}' - {}", path, err))
|
}).map_err(|err| format_err!("unable to load blob '{:?}' - {}", path, err))
|
||||||
@ -760,7 +758,7 @@ impl DataStore {
|
|||||||
|
|
||||||
let (chunk_path, digest_str) = self.chunk_store.chunk_path(digest);
|
let (chunk_path, digest_str) = self.chunk_store.chunk_path(digest);
|
||||||
|
|
||||||
proxmox::try_block!({
|
proxmox_lang::try_block!({
|
||||||
let mut file = std::fs::File::open(&chunk_path)?;
|
let mut file = std::fs::File::open(&chunk_path)?;
|
||||||
DataBlob::load_from_reader(&mut file)
|
DataBlob::load_from_reader(&mut file)
|
||||||
}).map_err(|err| format_err!(
|
}).map_err(|err| format_err!(
|
@ -9,9 +9,9 @@ use std::task::Context;
|
|||||||
|
|
||||||
use anyhow::{bail, format_err, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
|
|
||||||
use proxmox::tools::io::ReadExt;
|
|
||||||
use proxmox::tools::uuid::Uuid;
|
|
||||||
use proxmox::tools::mmap::Mmap;
|
use proxmox::tools::mmap::Mmap;
|
||||||
|
use proxmox_io::ReadExt;
|
||||||
|
use proxmox_uuid::Uuid;
|
||||||
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
|
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
|
||||||
|
|
||||||
use pbs_tools::lru_cache::LruCache;
|
use pbs_tools::lru_cache::LruCache;
|
||||||
@ -35,7 +35,7 @@ pub struct DynamicIndexHeader {
|
|||||||
pub index_csum: [u8; 32],
|
pub index_csum: [u8; 32],
|
||||||
reserved: [u8; 4032], // overall size is one page (4096 bytes)
|
reserved: [u8; 4032], // overall size is one page (4096 bytes)
|
||||||
}
|
}
|
||||||
proxmox::static_assert_size!(DynamicIndexHeader, 4096);
|
proxmox_lang::static_assert_size!(DynamicIndexHeader, 4096);
|
||||||
// TODO: Once non-Copy unions are stabilized, use:
|
// TODO: Once non-Copy unions are stabilized, use:
|
||||||
// union DynamicIndexHeader {
|
// union DynamicIndexHeader {
|
||||||
// reserved: [u8; 4096],
|
// reserved: [u8; 4096],
|
||||||
@ -119,7 +119,7 @@ impl DynamicIndexReader {
|
|||||||
bail!("got unknown magic number");
|
bail!("got unknown magic number");
|
||||||
}
|
}
|
||||||
|
|
||||||
let ctime = proxmox::tools::time::epoch_i64();
|
let ctime = proxmox_time::epoch_i64();
|
||||||
|
|
||||||
let index_size = stat.st_size as usize - header_size;
|
let index_size = stat.st_size as usize - header_size;
|
||||||
let index_count = index_size / 40;
|
let index_count = index_size / 40;
|
||||||
@ -301,7 +301,7 @@ impl DynamicIndexWriter {
|
|||||||
|
|
||||||
let mut writer = BufWriter::with_capacity(1024 * 1024, file);
|
let mut writer = BufWriter::with_capacity(1024 * 1024, file);
|
||||||
|
|
||||||
let ctime = proxmox::tools::time::epoch_i64();
|
let ctime = proxmox_time::epoch_i64();
|
||||||
|
|
||||||
let uuid = Uuid::generate();
|
let uuid = Uuid::generate();
|
||||||
|
|
||||||
@ -344,7 +344,7 @@ impl DynamicIndexWriter {
|
|||||||
|
|
||||||
self.writer.flush()?;
|
self.writer.flush()?;
|
||||||
|
|
||||||
let csum_offset = proxmox::offsetof!(DynamicIndexHeader, index_csum);
|
let csum_offset = proxmox_lang::offsetof!(DynamicIndexHeader, index_csum);
|
||||||
self.writer.seek(SeekFrom::Start(csum_offset as u64))?;
|
self.writer.seek(SeekFrom::Start(csum_offset as u64))?;
|
||||||
|
|
||||||
let csum = self.csum.take().unwrap();
|
let csum = self.csum.take().unwrap();
|
||||||
|
@ -9,8 +9,8 @@ use anyhow::{bail, format_err, Error};
|
|||||||
|
|
||||||
use pbs_tools::process_locker::ProcessLockSharedGuard;
|
use pbs_tools::process_locker::ProcessLockSharedGuard;
|
||||||
|
|
||||||
use proxmox::tools::io::ReadExt;
|
use proxmox_io::ReadExt;
|
||||||
use proxmox::tools::Uuid;
|
use proxmox_uuid::Uuid;
|
||||||
|
|
||||||
use crate::chunk_stat::ChunkStat;
|
use crate::chunk_stat::ChunkStat;
|
||||||
use crate::chunk_store::ChunkStore;
|
use crate::chunk_store::ChunkStore;
|
||||||
@ -30,7 +30,7 @@ pub struct FixedIndexHeader {
|
|||||||
pub chunk_size: u64,
|
pub chunk_size: u64,
|
||||||
reserved: [u8; 4016], // overall size is one page (4096 bytes)
|
reserved: [u8; 4016], // overall size is one page (4096 bytes)
|
||||||
}
|
}
|
||||||
proxmox::static_assert_size!(FixedIndexHeader, 4096);
|
proxmox_lang::static_assert_size!(FixedIndexHeader, 4096);
|
||||||
|
|
||||||
// split image into fixed size chunks
|
// split image into fixed size chunks
|
||||||
|
|
||||||
@ -149,7 +149,7 @@ impl FixedIndexReader {
|
|||||||
println!("ChunkSize: {}", self.chunk_size);
|
println!("ChunkSize: {}", self.chunk_size);
|
||||||
|
|
||||||
let mut ctime_str = self.ctime.to_string();
|
let mut ctime_str = self.ctime.to_string();
|
||||||
if let Ok(s) = proxmox::tools::time::strftime_local("%c", self.ctime) {
|
if let Ok(s) = proxmox_time::strftime_local("%c", self.ctime) {
|
||||||
ctime_str = s;
|
ctime_str = s;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -281,7 +281,7 @@ impl FixedIndexWriter {
|
|||||||
panic!("got unexpected header size");
|
panic!("got unexpected header size");
|
||||||
}
|
}
|
||||||
|
|
||||||
let ctime = proxmox::tools::time::epoch_i64();
|
let ctime = proxmox_time::epoch_i64();
|
||||||
|
|
||||||
let uuid = Uuid::generate();
|
let uuid = Uuid::generate();
|
||||||
|
|
||||||
@ -361,7 +361,7 @@ impl FixedIndexWriter {
|
|||||||
|
|
||||||
self.unmap()?;
|
self.unmap()?;
|
||||||
|
|
||||||
let csum_offset = proxmox::offsetof!(FixedIndexHeader, index_csum);
|
let csum_offset = proxmox_lang::offsetof!(FixedIndexHeader, index_csum);
|
||||||
self.file.seek(SeekFrom::Start(csum_offset as u64))?;
|
self.file.seek(SeekFrom::Start(csum_offset as u64))?;
|
||||||
self.file.write_all(&index_csum)?;
|
self.file.write_all(&index_csum)?;
|
||||||
self.file.flush()?;
|
self.file.flush()?;
|
||||||
|
@ -179,7 +179,6 @@ pub mod paperkey;
|
|||||||
pub mod prune;
|
pub mod prune;
|
||||||
pub mod read_chunk;
|
pub mod read_chunk;
|
||||||
pub mod store_progress;
|
pub mod store_progress;
|
||||||
pub mod task;
|
|
||||||
|
|
||||||
pub mod dynamic_index;
|
pub mod dynamic_index;
|
||||||
pub mod fixed_index;
|
pub mod fixed_index;
|
||||||
@ -196,3 +195,12 @@ pub use data_blob_reader::DataBlobReader;
|
|||||||
pub use data_blob_writer::DataBlobWriter;
|
pub use data_blob_writer::DataBlobWriter;
|
||||||
pub use manifest::BackupManifest;
|
pub use manifest::BackupManifest;
|
||||||
pub use store_progress::StoreProgress;
|
pub use store_progress::StoreProgress;
|
||||||
|
|
||||||
|
mod datastore;
|
||||||
|
pub use datastore::{check_backup_owner, DataStore};
|
||||||
|
|
||||||
|
mod snapshot_reader;
|
||||||
|
pub use snapshot_reader::SnapshotReader;
|
||||||
|
|
||||||
|
mod local_chunk_reader;
|
||||||
|
pub use local_chunk_reader::LocalChunkReader;
|
||||||
|
@ -6,10 +6,10 @@ use anyhow::{bail, Error};
|
|||||||
|
|
||||||
use pbs_tools::crypt_config::CryptConfig;
|
use pbs_tools::crypt_config::CryptConfig;
|
||||||
use pbs_api_types::CryptMode;
|
use pbs_api_types::CryptMode;
|
||||||
use pbs_datastore::data_blob::DataBlob;
|
|
||||||
use pbs_datastore::read_chunk::{ReadChunk, AsyncReadChunk};
|
|
||||||
|
|
||||||
use super::datastore::DataStore;
|
use crate::data_blob::DataBlob;
|
||||||
|
use crate::read_chunk::{ReadChunk, AsyncReadChunk};
|
||||||
|
use crate::DataStore;
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct LocalChunkReader {
|
pub struct LocalChunkReader {
|
@ -4,7 +4,7 @@ use std::process::{Command, Stdio};
|
|||||||
use anyhow::{bail, format_err, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::api;
|
use proxmox_schema::api;
|
||||||
|
|
||||||
use pbs_config::key_config::KeyConfig;
|
use pbs_config::key_config::KeyConfig;
|
||||||
|
|
||||||
|
@ -135,17 +135,17 @@ pub fn compute_prune_info(
|
|||||||
})?;
|
})?;
|
||||||
}
|
}
|
||||||
|
|
||||||
use proxmox::tools::time::strftime_local;
|
use proxmox_time::strftime_local;
|
||||||
|
|
||||||
if let Some(keep_hourly) = options.keep_hourly {
|
if let Some(keep_hourly) = options.keep_hourly {
|
||||||
mark_selections(&mut mark, &list, keep_hourly as usize, |info| {
|
mark_selections(&mut mark, &list, keep_hourly as usize, |info| {
|
||||||
strftime_local("%Y/%m/%d/%H", info.backup_dir.backup_time())
|
strftime_local("%Y/%m/%d/%H", info.backup_dir.backup_time()).map_err(Error::from)
|
||||||
})?;
|
})?;
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(keep_daily) = options.keep_daily {
|
if let Some(keep_daily) = options.keep_daily {
|
||||||
mark_selections(&mut mark, &list, keep_daily as usize, |info| {
|
mark_selections(&mut mark, &list, keep_daily as usize, |info| {
|
||||||
strftime_local("%Y/%m/%d", info.backup_dir.backup_time())
|
strftime_local("%Y/%m/%d", info.backup_dir.backup_time()).map_err(Error::from)
|
||||||
})?;
|
})?;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -153,19 +153,19 @@ pub fn compute_prune_info(
|
|||||||
mark_selections(&mut mark, &list, keep_weekly as usize, |info| {
|
mark_selections(&mut mark, &list, keep_weekly as usize, |info| {
|
||||||
// Note: Use iso-week year/week here. This year number
|
// Note: Use iso-week year/week here. This year number
|
||||||
// might not match the calendar year number.
|
// might not match the calendar year number.
|
||||||
strftime_local("%G/%V", info.backup_dir.backup_time())
|
strftime_local("%G/%V", info.backup_dir.backup_time()).map_err(Error::from)
|
||||||
})?;
|
})?;
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(keep_monthly) = options.keep_monthly {
|
if let Some(keep_monthly) = options.keep_monthly {
|
||||||
mark_selections(&mut mark, &list, keep_monthly as usize, |info| {
|
mark_selections(&mut mark, &list, keep_monthly as usize, |info| {
|
||||||
strftime_local("%Y/%m", info.backup_dir.backup_time())
|
strftime_local("%Y/%m", info.backup_dir.backup_time()).map_err(Error::from)
|
||||||
})?;
|
})?;
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(keep_yearly) = options.keep_yearly {
|
if let Some(keep_yearly) = options.keep_yearly {
|
||||||
mark_selections(&mut mark, &list, keep_yearly as usize, |info| {
|
mark_selections(&mut mark, &list, keep_yearly as usize, |info| {
|
||||||
strftime_local("%Y", info.backup_dir.backup_time())
|
strftime_local("%Y", info.backup_dir.backup_time()).map_err(Error::from)
|
||||||
})?;
|
})?;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -6,15 +6,14 @@ use std::fs::File;
|
|||||||
use anyhow::{bail, Error};
|
use anyhow::{bail, Error};
|
||||||
use nix::dir::Dir;
|
use nix::dir::Dir;
|
||||||
|
|
||||||
use pbs_datastore::backup_info::BackupDir;
|
use crate::backup_info::BackupDir;
|
||||||
use pbs_datastore::index::IndexFile;
|
use crate::index::IndexFile;
|
||||||
use pbs_datastore::fixed_index::FixedIndexReader;
|
use crate::fixed_index::FixedIndexReader;
|
||||||
use pbs_datastore::dynamic_index::DynamicIndexReader;
|
use crate::dynamic_index::DynamicIndexReader;
|
||||||
use pbs_datastore::manifest::{archive_type, ArchiveType, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
|
use crate::manifest::{archive_type, ArchiveType, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
|
||||||
|
use crate::DataStore;
|
||||||
use pbs_tools::fs::lock_dir_noblock_shared;
|
use pbs_tools::fs::lock_dir_noblock_shared;
|
||||||
|
|
||||||
use crate::backup::DataStore;
|
|
||||||
|
|
||||||
/// Helper to access the contents of a datastore backup snapshot
|
/// Helper to access the contents of a datastore backup snapshot
|
||||||
///
|
///
|
||||||
/// This make it easy to iterate over all used chunks and files.
|
/// This make it easy to iterate over all used chunks and files.
|
||||||
@ -108,7 +107,7 @@ impl <'a> Iterator for SnapshotChunkIterator<'a> {
|
|||||||
type Item = Result<[u8; 32], Error>;
|
type Item = Result<[u8; 32], Error>;
|
||||||
|
|
||||||
fn next(&mut self) -> Option<Self::Item> {
|
fn next(&mut self) -> Option<Self::Item> {
|
||||||
proxmox::try_block!({
|
proxmox_lang::try_block!({
|
||||||
loop {
|
loop {
|
||||||
if self.current_index.is_none() {
|
if self.current_index.is_none() {
|
||||||
if let Some(filename) = self.todo_list.pop() {
|
if let Some(filename) = self.todo_list.pop() {
|
@ -1,56 +0,0 @@
|
|||||||
use anyhow::Error;
|
|
||||||
|
|
||||||
/// `WorkerTask` methods commonly used from contexts otherwise not related to the API server.
|
|
||||||
pub trait TaskState {
|
|
||||||
/// If the task should be aborted, this should fail with a reasonable error message.
|
|
||||||
fn check_abort(&self) -> Result<(), Error>;
|
|
||||||
|
|
||||||
/// Create a log message for this task.
|
|
||||||
fn log(&self, level: log::Level, message: &std::fmt::Arguments);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Convenience implementation:
|
|
||||||
impl<T: TaskState + ?Sized> TaskState for std::sync::Arc<T> {
|
|
||||||
fn check_abort(&self) -> Result<(), Error> {
|
|
||||||
<T as TaskState>::check_abort(&*self)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn log(&self, level: log::Level, message: &std::fmt::Arguments) {
|
|
||||||
<T as TaskState>::log(&*self, level, message)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[macro_export]
|
|
||||||
macro_rules! task_error {
|
|
||||||
($task:expr, $($fmt:tt)+) => {{
|
|
||||||
$crate::task::TaskState::log(&*$task, log::Level::Error, &format_args!($($fmt)+))
|
|
||||||
}};
|
|
||||||
}
|
|
||||||
|
|
||||||
#[macro_export]
|
|
||||||
macro_rules! task_warn {
|
|
||||||
($task:expr, $($fmt:tt)+) => {{
|
|
||||||
$crate::task::TaskState::log(&*$task, log::Level::Warn, &format_args!($($fmt)+))
|
|
||||||
}};
|
|
||||||
}
|
|
||||||
|
|
||||||
#[macro_export]
|
|
||||||
macro_rules! task_log {
|
|
||||||
($task:expr, $($fmt:tt)+) => {{
|
|
||||||
$crate::task::TaskState::log(&*$task, log::Level::Info, &format_args!($($fmt)+))
|
|
||||||
}};
|
|
||||||
}
|
|
||||||
|
|
||||||
#[macro_export]
|
|
||||||
macro_rules! task_debug {
|
|
||||||
($task:expr, $($fmt:tt)+) => {{
|
|
||||||
$crate::task::TaskState::log(&*$task, log::Level::Debug, &format_args!($($fmt)+))
|
|
||||||
}};
|
|
||||||
}
|
|
||||||
|
|
||||||
#[macro_export]
|
|
||||||
macro_rules! task_trace {
|
|
||||||
($task:expr, $($fmt:tt)+) => {{
|
|
||||||
$crate::task::TaskState::log(&*$task, log::Level::Trace, &format_args!($($fmt)+))
|
|
||||||
}};
|
|
||||||
}
|
|
@ -14,7 +14,7 @@ nix = "0.19.1"
|
|||||||
regex = "1.2"
|
regex = "1.2"
|
||||||
tokio = { version = "1.6", features = [] }
|
tokio = { version = "1.6", features = [] }
|
||||||
|
|
||||||
proxmox = "0.13.3"
|
proxmox-time = "1"
|
||||||
proxmox-fuse = "0.1.1"
|
proxmox-fuse = "0.1.1"
|
||||||
|
|
||||||
pbs-tools = { path = "../pbs-tools" }
|
pbs-tools = { path = "../pbs-tools" }
|
||||||
|
@ -1,29 +1,29 @@
|
|||||||
//! Map a raw data reader as a loop device via FUSE
|
//! Map a raw data reader as a loop device via FUSE
|
||||||
|
|
||||||
use anyhow::{Error, format_err, bail};
|
use anyhow::{bail, format_err, Error};
|
||||||
use std::ffi::OsStr;
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
use std::fs::{File, remove_file, read_to_string, OpenOptions};
|
|
||||||
use std::io::SeekFrom;
|
|
||||||
use std::io::prelude::*;
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
use std::ffi::OsStr;
|
||||||
|
use std::fs::{read_to_string, remove_file, File, OpenOptions};
|
||||||
|
use std::io::prelude::*;
|
||||||
|
use std::io::SeekFrom;
|
||||||
|
use std::path::{Path, PathBuf};
|
||||||
|
|
||||||
use nix::unistd::Pid;
|
|
||||||
use nix::sys::signal::{self, Signal};
|
use nix::sys::signal::{self, Signal};
|
||||||
|
use nix::unistd::Pid;
|
||||||
|
use regex::Regex;
|
||||||
|
|
||||||
use tokio::io::{AsyncRead, AsyncSeek, AsyncReadExt, AsyncSeekExt};
|
use futures::channel::mpsc::{Receiver, Sender};
|
||||||
use futures::stream::{StreamExt, TryStreamExt};
|
use futures::stream::{StreamExt, TryStreamExt};
|
||||||
use futures::channel::mpsc::{Sender, Receiver};
|
use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt};
|
||||||
|
|
||||||
use proxmox::const_regex;
|
|
||||||
use proxmox::tools::time;
|
|
||||||
use proxmox_fuse::{*, requests::FuseRequest};
|
|
||||||
use super::loopdev;
|
use super::loopdev;
|
||||||
|
use proxmox_fuse::{requests::FuseRequest, *};
|
||||||
|
use proxmox_time::epoch_i64;
|
||||||
|
|
||||||
const RUN_DIR: &str = "/run/pbs-loopdev";
|
const RUN_DIR: &str = "/run/pbs-loopdev";
|
||||||
|
|
||||||
const_regex! {
|
lazy_static::lazy_static! {
|
||||||
pub LOOPDEV_REGEX = r"^loop\d+$";
|
static ref LOOPDEV_REGEX: Regex = Regex::new(r"^loop\d+$").unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Represents an ongoing FUSE-session that has been mapped onto a loop device.
|
/// Represents an ongoing FUSE-session that has been mapped onto a loop device.
|
||||||
@ -40,12 +40,14 @@ pub struct FuseLoopSession<R: AsyncRead + AsyncSeek + Unpin> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<R: AsyncRead + AsyncSeek + Unpin> FuseLoopSession<R> {
|
impl<R: AsyncRead + AsyncSeek + Unpin> FuseLoopSession<R> {
|
||||||
|
|
||||||
/// Prepare for mapping the given reader as a block device node at
|
/// Prepare for mapping the given reader as a block device node at
|
||||||
/// /dev/loopN. Creates a temporary file for FUSE and a PID file for unmap.
|
/// /dev/loopN. Creates a temporary file for FUSE and a PID file for unmap.
|
||||||
pub async fn map_loop<P: AsRef<str>>(size: u64, mut reader: R, name: P, options: &OsStr)
|
pub async fn map_loop<P: AsRef<str>>(
|
||||||
-> Result<Self, Error>
|
size: u64,
|
||||||
{
|
mut reader: R,
|
||||||
|
name: P,
|
||||||
|
options: &OsStr,
|
||||||
|
) -> Result<Self, Error> {
|
||||||
// attempt a single read to check if the reader is configured correctly
|
// attempt a single read to check if the reader is configured correctly
|
||||||
let _ = reader.read_u8().await?;
|
let _ = reader.read_u8().await?;
|
||||||
|
|
||||||
@ -61,14 +63,14 @@ impl<R: AsyncRead + AsyncSeek + Unpin> FuseLoopSession<R> {
|
|||||||
cleanup_unused_run_files(Some(name.as_ref().to_owned()));
|
cleanup_unused_run_files(Some(name.as_ref().to_owned()));
|
||||||
|
|
||||||
match OpenOptions::new().write(true).create_new(true).open(&path) {
|
match OpenOptions::new().write(true).create_new(true).open(&path) {
|
||||||
Ok(_) => { /* file created, continue on */ },
|
Ok(_) => { /* file created, continue on */ }
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
if e.kind() == std::io::ErrorKind::AlreadyExists {
|
if e.kind() == std::io::ErrorKind::AlreadyExists {
|
||||||
bail!("the given archive is already mapped, cannot map twice");
|
bail!("the given archive is already mapped, cannot map twice");
|
||||||
} else {
|
} else {
|
||||||
bail!("error while creating backing file ({:?}) - {}", &path, e);
|
bail!("error while creating backing file ({:?}) - {}", &path, e);
|
||||||
}
|
}
|
||||||
},
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let session = Fuse::builder("pbs-block-dev")?
|
let session = Fuse::builder("pbs-block-dev")?
|
||||||
@ -77,9 +79,8 @@ impl<R: AsyncRead + AsyncSeek + Unpin> FuseLoopSession<R> {
|
|||||||
.build()?
|
.build()?
|
||||||
.mount(&path)?;
|
.mount(&path)?;
|
||||||
|
|
||||||
let loopdev_path = loopdev::get_or_create_free_dev().map_err(|err| {
|
let loopdev_path = loopdev::get_or_create_free_dev()
|
||||||
format_err!("loop-control GET_FREE failed - {}", err)
|
.map_err(|err| format_err!("loop-control GET_FREE failed - {}", err))?;
|
||||||
})?;
|
|
||||||
|
|
||||||
// write pidfile so unmap can later send us a signal to exit
|
// write pidfile so unmap can later send us a signal to exit
|
||||||
Self::write_pidfile(&pid_path)?;
|
Self::write_pidfile(&pid_path)?;
|
||||||
@ -111,7 +112,6 @@ impl<R: AsyncRead + AsyncSeek + Unpin> FuseLoopSession<R> {
|
|||||||
mut startup_chan: Sender<Result<(), Error>>,
|
mut startup_chan: Sender<Result<(), Error>>,
|
||||||
abort_chan: Receiver<()>,
|
abort_chan: Receiver<()>,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
if self.session.is_none() {
|
if self.session.is_none() {
|
||||||
panic!("internal error: fuse_loop::main called before ::map_loop");
|
panic!("internal error: fuse_loop::main called before ::map_loop");
|
||||||
}
|
}
|
||||||
@ -121,7 +121,10 @@ impl<R: AsyncRead + AsyncSeek + Unpin> FuseLoopSession<R> {
|
|||||||
let (loopdev_path, fuse_path) = (self.loopdev_path.clone(), self.fuse_path.clone());
|
let (loopdev_path, fuse_path) = (self.loopdev_path.clone(), self.fuse_path.clone());
|
||||||
tokio::task::spawn_blocking(move || {
|
tokio::task::spawn_blocking(move || {
|
||||||
if let Err(err) = loopdev::assign(loopdev_path, fuse_path) {
|
if let Err(err) = loopdev::assign(loopdev_path, fuse_path) {
|
||||||
let _ = startup_chan.try_send(Err(format_err!("error while assigning loop device - {}", err)));
|
let _ = startup_chan.try_send(Err(format_err!(
|
||||||
|
"error while assigning loop device - {}",
|
||||||
|
err
|
||||||
|
)));
|
||||||
} else {
|
} else {
|
||||||
// device is assigned successfully, which means not only is the
|
// device is assigned successfully, which means not only is the
|
||||||
// loopdev ready, but FUSE is also okay, since the assignment
|
// loopdev ready, but FUSE is also okay, since the assignment
|
||||||
@ -130,16 +133,17 @@ impl<R: AsyncRead + AsyncSeek + Unpin> FuseLoopSession<R> {
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
let (loopdev_path, fuse_path, pid_path) =
|
let (loopdev_path, fuse_path, pid_path) = (
|
||||||
(self.loopdev_path.clone(), self.fuse_path.clone(), self.pid_path.clone());
|
self.loopdev_path.clone(),
|
||||||
|
self.fuse_path.clone(),
|
||||||
|
self.pid_path.clone(),
|
||||||
|
);
|
||||||
let cleanup = |session: futures::stream::Fuse<Fuse>| {
|
let cleanup = |session: futures::stream::Fuse<Fuse>| {
|
||||||
// only warn for errors on cleanup, if these fail nothing is lost
|
// only warn for errors on cleanup, if these fail nothing is lost
|
||||||
if let Err(err) = loopdev::unassign(&loopdev_path) {
|
if let Err(err) = loopdev::unassign(&loopdev_path) {
|
||||||
eprintln!(
|
eprintln!(
|
||||||
"cleanup: warning: could not unassign file {} from loop device {} - {}",
|
"cleanup: warning: could not unassign file {} from loop device {} - {}",
|
||||||
&fuse_path,
|
&fuse_path, &loopdev_path, err,
|
||||||
&loopdev_path,
|
|
||||||
err,
|
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -149,21 +153,19 @@ impl<R: AsyncRead + AsyncSeek + Unpin> FuseLoopSession<R> {
|
|||||||
if let Err(err) = remove_file(&fuse_path) {
|
if let Err(err) = remove_file(&fuse_path) {
|
||||||
eprintln!(
|
eprintln!(
|
||||||
"cleanup: warning: could not remove temporary file {} - {}",
|
"cleanup: warning: could not remove temporary file {} - {}",
|
||||||
&fuse_path,
|
&fuse_path, err,
|
||||||
err,
|
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
if let Err(err) = remove_file(&pid_path) {
|
if let Err(err) = remove_file(&pid_path) {
|
||||||
eprintln!(
|
eprintln!(
|
||||||
"cleanup: warning: could not remove PID file {} - {}",
|
"cleanup: warning: could not remove PID file {} - {}",
|
||||||
&pid_path,
|
&pid_path, err,
|
||||||
err,
|
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
tokio::select!{
|
tokio::select! {
|
||||||
_ = abort_chan.next() => {
|
_ = abort_chan.next() => {
|
||||||
// aborted, do cleanup and exit
|
// aborted, do cleanup and exit
|
||||||
break;
|
break;
|
||||||
@ -176,7 +178,7 @@ impl<R: AsyncRead + AsyncSeek + Unpin> FuseLoopSession<R> {
|
|||||||
req.reply(&entry)
|
req.reply(&entry)
|
||||||
},
|
},
|
||||||
Some(Request::Getattr(req)) => {
|
Some(Request::Getattr(req)) => {
|
||||||
req.reply(&self.stat, std::f64::MAX)
|
req.reply(&self.stat, f64::MAX)
|
||||||
},
|
},
|
||||||
Some(Request::Read(req)) => {
|
Some(Request::Read(req)) => {
|
||||||
match self.reader.seek(SeekFrom::Start(req.offset)).await {
|
match self.reader.seek(SeekFrom::Start(req.offset)).await {
|
||||||
@ -227,8 +229,8 @@ impl<R: AsyncRead + AsyncSeek + Unpin> FuseLoopSession<R> {
|
|||||||
pub fn cleanup_unused_run_files(filter_name: Option<String>) {
|
pub fn cleanup_unused_run_files(filter_name: Option<String>) {
|
||||||
if let Ok(maps) = find_all_mappings() {
|
if let Ok(maps) = find_all_mappings() {
|
||||||
for (name, loopdev) in maps {
|
for (name, loopdev) in maps {
|
||||||
if loopdev.is_none() &&
|
if loopdev.is_none()
|
||||||
(filter_name.is_none() || &name == filter_name.as_ref().unwrap())
|
&& (filter_name.is_none() || &name == filter_name.as_ref().unwrap())
|
||||||
{
|
{
|
||||||
let mut path = PathBuf::from(RUN_DIR);
|
let mut path = PathBuf::from(RUN_DIR);
|
||||||
path.push(&name);
|
path.push(&name);
|
||||||
@ -254,10 +256,17 @@ pub fn cleanup_unused_run_files(filter_name: Option<String>) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn get_backing_file(loopdev: &str) -> Result<String, Error> {
|
fn get_backing_file(loopdev: &str) -> Result<String, Error> {
|
||||||
let num = loopdev.split_at(9).1.parse::<u8>().map_err(|err|
|
let num = loopdev.split_at(9).1.parse::<u8>().map_err(|err| {
|
||||||
format_err!("malformed loopdev path, does not end with valid number - {}", err))?;
|
format_err!(
|
||||||
|
"malformed loopdev path, does not end with valid number - {}",
|
||||||
|
err
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
let block_path = PathBuf::from(format!("/sys/devices/virtual/block/loop{}/loop/backing_file", num));
|
let block_path = PathBuf::from(format!(
|
||||||
|
"/sys/devices/virtual/block/loop{}/loop/backing_file",
|
||||||
|
num
|
||||||
|
));
|
||||||
let backing_file = read_to_string(block_path).map_err(|err| {
|
let backing_file = read_to_string(block_path).map_err(|err| {
|
||||||
if err.kind() == std::io::ErrorKind::NotFound {
|
if err.kind() == std::io::ErrorKind::NotFound {
|
||||||
format_err!("nothing mapped to {}", loopdev)
|
format_err!("nothing mapped to {}", loopdev)
|
||||||
@ -281,7 +290,7 @@ fn get_backing_file(loopdev: &str) -> Result<String, Error> {
|
|||||||
|
|
||||||
// call in broken state: we found the mapping, but the client is already dead,
|
// call in broken state: we found the mapping, but the client is already dead,
|
||||||
// only thing to do is clean up what we can
|
// only thing to do is clean up what we can
|
||||||
fn emerg_cleanup (loopdev: Option<&str>, mut backing_file: PathBuf) {
|
fn emerg_cleanup(loopdev: Option<&str>, mut backing_file: PathBuf) {
|
||||||
eprintln!(
|
eprintln!(
|
||||||
"warning: found mapping with dead process ({:?}), attempting cleanup",
|
"warning: found mapping with dead process ({:?}), attempting cleanup",
|
||||||
&backing_file
|
&backing_file
|
||||||
@ -312,35 +321,36 @@ fn unmap_from_backing(backing_file: &Path, loopdev: Option<&str>) -> Result<(),
|
|||||||
}
|
}
|
||||||
format_err!("error reading pidfile {:?}: {}", &pid_path, err)
|
format_err!("error reading pidfile {:?}: {}", &pid_path, err)
|
||||||
})?;
|
})?;
|
||||||
let pid = pid_str.parse::<i32>().map_err(|err|
|
let pid = pid_str
|
||||||
format_err!("malformed PID ({}) in pidfile - {}", pid_str, err))?;
|
.parse::<i32>()
|
||||||
|
.map_err(|err| format_err!("malformed PID ({}) in pidfile - {}", pid_str, err))?;
|
||||||
|
|
||||||
let pid = Pid::from_raw(pid);
|
let pid = Pid::from_raw(pid);
|
||||||
|
|
||||||
// send SIGINT to trigger cleanup and exit in target process
|
// send SIGINT to trigger cleanup and exit in target process
|
||||||
match signal::kill(pid, Signal::SIGINT) {
|
match signal::kill(pid, Signal::SIGINT) {
|
||||||
Ok(()) => {},
|
Ok(()) => {}
|
||||||
Err(nix::Error::Sys(nix::errno::Errno::ESRCH)) => {
|
Err(nix::Error::Sys(nix::errno::Errno::ESRCH)) => {
|
||||||
emerg_cleanup(loopdev, backing_file.to_owned());
|
emerg_cleanup(loopdev, backing_file.to_owned());
|
||||||
return Ok(());
|
return Ok(());
|
||||||
},
|
}
|
||||||
Err(e) => return Err(e.into()),
|
Err(e) => return Err(e.into()),
|
||||||
}
|
}
|
||||||
|
|
||||||
// block until unmap is complete or timeout
|
// block until unmap is complete or timeout
|
||||||
let start = time::epoch_i64();
|
let start = epoch_i64();
|
||||||
loop {
|
loop {
|
||||||
match signal::kill(pid, None) {
|
match signal::kill(pid, None) {
|
||||||
Ok(_) => {
|
Ok(_) => {
|
||||||
// 10 second timeout, then assume failure
|
// 10 second timeout, then assume failure
|
||||||
if (time::epoch_i64() - start) > 10 {
|
if (epoch_i64() - start) > 10 {
|
||||||
return Err(format_err!("timed out waiting for PID '{}' to exit", &pid));
|
return Err(format_err!("timed out waiting for PID '{}' to exit", &pid));
|
||||||
}
|
}
|
||||||
std::thread::sleep(std::time::Duration::from_millis(100));
|
std::thread::sleep(std::time::Duration::from_millis(100));
|
||||||
},
|
}
|
||||||
Err(nix::Error::Sys(nix::errno::Errno::ESRCH)) => {
|
Err(nix::Error::Sys(nix::errno::Errno::ESRCH)) => {
|
||||||
break;
|
break;
|
||||||
},
|
}
|
||||||
Err(e) => return Err(e.into()),
|
Err(e) => return Err(e.into()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -360,13 +370,13 @@ pub fn find_all_mappings() -> Result<impl Iterator<Item = (String, Option<String
|
|||||||
let loopdev = format!("/dev/{}", ent.file_name().to_string_lossy());
|
let loopdev = format!("/dev/{}", ent.file_name().to_string_lossy());
|
||||||
if let Ok(file) = get_backing_file(&loopdev) {
|
if let Ok(file) = get_backing_file(&loopdev) {
|
||||||
// insert filename only, strip RUN_DIR/
|
// insert filename only, strip RUN_DIR/
|
||||||
loopmap.insert(file[RUN_DIR.len()+1..].to_owned(), loopdev);
|
loopmap.insert(file[RUN_DIR.len() + 1..].to_owned(), loopdev);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(pbs_tools::fs::read_subdir(libc::AT_FDCWD, Path::new(RUN_DIR))?
|
Ok(
|
||||||
.filter_map(move |ent| {
|
pbs_tools::fs::read_subdir(libc::AT_FDCWD, Path::new(RUN_DIR))?.filter_map(move |ent| {
|
||||||
match ent {
|
match ent {
|
||||||
Ok(ent) => {
|
Ok(ent) => {
|
||||||
let file = ent.file_name().to_string_lossy();
|
let file = ent.file_name().to_string_lossy();
|
||||||
@ -376,10 +386,11 @@ pub fn find_all_mappings() -> Result<impl Iterator<Item = (String, Option<String
|
|||||||
let loopdev = loopmap.get(file.as_ref()).map(String::to_owned);
|
let loopdev = loopmap.get(file.as_ref()).map(String::to_owned);
|
||||||
Some((file.into_owned(), loopdev))
|
Some((file.into_owned(), loopdev))
|
||||||
}
|
}
|
||||||
},
|
}
|
||||||
Err(_) => None,
|
Err(_) => None,
|
||||||
}
|
}
|
||||||
}))
|
}),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Try and unmap a running proxmox-backup-client instance from the given
|
/// Try and unmap a running proxmox-backup-client instance from the given
|
||||||
|
@ -18,7 +18,16 @@ bitflags = "1.2.1"
|
|||||||
regex = "1.2"
|
regex = "1.2"
|
||||||
udev = ">= 0.3, <0.5"
|
udev = ">= 0.3, <0.5"
|
||||||
|
|
||||||
proxmox = { version = "0.13.3", default-features = false, features = [] }
|
proxmox = "0.14.0"
|
||||||
|
proxmox-io = "1"
|
||||||
|
proxmox-lang = "1"
|
||||||
|
# api-macro is only used by the binaries, so maybe we should split them out
|
||||||
|
proxmox-schema = { version = "1", features = [ "api-macro" ] }
|
||||||
|
proxmox-time = "1"
|
||||||
|
proxmox-uuid = "1"
|
||||||
|
|
||||||
|
# router::cli is only used by binaries, so maybe we should split them out
|
||||||
|
proxmox-router = "1"
|
||||||
|
|
||||||
pbs-api-types = { path = "../pbs-api-types" }
|
pbs-api-types = { path = "../pbs-api-types" }
|
||||||
pbs-tools = { path = "../pbs-tools" }
|
pbs-tools = { path = "../pbs-tools" }
|
||||||
|
@ -18,19 +18,9 @@ use std::convert::TryInto;
|
|||||||
use anyhow::{bail, Error};
|
use anyhow::{bail, Error};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use proxmox::{
|
use proxmox_schema::{api, ArraySchema, IntegerSchema, Schema, StringSchema};
|
||||||
api::{
|
use proxmox_router::cli::*;
|
||||||
api,
|
use proxmox_router::RpcEnvironment;
|
||||||
cli::*,
|
|
||||||
schema::{
|
|
||||||
Schema,
|
|
||||||
IntegerSchema,
|
|
||||||
StringSchema,
|
|
||||||
ArraySchema,
|
|
||||||
},
|
|
||||||
RpcEnvironment,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
use pbs_api_types::{
|
use pbs_api_types::{
|
||||||
LTO_DRIVE_PATH_SCHEMA, DRIVE_NAME_SCHEMA, LtoTapeDrive,
|
LTO_DRIVE_PATH_SCHEMA, DRIVE_NAME_SCHEMA, LtoTapeDrive,
|
||||||
|
@ -17,13 +17,9 @@ use std::fs::File;
|
|||||||
use anyhow::{bail, Error};
|
use anyhow::{bail, Error};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use proxmox::{
|
use proxmox_schema::api;
|
||||||
api::{
|
use proxmox_router::cli::*;
|
||||||
api,
|
use proxmox_router::RpcEnvironment;
|
||||||
cli::*,
|
|
||||||
RpcEnvironment,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
use pbs_config::drive::complete_changer_name;
|
use pbs_config::drive::complete_changer_name;
|
||||||
use pbs_api_types::{
|
use pbs_api_types::{
|
||||||
@ -93,7 +89,7 @@ fn inquiry(
|
|||||||
|
|
||||||
let output_format = get_output_format(¶m);
|
let output_format = get_output_format(¶m);
|
||||||
|
|
||||||
let result: Result<_, Error> = proxmox::try_block!({
|
let result: Result<_, Error> = proxmox_lang::try_block!({
|
||||||
let mut file = get_changer_handle(¶m)?;
|
let mut file = get_changer_handle(¶m)?;
|
||||||
let info = scsi_inquiry(&mut file)?;
|
let info = scsi_inquiry(&mut file)?;
|
||||||
Ok(info)
|
Ok(info)
|
||||||
@ -281,7 +277,7 @@ fn status(
|
|||||||
|
|
||||||
let output_format = get_output_format(¶m);
|
let output_format = get_output_format(¶m);
|
||||||
|
|
||||||
let result: Result<_, Error> = proxmox::try_block!({
|
let result: Result<_, Error> = proxmox_lang::try_block!({
|
||||||
let mut file = get_changer_handle(¶m)?;
|
let mut file = get_changer_handle(¶m)?;
|
||||||
let status = sg_pt_changer::read_element_status(&mut file)?;
|
let status = sg_pt_changer::read_element_status(&mut file)?;
|
||||||
Ok(status)
|
Ok(status)
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user