Compare commits

3 Commits

edc876c58e  bump version to 2.0.12-1
            Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
            2021-10-19 10:48:54 +02:00

ac383beb0a  bump d/control
            Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
            2021-10-19 10:45:23 +02:00

716753f1a8  pbs-tools: drop borrow module
            Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
            2021-10-19 10:45:23 +02:00
241 changed files with 8041 additions and 7226 deletions


@ -1,6 +1,6 @@
[package]
name = "proxmox-backup"
version = "2.1.2"
version = "2.0.12"
authors = [
"Dietmar Maurer <dietmar@proxmox.com>",
"Dominik Csapak <d.csapak@proxmox.com>",
@ -25,8 +25,11 @@ members = [
"pbs-config",
"pbs-datastore",
"pbs-fuse-loop",
"pbs-runtime",
"proxmox-rest-server",
"proxmox-rrd-api-types",
"proxmox-rrd",
"proxmox-systemd",
"pbs-tape",
"pbs-tools",
@ -43,10 +46,9 @@ path = "src/lib.rs"
[dependencies]
apt-pkg-native = "0.3.2"
base64 = "0.13"
base64 = "0.12"
bitflags = "1.2.1"
bytes = "1.0"
cidr = "0.2.1"
crc32fast = "1"
endian_trait = { version = "0.6", features = ["arrays"] }
env_logger = "0.7"
@ -80,10 +82,11 @@ tokio-openssl = "0.6.1"
tokio-stream = "0.1.0"
tokio-util = { version = "0.6", features = [ "codec", "io" ] }
tower-service = "0.3.0"
udev = "0.4"
udev = ">= 0.3, <0.5"
url = "2.1"
#valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
walkdir = "2"
webauthn-rs = "0.2.5"
xdg = "2.2"
nom = "5.1"
crossbeam-channel = "0.5"
@ -94,31 +97,31 @@ zstd = { version = "0.6", features = [ "bindgen" ] }
pathpatterns = "0.1.2"
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
proxmox = { version = "0.15.3", features = [ "sortable-macro" ] }
proxmox-http = { version = "0.5.4", features = [ "client", "http-helpers", "websocket" ] }
proxmox = { version = "0.14.0", features = [ "sortable-macro" ] }
proxmox-http = { version = "0.5.0", features = [ "client", "http-helpers", "websocket" ] }
proxmox-io = "1"
proxmox-lang = "1"
proxmox-router = { version = "1.1", features = [ "cli" ] }
proxmox-router = { version = "1", features = [ "cli" ] }
proxmox-schema = { version = "1", features = [ "api-macro" ] }
proxmox-section-config = "1"
proxmox-tfa = { version = "1.3", features = [ "api", "api-types" ] }
proxmox-tfa = { version = "1", features = [ "u2f" ] }
proxmox-time = "1"
proxmox-uuid = "1"
proxmox-shared-memory = "0.1.1"
proxmox-sys = "0.1.2"
proxmox-acme-rs = "0.3"
proxmox-acme-rs = "0.2.1"
proxmox-apt = "0.8.0"
proxmox-async = "0.2"
proxmox-openid = "0.9.0"
proxmox-openid = "0.8.0"
pbs-api-types = { path = "pbs-api-types" }
pbs-buildcfg = { path = "pbs-buildcfg" }
pbs-client = { path = "pbs-client" }
pbs-config = { path = "pbs-config" }
pbs-datastore = { path = "pbs-datastore" }
pbs-runtime = { path = "pbs-runtime" }
proxmox-rest-server = { path = "proxmox-rest-server" }
proxmox-rrd-api-types = { path = "proxmox-rrd-api-types" }
proxmox-rrd = { path = "proxmox-rrd" }
proxmox-systemd = { path = "proxmox-systemd" }
pbs-tools = { path = "pbs-tools" }
pbs-tape = { path = "pbs-tape" }
@ -127,8 +130,6 @@ pbs-tape = { path = "pbs-tape" }
[patch.crates-io]
#proxmox = { path = "../proxmox/proxmox" }
#proxmox-http = { path = "../proxmox/proxmox-http" }
#proxmox-tfa = { path = "../proxmox/proxmox-tfa" }
#proxmox-schema = { path = "../proxmox/proxmox-schema" }
#pxar = { path = "../pxar" }
[features]


@ -38,8 +38,11 @@ SUBCRATES := \
pbs-config \
pbs-datastore \
pbs-fuse-loop \
pbs-runtime \
proxmox-rest-server \
proxmox-rrd-api-types \
proxmox-rrd \
proxmox-systemd \
pbs-tape \
pbs-tools \
proxmox-backup-banner \

debian/changelog

@ -1,120 +1,3 @@
rust-proxmox-backup (2.1.2-1) bullseye; urgency=medium
* docs: backup-client: fix wrong reference
* docs: remotes: note that protected flags will not be synced
* sync job: correctly apply rate limit
-- Proxmox Support Team <support@proxmox.com> Tue, 23 Nov 2021 13:56:15 +0100
rust-proxmox-backup (2.1.1-2) bullseye; urgency=medium
* docs: update and add traffic control related screenshots
* docs: mention traffic control (bandwidth limits) for sync jobs
-- Proxmox Support Team <support@proxmox.com> Mon, 22 Nov 2021 16:07:39 +0100
rust-proxmox-backup (2.1.1-1) bullseye; urgency=medium
* fix proxmox-backup-manager sync-job list
* ui, api: sync-job: allow one to configure a rate limit
* api: snapshot list: set default for 'protected' flag
* ui: datastore content: rework rendering protection state
* docs: update traffic control docs (use HumanBytes)
* ui: traffic-control: include ipv6 in 'all' networks
* ui: traffic-control edit: add spaces between networks for more
readabillity
* tape: fix passing-through key-fingerprint
* avoid a bogus error regarding logrotate-path due to a reversed check
-- Proxmox Support Team <support@proxmox.com> Mon, 22 Nov 2021 12:24:31 +0100
rust-proxmox-backup (2.1.0-1) bullseye; urgency=medium
* rest server: make successful-ticket auth log a debug one to avoid
syslog spam
* traffic-controls: add API/CLI to show current traffic
* docs: add traffic control section
* ui: use TFA widgets from widget toolkit
* sync: allow pulling groups selectively
* fix #3533: tape backup: filter groups according to config
* proxmox-tape: add missing notify-user option to backup command
* openid: allow arbitrary username-claims
* openid: support configuring the prompt, scopes and ACR values
* use human-byte for traffic-control rate-in/out and burst-in/out config
* ui: add traffic control view and editor
-- Proxmox Support Team <support@proxmox.com> Sat, 20 Nov 2021 22:44:07 +0100
rust-proxmox-backup (2.0.14-1) bullseye; urgency=medium
* fix directory permission problems
* add traffic control configuration config with API
* proxmox-backup-proxy: implement traffic control
* proxmox-backup-client: add rate/burst parameter to backup/restore CLI
* openid_login: vertify that firstname, lastname and email fits our
schema definitions
* docs: add info about protection flag to client docs
* fix #3602: ui: datastore/Content: add action to set protection status
* ui: add protected icon to snapshot (if they are protected)
* ui: PruneInputPanel: add keepReason 'protected' for protected backups
* proxmox-backup-client: add 'protected' commands
* acme: interpret no TOS as accepted
* acme: new_account: prevent replacing existing accounts
-- Proxmox Support Team <support@proxmox.com> Fri, 12 Nov 2021 08:04:55 +0100
rust-proxmox-backup (2.0.13-1) bullseye; urgency=medium
* tape: simplify export_media_set for pool writer
* tape: improve export_media error message for not found tape
* rest-server: use hashmap for parameter errors
* proxmox-rrd: use new file firmat with higher resolution
* proxmox-rrd: use a journal to reduce amount of bytes written
* use new fsync parameter to replace_file and atomic_open_or_create
* docs: langauge and formatting fixup
* docs: Update for new features/functionality
-- Proxmox Support Team <support@proxmox.com> Thu, 21 Oct 2021 08:17:00 +0200
rust-proxmox-backup (2.0.12-1) bullseye; urgency=medium
* proxmox-backup-proxy: clean up old tasks when their reference was rotated

debian/control

@ -8,10 +8,9 @@ Build-Depends: debhelper (>= 12),
libstd-rust-dev,
librust-anyhow-1+default-dev,
librust-apt-pkg-native-0.3+default-dev (>= 0.3.2-~~),
librust-base64-0.13+default-dev,
librust-base64-0.12+default-dev,
librust-bitflags-1+default-dev (>= 1.2.1-~~),
librust-bytes-1+default-dev,
librust-cidr-0.2+default-dev (>= 0.2.1-~~),
librust-crc32fast-1+default-dev,
librust-crossbeam-channel-0.5+default-dev,
librust-endian-trait-0.6+arrays-dev,
@ -25,8 +24,8 @@ Build-Depends: debhelper (>= 12),
librust-handlebars-3+default-dev,
librust-hex-0.4+default-dev (>= 0.4.3-~~),
librust-http-0.2+default-dev,
librust-hyper-0.14+default-dev (>= 0.14.5-~~),
librust-hyper-0.14+full-dev (>= 0.14.5-~~),
librust-hyper-0.14+default-dev,
librust-hyper-0.14+full-dev,
librust-lazy-static-1+default-dev (>= 1.4-~~),
librust-libc-0.2+default-dev,
librust-log-0.4+default-dev,
@ -40,43 +39,30 @@ Build-Depends: debhelper (>= 12),
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
librust-percent-encoding-2+default-dev (>= 2.1-~~),
librust-pin-project-lite-0.2+default-dev,
librust-proxmox-0.15+default-dev (>= 0.15.3-~~),
librust-proxmox-0.15+sortable-macro-dev (>= 0.15.3-~~),
librust-proxmox-0.15+tokio-dev (>= 0.15.3-~~),
librust-proxmox-acme-rs-0.3+default-dev,
librust-proxmox-0.14+sortable-macro-dev,
librust-proxmox-acme-rs-0.2+default-dev (>= 0.2.1-~~),
librust-proxmox-apt-0.8+default-dev,
librust-proxmox-async-0.2+default-dev,
librust-proxmox-borrow-1+default-dev,
librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
librust-proxmox-http-0.5+client-dev (>= 0.5.4-~~),
librust-proxmox-http-0.5+default-dev (>= 0.5.4-~~),
librust-proxmox-http-0.5+http-helpers-dev (>= 0.5.4-~~),
librust-proxmox-http-0.5+websocket-dev (>= 0.5.4-~~),
librust-proxmox-io-1+default-dev,
librust-proxmox-http-0.5+client-dev,
librust-proxmox-http-0.5+default-dev ,
librust-proxmox-http-0.5+http-helpers-dev,
librust-proxmox-http-0.5+websocket-dev,
librust-proxmox-io-1+tokio-dev,
librust-proxmox-lang-1+default-dev,
librust-proxmox-openid-0.9+default-dev,
librust-proxmox-router-1+cli-dev (>= 1.1-~~),
librust-proxmox-router-1+default-dev (>= 1.1-~~),
librust-proxmox-schema-1+api-macro-dev (>= 1.0.1-~~),
librust-proxmox-schema-1+default-dev (>= 1.0.1-~~),
librust-proxmox-schema-1+upid-api-impl-dev (>= 1.0.1-~~),
librust-proxmox-openid-0.8+default-dev,
librust-proxmox-router-1+cli-dev (>= 1.1.0-~~),
librust-proxmox-schema-1+api-macro-dev,
librust-proxmox-section-config-1+default-dev,
librust-proxmox-shared-memory-0.1+default-dev (>= 0.1.1-~~),
librust-proxmox-sys-0.1+default-dev (>= 0.1.2-~~),
librust-proxmox-tfa-1+api-dev (>= 1.3-~~),
librust-proxmox-tfa-1+api-types-dev (>= 1.3-~~),
librust-proxmox-tfa-1+default-dev (>= 1.3-~~),
librust-proxmox-time-1+default-dev (>= 1.1-~~),
librust-proxmox-tfa-1+u2f-dev,
librust-proxmox-time-1+default-dev,
librust-proxmox-uuid-1+default-dev,
librust-proxmox-uuid-1+serde-dev,
librust-pxar-0.10+default-dev (>= 0.10.1-~~),
librust-pxar-0.10+tokio-io-dev (>= 0.10.1-~~),
librust-regex-1+default-dev (>= 1.2-~~),
librust-rustyline-7+default-dev,
librust-serde-1+default-dev,
librust-serde-1+derive-dev,
librust-serde-cbor-0.11+default-dev (>= 0.11.1-~~),
librust-serde-json-1+default-dev,
librust-siphasher-0.3+default-dev,
librust-syslog-4+default-dev,
@ -92,7 +78,6 @@ Build-Depends: debhelper (>= 12),
librust-tokio-1+rt-dev (>= 1.6-~~),
librust-tokio-1+rt-multi-thread-dev (>= 1.6-~~),
librust-tokio-1+signal-dev (>= 1.6-~~),
librust-tokio-1+sync-dev (>= 1.6-~~),
librust-tokio-1+time-dev (>= 1.6-~~),
librust-tokio-openssl-0.6+default-dev (>= 0.6.1-~~),
librust-tokio-stream-0.1+default-dev,
@ -100,9 +85,10 @@ Build-Depends: debhelper (>= 12),
librust-tokio-util-0.6+default-dev,
librust-tokio-util-0.6+io-dev,
librust-tower-service-0.3+default-dev,
librust-udev-0.4+default-dev,
librust-udev-0.4+default-dev | librust-udev-0.3+default-dev,
librust-url-2+default-dev (>= 2.1-~~),
librust-walkdir-2+default-dev,
librust-webauthn-rs-0.2+default-dev (>= 0.2.5-~~),
librust-xdg-2+default-dev (>= 2.2-~~),
librust-zstd-0.6+bindgen-dev,
librust-zstd-0.6+default-dev,
@ -150,7 +136,7 @@ Depends: fonts-font-awesome,
postfix | mail-transport-agent,
proxmox-backup-docs,
proxmox-mini-journalreader,
proxmox-widget-toolkit (>= 3.4-3),
proxmox-widget-toolkit (>= 3.3-2),
pve-xtermjs (>= 4.7.0-1),
sg3-utils,
smartmontools,

debian/postinst

@ -4,14 +4,6 @@ set -e
#DEBHELPER#
update_sync_job() {
job="$1"
echo "Updating sync job '$job' to make old 'remove-vanished' default explicit.."
proxmox-backup-manager sync-job update "$job" --remove-vanished true \
|| echo "Failed, please check sync.cfg manually!"
}
case "$1" in
configure)
# need to have user backup in the tape group
@ -40,29 +32,6 @@ case "$1" in
echo "Fixing up termproxy user id in task log..."
flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active || true
fi
if dpkg --compare-versions "$2" 'lt' '7.1-1' && test -e /etc/proxmox-backup/sync.cfg; then
prev_job=""
# read from HERE doc because POSIX sh limitations
while read -r key value; do
if test "$key" = "sync:"; then
if test -n "$prev_job"; then
# previous job doesn't have an explicit value
update_sync_job "$prev_job"
fi
prev_job=$value
else
prev_job=""
fi
done <<EOF
$(grep -e '^sync:' -e 'remove-vanished' /etc/proxmox-backup/sync.cfg)
EOF
if test -n "$prev_job"; then
# last job doesn't have an explicit value
update_sync_job "$prev_job"
fi
fi
fi
;;


@ -654,25 +654,6 @@ shows the list of existing snapshots and what actions prune would take.
in the chunk-store. The chunk-store still contains the data blocks. To free
space you need to perform :ref:`client_garbage-collection`.
It is also possible to protect single snapshots from being pruned or deleted:
.. code-block:: console
# proxmox-backup-client snapshot protected update <snapshot> true
This will set the protected flag on the snapshot and prevent pruning or manual
deletion of this snapshot untilt he flag is removed again with:
.. code-block:: console
# proxmox-backup-client snapshot protected update <snapshot> false
When a group is with a protected snapshot is deleted, only the non-protected
ones are removed and the group will remain.
.. note:: This flag will not be synced when using pull or sync jobs. If you
want to protect a synced snapshot, you have to manually to this again on
the target backup server.
.. _client_garbage-collection:


@ -1,10 +1,10 @@
Backup Protocol
===============
Proxmox Backup Server uses a REST-based API. While the management
interface uses normal HTTP, the actual backup and restore interface uses
Proxmox Backup Server uses a REST based API. While the management
interface use normal HTTP, the actual backup and restore interface use
HTTP/2 for improved performance. Both HTTP and HTTP/2 are well known
standards, so the following section assumes that you are familiar with
standards, so the following section assumes that you are familiar on
how to use them.
@ -13,35 +13,35 @@ Backup Protocol API
To start a new backup, the API call ``GET /api2/json/backup`` needs to
be upgraded to a HTTP/2 connection using
``proxmox-backup-protocol-v1`` as the protocol name::
``proxmox-backup-protocol-v1`` as protocol name::
GET /api2/json/backup HTTP/1.1
UPGRADE: proxmox-backup-protocol-v1
The server replies with the ``HTTP 101 Switching Protocol`` status code,
and you can then issue REST commands on the updated HTTP/2 connection.
The server replies with HTTP 101 Switching Protocol status code,
and you can then issue REST commands on that updated HTTP/2 connection.
The backup protocol allows you to upload three different kind of files:
- Chunks and blobs (binary data)
- Fixed indexes (List of chunks with fixed size)
- Fixed Indexes (List of chunks with fixed size)
- Dynamic indexes (List of chunks with variable size)
- Dynamic Indexes (List of chunk with variable size)
The following section provides a short introduction on how to upload such
The following section gives a short introduction how to upload such
files. Please use the `API Viewer <api-viewer/index.html>`_ for
details about the available REST commands.
details about available REST commands.
Upload Blobs
~~~~~~~~~~~~
Blobs are uploaded using ``POST /blob``. The HTTP body contains the
data encoded as :ref:`Data Blob <data-blob-format>`.
Uploading blobs is done using ``POST /blob``. The HTTP body contains the
data encoded as :ref:`Data Blob <data-blob-format>`).
The file name must end with ``.blob``, and is automatically added
to the backup manifest, following the call to ``POST /finish``.
The file name needs to end with ``.blob``, and is automatically added
to the backup manifest.
Upload Chunks
@ -56,41 +56,40 @@ encoded as :ref:`Data Blob <data-blob-format>`).
Upload Fixed Indexes
~~~~~~~~~~~~~~~~~~~~
Fixed indexes are used to store VM image data. The VM image is split
Fixed indexes are use to store VM image data. The VM image is split
into equally sized chunks, which are uploaded individually. The index
file simply contains a list of chunk digests.
file simply contains a list to chunk digests.
You create a fixed index with ``POST /fixed_index``. Then, upload
You create a fixed index with ``POST /fixed_index``. Then upload
chunks with ``POST /fixed_chunk``, and append them to the index with
``PUT /fixed_index``. When finished, you need to close the index using
``POST /fixed_close``.
The file name needs to end with ``.fidx``, and is automatically added
to the backup manifest, following the call to ``POST /finish``.
to the backup manifest.
Upload Dynamic Indexes
~~~~~~~~~~~~~~~~~~~~~~
Dynamic indexes are used to store file archive data. The archive data
Dynamic indexes are use to store file archive data. The archive data
is split into dynamically sized chunks, which are uploaded
individually. The index file simply contains a list of chunk digests
individually. The index file simply contains a list to chunk digests
and offsets.
You can create a dynamically sized index with ``POST /dynamic_index``. Then,
You create a dynamic sized index with ``POST /dynamic_index``. Then
upload chunks with ``POST /dynamic_chunk``, and append them to the index with
``PUT /dynamic_index``. When finished, you need to close the index using
``POST /dynamic_close``.
The filename needs to end with ``.didx``, and is automatically added
to the backup manifest, following the call to ``POST /finish``.
The file name needs to end with ``.didx``, and is automatically added
to the backup manifest.
Finish Backup
~~~~~~~~~~~~~
Once you have uploaded all data, you need to call ``POST /finish``. This
commits all data and ends the backup protocol.
Once you have uploaded all data, you need to call ``POST
/finish``. This commits all data and ends the backup protocol.
Restore/Reader Protocol API
@ -103,39 +102,39 @@ be upgraded to a HTTP/2 connection using
GET /api2/json/reader HTTP/1.1
UPGRADE: proxmox-backup-reader-protocol-v1
The server replies with the ``HTTP 101 Switching Protocol`` status code,
The server replies with HTTP 101 Switching Protocol status code,
and you can then issue REST commands on that updated HTTP/2 connection.
The reader protocol allows you to download three different kinds of files:
The reader protocol allows you to download three different kind of files:
- Chunks and blobs (binary data)
- Fixed indexes (list of chunks with fixed size)
- Fixed Indexes (List of chunks with fixed size)
- Dynamic indexes (list of chunks with variable size)
- Dynamic Indexes (List of chunk with variable size)
The following section provides a short introduction on how to download such
The following section gives a short introduction how to download such
files. Please use the `API Viewer <api-viewer/index.html>`_ for details about
the available REST commands.
available REST commands.
Download Blobs
~~~~~~~~~~~~~~
Blobs are downloaded using ``GET /download``. The HTTP body contains the
Downloading blobs is done using ``GET /download``. The HTTP body contains the
data encoded as :ref:`Data Blob <data-blob-format>`.
Download Chunks
~~~~~~~~~~~~~~~
Chunks are downloaded using ``GET /chunk``. The HTTP body contains the
data encoded as :ref:`Data Blob <data-blob-format>`.
Downloading chunks is done using ``GET /chunk``. The HTTP body contains the
data encoded as :ref:`Data Blob <data-blob-format>`).
Download Index Files
~~~~~~~~~~~~~~~~~~~~
Index files are downloaded using ``GET /download``. The HTTP body
Downloading index files is done using ``GET /download``. The HTTP body
contains the data encoded as :ref:`Fixed Index <fixed-index-format>`
or :ref:`Dynamic Index <dynamic-index-format>`.
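As a rough illustration of the protocol handshake described in the section above (a hedged sketch, not part of this changeset): the upgrade request can be built with hyper 0.14, which is already a build dependency. The host name, the port and the omitted TLS/authentication setup are assumptions of the sketch:

    use hyper::{Body, Request};

    // Sketch only: the HTTP/1.1 request that gets upgraded to the HTTP/2
    // backup protocol. Ticket/fingerprint auth and the TLS connection are omitted.
    fn backup_upgrade_request(host: &str) -> Request<Body> {
        Request::builder()
            .method("GET")
            .uri(format!("https://{}:8007/api2/json/backup", host)) // 8007: assumed default port
            .header("UPGRADE", "proxmox-backup-protocol-v1")
            .body(Body::empty())
            .expect("statically valid request")
    }

    fn main() {
        let req = backup_upgrade_request("backup.example.com");
        println!("{} {}", req.method(), req.uri());
    }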


@ -37,7 +37,7 @@ Each field can contain multiple values in the following formats:
* and a combination of the above: e.g., 01,05..10,12/02
* or a `*` for every possible value: e.g., \*:00
There are some special values that have a specific meaning:
There are some special values that have specific meaning:
================================= ==============================
Value Syntax
@ -81,19 +81,19 @@ Not all features of systemd calendar events are implemented:
* no Unix timestamps (e.g. `@12345`): instead use date and time to specify
a specific point in time
* no timezone: all schedules use the timezone of the server
* no timezone: all schedules use the set timezone on the server
* no sub-second resolution
* no reverse day syntax (e.g. 2020-03~01)
* no repetition of ranges (e.g. 1..10/2)
Notes on Scheduling
Notes on scheduling
-------------------
In `Proxmox Backup`_, scheduling for most tasks is done in the
In `Proxmox Backup`_ scheduling for most tasks is done in the
`proxmox-backup-proxy`. This daemon checks all job schedules
every minute, to see if any are due. This means that even though
if they are due every minute. This means that even if
`calendar events` can contain seconds, it will only be checked
once per minute.
once a minute.
Also, all schedules will be checked against the timezone set
in the `Proxmox Backup`_ server.
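Both calendar-event verify helpers that appear in the jobs.rs hunks later in this diff (proxmox_time::verify_calendar_event and proxmox_systemd::time::verify_calendar_event) take a schedule string and return a Result. A minimal usage sketch, assuming the proxmox_time variant:

    // Sketch: validating schedule strings before storing them in a job config.
    fn main() {
        for event in ["daily", "*:00", "not a schedule"] {
            match proxmox_time::verify_calendar_event(event) {
                Ok(()) => println!("'{}' is a valid calendar event", event),
                Err(err) => println!("'{}' rejected: {}", event, err),
            }
        }
    }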


@ -10,7 +10,7 @@ Command Syntax
Catalog Shell Commands
~~~~~~~~~~~~~~~~~~~~~~
The following commands are available in an interactive restore shell:
Those command are available when you start an interactive restore shell:
.. code-block:: console


@ -2,13 +2,13 @@ This file contains the access control list for the Proxmox Backup
Server API.
Each line starts with ``acl:``, followed by 4 additional values
separated by colon.
separated by collon.
:propagate: Propagate permissions down the hierarchy
:propagate: Propagate permissions down the hierachrchy
:path: The object path
:User/Token: List of users and tokens
:User/Token: List of users and token
:Role: List of assigned roles


@ -1,5 +1,5 @@
This file contains a list of datastore configuration sections. Each
section starts with the header ``datastore: <name>``, followed by the
The file contains a list of datastore configuration sections. Each
section starts with a header ``datastore: <name>``, followed by the
datastore configuration options.
::


@ -1,4 +1,4 @@
Each entry starts with the header ``pool: <name>``, followed by the
Each entry starts with a header ``pool: <name>``, followed by the
media pool configuration options.
::


@ -1,6 +1,6 @@
This file contains information used to access remote servers.
Each entry starts with the header ``remote: <name>``, followed by the
Each entry starts with a header ``remote: <name>``, followed by the
remote configuration options.
::


@ -1,4 +1,4 @@
Each entry starts with the header ``sync: <name>``, followed by the
Each entry starts with a header ``sync: <name>``, followed by the
job configuration options.
::


@ -1,4 +1,4 @@
Each entry starts with the header ``backup: <name>``, followed by the
Each entry starts with a header ``backup: <name>``, followed by the
job configuration options.
::


@ -1,7 +1,7 @@
Each LTO drive configuration section starts with the header ``lto: <name>``,
Each LTO drive configuration section starts with a header ``lto: <name>``,
followed by the drive configuration options.
Tape changer configurations start with the header ``changer: <name>``,
Tape changer configurations starts with ``changer: <name>``,
followed by the changer configuration options.
::
@ -18,5 +18,5 @@ followed by the changer configuration options.
You can use the ``proxmox-tape drive`` and ``proxmox-tape changer``
commands to manipulate this file.
.. NOTE:: The ``virtual:`` drive type is experimental and should only be used
.. NOTE:: The ``virtual:`` drive type is experimental and onyl used
for debugging.


@ -1,9 +1,9 @@
This file contains the list of API users and API tokens.
Each user configuration section starts with the header ``user: <name>``,
Each user configuration section starts with a header ``user: <name>``,
followed by the user configuration options.
API token configuration starts with the header ``token:
API token configuration starts with a header ``token:
<userid!token_name>``, followed by the token configuration. The data
used to authenticate tokens is stored in a separate file
(``token.shadow``).


@ -1,4 +1,4 @@
Each entry starts with the header ``verification: <name>``, followed by the
Each entry starts with a header ``verification: <name>``, followed by the
job configuration options.
::


@ -1,7 +1,7 @@
Configuration Files
===================
All Proxmox Backup Server configuration files reside in the directory
All Proxmox Backup Server configuration files resides inside directory
``/etc/proxmox-backup/``.


@ -69,6 +69,6 @@ be able to read the data.
Is the backup incremental/deduplicated?
---------------------------------------
With Proxmox Backup Server, backups are sent incrementally to the server, and
data is then deduplicated on the server. This minimizes both the storage
consumed and the impact on the network.
With Proxmox Backup Server, backups are sent incremental and data is
deduplicated on the server.
This minimizes both the storage consumed and the network impact.


@ -14,8 +14,7 @@ Proxmox File Archive Format (``.pxar``)
Data Blob Format (``.blob``)
----------------------------
The data blob format is used to store small binary data. The magic number
decides the exact format:
The data blob format is used to store small binary data. The magic number decides the exact format:
.. list-table::
:widths: auto
@ -33,8 +32,7 @@ decides the exact format:
- encrypted
- compressed
The compression algorithm used is ``zstd``. The encryption cipher is
``AES_256_GCM``.
Compression algorithm is ``zstd``. Encryption cipher is ``AES_256_GCM``.
Unencrypted blobs use the following format:
@ -45,9 +43,9 @@ Unencrypted blobs use the following format:
* - ``CRC32: [u8; 4]``
* - ``Data: (max 16MiB)``
Encrypted blobs additionally contain a 16 byte initialization vector (IV),
followed by a 16 byte authenticated encryption (AE) tag, followed by the
encrypted data:
Encrypted blobs additionally contains a 16 byte IV, followed by a 16
byte Authenticated Encyryption (AE) tag, followed by the encrypted
data:
.. list-table::
@ -74,19 +72,19 @@ All numbers are stored as little-endian.
* - ``ctime: i64``,
- Creation Time (epoch)
* - ``index_csum: [u8; 32]``,
- SHA-256 over the index (without header) ``SHA256(digest1||digest2||...)``
- Sha256 over the index (without header) ``SHA256(digest1||digest2||...)``
* - ``size: u64``,
- Image size
* - ``chunk_size: u64``,
- Chunk size
* - ``reserved: [u8; 4016]``,
- Overall header size is one page (4096 bytes)
- overall header size is one page (4096 bytes)
* - ``digest1: [u8; 32]``
- First chunk digest
- first chunk digest
* - ``digest2: [u8; 32]``
- Second chunk digest
- next chunk
* - ...
- Next chunk digest ...
- next chunk ...
.. _dynamic-index-format:
@ -105,16 +103,16 @@ All numbers are stored as little-endian.
* - ``ctime: i64``,
- Creation Time (epoch)
* - ``index_csum: [u8; 32]``,
- SHA-256 over the index (without header) ``SHA256(offset1||digest1||offset2||digest2||...)``
- Sha256 over the index (without header) ``SHA256(offset1||digest1||offset2||digest2||...)``
* - ``reserved: [u8; 4032]``,
- Overall header size is one page (4096 bytes)
* - ``offset1: u64``
- End of first chunk
* - ``digest1: [u8; 32]``
- First chunk digest
- first chunk digest
* - ``offset2: u64``
- End of second chunk
* - ``digest2: [u8; 32]``
- Second chunk digest
- second chunk digest
* - ...
- Next chunk offset/digest
- next chunk offset/digest
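The index checksums described above are plain SHA-256 over the concatenated entries (chunk digests for fixed indexes, offset/digest pairs for dynamic ones). A minimal sketch of the fixed-index case, using the openssl crate the project already depends on; the helper name is illustrative:

    use openssl::sha::Sha256;

    /// SHA256(digest1 || digest2 || ...), i.e. the `index_csum` of a .fidx header.
    fn fixed_index_csum(chunk_digests: &[[u8; 32]]) -> [u8; 32] {
        let mut hasher = Sha256::new();
        for digest in chunk_digests {
            hasher.update(digest);
        }
        hasher.finish()
    }

    fn main() {
        let digests = [[0u8; 32], [0xff; 32]];
        println!("index_csum = {}", hex::encode(fixed_index_csum(&digests)));
    }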


@ -11,7 +11,7 @@ Glossary
`Container`_
A container is an isolated user space. Programs run directly on
the host's kernel, but with limited access to the host's resources.
the host's kernel, but with limited access to the host resources.
Datastore
@ -23,19 +23,19 @@ Glossary
Rust is a new, fast and memory-efficient system programming
language. It has no runtime or garbage collector. Rusts rich type
system and ownership model guarantee memory-safety and
thread-safety. This can eliminate many classes of bugs
thread-safety. I can eliminate many classes of bugs
at compile-time.
`Sphinx`_
Is a tool that makes it easy to create intelligent and nicely formatted
documentation. It was originally created for the documentation of the
Python programming language. It has excellent facilities for the
Is a tool that makes it easy to create intelligent and
beautiful documentation. It was originally created for the
documentation of the Python programming language. It has excellent facilities for the
documentation of software projects in a range of languages.
`reStructuredText`_
Is an easy-to-read, what-you-see-is-what-you-get, plaintext
Is an easy-to-read, what-you-see-is-what-you-get plaintext
markup syntax and parser system.
`FUSE`

(Binary image changes not shown: one image 24 KiB before / 21 KiB after; a second 32 KiB image appears only on the 'before' side.)

@ -85,43 +85,12 @@ To set up sync jobs, the configuring user needs the following permissions:
#. ``Remote.Read`` on the ``/remote/{remote}/{remote-store}`` path
#. At least ``Datastore.Backup`` on the local target datastore (``/datastore/{store}``)
.. note:: A sync job can only sync backup groups that the configured remote's
user/API token can read. If a remote is configured with a user/API token that
only has ``Datastore.Backup`` privileges, only the limited set of accessible
snapshots owned by that user/API token can be synced.
If the ``remove-vanished`` option is set, ``Datastore.Prune`` is required on
the local datastore as well. If the ``owner`` option is not set (defaulting to
``root@pam``) or is set to something other than the configuring user,
``Datastore.Modify`` is required as well.
If the ``group-filter`` option is set, only backup groups matching at least one
of the specified criteria are synced. The available criteria are:
* backup type, for example to only sync groups of the `ct` (Container) type:
.. code-block:: console
# proxmox-backup-manager sync-job update ID --group-filter type:ct
* full group identifier
.. code-block:: console
# proxmox-backup-manager sync-job update ID --group-filter group:vm/100
* regular expression matched against the full group identifier
.. todo:: add example for regex
The same filter is applied to local groups for handling of the
``remove-vanished`` option.
.. note:: The ``protected`` flag of remote backup snapshots will not be synced.
Bandwidth Limit
^^^^^^^^^^^^^^^
Syncing datastores to an archive can produce lots of traffic and impact other
users of the network. So, to avoid network or storage congetsion you can limit
the bandwith of the sync job by setting the ``rate-in`` option either in the
web interface or using the ``proxmox-backup-manager`` command-line tool:
.. code-block:: console
# proxmox-backup-manager sync-job update ID --rate-in 20MiB
.. note:: A sync job can only sync backup groups that the configured remote's
user/API token can read. If a remote is configured with a user/API token that
only has ``Datastore.Backup`` privileges, only the limited set of accessible
snapshots owned by that user/API token can be synced.


@ -89,5 +89,3 @@ You can also configure DNS settings, from the **DNS** section
of **Configuration** or by using the ``dns`` subcommand of
``proxmox-backup-manager``.
.. include:: traffic-control.rst


@ -1,5 +1,5 @@
Most commands that produce output support the ``--output-format``
parameter. This accepts the following values:
Most commands producing output supports the ``--output-format``
parameter. It accepts the following values:
:``text``: Text format (default). Structured data is rendered as a table.


@ -1,101 +0,0 @@
.. _sysadmin_traffic_control:
Traffic Control
---------------
.. image:: images/screenshots/pbs-gui-traffic-control-add.png
:align: right
:alt: Add a traffic control limit
Creating and restoring backups can produce lots of traffic and impact other
users of the network or shared storages.
Proxmox Backup Server allows to limit network traffic for clients within
specified networks using a token bucket filter (TBF).
This allows you to avoid network congestion or to prioritize traffic from
certain hosts.
You can manage the traffic controls either over the web-interface or using the
``traffic-control`` commandos of the ``proxmox-backup-manager`` command-line
tool.
.. note:: Sync jobs on the server are not affected by its rate-in limits. If
you want to limit the incomming traffic that a pull-based sync job
generates, you need to setup a job-specific rate-in limit. See
:ref:`syncjobs`.
The following command adds a traffic control rule to limit all IPv4 clients
(network ``0.0.0.0/0``) to 100 MB/s:
.. code-block:: console
# proxmox-backup-manager traffic-control create rule0 --network 0.0.0.0/0 \
--rate-in 100MB --rate-out 100MB \
--comment "Default rate limit (100MB/s) for all clients"
.. note:: To limit both IPv4 and IPv6 network spaces you need to pass two
network parameters ``::/0`` and ``0.0.0.0/0``.
It is possible to restrict rules to certain time frames, for example the
company office hours:
.. tip:: You can use SI (base 10: KB, MB, ...) or IEC (base 2: KiB, MiB, ...)
units.
.. code-block:: console
# proxmox-backup-manager traffic-control update rule0 \
--timeframe "mon..fri 8-12" \
--timeframe "mon..fri 14:30-18"
If there are more rules, the server uses the rule with the smaller network. For
example, we can overwrite the setting for our private network (and the server
itself) with:
.. code-block:: console
# proxmox-backup-manager traffic-control create rule1 \
--network 192.168.2.0/24 \
--network 127.0.0.0/8 \
--rate-in 20GB --rate-out 20GB \
--comment "Use 20GB/s for the local network"
.. note:: The behavior is undefined if there are several rules for the same network.
If there are multiple rules that match the same network all of them will be
applied, which means that the smallest one wins, as it's bucket fills up the
fastest.
To list the current rules use:
.. code-block:: console
# proxmox-backup-manager traffic-control list
┌───────┬─────────────┬─────────────┬─────────────────────────┬────────────...─┐
│ name │ rate-in │ rate-out │ network │ timeframe ... │
╞═══════╪═════════════╪═════════════╪═════════════════════════╪════════════...═╡
│ rule0 │ 100 MB │ 100 MB │ ["0.0.0.0/0"] │ ["mon..fri ... │
├───────┼─────────────┼─────────────┼─────────────────────────┼────────────...─┤
│ rule1 │ 20 GB │ 20 GB │ ["192.168.2.0/24", ...] │ ... │
└───────┴─────────────┴─────────────┴─────────────────────────┴────────────...─┘
Rules can also be removed:
.. code-block:: console
# proxmox-backup-manager traffic-control remove rule1
To show the state (current data rate) of all configured rules use:
.. code-block:: console
# proxmox-backup-manager traffic-control traffic
┌───────┬─────────────┬──────────────┐
│ name │ cur-rate-in │ cur-rate-out │
╞═══════╪═════════════╪══════════════╡
│ rule0 │ 0 B │ 0 B │
├───────┼─────────────┼──────────────┤
│ rule1 │ 1.161 GiB │ 19.146 KiB │
└───────┴─────────────┴──────────────┘
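The token bucket filter (TBF) mentioned at the start of this removed section is easy to picture with a short, self-contained sketch; the struct and the numbers below are illustrative and not PBS's actual implementation:

    use std::time::Instant;

    // Tokens are bytes: `rate` bytes/second refill, `burst` bytes capacity.
    struct TokenBucket {
        rate: f64,
        burst: f64,
        tokens: f64,
        last: Instant,
    }

    impl TokenBucket {
        fn new(rate: f64, burst: f64) -> Self {
            Self { rate, burst, tokens: burst, last: Instant::now() }
        }

        /// Returns true if `bytes` may be sent now, consuming that many tokens.
        fn allow(&mut self, bytes: f64) -> bool {
            let now = Instant::now();
            let refill = self.rate * now.duration_since(self.last).as_secs_f64();
            self.tokens = (self.tokens + refill).min(self.burst);
            self.last = now;
            if self.tokens >= bytes {
                self.tokens -= bytes;
                true
            } else {
                false
            }
        }
    }

    fn main() {
        // Roughly the "100 MB/s for all clients" rule from the example above.
        let mut rule0 = TokenBucket::new(100e6, 100e6);
        println!("16 MiB allowed immediately: {}", rule0.allow(16.0 * 1024.0 * 1024.0));
    }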


@ -59,7 +59,7 @@ async fn run() -> Result<(), Error> {
}
fn main() {
if let Err(err) = proxmox_async::runtime::main(run()) {
if let Err(err) = pbs_runtime::main(run()) {
eprintln!("ERROR: {}", err);
}
println!("DONE");


@ -69,7 +69,7 @@ fn send_request(
}
fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run())
pbs_runtime::main(run())
}
async fn run() -> Result<(), Error> {


@ -69,7 +69,7 @@ fn send_request(
}
fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run())
pbs_runtime::main(run())
}
async fn run() -> Result<(), Error> {


@ -9,7 +9,7 @@ use tokio::net::{TcpListener, TcpStream};
use pbs_buildcfg::configdir;
fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run())
pbs_runtime::main(run())
}
async fn run() -> Result<(), Error> {


@ -5,7 +5,7 @@ use hyper::{Body, Request, Response};
use tokio::net::{TcpListener, TcpStream};
fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run())
pbs_runtime::main(run())
}
async fn run() -> Result<(), Error> {


@ -13,7 +13,7 @@ use pbs_client::ChunkStream;
// Note: I can currently get about 830MB/s
fn main() {
if let Err(err) = proxmox_async::runtime::main(run()) {
if let Err(err) = pbs_runtime::main(run()) {
panic!("ERROR: {}", err);
}
}


@ -27,7 +27,7 @@ async fn upload_speed() -> Result<f64, Error> {
}
fn main() {
match proxmox_async::runtime::main(upload_speed()) {
match pbs_runtime::main(upload_speed()) {
Ok(mbs) => {
println!("average upload speed: {} MB/s", mbs);
}


@ -7,7 +7,6 @@ description = "general API type helpers for PBS"
[dependencies]
anyhow = "1.0"
hex = "0.4.3"
lazy_static = "1.4"
libc = "0.2"
nix = "0.19.1"
@ -15,8 +14,12 @@ openssl = "0.10"
regex = "1.2"
serde = { version = "1.0", features = ["derive"] }
proxmox = "0.15.3"
proxmox = "0.14.0"
proxmox-lang = "1.0.0"
proxmox-schema = { version = "1.0.1", features = [ "api-macro" ] }
proxmox-time = "1.1"
proxmox-schema = { version = "1.0.0", features = [ "api-macro" ] }
proxmox-time = "1.0.0"
proxmox-uuid = { version = "1.0.0", features = [ "serde" ] }
proxmox-rrd-api-types = { path = "../proxmox-rrd-api-types" }
proxmox-systemd = { path = "../proxmox-systemd" }
pbs-tools = { path = "../pbs-tools" }


@ -5,6 +5,8 @@ use serde::{Deserialize, Serialize};
use proxmox_schema::api;
use pbs_tools::format::{as_fingerprint, bytes_as_fingerprint};
#[api(default: "encrypt")]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
@ -33,9 +35,6 @@ impl Fingerprint {
pub fn bytes(&self) -> &[u8; 32] {
&self.bytes
}
pub fn signature(&self) -> String {
as_fingerprint(&self.bytes)
}
}
/// Display as short key ID
@ -56,43 +55,3 @@ impl std::str::FromStr for Fingerprint {
}
}
fn as_fingerprint(bytes: &[u8]) -> String {
hex::encode(bytes)
.as_bytes()
.chunks(2)
.map(|v| unsafe { std::str::from_utf8_unchecked(v) }) // it's a hex string
.collect::<Vec<&str>>().join(":")
}
pub mod bytes_as_fingerprint {
use std::mem::MaybeUninit;
use serde::{Deserialize, Serializer, Deserializer};
pub fn serialize<S>(
bytes: &[u8; 32],
serializer: S,
) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let s = super::as_fingerprint(bytes);
serializer.serialize_str(&s)
}
pub fn deserialize<'de, D>(
deserializer: D,
) -> Result<[u8; 32], D::Error>
where
D: Deserializer<'de>,
{
// TODO: more efficiently implement with a Visitor implementing visit_str using split() and
// hex::decode by-byte
let mut s = String::deserialize(deserializer)?;
s.retain(|c| c != ':');
let mut out = MaybeUninit::<[u8; 32]>::uninit();
hex::decode_to_slice(s.as_bytes(), unsafe { &mut (*out.as_mut_ptr())[..] })
.map_err(serde::de::Error::custom)?;
Ok(unsafe { out.assume_init() })
}
}


@ -40,7 +40,6 @@ pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema = StringSchema::new("Backup archive
.schema();
pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX);
pub const BACKUP_GROUP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GROUP_PATH_REGEX);
pub const BACKUP_ID_SCHEMA: Schema = StringSchema::new("Backup ID.")
.format(&BACKUP_ID_FORMAT)
@ -58,10 +57,6 @@ pub const BACKUP_TIME_SCHEMA: Schema = IntegerSchema::new("Backup time (Unix epo
.minimum(1_547_797_308)
.schema();
pub const BACKUP_GROUP_SCHEMA: Schema = StringSchema::new("Backup Group")
.format(&BACKUP_GROUP_FORMAT)
.schema();
pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
@ -395,9 +390,6 @@ pub struct SnapshotListItem {
/// The owner of the snapshots group
#[serde(skip_serializing_if = "Option::is_none")]
pub owner: Option<Authid>,
/// Protection from prunes
#[serde(default)]
pub protected: bool,
}
#[api(


@ -1,341 +0,0 @@
use anyhow::{bail, Error};
use proxmox_schema::{ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType};
/// Size units for byte sizes
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum SizeUnit {
Byte,
// SI (base 10)
KByte,
MByte,
GByte,
TByte,
PByte,
// IEC (base 2)
Kibi,
Mebi,
Gibi,
Tebi,
Pebi,
}
impl SizeUnit {
/// Returns the scaling factor
pub fn factor(&self) -> f64 {
match self {
SizeUnit::Byte => 1.0,
// SI (base 10)
SizeUnit::KByte => 1_000.0,
SizeUnit::MByte => 1_000_000.0,
SizeUnit::GByte => 1_000_000_000.0,
SizeUnit::TByte => 1_000_000_000_000.0,
SizeUnit::PByte => 1_000_000_000_000_000.0,
// IEC (base 2)
SizeUnit::Kibi => 1024.0,
SizeUnit::Mebi => 1024.0 * 1024.0,
SizeUnit::Gibi => 1024.0 * 1024.0 * 1024.0,
SizeUnit::Tebi => 1024.0 * 1024.0 * 1024.0 * 1024.0,
SizeUnit::Pebi => 1024.0 * 1024.0 * 1024.0 * 1024.0 * 1024.0,
}
}
/// gets the biggest possible unit still having a value greater zero before the decimal point
/// 'binary' specifies if IEC (base 2) units should be used or SI (base 10) ones
pub fn auto_scale(size: f64, binary: bool) -> SizeUnit {
if binary {
let bits = 64 - (size as u64).leading_zeros();
match bits {
51.. => SizeUnit::Pebi,
41..=50 => SizeUnit::Tebi,
31..=40 => SizeUnit::Gibi,
21..=30 => SizeUnit::Mebi,
11..=20 => SizeUnit::Kibi,
_ => SizeUnit::Byte,
}
} else {
if size >= 1_000_000_000_000_000.0 {
SizeUnit::PByte
} else if size >= 1_000_000_000_000.0 {
SizeUnit::TByte
} else if size >= 1_000_000_000.0 {
SizeUnit::GByte
} else if size >= 1_000_000.0 {
SizeUnit::MByte
} else if size >= 1_000.0 {
SizeUnit::KByte
} else {
SizeUnit::Byte
}
}
}
}
/// Returns the string repesentation
impl std::fmt::Display for SizeUnit {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
SizeUnit::Byte => write!(f, "B"),
// SI (base 10)
SizeUnit::KByte => write!(f, "KB"),
SizeUnit::MByte => write!(f, "MB"),
SizeUnit::GByte => write!(f, "GB"),
SizeUnit::TByte => write!(f, "TB"),
SizeUnit::PByte => write!(f, "PB"),
// IEC (base 2)
SizeUnit::Kibi => write!(f, "KiB"),
SizeUnit::Mebi => write!(f, "MiB"),
SizeUnit::Gibi => write!(f, "GiB"),
SizeUnit::Tebi => write!(f, "TiB"),
SizeUnit::Pebi => write!(f, "PiB"),
}
}
}
/// Strips a trailing SizeUnit inclusive trailing whitespace
/// Supports both IEC and SI based scales, the B/b byte symbol is optional.
fn strip_unit(v: &str) -> (&str, SizeUnit) {
let v = v.strip_suffix(&['b', 'B'][..]).unwrap_or(v); // byte is implied anyway
let (v, binary) = match v.strip_suffix('i') {
Some(n) => (n, true),
None => (v, false),
};
let mut unit = SizeUnit::Byte;
(v.strip_suffix(|c: char| match c {
'k' | 'K' if !binary => { unit = SizeUnit::KByte; true }
'm' | 'M' if !binary => { unit = SizeUnit::MByte; true }
'g' | 'G' if !binary => { unit = SizeUnit::GByte; true }
't' | 'T' if !binary => { unit = SizeUnit::TByte; true }
'p' | 'P' if !binary => { unit = SizeUnit::PByte; true }
// binary (IEC recommended) variants
'k' | 'K' if binary => { unit = SizeUnit::Kibi; true }
'm' | 'M' if binary => { unit = SizeUnit::Mebi; true }
'g' | 'G' if binary => { unit = SizeUnit::Gibi; true }
't' | 'T' if binary => { unit = SizeUnit::Tebi; true }
'p' | 'P' if binary => { unit = SizeUnit::Pebi; true }
_ => false
}).unwrap_or(v).trim_end(), unit)
}
/// Byte size which can be displayed in a human friendly way
#[derive(Debug, Copy, Clone, UpdaterType)]
pub struct HumanByte {
/// The siginficant value, it does not includes any factor of the `unit`
size: f64,
/// The scale/unit of the value
unit: SizeUnit,
}
fn verify_human_byte(s: &str) -> Result<(), Error> {
match s.parse::<HumanByte>() {
Ok(_) => Ok(()),
Err(err) => bail!("byte-size parse error for '{}': {}", s, err),
}
}
impl ApiType for HumanByte {
const API_SCHEMA: Schema = StringSchema::new(
"Byte size with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).",
)
.format(&ApiStringFormat::VerifyFn(verify_human_byte))
.min_length(1)
.max_length(64)
.schema();
}
impl HumanByte {
/// Create instance with size and unit (size must be positive)
pub fn with_unit(size: f64, unit: SizeUnit) -> Result<Self, Error> {
if size < 0.0 {
bail!("byte size may not be negative");
}
Ok(HumanByte { size, unit })
}
/// Create a new instance with optimal binary unit computed
pub fn new_binary(size: f64) -> Self {
let unit = SizeUnit::auto_scale(size, true);
HumanByte { size: size / unit.factor(), unit }
}
/// Create a new instance with optimal decimal unit computed
pub fn new_decimal(size: f64) -> Self {
let unit = SizeUnit::auto_scale(size, false);
HumanByte { size: size / unit.factor(), unit }
}
/// Returns the size as u64 number of bytes
pub fn as_u64(&self) -> u64 {
self.as_f64() as u64
}
/// Returns the size as f64 number of bytes
pub fn as_f64(&self) -> f64 {
self.size * self.unit.factor()
}
/// Returns a copy with optimal binary unit computed
pub fn auto_scale_binary(self) -> Self {
HumanByte::new_binary(self.as_f64())
}
/// Returns a copy with optimal decimal unit computed
pub fn auto_scale_decimal(self) -> Self {
HumanByte::new_decimal(self.as_f64())
}
}
impl From<u64> for HumanByte {
fn from(v: u64) -> Self {
HumanByte::new_binary(v as f64)
}
}
impl From<usize> for HumanByte {
fn from(v: usize) -> Self {
HumanByte::new_binary(v as f64)
}
}
impl std::fmt::Display for HumanByte {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let precision = f.precision().unwrap_or(3) as f64;
let precision_factor = 1.0 * 10.0_f64.powf(precision);
// this could cause loss of information, rust has sadly no shortest-max-X flt2dec fmt yet
let size = ((self.size * precision_factor).round()) / precision_factor;
write!(f, "{} {}", size, self.unit)
}
}
impl std::str::FromStr for HumanByte {
type Err = Error;
fn from_str(v: &str) -> Result<Self, Error> {
let (v, unit) = strip_unit(v);
HumanByte::with_unit(v.parse()?, unit)
}
}
proxmox::forward_deserialize_to_from_str!(HumanByte);
proxmox::forward_serialize_to_display!(HumanByte);
#[test]
fn test_human_byte_parser() -> Result<(), Error> {
assert!("-10".parse::<HumanByte>().is_err()); // negative size
fn do_test(v: &str, size: f64, unit: SizeUnit, as_str: &str) -> Result<(), Error> {
let h: HumanByte = v.parse()?;
if h.size != size {
bail!("got unexpected size for '{}' ({} != {})", v, h.size, size);
}
if h.unit != unit {
bail!("got unexpected unit for '{}' ({:?} != {:?})", v, h.unit, unit);
}
let new = h.to_string();
if &new != as_str {
bail!("to_string failed for '{}' ({:?} != {:?})", v, new, as_str);
}
Ok(())
}
fn test(v: &str, size: f64, unit: SizeUnit, as_str: &str) -> bool {
match do_test(v, size, unit, as_str) {
Ok(_) => true,
Err(err) => {
eprintln!("{}", err); // makes debugging easier
false
}
}
}
assert!(test("14", 14.0, SizeUnit::Byte, "14 B"));
assert!(test("14.4", 14.4, SizeUnit::Byte, "14.4 B"));
assert!(test("14.45", 14.45, SizeUnit::Byte, "14.45 B"));
assert!(test("14.456", 14.456, SizeUnit::Byte, "14.456 B"));
assert!(test("14.4567", 14.4567, SizeUnit::Byte, "14.457 B"));
let h: HumanByte = "1.2345678".parse()?;
assert_eq!(&format!("{:.0}", h), "1 B");
assert_eq!(&format!("{:.0}", h.as_f64()), "1"); // use as_f64 to get raw bytes without unit
assert_eq!(&format!("{:.1}", h), "1.2 B");
assert_eq!(&format!("{:.2}", h), "1.23 B");
assert_eq!(&format!("{:.3}", h), "1.235 B");
assert_eq!(&format!("{:.4}", h), "1.2346 B");
assert_eq!(&format!("{:.5}", h), "1.23457 B");
assert_eq!(&format!("{:.6}", h), "1.234568 B");
assert_eq!(&format!("{:.7}", h), "1.2345678 B");
assert_eq!(&format!("{:.8}", h), "1.2345678 B");
assert!(test("987654321", 987654321.0, SizeUnit::Byte, "987654321 B"));
assert!(test("1300b", 1300.0, SizeUnit::Byte, "1300 B"));
assert!(test("1300B", 1300.0, SizeUnit::Byte, "1300 B"));
assert!(test("1300 B", 1300.0, SizeUnit::Byte, "1300 B"));
assert!(test("1300 b", 1300.0, SizeUnit::Byte, "1300 B"));
assert!(test("1.5KB", 1.5, SizeUnit::KByte, "1.5 KB"));
assert!(test("1.5kb", 1.5, SizeUnit::KByte, "1.5 KB"));
assert!(test("1.654321MB", 1.654_321, SizeUnit::MByte, "1.654 MB"));
assert!(test("2.0GB", 2.0, SizeUnit::GByte, "2 GB"));
assert!(test("1.4TB", 1.4, SizeUnit::TByte, "1.4 TB"));
assert!(test("1.4tb", 1.4, SizeUnit::TByte, "1.4 TB"));
assert!(test("2KiB", 2.0, SizeUnit::Kibi, "2 KiB"));
assert!(test("2Ki", 2.0, SizeUnit::Kibi, "2 KiB"));
assert!(test("2kib", 2.0, SizeUnit::Kibi, "2 KiB"));
assert!(test("2.3454MiB", 2.3454, SizeUnit::Mebi, "2.345 MiB"));
assert!(test("2.3456MiB", 2.3456, SizeUnit::Mebi, "2.346 MiB"));
assert!(test("4gib", 4.0, SizeUnit::Gibi, "4 GiB"));
Ok(())
}
#[test]
fn test_human_byte_auto_unit_decimal() {
fn convert(b: u64) -> String {
HumanByte::new_decimal(b as f64).to_string()
}
assert_eq!(convert(987), "987 B");
assert_eq!(convert(1022), "1.022 KB");
assert_eq!(convert(9_000), "9 KB");
assert_eq!(convert(1_000), "1 KB");
assert_eq!(convert(1_000_000), "1 MB");
assert_eq!(convert(1_000_000_000), "1 GB");
assert_eq!(convert(1_000_000_000_000), "1 TB");
assert_eq!(convert(1_000_000_000_000_000), "1 PB");
assert_eq!(convert((1 << 30) + 103 * (1 << 20)), "1.182 GB");
assert_eq!(convert((1 << 30) + 128 * (1 << 20)), "1.208 GB");
assert_eq!(convert((2 << 50) + 500 * (1 << 40)), "2.802 PB");
}
#[test]
fn test_human_byte_auto_unit_binary() {
fn convert(b: u64) -> String {
HumanByte::from(b).to_string()
}
assert_eq!(convert(0), "0 B");
assert_eq!(convert(987), "987 B");
assert_eq!(convert(1022), "1022 B");
assert_eq!(convert(9_000), "8.789 KiB");
assert_eq!(convert(10_000_000), "9.537 MiB");
assert_eq!(convert(10_000_000_000), "9.313 GiB");
assert_eq!(convert(10_000_000_000_000), "9.095 TiB");
assert_eq!(convert(1 << 10), "1 KiB");
assert_eq!(convert((1 << 10) * 10), "10 KiB");
assert_eq!(convert(1 << 20), "1 MiB");
assert_eq!(convert(1 << 30), "1 GiB");
assert_eq!(convert(1 << 40), "1 TiB");
assert_eq!(convert(1 << 50), "1 PiB");
assert_eq!(convert((1 << 30) + 103 * (1 << 20)), "1.101 GiB");
assert_eq!(convert((1 << 30) + 128 * (1 << 20)), "1.125 GiB");
assert_eq!(convert((1 << 40) + 128 * (1 << 30)), "1.125 TiB");
assert_eq!(convert((2 << 50) + 512 * (1 << 40)), "2.5 PiB");
}
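For context, a rough usage sketch of the HumanByte type shown above; the pbs_api_types import path is inferred from the lib.rs hunk later in this diff and is an assumption of the sketch:

    use pbs_api_types::HumanByte; // assumed re-export location

    fn main() -> Result<(), anyhow::Error> {
        let limit: HumanByte = "1.5 GiB".parse()?; // IEC unit, base 2
        assert_eq!(limit.as_u64(), 1_610_612_736);
        println!("limit: {} ({} bytes)", limit, limit.as_u64());
        Ok(())
    }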


@ -1,16 +1,10 @@
use anyhow::format_err;
use std::str::FromStr;
use regex::Regex;
use serde::{Deserialize, Serialize};
use proxmox_schema::*;
use crate::{
Userid, Authid, RateLimitConfig,
REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
Userid, Authid, REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
SINGLE_LINE_COMMENT_SCHEMA, PROXMOX_SAFE_ID_FORMAT, DATASTORE_SCHEMA,
BACKUP_GROUP_SCHEMA, BACKUP_TYPE_SCHEMA,
};
const_regex!{
@ -29,31 +23,31 @@ pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
"Run sync job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
.type_text("<calendar-event>")
.schema();
pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
"Run garbage collection job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
.type_text("<calendar-event>")
.schema();
pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
"Run prune job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
.type_text("<calendar-event>")
.schema();
pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new(
"Run verify job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
.type_text("<calendar-event>")
.schema();
pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
"Delete vanished backups. This remove the local copy if the remote backup was deleted.")
.default(false)
.default(true)
.schema();
#[api(
@ -248,10 +242,6 @@ pub struct VerificationJobStatus {
optional: true,
type: Userid,
},
"group-filter": {
schema: GROUP_FILTER_LIST_SCHEMA,
optional: true,
},
}
)]
#[derive(Serialize,Deserialize,Clone,Updater)]
@ -270,8 +260,6 @@ pub struct TapeBackupJobSetup {
/// Send job email notification to this user
#[serde(skip_serializing_if="Option::is_none")]
pub notify_user: Option<Userid>,
#[serde(skip_serializing_if="Option::is_none")]
pub group_filter: Option<Vec<GroupFilter>>,
}
#[api(
@ -329,57 +317,6 @@ pub struct TapeBackupJobStatus {
pub next_media_label: Option<String>,
}
#[derive(Clone, Debug)]
/// Filter for matching `BackupGroup`s, for use with `BackupGroup::filter`.
pub enum GroupFilter {
/// BackupGroup type - either `vm`, `ct`, or `host`.
BackupType(String),
/// Full identifier of BackupGroup, including type
Group(String),
/// A regular expression matched against the full identifier of the BackupGroup
Regex(Regex),
}
impl std::str::FromStr for GroupFilter {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.split_once(":") {
Some(("group", value)) => parse_simple_value(value, &BACKUP_GROUP_SCHEMA).map(|_| GroupFilter::Group(value.to_string())),
Some(("type", value)) => parse_simple_value(value, &BACKUP_TYPE_SCHEMA).map(|_| GroupFilter::BackupType(value.to_string())),
Some(("regex", value)) => Ok(GroupFilter::Regex(Regex::new(value)?)),
Some((ty, _value)) => Err(format_err!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty)),
None => Err(format_err!("input doesn't match expected format '<group:GROUP||type:<vm|ct|host>|regex:REGEX>'")),
}.map_err(|err| format_err!("'{}' - {}", s, err))
}
}
// used for serializing below, caution!
impl std::fmt::Display for GroupFilter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
GroupFilter::BackupType(backup_type) => write!(f, "type:{}", backup_type),
GroupFilter::Group(backup_group) => write!(f, "group:{}", backup_group),
GroupFilter::Regex(regex) => write!(f, "regex:{}", regex.as_str()),
}
}
}
proxmox::forward_deserialize_to_from_str!(GroupFilter);
proxmox::forward_serialize_to_display!(GroupFilter);
fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> {
GroupFilter::from_str(input).map(|_| ())
}
pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new(
"Group filter based on group identifier ('group:GROUP'), group type ('type:<vm|ct|host>'), or regex ('regex:RE').")
.format(&ApiStringFormat::VerifyFn(verify_group_filter))
.type_text("<type:<vm|ct|host>|group:GROUP|regex:RE>")
.schema();
pub const GROUP_FILTER_LIST_SCHEMA: Schema = ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema();
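A rough usage sketch for the GroupFilter parser defined above; as with the previous sketch, the pbs_api_types import path is an assumption:

    use std::str::FromStr;
    use pbs_api_types::GroupFilter; // assumed re-export location

    fn main() -> Result<(), anyhow::Error> {
        for spec in ["type:ct", "group:vm/100", "regex:^vm/1\\d\\d$"] {
            let filter = GroupFilter::from_str(spec)?;
            // Display round-trips the `<prefix>:<value>` form used for serialization.
            println!("parsed '{}' as '{}'", spec, filter);
        }
        Ok(())
    }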
#[api(
properties: {
id: {
@ -406,17 +343,10 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema = ArraySchema::new("List of group fil
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
limit: {
type: RateLimitConfig,
},
schedule: {
optional: true,
schema: SYNC_SCHEDULE_SCHEMA,
},
"group-filter": {
schema: GROUP_FILTER_LIST_SCHEMA,
optional: true,
},
}
)]
#[derive(Serialize,Deserialize,Clone,Updater)]
@ -436,10 +366,6 @@ pub struct SyncJobConfig {
pub comment: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub schedule: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub group_filter: Option<Vec<GroupFilter>>,
#[serde(flatten)]
pub limit: RateLimitConfig,
}
#[api(


@ -7,7 +7,6 @@ use proxmox_schema::{
api, const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, ReturnType,
};
use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE};
use proxmox_time::parse_daily_duration;
#[rustfmt::skip]
#[macro_export]
@ -39,9 +38,6 @@ pub use acl::*;
mod datastore;
pub use datastore::*;
mod human_byte;
pub use human_byte::HumanByte;
mod jobs;
pub use jobs::*;
@ -67,22 +63,16 @@ pub use user::*;
pub use proxmox_schema::upid::*;
mod crypto;
pub use crypto::{CryptMode, Fingerprint, bytes_as_fingerprint};
pub use crypto::{CryptMode, Fingerprint};
pub mod file_restore;
mod openid;
pub use openid::*;
mod remote;
pub use remote::*;
mod tape;
pub use tape::*;
mod traffic_control;
pub use traffic_control::*;
mod zfs;
pub use zfs::*;
@ -162,9 +152,6 @@ pub const HOSTNAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOSTNAME_
pub const DNS_ALIAS_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&DNS_ALIAS_REGEX);
pub const DAILY_DURATION_FORMAT: ApiStringFormat =
ApiStringFormat::VerifyFn(|s| parse_daily_duration(s).map(drop));
pub const SEARCH_DOMAIN_SCHEMA: Schema =
StringSchema::new("Search domain for host-name lookup.").schema();
@ -437,32 +424,4 @@ pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType {
).schema(),
};
#[api()]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "UPPERCASE")]
/// RRD consolidation mode
pub enum RRDMode {
/// Maximum
Max,
/// Average
Average,
}
#[api()]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// RRD time frame
pub enum RRDTimeFrame {
/// Hour
Hour,
/// Day
Day,
/// Week
Week,
/// Month
Month,
/// Year
Year,
/// Decade (10 years)
Decade,
}
pub use proxmox_rrd_api_types::{RRDMode, RRDTimeFrameResolution};

View File

@ -1,121 +0,0 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::{
api, ApiStringFormat, ArraySchema, Schema, StringSchema, Updater,
};
use super::{
PROXMOX_SAFE_ID_REGEX, PROXMOX_SAFE_ID_FORMAT, REALM_ID_SCHEMA,
SINGLE_LINE_COMMENT_SCHEMA,
};
pub const OPENID_SCOPE_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
pub const OPENID_SCOPE_SCHEMA: Schema = StringSchema::new("OpenID Scope Name.")
.format(&OPENID_SCOPE_FORMAT)
.schema();
pub const OPENID_SCOPE_ARRAY_SCHEMA: Schema = ArraySchema::new(
"Array of OpenId Scopes.", &OPENID_SCOPE_SCHEMA).schema();
pub const OPENID_SCOPE_LIST_FORMAT: ApiStringFormat =
ApiStringFormat::PropertyString(&OPENID_SCOPE_ARRAY_SCHEMA);
pub const OPENID_DEFAILT_SCOPE_LIST: &'static str = "email profile";
pub const OPENID_SCOPE_LIST_SCHEMA: Schema = StringSchema::new("OpenID Scope List")
.format(&OPENID_SCOPE_LIST_FORMAT)
.default(OPENID_DEFAILT_SCOPE_LIST)
.schema();
pub const OPENID_ACR_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
pub const OPENID_ACR_SCHEMA: Schema = StringSchema::new("OpenID Authentication Context Class Reference.")
.format(&OPENID_SCOPE_FORMAT)
.schema();
pub const OPENID_ACR_ARRAY_SCHEMA: Schema = ArraySchema::new(
"Array of OpenId ACRs.", &OPENID_ACR_SCHEMA).schema();
pub const OPENID_ACR_LIST_FORMAT: ApiStringFormat =
ApiStringFormat::PropertyString(&OPENID_ACR_ARRAY_SCHEMA);
pub const OPENID_ACR_LIST_SCHEMA: Schema = StringSchema::new("OpenID ACR List")
.format(&OPENID_ACR_LIST_FORMAT)
.schema();
pub const OPENID_USERNAME_CLAIM_SCHEMA: Schema = StringSchema::new(
"Use the value of this attribute/claim as unique user name. It \
is up to the identity provider to guarantee the uniqueness. The \
OpenID specification only guarantees that Subject ('sub') is \
unique. Also make sure that the user is not allowed to change that \
attribute by himself!")
.max_length(64)
.min_length(1)
.format(&PROXMOX_SAFE_ID_FORMAT) .schema();
#[api(
properties: {
realm: {
schema: REALM_ID_SCHEMA,
},
"client-key": {
optional: true,
},
"scopes": {
schema: OPENID_SCOPE_LIST_SCHEMA,
optional: true,
},
"acr-values": {
schema: OPENID_ACR_LIST_SCHEMA,
optional: true,
},
prompt: {
description: "OpenID Prompt",
type: String,
format: &PROXMOX_SAFE_ID_FORMAT,
optional: true,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
autocreate: {
optional: true,
default: false,
},
"username-claim": {
schema: OPENID_USERNAME_CLAIM_SCHEMA,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all="kebab-case")]
/// OpenID configuration properties.
pub struct OpenIdRealmConfig {
#[updater(skip)]
pub realm: String,
/// OpenID Issuer Url
pub issuer_url: String,
/// OpenID Client ID
pub client_id: String,
#[serde(skip_serializing_if="Option::is_none")]
pub scopes: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub acr_values: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub prompt: Option<String>,
/// OpenID Client Key
#[serde(skip_serializing_if="Option::is_none")]
pub client_key: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
/// Automatically create users if they do not exist.
#[serde(skip_serializing_if="Option::is_none")]
pub autocreate: Option<bool>,
#[updater(skip)]
#[serde(skip_serializing_if="Option::is_none")]
pub username_claim: Option<String>,
}

View File

@ -11,7 +11,7 @@ use serde::{Deserialize, Serialize};
use proxmox_schema::{api, Schema, StringSchema, ApiStringFormat, Updater};
use proxmox_time::{parse_calendar_event, parse_time_span, CalendarEvent, TimeSpan};
use proxmox_systemd::time::{parse_calendar_event, parse_time_span, CalendarEvent, TimeSpan};
use crate::{
PROXMOX_SAFE_ID_FORMAT,

View File

@ -1,122 +0,0 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::{api, Schema, IntegerSchema, StringSchema, Updater};
use crate::{
HumanByte, CIDR_SCHEMA, DAILY_DURATION_FORMAT,
PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
};
pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema = StringSchema::new(
"Timeframe to specify when the rule is actice.")
.format(&DAILY_DURATION_FORMAT)
.schema();
pub const TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
pub const TRAFFIC_CONTROL_RATE_SCHEMA: Schema = IntegerSchema::new(
"Rate limit (for Token bucket filter) in bytes/second.")
.minimum(100_000)
.schema();
pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema = IntegerSchema::new(
"Size of the token bucket (for Token bucket filter) in bytes.")
.minimum(1000)
.schema();
#[api(
properties: {
"rate-in": {
type: HumanByte,
optional: true,
},
"burst-in": {
type: HumanByte,
optional: true,
},
"rate-out": {
type: HumanByte,
optional: true,
},
"burst-out": {
type: HumanByte,
optional: true,
},
},
)]
#[derive(Serialize,Deserialize,Default,Clone,Updater)]
#[serde(rename_all = "kebab-case")]
/// Rate Limit Configuration
pub struct RateLimitConfig {
#[serde(skip_serializing_if="Option::is_none")]
pub rate_in: Option<HumanByte>,
#[serde(skip_serializing_if="Option::is_none")]
pub burst_in: Option<HumanByte>,
#[serde(skip_serializing_if="Option::is_none")]
pub rate_out: Option<HumanByte>,
#[serde(skip_serializing_if="Option::is_none")]
pub burst_out: Option<HumanByte>,
}
impl RateLimitConfig {
pub fn with_same_inout(rate: Option<HumanByte>, burst: Option<HumanByte>) -> Self {
Self {
rate_in: rate,
burst_in: burst,
rate_out: rate,
burst_out: burst,
}
}
}
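
As a small illustration (hypothetical helper, not in the diff), with_same_inout covers the common case of one shared limit for both directions; the HttpClient hunk further below shows how a missing burst value falls back to the rate on the consumer side:

// Limit ingress and egress to the same rate; leaving burst unset lets the
// consumer derive the bucket size from the rate (see the unwrap_or_else calls
// in the HttpClient hunk).
fn symmetric_limit(rate: Option<HumanByte>) -> RateLimitConfig {
    RateLimitConfig::with_same_inout(rate, None)
}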
#[api(
properties: {
name: {
schema: TRAFFIC_CONTROL_ID_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
limit: {
type: RateLimitConfig,
},
network: {
type: Array,
items: {
schema: CIDR_SCHEMA,
},
},
timeframe: {
type: Array,
items: {
schema: TRAFFIC_CONTROL_TIMEFRAME_SCHEMA,
},
optional: true,
},
},
)]
#[derive(Serialize,Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// Traffic control rule
pub struct TrafficControlRule {
#[updater(skip)]
pub name: String,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
/// Rule applies to Source IPs within these networks
pub network: Vec<String>,
#[serde(flatten)]
pub limit: RateLimitConfig,
// fixme: expose this?
// /// Bandwidth is shared across all connections
// #[serde(skip_serializing_if="Option::is_none")]
// pub shared: Option<bool>,
/// Enable the rule at specific times
#[serde(skip_serializing_if="Option::is_none")]
pub timeframe: Option<Vec<String>>,
}

View File

@ -1,6 +1,6 @@
[package]
name = "pbs-buildcfg"
version = "2.1.2"
version = "2.0.12"
authors = ["Proxmox Support Team <support@proxmox.com>"]
edition = "2018"
description = "macros used for pbs related paths such as configdir and rundir"

View File

@ -22,9 +22,6 @@ pub const BACKUP_GROUP_NAME: &str = "backup";
#[macro_export]
macro_rules! PROXMOX_BACKUP_RUN_DIR_M { () => ("/run/proxmox-backup") }
#[macro_export]
macro_rules! PROXMOX_BACKUP_STATE_DIR_M { () => ("/var/lib/proxmox-backup") }
#[macro_export]
macro_rules! PROXMOX_BACKUP_LOG_DIR_M { () => ("/var/log/proxmox-backup") }
@ -39,9 +36,6 @@ macro_rules! PROXMOX_BACKUP_FILE_RESTORE_BIN_DIR_M {
/// namespaced directory for in-memory (tmpfs) run state
pub const PROXMOX_BACKUP_RUN_DIR: &str = PROXMOX_BACKUP_RUN_DIR_M!();
/// namespaced directory for persistent state
pub const PROXMOX_BACKUP_STATE_DIR: &str = PROXMOX_BACKUP_STATE_DIR_M!();
/// namespaced directory for persistent logging
pub const PROXMOX_BACKUP_LOG_DIR: &str = PROXMOX_BACKUP_LOG_DIR_M!();

View File

@ -28,13 +28,12 @@ tower-service = "0.3.0"
xdg = "2.2"
pathpatterns = "0.1.2"
proxmox = "0.15.3"
proxmox-async = "0.2"
proxmox = "0.14.0"
proxmox-fuse = "0.1.1"
proxmox-http = { version = "0.5.4", features = [ "client", "http-helpers", "websocket" ] }
proxmox-http = { version = "0.5.0", features = [ "client", "http-helpers", "websocket" ] }
proxmox-io = { version = "1", features = [ "tokio" ] }
proxmox-lang = "1"
proxmox-router = { version = "1.1", features = [ "cli" ] }
proxmox-router = { version = "1", features = [ "cli" ] }
proxmox-schema = "1"
proxmox-time = "1"
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
@ -42,4 +41,5 @@ pxar = { version = "0.10.1", features = [ "tokio-io" ] }
pbs-api-types = { path = "../pbs-api-types" }
pbs-buildcfg = { path = "../pbs-buildcfg" }
pbs-datastore = { path = "../pbs-datastore" }
pbs-runtime = { path = "../pbs-runtime" }
pbs-tools = { path = "../pbs-tools" }

View File

@ -14,8 +14,8 @@ use tokio_stream::wrappers::ReceiverStream;
use proxmox::tools::digest_to_hex;
use pbs_api_types::HumanByte;
use pbs_tools::crypt_config::CryptConfig;
use pbs_tools::format::HumanByte;
use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1};
use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
use pbs_datastore::dynamic_index::DynamicIndexReader;

View File

@ -19,7 +19,7 @@ use proxmox_router::cli::{self, CliCommand, CliCommandMap, CliHelper, CommandLin
use proxmox_schema::api;
use pxar::{EntryKind, Metadata};
use proxmox_async::runtime::block_in_place;
use pbs_runtime::block_in_place;
use pbs_datastore::catalog::{self, DirEntryAttribute};
use crate::pxar::Flags;
@ -79,13 +79,13 @@ pub fn catalog_shell_cli() -> CommandLineInterface {
"restore-selected",
CliCommand::new(&API_METHOD_RESTORE_SELECTED_COMMAND)
.arg_param(&["target"])
.completion_cb("target", cli::complete_file_name),
.completion_cb("target", pbs_tools::fs::complete_file_name),
)
.insert(
"restore",
CliCommand::new(&API_METHOD_RESTORE_COMMAND)
.arg_param(&["target"])
.completion_cb("target", cli::complete_file_name),
.completion_cb("target", pbs_tools::fs::complete_file_name),
)
.insert(
"find",

View File

@ -20,11 +20,11 @@ use proxmox::{
};
use proxmox_router::HttpError;
use proxmox_http::client::{HttpsConnector, RateLimiter};
use proxmox_http::client::HttpsConnector;
use proxmox_http::uri::build_authority;
use proxmox_async::broadcast_future::BroadcastFuture;
use pbs_api_types::{Authid, Userid, RateLimitConfig};
use pbs_api_types::{Authid, Userid};
use pbs_tools::broadcast_future::BroadcastFuture;
use pbs_tools::json::json_object_to_query;
use pbs_tools::ticket;
use pbs_tools::percent_encoding::DEFAULT_ENCODE_SET;
@ -51,7 +51,6 @@ pub struct HttpClientOptions {
ticket_cache: bool,
fingerprint_cache: bool,
verify_cert: bool,
limit: RateLimitConfig,
}
impl HttpClientOptions {
@ -110,11 +109,6 @@ impl HttpClientOptions {
self.verify_cert = verify_cert;
self
}
pub fn rate_limit(mut self, rate_limit: RateLimitConfig) -> Self {
self.limit = rate_limit;
self
}
}
impl Default for HttpClientOptions {
@ -127,7 +121,6 @@ impl Default for HttpClientOptions {
ticket_cache: false,
fingerprint_cache: false,
verify_cert: true,
limit: RateLimitConfig::default(), // unlimited
}
}
}
@ -160,7 +153,7 @@ pub fn delete_ticket_info(prefix: &str, server: &str, username: &Userid) -> Resu
map.remove(username.as_str());
}
replace_file(path, data.to_string().as_bytes(), CreateOptions::new().perm(mode), false)?;
replace_file(path, data.to_string().as_bytes(), CreateOptions::new().perm(mode))?;
Ok(())
}
@ -202,7 +195,7 @@ fn store_fingerprint(prefix: &str, server: &str, fingerprint: &str) -> Result<()
result.push_str(fingerprint);
result.push('\n');
replace_file(path, result.as_bytes(), CreateOptions::new(), false)?;
replace_file(path, result.as_bytes(), CreateOptions::new())?;
Ok(())
}
@ -257,7 +250,7 @@ fn store_ticket_info(prefix: &str, server: &str, username: &str, ticket: &str, t
}
}
replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new().perm(mode), false)?;
replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new().perm(mode))?;
Ok(())
}
@ -350,21 +343,7 @@ impl HttpClient {
httpc.enforce_http(false); // we want https...
httpc.set_connect_timeout(Some(std::time::Duration::new(10, 0)));
let mut https = HttpsConnector::with_connector(httpc, ssl_connector_builder.build(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
if let Some(rate_in) = options.limit.rate_in {
let burst_in = options.limit.burst_in.unwrap_or_else(|| rate_in).as_u64();
https.set_read_limiter(Some(Arc::new(Mutex::new(
RateLimiter::new(rate_in.as_u64(), burst_in)
))));
}
if let Some(rate_out) = options.limit.rate_out {
let burst_out = options.limit.burst_out.unwrap_or_else(|| rate_out).as_u64();
https.set_write_limiter(Some(Arc::new(Mutex::new(
RateLimiter::new(rate_out.as_u64(), burst_out)
))));
}
let https = HttpsConnector::with_connector(httpc, ssl_connector_builder.build(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
let client = Client::builder()
//.http2_initial_stream_window_size( (1 << 31) - 2)

View File

@ -25,7 +25,7 @@ use proxmox::c_result;
use proxmox::tools::fs::{create_path, CreateOptions};
use proxmox_io::{sparse_copy, sparse_copy_async};
use proxmox_async::zip::{ZipEncoder, ZipEntry};
use pbs_tools::zip::{ZipEncoder, ZipEntry};
use crate::pxar::dir_stack::PxarDirStack;
use crate::pxar::metadata;

View File

@ -12,10 +12,9 @@ use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
use proxmox_async::blocking::TokioWriterAdapter;
use pbs_datastore::catalog::CatalogWriter;
use pbs_tools::sync::StdChannelWriter;
use pbs_tools::tokio::TokioWriterAdapter;
/// Stream implementation to encode and upload .pxar archives.
///
@ -112,7 +111,7 @@ impl Stream for PxarBackupStream {
}
}
match proxmox_async::runtime::block_in_place(|| self.rx.as_ref().unwrap().recv()) {
match pbs_runtime::block_in_place(|| self.rx.as_ref().unwrap().recv()) {
Ok(data) => Poll::Ready(Some(data)),
Err(_) => {
let error = self.error.lock().unwrap();

View File

@ -5,13 +5,12 @@ use std::sync::{Arc, Mutex};
use anyhow::{bail, Error};
use proxmox_async::runtime::block_on;
use pbs_tools::crypt_config::CryptConfig;
use pbs_api_types::CryptMode;
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::read_chunk::ReadChunk;
use pbs_datastore::read_chunk::AsyncReadChunk;
use pbs_runtime::block_on;
use super::BackupReader;

View File

@ -440,8 +440,8 @@ fn test_crypto_parameters_handling() -> Result<(), Error> {
mode: CryptMode::SignOnly,
};
replace_file(&keypath, &some_key, CreateOptions::default(), false)?;
replace_file(&master_keypath, &some_master_key, CreateOptions::default(), false)?;
replace_file(&keypath, &some_key, CreateOptions::default())?;
replace_file(&master_keypath, &some_master_key, CreateOptions::default())?;
// no params, no default key == no key
let res = crypto_parameters(&json!({}));

View File

@ -11,10 +11,10 @@ use serde_json::{json, Value};
use xdg::BaseDirectories;
use proxmox_schema::*;
use proxmox_router::cli::{complete_file_name, shellword_split};
use proxmox_router::cli::shellword_split;
use proxmox::tools::fs::file_get_json;
use pbs_api_types::{BACKUP_REPO_URL, Authid, RateLimitConfig, UserWithTokens};
use pbs_api_types::{BACKUP_REPO_URL, Authid, UserWithTokens};
use pbs_datastore::BackupDir;
use pbs_tools::json::json_object_to_query;
@ -135,30 +135,15 @@ pub fn extract_repository_from_map(param: &HashMap<String, String>) -> Option<Ba
}
pub fn connect(repo: &BackupRepository) -> Result<HttpClient, Error> {
let rate_limit = RateLimitConfig::default(); // unlimited
connect_do(repo.host(), repo.port(), repo.auth_id(), rate_limit)
connect_do(repo.host(), repo.port(), repo.auth_id())
.map_err(|err| format_err!("error building client for repository {} - {}", repo, err))
}
pub fn connect_rate_limited(
repo: &BackupRepository,
rate_limit: RateLimitConfig,
) -> Result<HttpClient, Error> {
connect_do(repo.host(), repo.port(), repo.auth_id(), rate_limit)
.map_err(|err| format_err!("error building client for repository {} - {}", repo, err))
}
fn connect_do(
server: &str,
port: u16,
auth_id: &Authid,
rate_limit: RateLimitConfig,
) -> Result<HttpClient, Error> {
fn connect_do(server: &str, port: u16, auth_id: &Authid) -> Result<HttpClient, Error> {
let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
let password = get_secret_from_env(ENV_VAR_PBS_PASSWORD)?;
let options = HttpClientOptions::new_interactive(password, fingerprint)
.rate_limit(rate_limit);
let options = HttpClientOptions::new_interactive(password, fingerprint);
HttpClient::new(server, port, auth_id, options)
}
@ -192,7 +177,7 @@ pub async fn try_get(repo: &BackupRepository, url: &str) -> Value {
}
pub fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
proxmox_async::runtime::main(async { complete_backup_group_do(param).await })
pbs_runtime::main(async { complete_backup_group_do(param).await })
}
pub async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {
@ -222,7 +207,7 @@ pub async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<St
}
pub fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
proxmox_async::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
pbs_runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
}
pub async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
@ -241,7 +226,7 @@ pub async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, St
}
pub fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
proxmox_async::runtime::main(async { complete_backup_snapshot_do(param).await })
pbs_runtime::main(async { complete_backup_snapshot_do(param).await })
}
pub async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {
@ -273,7 +258,7 @@ pub async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec
}
pub fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
proxmox_async::runtime::main(async { complete_server_file_name_do(param).await })
pbs_runtime::main(async { complete_server_file_name_do(param).await })
}
pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {
@ -364,7 +349,7 @@ pub fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<
}
pub fn complete_auth_id(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
proxmox_async::runtime::main(async { complete_auth_id_do(param).await })
pbs_runtime::main(async { complete_auth_id_do(param).await })
}
pub async fn complete_auth_id_do(param: &HashMap<String, String>) -> Vec<String> {
@ -426,7 +411,7 @@ pub fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec
return result;
}
let files = complete_file_name(data[1], param);
let files = pbs_tools::fs::complete_file_name(data[1], param);
for file in files {
result.push(format!("{}:{}", data[0], file));

View File

@ -17,14 +17,12 @@ regex = "1.2"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
proxmox = "0.15.3"
proxmox = "0.14.0"
proxmox-lang = "1"
proxmox-router = { version = "1.1", default-features = false }
proxmox-router = { version = "1", default-features = false }
proxmox-schema = "1"
proxmox-section-config = "1"
proxmox-time = "1"
proxmox-shared-memory = "0.1.1"
proxmox-sys = "0.1.2"
pbs-api-types = { path = "../pbs-api-types" }
pbs-buildcfg = { path = "../pbs-buildcfg" }

View File

@ -12,7 +12,7 @@ use proxmox_time::epoch_i64;
use pbs_api_types::{Authid, Userid, User, ApiToken, ROLE_ADMIN};
use crate::acl::{AclTree, ROLE_NAMES};
use crate::ConfigVersionCache;
use crate::memcom::Memcom;
/// Cache User/Group/Token/Acl configuration data for fast permission tests
pub struct CachedUserInfo {
@ -38,8 +38,8 @@ impl CachedUserInfo {
pub fn new() -> Result<Arc<Self>, Error> {
let now = epoch_i64();
let version_cache = ConfigVersionCache::new()?;
let user_cache_generation = version_cache.user_cache_generation();
let memcom = Memcom::new()?;
let user_cache_generation = memcom.user_cache_generation();
{ // limit scope
let cache = CACHED_CONFIG.read().unwrap();

View File

@ -1,121 +0,0 @@
use std::path::Path;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::mem::MaybeUninit;
use anyhow::{bail, Error};
use once_cell::sync::OnceCell;
use nix::sys::stat::Mode;
use proxmox::tools::fs::{create_path, CreateOptions};
// openssl::sha::sha256(b"Proxmox Backup ConfigVersionCache v1.0")[0..8];
pub const PROXMOX_BACKUP_CONFIG_VERSION_CACHE_MAGIC_1_0: [u8; 8] = [25, 198, 168, 230, 154, 132, 143, 131];
const FILE_PATH: &str = pbs_buildcfg::rundir!("/shmem/config-versions");
use proxmox_shared_memory::*;
#[derive(Debug)]
#[repr(C)]
struct ConfigVersionCacheData {
magic: [u8; 8],
// User (user.cfg) cache generation/version.
user_cache_generation: AtomicUsize,
// Traffic control (traffic-control.cfg) generation/version.
traffic_control_generation: AtomicUsize,
// Add further atomics here (and reduce padding size)
padding: [u8; 4096 - 3*8],
}
impl Init for ConfigVersionCacheData {
fn initialize(this: &mut MaybeUninit<Self>) {
unsafe {
let me = &mut *this.as_mut_ptr();
me.magic = PROXMOX_BACKUP_CONFIG_VERSION_CACHE_MAGIC_1_0;
}
}
fn check_type_magic(this: &MaybeUninit<Self>) -> Result<(), Error> {
unsafe {
let me = &*this.as_ptr();
if me.magic != PROXMOX_BACKUP_CONFIG_VERSION_CACHE_MAGIC_1_0 {
bail!("ConfigVersionCache: wrong magic number");
}
Ok(())
}
}
}
pub struct ConfigVersionCache {
shmem: SharedMemory<ConfigVersionCacheData>
}
static INSTANCE: OnceCell<Arc< ConfigVersionCache>> = OnceCell::new();
impl ConfigVersionCache {
/// Open the memory based communication channel singleton.
pub fn new() -> Result<Arc<Self>, Error> {
INSTANCE.get_or_try_init(Self::open).map(Arc::clone)
}
// Actual work of `new`:
fn open() -> Result<Arc<Self>, Error> {
let user = crate::backup_user()?;
let dir_opts = CreateOptions::new()
.perm(Mode::from_bits_truncate(0o770))
.owner(user.uid)
.group(user.gid);
let file_path = Path::new(FILE_PATH);
let dir_path = file_path.parent().unwrap();
create_path(
dir_path,
Some(dir_opts.clone()),
Some(dir_opts))?;
let file_opts = CreateOptions::new()
.perm(Mode::from_bits_truncate(0o660))
.owner(user.uid)
.group(user.gid);
let shmem: SharedMemory<ConfigVersionCacheData> =
SharedMemory::open(file_path, file_opts)?;
Ok(Arc::new(Self { shmem }))
}
/// Returns the user cache generation number.
pub fn user_cache_generation(&self) -> usize {
self.shmem.data()
.user_cache_generation.load(Ordering::Acquire)
}
/// Increase the user cache generation number.
pub fn increase_user_cache_generation(&self) {
self.shmem.data()
.user_cache_generation
.fetch_add(1, Ordering::AcqRel);
}
/// Returns the traffic control generation number.
pub fn traffic_control_generation(&self) -> usize {
self.shmem.data()
.traffic_control_generation.load(Ordering::Acquire)
}
/// Increase the traffic control generation number.
pub fn increase_traffic_control_generation(&self) {
self.shmem.data()
.traffic_control_generation
.fetch_add(1, Ordering::AcqRel);
}
}
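
A minimal consumer sketch (hypothetical) of these generation counters, mirroring what CachedUserInfo does above: remember the generation observed at parse time and treat the cached data as stale once another process has bumped it:

struct UserCfgCache {
    generation: usize,
    // parsed user.cfg contents would live here
}

impl UserCfgCache {
    fn is_stale(&self) -> Result<bool, anyhow::Error> {
        let version_cache = ConfigVersionCache::new()?;
        // save_config() in pbs-config/src/user.rs calls
        // increase_user_cache_generation() after every successful write.
        Ok(version_cache.user_cache_generation() != self.generation)
    }
}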

View File

@ -2,17 +2,79 @@ use std::collections::HashMap;
use anyhow::{Error};
use lazy_static::lazy_static;
use serde::{Serialize, Deserialize};
use proxmox_schema::{ApiType, Schema};
use proxmox_schema::{api, ApiType, Updater, Schema};
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
use pbs_api_types::{OpenIdRealmConfig, REALM_ID_SCHEMA};
use pbs_api_types::{REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA};
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
lazy_static! {
pub static ref CONFIG: SectionConfig = init();
}
#[api()]
#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Use the value of this attribute/claim as unique user name. It is
/// up to the identity provider to guarantee the uniqueness. The
/// OpenID specification only guarantees that Subject ('sub') is unique. Also
/// make sure that the user is not allowed to change that attribute by
/// himself!
pub enum OpenIdUserAttribute {
/// Subject (OpenId 'sub' claim)
Subject,
/// Username (OpenId 'preferred_username' claim)
Username,
/// Email (OpenId 'email' claim)
Email,
}
#[api(
properties: {
realm: {
schema: REALM_ID_SCHEMA,
},
"client-key": {
optional: true,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
autocreate: {
optional: true,
default: false,
},
"username-claim": {
type: OpenIdUserAttribute,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all="kebab-case")]
/// OpenID configuration properties.
pub struct OpenIdRealmConfig {
#[updater(skip)]
pub realm: String,
/// OpenID Issuer Url
pub issuer_url: String,
/// OpenID Client ID
pub client_id: String,
/// OpenID Client Key
#[serde(skip_serializing_if="Option::is_none")]
pub client_key: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
/// Automatically create users if they do not exist.
#[serde(skip_serializing_if="Option::is_none")]
pub autocreate: Option<bool>,
#[updater(skip)]
#[serde(skip_serializing_if="Option::is_none")]
pub username_claim: Option<OpenIdUserAttribute>,
}
fn init() -> SectionConfig {
let obj_schema = match OpenIdRealmConfig::API_SCHEMA {

View File

@ -67,7 +67,7 @@ impl KeyDerivationConfig {
/// Encryption Key Configuration
///
/// We use this struct to store secret keys. When used with a key
/// derivation function, the key data is encrypted (AES-GCM), and you
/// derivation function, the key data is encrypted (AES-CGM), and you
/// need the password to restore the plain key.
#[derive(Deserialize, Serialize, Clone, Debug)]
pub struct KeyConfig {
@ -100,7 +100,7 @@ impl From<&KeyConfig> for KeyInfo {
fingerprint: key_config
.fingerprint
.as_ref()
.map(|fp| fp.signature()),
.map(|fp| pbs_tools::format::as_fingerprint(fp.bytes())),
hint: key_config.hint.clone(),
}
}
@ -281,7 +281,7 @@ impl KeyConfig {
try_block!({
if replace {
let mode = nix::sys::stat::Mode::S_IRUSR | nix::sys::stat::Mode::S_IWUSR;
replace_file(path, data.as_bytes(), CreateOptions::new().perm(mode), true)?;
replace_file(path, data.as_bytes(), CreateOptions::new().perm(mode))?;
} else {
use std::os::unix::fs::OpenOptionsExt;

View File

@ -12,12 +12,10 @@ pub mod sync;
pub mod tape_encryption_keys;
pub mod tape_job;
pub mod token_shadow;
pub mod traffic_control;
pub mod user;
pub mod verify;
mod config_version_cache;
pub use config_version_cache::ConfigVersionCache;
pub(crate) mod memcom;
use anyhow::{format_err, Error};
@ -82,7 +80,7 @@ pub fn replace_backup_config<P: AsRef<std::path::Path>>(
.owner(nix::unistd::ROOT)
.group(backup_user.gid);
proxmox::tools::fs::replace_file(path, data, options, true)?;
proxmox::tools::fs::replace_file(path, data, options)?;
Ok(())
}
@ -102,7 +100,7 @@ pub fn replace_secret_config<P: AsRef<std::path::Path>>(
.owner(nix::unistd::ROOT)
.group(nix::unistd::Gid::from_raw(0));
proxmox::tools::fs::replace_file(path, data, options, true)?;
proxmox::tools::fs::replace_file(path, data, options)?;
Ok(())
}

81
pbs-config/src/memcom.rs Normal file
View File

@ -0,0 +1,81 @@
//! Memory based communication channel between proxy & daemon for things such as cache
//! invalidation.
use std::os::unix::io::AsRawFd;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use anyhow::Error;
use nix::fcntl::OFlag;
use nix::sys::mman::{MapFlags, ProtFlags};
use nix::sys::stat::Mode;
use once_cell::sync::OnceCell;
use proxmox::tools::fs::CreateOptions;
use proxmox::tools::mmap::Mmap;
/// In-memory communication channel.
pub struct Memcom {
mmap: Mmap<u8>,
}
#[repr(C)]
struct Head {
// User (user.cfg) cache generation/version.
user_cache_generation: AtomicUsize,
}
static INSTANCE: OnceCell<Arc<Memcom>> = OnceCell::new();
const MEMCOM_FILE_PATH: &str = pbs_buildcfg::rundir!("/proxmox-backup-memcom");
const EMPTY_PAGE: [u8; 4096] = [0u8; 4096];
impl Memcom {
/// Open the memory based communication channel singleton.
pub fn new() -> Result<Arc<Self>, Error> {
INSTANCE.get_or_try_init(Self::open).map(Arc::clone)
}
// Actual work of `new`:
fn open() -> Result<Arc<Self>, Error> {
let user = crate::backup_user()?;
let options = CreateOptions::new()
.perm(Mode::from_bits_truncate(0o660))
.owner(user.uid)
.group(user.gid);
let file = proxmox::tools::fs::atomic_open_or_create_file(
MEMCOM_FILE_PATH,
OFlag::O_RDWR | OFlag::O_CLOEXEC,
&EMPTY_PAGE, options)?;
let mmap = unsafe {
Mmap::<u8>::map_fd(
file.as_raw_fd(),
0,
4096,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
MapFlags::MAP_SHARED | MapFlags::MAP_NORESERVE | MapFlags::MAP_POPULATE,
)?
};
Ok(Arc::new(Self { mmap }))
}
// Shortcut to get the mapped `Head` as a `Head`.
fn head(&self) -> &Head {
unsafe { &*(self.mmap.as_ptr() as *const u8 as *const Head) }
}
/// Returns the user cache generation number.
pub fn user_cache_generation(&self) -> usize {
self.head().user_cache_generation.load(Ordering::Acquire)
}
/// Increase the user cache generation number.
pub fn increase_user_cache_generation(&self) {
self.head()
.user_cache_generation
.fetch_add(1, Ordering::AcqRel);
}
}

View File

@ -448,7 +448,7 @@ pub fn save_config(config: &NetworkConfig) -> Result<(), Error> {
.owner(nix::unistd::ROOT)
.group(nix::unistd::Gid::from_raw(0));
replace_file(NETWORK_INTERFACES_NEW_FILENAME, &raw, options, true)?;
replace_file(NETWORK_INTERFACES_NEW_FILENAME, &raw, options)?;
Ok(())
}

View File

@ -17,7 +17,7 @@ lazy_static! {
fn init() -> SectionConfig {
let obj_schema = match SyncJobConfig::API_SCHEMA {
Schema::AllOf(ref allof_schema) => allof_schema,
Schema::Object(ref obj_schema) => obj_schema,
_ => unreachable!(),
};

View File

@ -190,5 +190,5 @@ pub fn complete_key_fingerprint(_arg: &str, _param: &HashMap<String, String>) ->
Err(_) => return Vec::new(),
};
data.keys().map(|fp| fp.signature()).collect()
data.keys().map(|fp| pbs_tools::format::as_fingerprint(fp.bytes())).collect()
}

View File

@ -45,7 +45,7 @@ fn write_file(data: HashMap<Authid, String>) -> Result<(), Error> {
.group(backup_user.gid);
let json = serde_json::to_vec(&data)?;
proxmox::tools::fs::replace_file(CONF_FILE, &json, options, true)
proxmox::tools::fs::replace_file(CONF_FILE, &json, options)
}
@ -58,7 +58,7 @@ pub fn verify_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
let data = read_file()?;
match data.get(tokenid) {
Some(hashed_secret) => {
proxmox_sys::crypt::verify_crypt_pw(secret, &hashed_secret)
pbs_tools::crypt::verify_crypt_pw(secret, &hashed_secret)
},
None => bail!("invalid API token"),
}
@ -73,7 +73,7 @@ pub fn set_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
let _guard = lock_config()?;
let mut data = read_file()?;
let hashed_secret = proxmox_sys::crypt::encrypt_pw(secret)?;
let hashed_secret = pbs_tools::crypt::encrypt_pw(secret)?;
data.insert(tokenid.clone(), hashed_secret);
write_file(data)?;

View File

@ -1,97 +0,0 @@
//! Traffic Control Settings (Network rate limits)
use std::collections::HashMap;
use anyhow::Error;
use lazy_static::lazy_static;
use proxmox_schema::{ApiType, Schema};
use pbs_api_types::{TrafficControlRule, TRAFFIC_CONTROL_ID_SCHEMA};
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
use crate::ConfigVersionCache;
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
lazy_static! {
/// Static [`SectionConfig`] to access parser/writer functions.
pub static ref CONFIG: SectionConfig = init();
}
fn init() -> SectionConfig {
let mut config = SectionConfig::new(&TRAFFIC_CONTROL_ID_SCHEMA);
let obj_schema = match TrafficControlRule::API_SCHEMA {
Schema::AllOf(ref allof_schema) => allof_schema,
_ => unreachable!(),
};
let plugin = SectionConfigPlugin::new("rule".to_string(), Some("name".to_string()), obj_schema);
config.register_plugin(plugin);
config
}
/// Configuration file name
pub const TRAFFIC_CONTROL_CFG_FILENAME: &str = "/etc/proxmox-backup/traffic-control.cfg";
/// Lock file name (used to prevent concurrent access)
pub const TRAFFIC_CONTROL_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.traffic-control.lck";
/// Get exclusive lock
pub fn lock_config() -> Result<BackupLockGuard, Error> {
open_backup_lockfile(TRAFFIC_CONTROL_CFG_LOCKFILE, None, true)
}
/// Read and parse the configuration file
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
let content = proxmox::tools::fs::file_read_optional_string(TRAFFIC_CONTROL_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(TRAFFIC_CONTROL_CFG_FILENAME, &content)?;
Ok((data, digest))
}
/// Save the configuration file
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
let raw = CONFIG.write(TRAFFIC_CONTROL_CFG_FILENAME, &config)?;
replace_backup_config(TRAFFIC_CONTROL_CFG_FILENAME, raw.as_bytes())?;
// increase traffic control version
// We use this in TrafficControlCache
let version_cache = ConfigVersionCache::new()?;
version_cache.increase_traffic_control_generation();
Ok(())
}
// shell completion helper
pub fn complete_traffic_control_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
match config() {
Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
Err(_) => return vec![],
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test1() -> Result<(), Error> {
let content = "rule: rule1
comment localnet at working hours
network 192.168.2.0/24
network 192.168.3.0/24
rate-in 500000
timeframe mon..wed 8:00-16:30
timeframe fri 9:00-12:00
";
let data = CONFIG.parse(TRAFFIC_CONTROL_CFG_FILENAME, &content)?;
eprintln!("GOT {:?}", data);
Ok(())
}
}

View File

@ -11,7 +11,7 @@ use pbs_api_types::{
Authid, Userid, ApiToken, User,
};
use crate::ConfigVersionCache;
use crate::memcom::Memcom;
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
@ -120,10 +120,10 @@ pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
let raw = CONFIG.write(USER_CFG_FILENAME, &config)?;
replace_backup_config(USER_CFG_FILENAME, raw.as_bytes())?;
// increase user version
// increase user cache generation
// We use this in CachedUserInfo
let version_cache = ConfigVersionCache::new()?;
version_cache.increase_user_cache_generation();
let memcom = Memcom::new()?;
memcom.increase_user_cache_generation();
Ok(())
}

View File

@ -7,7 +7,7 @@ description = "low level pbs data storage access"
[dependencies]
anyhow = "1.0"
base64 = "0.13"
base64 = "0.12"
crc32fast = "1"
endian_trait = { version = "0.6", features = [ "arrays" ] }
futures = "0.3"
@ -25,14 +25,13 @@ zstd = { version = "0.6", features = [ "bindgen" ] }
pathpatterns = "0.1.2"
pxar = "0.10.1"
proxmox = "0.15.3"
proxmox = "0.14.0"
proxmox-borrow = "1"
proxmox-io = "1"
proxmox-lang = "1"
proxmox-schema = { version = "1", features = [ "api-macro" ] }
proxmox-time = "1"
proxmox-uuid = "1"
proxmox-sys = "0.1.2"
pbs-api-types = { path = "../pbs-api-types" }
pbs-tools = { path = "../pbs-tools" }

View File

@ -1,6 +1,5 @@
use std::os::unix::io::RawFd;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use anyhow::{bail, format_err, Error};
@ -11,7 +10,6 @@ use pbs_api_types::{
GROUP_PATH_REGEX,
SNAPSHOT_PATH_REGEX,
BACKUP_FILE_REGEX,
GroupFilter,
};
use super::manifest::MANIFEST_BLOB_NAME;
@ -94,9 +92,7 @@ impl BackupGroup {
BackupDir::with_rfc3339(&self.backup_type, &self.backup_id, backup_time)?;
let files = list_backup_files(l2_fd, backup_time)?;
let protected = backup_dir.is_protected(base_path.to_owned());
list.push(BackupInfo { backup_dir, files, protected });
list.push(BackupInfo { backup_dir, files });
Ok(())
},
@ -157,17 +153,6 @@ impl BackupGroup {
Ok(last)
}
pub fn matches(&self, filter: &GroupFilter) -> bool {
match filter {
GroupFilter::Group(backup_group) => match BackupGroup::from_str(&backup_group) {
Ok(group) => &group == self,
Err(_) => false, // shouldn't happen if value is schema-checked
},
GroupFilter::BackupType(backup_type) => self.backup_type() == backup_type,
GroupFilter::Regex(regex) => regex.is_match(&self.to_string()),
}
}
}
impl std::fmt::Display for BackupGroup {
@ -268,17 +253,6 @@ impl BackupDir {
relative_path
}
pub fn protected_file(&self, mut path: PathBuf) -> PathBuf {
path.push(self.relative_path());
path.push(".protected");
path
}
pub fn is_protected(&self, base_path: PathBuf) -> bool {
let path = self.protected_file(base_path);
path.exists()
}
pub fn backup_time_to_string(backup_time: i64) -> Result<String, Error> {
// fixme: can this fail? (avoid unwrap)
Ok(proxmox_time::epoch_to_rfc3339_utc(backup_time)?)
@ -319,8 +293,6 @@ pub struct BackupInfo {
pub backup_dir: BackupDir,
/// List of data files
pub files: Vec<String>,
/// Protection Status
pub protected: bool,
}
impl BackupInfo {
@ -329,9 +301,8 @@ impl BackupInfo {
path.push(backup_dir.relative_path());
let files = list_backup_files(libc::AT_FDCWD, &path)?;
let protected = backup_dir.is_protected(base_path.to_owned());
Ok(BackupInfo { backup_dir, files, protected })
Ok(BackupInfo { backup_dir, files })
}
/// Finds the latest backup inside a backup group

View File

@ -6,11 +6,10 @@ use std::sync::{Arc, Mutex};
use anyhow::{bail, format_err, Error};
use proxmox::tools::fs::{CreateOptions, create_path, create_dir};
use proxmox_sys::process_locker::{ProcessLocker, ProcessLockSharedGuard, ProcessLockExclusiveGuard};
use proxmox_sys::worker_task_context::WorkerTaskContext;
use proxmox_sys::task_log;
use pbs_api_types::GarbageCollectionStatus;
use pbs_api_types::GarbageCollectionStatus;
use pbs_tools::process_locker::{self, ProcessLocker};
use pbs_tools::{task_log, task::WorkerTaskContext};
use crate::DataBlob;
@ -96,7 +95,7 @@ impl ChunkStore {
// create lock file with correct owner/group
let lockfile_path = Self::lockfile_path(&base);
proxmox::tools::fs::replace_file(lockfile_path, b"", options.clone(), false)?;
proxmox::tools::fs::replace_file(lockfile_path, b"", options.clone())?;
// create 64*1024 subdirs
let mut last_percentage = 0;
@ -460,11 +459,11 @@ impl ChunkStore {
self.base.clone()
}
pub fn try_shared_lock(&self) -> Result<ProcessLockSharedGuard, Error> {
pub fn try_shared_lock(&self) -> Result<process_locker::ProcessLockSharedGuard, Error> {
ProcessLocker::try_shared_lock(self.locker.clone())
}
pub fn try_exclusive_lock(&self) -> Result<ProcessLockExclusiveGuard, Error> {
pub fn try_exclusive_lock(&self) -> Result<process_locker::ProcessLockExclusiveGuard, Error> {
ProcessLocker::try_exclusive_lock(self.locker.clone())
}
}

View File

@ -10,12 +10,12 @@ use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
use proxmox::tools::fs::{replace_file, file_read_optional_string, CreateOptions};
use proxmox_sys::process_locker::ProcessLockSharedGuard;
use proxmox_sys::worker_task_context::WorkerTaskContext;
use proxmox_sys::{task_log, task_warn};
use pbs_api_types::{UPID, DataStoreConfig, Authid, GarbageCollectionStatus, HumanByte};
use pbs_api_types::{UPID, DataStoreConfig, Authid, GarbageCollectionStatus};
use pbs_tools::format::HumanByte;
use pbs_tools::fs::{lock_dir_noblock, DirLockGuard};
use pbs_tools::process_locker::ProcessLockSharedGuard;
use pbs_tools::{task_log, task_warn, task::WorkerTaskContext};
use pbs_config::{open_backup_lockfile, BackupLockGuard};
use crate::DataBlob;
@ -266,9 +266,8 @@ impl DataStore {
full_path
}
/// Remove a complete backup group including all snapshots, returns true
/// if all snapshots were removed, and false if some were protected
pub fn remove_backup_group(&self, backup_group: &BackupGroup) -> Result<bool, Error> {
/// Remove a complete backup group including all snapshots
pub fn remove_backup_group(&self, backup_group: &BackupGroup) -> Result<(), Error> {
let full_path = self.group_path(backup_group);
@ -276,30 +275,22 @@ impl DataStore {
log::info!("removing backup group {:?}", full_path);
let mut removed_all = true;
// remove all individual backup dirs first to ensure nothing is using them
for snap in backup_group.list_backups(&self.base_path())? {
if snap.backup_dir.is_protected(self.base_path()) {
removed_all = false;
continue;
}
self.remove_backup_dir(&snap.backup_dir, false)?;
}
if removed_all {
// no snapshots left, we can now safely remove the empty folder
std::fs::remove_dir_all(&full_path)
.map_err(|err| {
format_err!(
"removing backup group directory {:?} failed - {}",
full_path,
err,
)
})?;
}
// no snapshots left, we can now safely remove the empty folder
std::fs::remove_dir_all(&full_path)
.map_err(|err| {
format_err!(
"removing backup group directory {:?} failed - {}",
full_path,
err,
)
})?;
Ok(removed_all)
Ok(())
}
/// Remove a backup directory including all content
@ -313,10 +304,6 @@ impl DataStore {
_manifest_guard = self.lock_manifest(backup_dir)?;
}
if backup_dir.is_protected(self.base_path()) {
bail!("cannot remove protected snapshot");
}
log::info!("removing backup snapshot {:?}", full_path);
std::fs::remove_dir_all(&full_path)
.map_err(|err| {
@ -718,7 +705,7 @@ impl DataStore {
.group(backup_user.gid);
// ignore errors
let _ = replace_file(path, serialized.as_bytes(), options, false);
let _ = replace_file(path, serialized.as_bytes(), options);
}
*self.last_gc_status.lock().unwrap() = gc_status;
@ -853,31 +840,7 @@ impl DataStore {
path.push(MANIFEST_BLOB_NAME);
// atomic replace invalidates flock - no other writes past this point!
replace_file(&path, raw_data, CreateOptions::new(), false)?;
Ok(())
}
/// Updates the protection status of the specified snapshot.
pub fn update_protection(
&self,
backup_dir: &BackupDir,
protection: bool
) -> Result<(), Error> {
let full_path = self.snapshot_path(backup_dir);
let _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
let protected_path = backup_dir.protected_file(self.base_path());
if protection {
std::fs::File::create(protected_path)
.map_err(|err| format_err!("could not create protection file: {}", err))?;
} else if let Err(err) = std::fs::remove_file(protected_path) {
// ignore error for non-existing file
if err.kind() != std::io::ErrorKind::NotFound {
bail!("could not remove protection file: {}", err);
}
}
replace_file(&path, raw_data, CreateOptions::new())?;
Ok(())
}
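
To make the removed protection API above concrete, a hypothetical flow (assuming the snapshot lives in the given group): marking a snapshot as protected creates a ".protected" file next to it, remove_backup_dir refuses to delete it, and remove_backup_group skips it and reports that not everything was removed:

fn protect_and_remove(store: &DataStore, dir: &BackupDir, group: &BackupGroup) -> Result<(), anyhow::Error> {
    store.update_protection(dir, true)?;                 // creates the ".protected" marker
    let removed_all = store.remove_backup_group(group)?; // protected snapshots are skipped
    assert!(!removed_all);                               // the group directory is kept as well
    Ok(())
}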

View File

@ -12,10 +12,10 @@ use anyhow::{bail, format_err, Error};
use proxmox::tools::mmap::Mmap;
use proxmox_io::ReadExt;
use proxmox_uuid::Uuid;
use proxmox_sys::process_locker::ProcessLockSharedGuard;
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
use pbs_tools::lru_cache::LruCache;
use pbs_tools::process_locker::ProcessLockSharedGuard;
use crate::Chunker;
use crate::chunk_stat::ChunkStat;

View File

@ -7,7 +7,8 @@ use std::io::{Seek, SeekFrom};
use anyhow::{bail, format_err, Error};
use proxmox_sys::process_locker::ProcessLockSharedGuard;
use pbs_tools::process_locker::ProcessLockSharedGuard;
use proxmox_io::ReadExt;
use proxmox_uuid::Uuid;

View File

@ -7,29 +7,7 @@ use pbs_api_types::PruneOptions;
use super::BackupInfo;
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum PruneMark { Protected, Keep, KeepPartial, Remove }
impl PruneMark {
pub fn keep(self) -> bool {
self != PruneMark::Remove
}
pub fn protected(self) -> bool {
self == PruneMark::Protected
}
}
impl std::fmt::Display for PruneMark {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(match self {
PruneMark::Protected => "protected",
PruneMark::Keep => "keep",
PruneMark::KeepPartial => "keep-partial",
PruneMark::Remove => "remove",
})
}
}
enum PruneMark { Keep, KeepPartial, Remove }
fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>> (
mark: &mut HashMap<PathBuf, PruneMark>,
@ -52,10 +30,6 @@ fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>> (
for info in list {
let backup_id = info.backup_dir.relative_path();
if mark.get(&backup_id).is_some() { continue; }
if info.protected {
mark.insert(backup_id, PruneMark::Protected);
continue;
}
let sel_id: String = select_id(&info)?;
if already_included.contains(&sel_id) { continue; }
@ -147,7 +121,7 @@ pub fn cli_options_string(options: &PruneOptions) -> String {
pub fn compute_prune_info(
mut list: Vec<BackupInfo>,
options: &PruneOptions,
) -> Result<Vec<(BackupInfo, PruneMark)>, Error> {
) -> Result<Vec<(BackupInfo, bool)>, Error> {
let mut mark = HashMap::new();
@ -195,16 +169,15 @@ pub fn compute_prune_info(
})?;
}
let prune_info: Vec<(BackupInfo, PruneMark)> = list.into_iter()
let prune_info: Vec<(BackupInfo, bool)> = list.into_iter()
.map(|info| {
let backup_id = info.backup_dir.relative_path();
let mark = if info.protected {
PruneMark::Protected
} else {
mark.get(&backup_id).copied().unwrap_or(PruneMark::Remove)
let keep = match mark.get(&backup_id) {
Some(PruneMark::Keep) => true,
Some(PruneMark::KeepPartial) => true,
_ => false,
};
(info, mark)
(info, keep)
})
.collect();
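
For orientation, a hypothetical caller of the 2.1.x variant shown in the removed lines (assuming DataStore, BackupInfo, PruneOptions and compute_prune_info are in scope), using the PruneMark helpers to decide what actually gets deleted:

fn apply_prune(store: &DataStore, list: Vec<BackupInfo>, options: &PruneOptions) -> Result<(), anyhow::Error> {
    for (info, mark) in compute_prune_info(list, options)? {
        // keep() covers Keep, KeepPartial and Protected; only Remove entries go away.
        if !mark.keep() {
            store.remove_backup_dir(&info.backup_dir, false)?;
        }
    }
    Ok(())
}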

11
pbs-runtime/Cargo.toml Normal file
View File

@ -0,0 +1,11 @@
[package]
name = "pbs-runtime"
version = "0.1.0"
authors = ["Proxmox Support Team <support@proxmox.com>"]
edition = "2018"
description = "tokio runtime related helpers required for binaries"
[dependencies]
lazy_static = "1.4"
pin-utils = "0.1.0"
tokio = { version = "1.6", features = [ "rt", "rt-multi-thread" ] }

203
pbs-runtime/src/lib.rs Normal file
View File

@ -0,0 +1,203 @@
//! Helpers for quirks of the current tokio runtime.
use std::cell::RefCell;
use std::future::Future;
use std::sync::{Arc, Weak, Mutex};
use std::task::{Context, Poll, RawWaker, Waker};
use std::thread::{self, Thread};
use lazy_static::lazy_static;
use pin_utils::pin_mut;
use tokio::runtime::{self, Runtime};
thread_local! {
static BLOCKING: RefCell<bool> = RefCell::new(false);
}
fn is_in_tokio() -> bool {
tokio::runtime::Handle::try_current()
.is_ok()
}
fn is_blocking() -> bool {
BLOCKING.with(|v| *v.borrow())
}
struct BlockingGuard(bool);
impl BlockingGuard {
fn set() -> Self {
Self(BLOCKING.with(|v| {
let old = *v.borrow();
*v.borrow_mut() = true;
old
}))
}
}
impl Drop for BlockingGuard {
fn drop(&mut self) {
BLOCKING.with(|v| {
*v.borrow_mut() = self.0;
});
}
}
lazy_static! {
// avoid openssl bug: https://github.com/openssl/openssl/issues/6214
// by dropping the runtime as early as possible
static ref RUNTIME: Mutex<Weak<Runtime>> = Mutex::new(Weak::new());
}
#[link(name = "crypto")]
extern "C" {
fn OPENSSL_thread_stop();
}
/// Get or create the current main tokio runtime.
///
/// This makes sure that tokio's worker threads are marked for us so that we know whether we
/// can/need to use `block_in_place` in our `block_on` helper.
pub fn get_runtime_with_builder<F: Fn() -> runtime::Builder>(get_builder: F) -> Arc<Runtime> {
let mut guard = RUNTIME.lock().unwrap();
if let Some(rt) = guard.upgrade() { return rt; }
let mut builder = get_builder();
builder.on_thread_stop(|| {
// avoid openssl bug: https://github.com/openssl/openssl/issues/6214
// call OPENSSL_thread_stop to avoid race with openssl cleanup handlers
unsafe { OPENSSL_thread_stop(); }
});
let runtime = builder.build().expect("failed to spawn tokio runtime");
let rt = Arc::new(runtime);
*guard = Arc::downgrade(&rt);
rt
}
/// Get or create the current main tokio runtime.
///
/// This calls get_runtime_with_builder() using the tokio default threaded scheduler
pub fn get_runtime() -> Arc<Runtime> {
get_runtime_with_builder(|| {
let mut builder = runtime::Builder::new_multi_thread();
builder.enable_all();
builder
})
}
/// Block on a synchronous piece of code.
pub fn block_in_place<R>(fut: impl FnOnce() -> R) -> R {
// don't double-exit the context (tokio doesn't like that)
// also, if we're not actually in a tokio-worker we must not use block_in_place() either
if is_blocking() || !is_in_tokio() {
fut()
} else {
// we are in an actual tokio worker thread, block it:
tokio::task::block_in_place(move || {
let _guard = BlockingGuard::set();
fut()
})
}
}
/// Block on a future in this thread.
pub fn block_on<F: Future>(fut: F) -> F::Output {
// don't double-exit the context (tokio doesn't like that)
if is_blocking() {
block_on_local_future(fut)
} else if is_in_tokio() {
// inside a tokio worker we need to tell tokio that we're about to really block:
tokio::task::block_in_place(move || {
let _guard = BlockingGuard::set();
block_on_local_future(fut)
})
} else {
// not a worker thread, not associated with a runtime, make sure we have a runtime (spawn
// it on demand if necessary), then enter it
let _guard = BlockingGuard::set();
let _enter_guard = get_runtime().enter();
get_runtime().block_on(fut)
}
}
/*
fn block_on_impl<F>(mut fut: F) -> F::Output
where
F: Future + Send,
F::Output: Send + 'static,
{
let (tx, rx) = tokio::sync::oneshot::channel();
let fut_ptr = &mut fut as *mut F as usize; // hack to not require F to be 'static
tokio::spawn(async move {
let fut: F = unsafe { std::ptr::read(fut_ptr as *mut F) };
tx
.send(fut.await)
.map_err(drop)
.expect("failed to send block_on result to channel")
});
futures::executor::block_on(async move {
rx.await.expect("failed to receive block_on result from channel")
})
std::mem::forget(fut);
}
*/
/// This used to be our tokio main entry point. Now this just calls out to `block_on` for
/// compatibility, which will perform all the necessary tasks on-demand anyway.
pub fn main<F: Future>(fut: F) -> F::Output {
block_on(fut)
}
fn block_on_local_future<F: Future>(fut: F) -> F::Output {
pin_mut!(fut);
let waker = Arc::new(thread::current());
let waker = thread_waker_clone(Arc::into_raw(waker) as *const ());
let waker = unsafe { Waker::from_raw(waker) };
let mut context = Context::from_waker(&waker);
loop {
match fut.as_mut().poll(&mut context) {
Poll::Ready(out) => return out,
Poll::Pending => thread::park(),
}
}
}
const THREAD_WAKER_VTABLE: std::task::RawWakerVTable = std::task::RawWakerVTable::new(
thread_waker_clone,
thread_waker_wake,
thread_waker_wake_by_ref,
thread_waker_drop,
);
fn thread_waker_clone(this: *const ()) -> RawWaker {
let this = unsafe { Arc::from_raw(this as *const Thread) };
let cloned = Arc::clone(&this);
let _ = Arc::into_raw(this);
RawWaker::new(Arc::into_raw(cloned) as *const (), &THREAD_WAKER_VTABLE)
}
fn thread_waker_wake(this: *const ()) {
let this = unsafe { Arc::from_raw(this as *const Thread) };
this.unpark();
}
fn thread_waker_wake_by_ref(this: *const ()) {
let this = unsafe { Arc::from_raw(this as *const Thread) };
this.unpark();
let _ = Arc::into_raw(this);
}
fn thread_waker_drop(this: *const ()) {
let this = unsafe { Arc::from_raw(this as *const Thread) };
drop(this);
}
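
A hypothetical binary entry point using these helpers, similar to the pbs_runtime::main(async { ... }) calls visible in the pbs-client hunks above:

fn main() -> Result<(), anyhow::Error> {
    // pbs_runtime::main lazily creates the shared multi-threaded runtime
    // (get_runtime) and drives the future to completion via block_on.
    pbs_runtime::main(async {
        // Blocking work inside async code goes through block_in_place so a
        // tokio worker thread can hand off its other tasks first.
        let data = pbs_runtime::block_in_place(|| std::fs::read("/etc/hostname"))?;
        println!("read {} bytes", data.len());
        Ok(())
    })
}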

View File

@ -16,9 +16,9 @@ serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
bitflags = "1.2.1"
regex = "1.2"
udev = "0.4"
udev = ">= 0.3, <0.5"
proxmox = "0.15.3"
proxmox = "0.14.0"
proxmox-io = "1"
proxmox-lang = "1"
# api-macro is only used by the binaries, so maybe we should split them out
@ -27,7 +27,7 @@ proxmox-time = "1"
proxmox-uuid = "1"
# router::cli is only used by binaries, so maybe we should split them out
proxmox-router = "1.1"
proxmox-router = "1"
pbs-api-types = { path = "../pbs-api-types" }
pbs-tools = { path = "../pbs-tools" }

View File

@ -10,7 +10,7 @@ use crate::sgutils2::{SgRaw, alloc_page_aligned_buffer};
/// Test if drive supports hardware encryption
///
/// We search for AES_GCM algorithm with 256bits key.
/// We search for AES_CGM algorithm with 256bits key.
pub fn has_encryption<F: AsRawFd>(
file: &mut F,
) -> bool {
@ -213,14 +213,14 @@ struct SspDataEncryptionAlgorithmDescriptor {
algorithm_code: u32,
}
// Returns the algorythm_index for AES-GCM
// Returns the algorythm_index for AES-CGM
fn decode_spin_data_encryption_caps(data: &[u8]) -> Result<u8, Error> {
proxmox_lang::try_block!({
let mut reader = &data[..];
let _page: SspDataEncryptionCapabilityPage = unsafe { reader.read_be_value()? };
let mut aes_gcm_index = None;
let mut aes_cgm_index = None;
loop {
if reader.is_empty() { break; };
@ -236,14 +236,14 @@ fn decode_spin_data_encryption_caps(data: &[u8]) -> Result<u8, Error> {
continue; // can't decrypt in hardware
}
if desc.algorithm_code == 0x00010014 && desc.key_size == 32 {
aes_gcm_index = Some(desc.algorythm_index);
aes_cgm_index = Some(desc.algorythm_index);
break;
}
}
match aes_gcm_index {
match aes_cgm_index {
Some(index) => Ok(index),
None => bail!("drive does not support AES-GCM encryption"),
None => bail!("drive does not support AES-CGM encryption"),
}
}).map_err(|err: Error| format_err!("decode data encryption caps page failed - {}", err))

View File

@ -8,7 +8,7 @@ description = "common tools used throughout pbs"
# This must not depend on any subcrates more closely related to pbs itself.
[dependencies]
anyhow = "1.0"
base64 = "0.13"
base64 = "0.12"
bytes = "1.0"
crc32fast = "1"
endian_trait = { version = "0.6", features = ["arrays"] }
@ -32,15 +32,14 @@ url = "2.1"
walkdir = "2"
zstd = { version = "0.6", features = [ "bindgen" ] }
proxmox = { version = "0.15.3", default-features = false, features = [ "tokio" ] }
proxmox-async = "0.2"
proxmox = { version = "0.14.0", default-features = false, features = [ "tokio" ] }
proxmox-borrow = "1"
proxmox-io = { version = "1", features = [ "tokio" ] }
proxmox-lang = { version = "1" }
proxmox-time = { version = "1" }
pbs-buildcfg = { path = "../pbs-buildcfg" }
pbs-api-types = { path = "../pbs-api-types" }
pbs-runtime = { path = "../pbs-runtime" }
[dev-dependencies]
tokio = { version = "1.6", features = [ "macros" ] }

View File

@ -7,7 +7,7 @@ use std::collections::HashMap;
use std::future::Future;
use std::sync::{Arc, Mutex};
use proxmox_async::broadcast_future::BroadcastFuture;
use crate::broadcast_future::BroadcastFuture;
use crate::lru_cache::LruCache;
/// Interface for asynchronously getting values on cache misses.

99
pbs-tools/src/blocking.rs Normal file
View File

@ -0,0 +1,99 @@
//! Async wrappers for blocking I/O (adding `block_in_place` around channels/readers)
use std::io::{self, Read};
use std::pin::Pin;
use std::task::{Context, Poll};
use std::sync::mpsc::Receiver;
use futures::stream::Stream;
use pbs_runtime::block_in_place;
/// Wrapper struct to convert a Reader into a Stream
pub struct WrappedReaderStream<R: Read + Unpin> {
reader: R,
buffer: Vec<u8>,
}
impl <R: Read + Unpin> WrappedReaderStream<R> {
pub fn new(reader: R) -> Self {
let mut buffer = Vec::with_capacity(64*1024);
unsafe { buffer.set_len(buffer.capacity()); }
Self { reader, buffer }
}
}
impl<R: Read + Unpin> Stream for WrappedReaderStream<R> {
type Item = Result<Vec<u8>, io::Error>;
fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
match block_in_place(|| this.reader.read(&mut this.buffer)) {
Ok(n) => {
if n == 0 {
// EOF
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(this.buffer[..n].to_vec())))
}
}
Err(err) => Poll::Ready(Some(Err(err))),
}
}
}
/// Wrapper struct to convert a channel Receiver into a Stream
pub struct StdChannelStream<T>(pub Receiver<T>);
impl<T> Stream for StdChannelStream<T> {
type Item = T;
fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
match block_in_place(|| self.0.recv()) {
Ok(data) => Poll::Ready(Some(data)),
Err(_) => Poll::Ready(None),// channel closed
}
}
}
#[cfg(test)]
mod test {
use std::io;
use anyhow::Error;
use futures::stream::TryStreamExt;
#[test]
fn test_wrapped_stream_reader() -> Result<(), Error> {
pbs_runtime::main(async {
run_wrapped_stream_reader_test().await
})
}
struct DummyReader(usize);
impl io::Read for DummyReader {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.0 += 1;
if self.0 >= 10 {
return Ok(0);
}
unsafe {
std::ptr::write_bytes(buf.as_mut_ptr(), 0, buf.len());
}
Ok(buf.len())
}
}
async fn run_wrapped_stream_reader_test() -> Result<(), Error> {
let mut reader = super::WrappedReaderStream::new(DummyReader(0));
while let Some(_data) = reader.try_next().await? {
// just waiting
}
Ok(())
}
}

View File

@ -0,0 +1,180 @@
use std::future::Future;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use anyhow::{format_err, Error};
use futures::future::{FutureExt, TryFutureExt};
use tokio::sync::oneshot;
/// Broadcast results to registered listeners using async oneshot channels
#[derive(Default)]
pub struct BroadcastData<T> {
result: Option<Result<T, String>>,
listeners: Vec<oneshot::Sender<Result<T, Error>>>,
}
impl <T: Clone> BroadcastData<T> {
pub fn new() -> Self {
Self {
result: None,
listeners: vec![],
}
}
pub fn notify_listeners(&mut self, result: Result<T, String>) {
self.result = Some(result.clone());
loop {
match self.listeners.pop() {
None => { break; },
Some(ch) => {
match &result {
Ok(result) => { let _ = ch.send(Ok(result.clone())); },
Err(err) => { let _ = ch.send(Err(format_err!("{}", err))); },
}
},
}
}
}
pub fn listen(&mut self) -> impl Future<Output = Result<T, Error>> {
use futures::future::{ok, Either};
match &self.result {
None => {},
Some(Ok(result)) => return Either::Left(ok(result.clone())),
Some(Err(err)) => return Either::Left(futures::future::err(format_err!("{}", err))),
}
let (tx, rx) = oneshot::channel::<Result<T, Error>>();
self.listeners.push(tx);
Either::Right(rx
.map(|res| match res {
Ok(Ok(t)) => Ok(t),
Ok(Err(e)) => Err(e),
Err(e) => Err(Error::from(e)),
})
)
}
}
type SourceFuture<T> = Pin<Box<dyn Future<Output = Result<T, Error>> + Send>>;
struct BroadCastFutureBinding<T> {
broadcast: BroadcastData<T>,
future: Option<SourceFuture<T>>,
}
/// Broadcast future results to registered listeners
pub struct BroadcastFuture<T> {
inner: Arc<Mutex<BroadCastFutureBinding<T>>>,
}
impl<T: Clone + Send + 'static> BroadcastFuture<T> {
/// Create instance for specified source future.
///
/// The result of the future is sent to all registered listeners.
pub fn new(source: Box<dyn Future<Output = Result<T, Error>> + Send>) -> Self {
let inner = BroadCastFutureBinding {
broadcast: BroadcastData::new(),
future: Some(Pin::from(source)),
};
Self { inner: Arc::new(Mutex::new(inner)) }
}
/// Creates a new instance with a oneshot channel as trigger
pub fn new_oneshot() -> (Self, oneshot::Sender<Result<T, Error>>) {
let (tx, rx) = oneshot::channel::<Result<T, Error>>();
let rx = rx
.map_err(Error::from)
.and_then(futures::future::ready);
(Self::new(Box::new(rx)), tx)
}
fn notify_listeners(
inner: Arc<Mutex<BroadCastFutureBinding<T>>>,
result: Result<T, String>,
) {
let mut data = inner.lock().unwrap();
data.broadcast.notify_listeners(result);
}
fn spawn(inner: Arc<Mutex<BroadCastFutureBinding<T>>>) -> impl Future<Output = Result<T, Error>> {
let mut data = inner.lock().unwrap();
if let Some(source) = data.future.take() {
let inner1 = inner.clone();
let task = source.map(move |value| {
match value {
Ok(value) => Self::notify_listeners(inner1, Ok(value)),
Err(err) => Self::notify_listeners(inner1, Err(err.to_string())),
}
});
tokio::spawn(task);
}
data.broadcast.listen()
}
/// Register a listener
pub fn listen(&self) -> impl Future<Output = Result<T, Error>> {
let inner2 = self.inner.clone();
async move { Self::spawn(inner2).await }
}
}
#[test]
fn test_broadcast_future() {
use std::sync::atomic::{AtomicUsize, Ordering};
static CHECKSUM: AtomicUsize = AtomicUsize::new(0);
let (sender, trigger) = BroadcastFuture::new_oneshot();
let receiver1 = sender.listen()
.map_ok(|res| {
CHECKSUM.fetch_add(res, Ordering::SeqCst);
})
.map_err(|err| { panic!("got error {}", err); })
.map(|_| ());
let receiver2 = sender.listen()
.map_ok(|res| {
CHECKSUM.fetch_add(res*2, Ordering::SeqCst);
})
.map_err(|err| { panic!("got error {}", err); })
.map(|_| ());
let rt = tokio::runtime::Runtime::new().unwrap();
rt.block_on(async move {
let r1 = tokio::spawn(receiver1);
let r2 = tokio::spawn(receiver2);
trigger.send(Ok(1)).unwrap();
let _ = r1.await;
let _ = r2.await;
});
let result = CHECKSUM.load(Ordering::SeqCst);
assert_eq!(result, 3);
// the result stays available until the BroadcastFuture is dropped
rt.block_on(sender.listen()
.map_ok(|res| {
CHECKSUM.fetch_add(res*4, Ordering::SeqCst);
})
.map_err(|err| { panic!("got error {}", err); })
.map(|_| ()));
let result = CHECKSUM.load(Ordering::SeqCst);
assert_eq!(result, 7);
}
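Usage note: a minimal sketch (not from the repository) of the lower-level BroadcastData type; once a result is stored via notify_listeners, every listener, whether registered before or after, resolves to a clone of it. The function name is hypothetical.
use pbs_tools::broadcast_future::BroadcastData;
fn broadcast_data_demo() {
    let mut data = BroadcastData::<u64>::new();
    data.notify_listeners(Ok(42)); // store the result and resolve already registered listeners
    // listeners registered afterwards immediately get a clone of the stored result
    assert_eq!(futures::executor::block_on(data.listen()).unwrap(), 42);
    assert_eq!(futures::executor::block_on(data.listen()).unwrap(), 42);
}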

View File

@ -0,0 +1,194 @@
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use anyhow::Error;
use bytes::Bytes;
use flate2::{Compress, Compression, FlushCompress};
use futures::ready;
use futures::stream::Stream;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use proxmox::io_format_err;
use proxmox_io::ByteBuffer;
const BUFFER_SIZE: usize = 8192;
pub enum Level {
Fastest,
Best,
Default,
Precise(u32),
}
#[derive(Eq, PartialEq)]
enum EncoderState {
Reading,
Writing,
Flushing,
Finished,
}
pub struct DeflateEncoder<T> {
inner: T,
compressor: Compress,
buffer: ByteBuffer,
input_buffer: Bytes,
state: EncoderState,
}
impl<T> DeflateEncoder<T> {
pub fn new(inner: T) -> Self {
Self::with_quality(inner, Level::Default)
}
pub fn with_quality(inner: T, level: Level) -> Self {
let level = match level {
Level::Fastest => Compression::fast(),
Level::Best => Compression::best(),
Level::Default => Compression::new(3),
Level::Precise(val) => Compression::new(val),
};
Self {
inner,
compressor: Compress::new(level, false),
buffer: ByteBuffer::with_capacity(BUFFER_SIZE),
input_buffer: Bytes::new(),
state: EncoderState::Reading,
}
}
pub fn total_in(&self) -> u64 {
self.compressor.total_in()
}
pub fn total_out(&self) -> u64 {
self.compressor.total_out()
}
pub fn into_inner(self) -> T {
self.inner
}
fn encode(
&mut self,
inbuf: &[u8],
flush: FlushCompress,
) -> Result<(usize, flate2::Status), io::Error> {
let old_in = self.compressor.total_in();
let old_out = self.compressor.total_out();
let res = self
.compressor
.compress(&inbuf[..], self.buffer.get_free_mut_slice(), flush)?;
let new_in = (self.compressor.total_in() - old_in) as usize;
let new_out = (self.compressor.total_out() - old_out) as usize;
self.buffer.add_size(new_out);
Ok((new_in, res))
}
}
impl DeflateEncoder<Vec<u8>> {
// assume small files
pub async fn compress_vec<R>(&mut self, reader: &mut R, size_hint: usize) -> Result<(), Error>
where
R: AsyncRead + Unpin,
{
let mut buffer = Vec::with_capacity(size_hint);
reader.read_to_end(&mut buffer).await?;
self.inner.reserve(size_hint); // should be enough since we expect smaller files
self.compressor.compress_vec(&buffer[..], &mut self.inner, FlushCompress::Finish)?;
Ok(())
}
}
impl<T: AsyncWrite + Unpin> DeflateEncoder<T> {
pub async fn compress<R>(&mut self, reader: &mut R) -> Result<(), Error>
where
R: AsyncRead + Unpin,
{
let mut buffer = ByteBuffer::with_capacity(BUFFER_SIZE);
let mut eof = false;
loop {
if !eof && !buffer.is_full() {
let read = buffer.read_from_async(reader).await?;
if read == 0 {
eof = true;
}
}
let (read, _res) = self.encode(&buffer[..], FlushCompress::None)?;
buffer.consume(read);
self.inner.write_all(&self.buffer[..]).await?;
self.buffer.clear();
if buffer.is_empty() && eof {
break;
}
}
loop {
let (_read, res) = self.encode(&[][..], FlushCompress::Finish)?;
self.inner.write_all(&self.buffer[..]).await?;
self.buffer.clear();
if res == flate2::Status::StreamEnd {
break;
}
}
Ok(())
}
}
impl<T, O> Stream for DeflateEncoder<T>
where
T: Stream<Item = Result<O, io::Error>> + Unpin,
O: Into<Bytes>
{
type Item = Result<Bytes, io::Error>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
loop {
match this.state {
EncoderState::Reading => {
if let Some(res) = ready!(Pin::new(&mut this.inner).poll_next(cx)) {
let buf = res?;
this.input_buffer = buf.into();
this.state = EncoderState::Writing;
} else {
this.state = EncoderState::Flushing;
}
}
EncoderState::Writing => {
if this.input_buffer.is_empty() {
return Poll::Ready(Some(Err(io_format_err!("empty input during write"))));
}
let mut buf = this.input_buffer.split_off(0);
let (read, res) = this.encode(&buf[..], FlushCompress::None)?;
this.input_buffer = buf.split_off(read);
if this.input_buffer.is_empty() {
this.state = EncoderState::Reading;
}
if this.buffer.is_full() || res == flate2::Status::BufError {
let bytes = this.buffer.remove_data(this.buffer.len()).to_vec();
return Poll::Ready(Some(Ok(bytes.into())));
}
}
EncoderState::Flushing => {
let (_read, res) = this.encode(&[][..], FlushCompress::Finish)?;
if !this.buffer.is_empty() {
let bytes = this.buffer.remove_data(this.buffer.len()).to_vec();
return Poll::Ready(Some(Ok(bytes.into())));
}
if res == flate2::Status::StreamEnd {
this.state = EncoderState::Finished;
}
}
EncoderState::Finished => return Poll::Ready(None),
}
}
}
}
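Usage note: a hedged sketch (not from the repository, helper name hypothetical) compressing an in-memory buffer into a raw deflate Vec<u8> via compress_vec(); &[u8] already implements tokio's AsyncRead, so it can serve as the reader.
use pbs_tools::compression::{DeflateEncoder, Level};
async fn deflate_bytes(input: &[u8]) -> Result<Vec<u8>, anyhow::Error> {
    let mut encoder = DeflateEncoder::with_quality(Vec::new(), Level::Default);
    let mut reader = input; // &[u8] implements tokio's AsyncRead
    encoder.compress_vec(&mut reader, input.len()).await?;
    Ok(encoder.into_inner())
}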

68
pbs-tools/src/crypt.rs Normal file
View File

@ -0,0 +1,68 @@
use std::ffi::CStr;
use anyhow::{bail, Error};
// from libcrypt1, 'lib/crypt.h.in'
const CRYPT_OUTPUT_SIZE: usize = 384;
const CRYPT_MAX_PASSPHRASE_SIZE: usize = 512;
const CRYPT_DATA_RESERVED_SIZE: usize = 767;
const CRYPT_DATA_INTERNAL_SIZE: usize = 30720;
#[repr(C)]
struct crypt_data {
output: [libc::c_char; CRYPT_OUTPUT_SIZE],
setting: [libc::c_char; CRYPT_OUTPUT_SIZE],
input: [libc::c_char; CRYPT_MAX_PASSPHRASE_SIZE],
reserved: [libc::c_char; CRYPT_DATA_RESERVED_SIZE],
initialized: libc::c_char,
internal: [libc::c_char; CRYPT_DATA_INTERNAL_SIZE],
}
pub fn crypt(password: &[u8], salt: &[u8]) -> Result<String, Error> {
#[link(name = "crypt")]
extern "C" {
#[link_name = "crypt_r"]
fn __crypt_r(
key: *const libc::c_char,
salt: *const libc::c_char,
data: *mut crypt_data,
) -> *mut libc::c_char;
}
let mut data: crypt_data = unsafe { std::mem::zeroed() };
for (i, c) in salt.iter().take(data.setting.len() - 1).enumerate() {
data.setting[i] = *c as libc::c_char;
}
for (i, c) in password.iter().take(data.input.len() - 1).enumerate() {
data.input[i] = *c as libc::c_char;
}
let res = unsafe {
let status = __crypt_r(
&data.input as *const _,
&data.setting as *const _,
&mut data as *mut _,
);
if status.is_null() {
bail!("internal error: crypt_r returned null pointer");
}
CStr::from_ptr(&data.output as *const _)
};
Ok(String::from(res.to_str()?))
}
pub fn encrypt_pw(password: &str) -> Result<String, Error> {
let salt = proxmox::sys::linux::random_data(8)?;
let salt = format!("$5${}$", base64::encode_config(&salt, base64::CRYPT));
crypt(password.as_bytes(), salt.as_bytes())
}
pub fn verify_crypt_pw(password: &str, enc_password: &str) -> Result<(), Error> {
let verify = crypt(password.as_bytes(), enc_password.as_bytes())?;
if verify != enc_password {
bail!("invalid credentials");
}
Ok(())
}
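Usage note: a minimal sketch (not from the repository, function name hypothetical); encrypt_pw produces a sha256-crypt ($5$) hash with a random salt, and verify_crypt_pw re-crypts the candidate using the stored hash as the setting string.
use pbs_tools::crypt::{encrypt_pw, verify_crypt_pw};
fn password_roundtrip() -> Result<(), anyhow::Error> {
    let hash = encrypt_pw("secret")?;
    verify_crypt_pw("secret", &hash)?; // Ok(()) when the password matches
    assert!(verify_crypt_pw("wrong", &hash).is_err());
    Ok(())
}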

14
pbs-tools/src/fd.rs Normal file
View File

@ -0,0 +1,14 @@
//! Raw file descriptor related utilities.
use std::os::unix::io::RawFd;
use anyhow::Error;
use nix::fcntl::{fcntl, FdFlag, F_GETFD, F_SETFD};
/// Change the `O_CLOEXEC` flag of an existing file descriptor.
pub fn fd_change_cloexec(fd: RawFd, on: bool) -> Result<(), Error> {
let mut flags = unsafe { FdFlag::from_bits_unchecked(fcntl(fd, F_GETFD)?) };
flags.set(FdFlag::FD_CLOEXEC, on);
fcntl(fd, F_SETFD(flags))?;
Ok(())
}
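Usage note: a hedged sketch (function name hypothetical) clearing FD_CLOEXEC on a descriptor that should survive an exec().
use std::os::unix::io::AsRawFd;
use pbs_tools::fd::fd_change_cloexec;
fn keep_open_across_exec(file: &std::fs::File) -> Result<(), anyhow::Error> {
    fd_change_cloexec(file.as_raw_fd(), false)?; // 'false' clears FD_CLOEXEC
    Ok(())
}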

View File

@ -3,8 +3,6 @@ use std::borrow::Borrow;
use anyhow::{Error};
use serde_json::Value;
use pbs_api_types::HumanByte;
pub fn strip_server_file_extension(name: &str) -> &str {
if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
&name[..name.len()-5]
@ -65,3 +63,95 @@ pub fn render_bytes_human_readable(value: &Value, _record: &Value) -> Result<Str
};
Ok(text)
}
pub struct HumanByte {
b: usize,
}
impl std::fmt::Display for HumanByte {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if self.b < 1024 {
return write!(f, "{} B", self.b);
}
let kb: f64 = self.b as f64 / 1024.0;
if kb < 1024.0 {
return write!(f, "{:.2} KiB", kb);
}
let mb: f64 = kb / 1024.0;
if mb < 1024.0 {
return write!(f, "{:.2} MiB", mb);
}
let gb: f64 = mb / 1024.0;
if gb < 1024.0 {
return write!(f, "{:.2} GiB", gb);
}
let tb: f64 = gb / 1024.0;
if tb < 1024.0 {
return write!(f, "{:.2} TiB", tb);
}
let pb: f64 = tb / 1024.0;
return write!(f, "{:.2} PiB", pb);
}
}
impl From<usize> for HumanByte {
fn from(v: usize) -> Self {
HumanByte { b: v }
}
}
impl From<u64> for HumanByte {
fn from(v: u64) -> Self {
HumanByte { b: v as usize }
}
}
pub fn as_fingerprint(bytes: &[u8]) -> String {
hex::encode(bytes)
.as_bytes()
.chunks(2)
.map(|v| unsafe { std::str::from_utf8_unchecked(v) }) // it's a hex string
.collect::<Vec<&str>>().join(":")
}
pub mod bytes_as_fingerprint {
use std::mem::MaybeUninit;
use serde::{Deserialize, Serializer, Deserializer};
pub fn serialize<S>(
bytes: &[u8; 32],
serializer: S,
) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let s = super::as_fingerprint(bytes);
serializer.serialize_str(&s)
}
pub fn deserialize<'de, D>(
deserializer: D,
) -> Result<[u8; 32], D::Error>
where
D: Deserializer<'de>,
{
// TODO: more efficiently implement with a Visitor implementing visit_str using split() and
// hex::decode by-byte
let mut s = String::deserialize(deserializer)?;
s.retain(|c| c != ':');
let mut out = MaybeUninit::<[u8; 32]>::uninit();
hex::decode_to_slice(s.as_bytes(), unsafe { &mut (*out.as_mut_ptr())[..] })
.map_err(serde::de::Error::custom)?;
Ok(unsafe { out.assume_init() })
}
}
#[test]
fn correct_byte_convert() {
fn convert(b: usize) -> String {
HumanByte::from(b).to_string()
}
assert_eq!(convert(1023), "1023 B");
assert_eq!(convert(1<<10), "1.00 KiB");
assert_eq!(convert(1<<20), "1.00 MiB");
assert_eq!(convert((1<<30) + 103 * (1<<20)), "1.10 GiB");
assert_eq!(convert((2<<50) + 500 * (1<<40)), "2.49 PiB");
}
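Usage note: a hedged sketch of the bytes_as_fingerprint serde helper used with derive; the struct and field names are hypothetical. Serialization emits colon-separated hex, and deserialization accepts the value with or without colons.
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize)]
struct KeyInfo {
    #[serde(with = "pbs_tools::format::bytes_as_fingerprint")]
    fingerprint: [u8; 32],
}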

View File

@ -1,7 +1,9 @@
//! File system helper utilities.
use std::borrow::{Borrow, BorrowMut};
use std::collections::HashMap;
use std::fs::File;
use std::hash::BuildHasher;
use std::io::{self, BufRead};
use std::ops::{Deref, DerefMut};
use std::os::unix::io::{AsRawFd, RawFd};
@ -10,7 +12,7 @@ use std::path::Path;
use anyhow::{bail, format_err, Error};
use nix::dir;
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::fcntl::{AtFlags, OFlag};
use nix::sys::stat::Mode;
use regex::Regex;
@ -347,6 +349,61 @@ fn do_lock_dir_noblock(
Ok(handle)
}
pub fn complete_file_name<S>(arg: &str, _param: &HashMap<String, String, S>) -> Vec<String>
where
S: BuildHasher,
{
let mut result = vec![];
let mut dirname = std::path::PathBuf::from(if arg.is_empty() { "./" } else { arg });
let is_dir = match nix::sys::stat::fstatat(libc::AT_FDCWD, &dirname, AtFlags::empty()) {
Ok(stat) => (stat.st_mode & libc::S_IFMT) == libc::S_IFDIR,
Err(_) => false,
};
if !is_dir {
if let Some(parent) = dirname.parent() {
dirname = parent.to_owned();
}
}
let mut dir =
match nix::dir::Dir::openat(libc::AT_FDCWD, &dirname, OFlag::O_DIRECTORY, Mode::empty()) {
Ok(d) => d,
Err(_) => return result,
};
for item in dir.iter() {
if let Ok(entry) = item {
if let Ok(name) = entry.file_name().to_str() {
if name == "." || name == ".." {
continue;
}
let mut newpath = dirname.clone();
newpath.push(name);
if let Ok(stat) =
nix::sys::stat::fstatat(libc::AT_FDCWD, &newpath, AtFlags::empty())
{
if (stat.st_mode & libc::S_IFMT) == libc::S_IFDIR {
newpath.push("");
if let Some(newpath) = newpath.to_str() {
result.push(newpath.to_owned());
}
continue;
}
}
if let Some(newpath) = newpath.to_str() {
result.push(newpath.to_owned());
}
}
}
}
result
}
/// Get an iterator over lines of a file, skipping empty lines and comments (lines starting with a
/// `#`).
pub fn file_get_non_comment_lines<P: AsRef<Path>>(

View File

@ -1,20 +1,31 @@
pub mod acl;
pub mod blocking;
pub mod broadcast_future;
pub mod cert;
pub mod cli;
pub mod compression;
pub mod crypt;
pub mod crypt_config;
pub mod format;
pub mod fd;
pub mod fs;
pub mod io;
pub mod json;
pub mod logrotate;
pub mod lru_cache;
pub mod nom;
pub mod percent_encoding;
pub mod process_locker;
pub mod sha;
pub mod str;
pub mod stream;
pub mod sync;
pub mod sys;
pub mod task;
pub mod ticket;
pub mod tokio;
pub mod xattr;
pub mod zip;
pub mod async_lru_cache;

239
pbs-tools/src/logrotate.rs Normal file
View File

@ -0,0 +1,239 @@
use std::path::{Path, PathBuf};
use std::fs::{File, rename};
use std::os::unix::io::{FromRawFd, IntoRawFd};
use std::io::Read;
use anyhow::{bail, format_err, Error};
use nix::unistd;
use proxmox::tools::fs::{CreateOptions, make_tmp_file};
/// Used for rotating log files and iterating over them
pub struct LogRotate {
base_path: PathBuf,
compress: bool,
/// User logs should be reowned to.
owner: Option<String>,
}
impl LogRotate {
/// Creates a new instance if the given path is a valid file name (i.e. it does not end with '..').
/// 'compress' decides whether compressed files will be created on rotation, and whether
/// '.zst' files are searched when iterating
///
/// By default, newly created files will be owned by the backup user. See [`new_with_user`] for
/// a way to opt out of this behavior.
pub fn new<P: AsRef<Path>>(
path: P,
compress: bool,
) -> Option<Self> {
Self::new_with_user(path, compress, Some(pbs_buildcfg::BACKUP_USER_NAME.to_owned()))
}
/// See [`new`]. Additionally, this takes a user to which newly created files should be reowned
/// by default.
pub fn new_with_user<P: AsRef<Path>>(
path: P,
compress: bool,
owner: Option<String>,
) -> Option<Self> {
if path.as_ref().file_name().is_some() {
Some(Self {
base_path: path.as_ref().to_path_buf(),
compress,
owner,
})
} else {
None
}
}
/// Returns an iterator over the logrotated file names that exist
pub fn file_names(&self) -> LogRotateFileNames {
LogRotateFileNames {
base_path: self.base_path.clone(),
count: 0,
compress: self.compress
}
}
/// Returns an iterator over the logrotated file handles
pub fn files(&self) -> LogRotateFiles {
LogRotateFiles {
file_names: self.file_names(),
}
}
fn compress(source_path: &PathBuf, target_path: &PathBuf, options: &CreateOptions) -> Result<(), Error> {
let mut source = File::open(source_path)?;
let (fd, tmp_path) = make_tmp_file(target_path, options.clone())?;
let target = unsafe { File::from_raw_fd(fd.into_raw_fd()) };
let mut encoder = match zstd::stream::write::Encoder::new(target, 0) {
Ok(encoder) => encoder,
Err(err) => {
let _ = unistd::unlink(&tmp_path);
bail!("creating zstd encoder failed - {}", err);
}
};
if let Err(err) = std::io::copy(&mut source, &mut encoder) {
let _ = unistd::unlink(&tmp_path);
bail!("zstd encoding failed for file {:?} - {}", target_path, err);
}
if let Err(err) = encoder.finish() {
let _ = unistd::unlink(&tmp_path);
bail!("zstd finish failed for file {:?} - {}", target_path, err);
}
if let Err(err) = rename(&tmp_path, target_path) {
let _ = unistd::unlink(&tmp_path);
bail!("rename failed for file {:?} - {}", target_path, err);
}
if let Err(err) = unistd::unlink(source_path) {
bail!("unlink failed for file {:?} - {}", source_path, err);
}
Ok(())
}
/// Rotates the files up to 'max_files'.
/// If the 'compress' option was given, the newest rotated file will be compressed.
///
/// e.g. rotates
/// foo.2.zst => foo.3.zst
/// foo.1 => foo.2.zst
/// foo => foo.1
pub fn do_rotate(&mut self, options: CreateOptions, max_files: Option<usize>) -> Result<(), Error> {
let mut filenames: Vec<PathBuf> = self.file_names().collect();
if filenames.is_empty() {
return Ok(()); // no file means nothing to rotate
}
let count = filenames.len() + 1;
let mut next_filename = self.base_path.clone().canonicalize()?.into_os_string();
next_filename.push(format!(".{}", filenames.len()));
if self.compress && count > 2 {
next_filename.push(".zst");
}
filenames.push(PathBuf::from(next_filename));
for i in (0..count-1).rev() {
if self.compress
&& filenames[i].extension() != Some(std::ffi::OsStr::new("zst"))
&& filenames[i+1].extension() == Some(std::ffi::OsStr::new("zst"))
{
Self::compress(&filenames[i], &filenames[i+1], &options)?;
} else {
rename(&filenames[i], &filenames[i+1])?;
}
}
if let Some(max_files) = max_files {
for file in filenames.iter().skip(max_files) {
if let Err(err) = unistd::unlink(file) {
eprintln!("could not remove {:?}: {}", &file, err);
}
}
}
Ok(())
}
pub fn rotate(
&mut self,
max_size: u64,
options: Option<CreateOptions>,
max_files: Option<usize>
) -> Result<bool, Error> {
let options = match options {
Some(options) => options,
None => match self.owner.as_deref() {
Some(owner) => {
let user = crate::sys::query_user(owner)?
.ok_or_else(|| {
format_err!("failed to lookup owning user '{}' for logs", owner)
})?;
CreateOptions::new().owner(user.uid).group(user.gid)
}
None => CreateOptions::new(),
}
};
let metadata = match self.base_path.metadata() {
Ok(metadata) => metadata,
Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(false),
Err(err) => bail!("unable to open task archive - {}", err),
};
if metadata.len() > max_size {
self.do_rotate(options, max_files)?;
Ok(true)
} else {
Ok(false)
}
}
}
/// Iterator over logrotated file names
pub struct LogRotateFileNames {
base_path: PathBuf,
count: usize,
compress: bool,
}
impl Iterator for LogRotateFileNames {
type Item = PathBuf;
fn next(&mut self) -> Option<Self::Item> {
if self.count > 0 {
let mut path: std::ffi::OsString = self.base_path.clone().into();
path.push(format!(".{}", self.count));
self.count += 1;
if Path::new(&path).is_file() {
Some(path.into())
} else if self.compress {
path.push(".zst");
if Path::new(&path).is_file() {
Some(path.into())
} else {
None
}
} else {
None
}
} else if self.base_path.is_file() {
self.count += 1;
Some(self.base_path.to_path_buf())
} else {
None
}
}
}
/// Iterator over logrotated files by returning a boxed reader
pub struct LogRotateFiles {
file_names: LogRotateFileNames,
}
impl Iterator for LogRotateFiles {
type Item = Box<dyn Read + Send>;
fn next(&mut self) -> Option<Self::Item> {
let filename = self.file_names.next()?;
let file = File::open(&filename).ok()?;
if filename.extension() == Some(std::ffi::OsStr::new("zst")) {
let encoder = zstd::stream::read::Decoder::new(file).ok()?;
return Some(Box::new(encoder));
}
Some(Box::new(file))
}
}
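Usage note: a hedged sketch (path, size limit, and file count hypothetical) that rotates once the file exceeds 1 MiB, compresses rotated files, and keeps at most 5 of them.
use proxmox::tools::fs::CreateOptions;
use pbs_tools::logrotate::LogRotate;
fn rotate_example() -> Result<(), anyhow::Error> {
    let mut logrotate = LogRotate::new_with_user("/var/log/example/task.log", true, None)
        .ok_or_else(|| anyhow::format_err!("invalid log file path"))?;
    // returns Ok(true) if a rotation actually happened
    let rotated = logrotate.rotate(1024 * 1024, Some(CreateOptions::new()), Some(5))?;
    println!("rotated: {}", rotated);
    Ok(())
}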

View File

@ -0,0 +1,211 @@
//! Inter-process reader-writer lock builder.
//!
//! This implementation uses fcntl record locks with non-blocking
//! F_SETLK command (never blocks).
//!
//! We maintain a map of shared locks with time stamps, so you can get
//! the timestamp for the oldest open lock with
//! `oldest_shared_lock()`.
use std::collections::HashMap;
use std::os::unix::io::AsRawFd;
use std::sync::{Arc, Mutex};
use anyhow::{bail, Error};
// fixme: use F_OFD_ locks when implemented with nix::fcntl
// Note: flock lock conversion is not atomic, so we need to use fcntl
/// Inter-process reader-writer lock
pub struct ProcessLocker {
file: std::fs::File,
exclusive: bool,
writers: usize,
next_guard_id: u64,
shared_guard_list: HashMap<u64, i64>, // guard_id => timestamp
}
/// Lock guard for shared locks
///
/// Release the lock when it goes out of scope.
pub struct ProcessLockSharedGuard {
guard_id: u64,
locker: Arc<Mutex<ProcessLocker>>,
}
impl Drop for ProcessLockSharedGuard {
fn drop(&mut self) {
let mut data = self.locker.lock().unwrap();
if data.writers == 0 {
panic!("unexpected ProcessLocker state");
}
data.shared_guard_list.remove(&self.guard_id);
if data.writers == 1 && !data.exclusive {
let op = libc::flock {
l_type: libc::F_UNLCK as i16,
l_whence: libc::SEEK_SET as i16,
l_start: 0,
l_len: 0,
l_pid: 0,
};
if let Err(err) =
nix::fcntl::fcntl(data.file.as_raw_fd(), nix::fcntl::FcntlArg::F_SETLKW(&op))
{
panic!("unable to drop writer lock - {}", err);
}
}
if data.writers > 0 {
data.writers -= 1;
}
}
}
/// Lock guard for exclusive locks
///
/// Release the lock when it goes out of scope.
pub struct ProcessLockExclusiveGuard {
locker: Arc<Mutex<ProcessLocker>>,
}
impl Drop for ProcessLockExclusiveGuard {
fn drop(&mut self) {
let mut data = self.locker.lock().unwrap();
if !data.exclusive {
panic!("unexpected ProcessLocker state");
}
let ltype = if data.writers != 0 {
libc::F_RDLCK
} else {
libc::F_UNLCK
};
let op = libc::flock {
l_type: ltype as i16,
l_whence: libc::SEEK_SET as i16,
l_start: 0,
l_len: 0,
l_pid: 0,
};
if let Err(err) =
nix::fcntl::fcntl(data.file.as_raw_fd(), nix::fcntl::FcntlArg::F_SETLKW(&op))
{
panic!("unable to drop exclusive lock - {}", err);
}
data.exclusive = false;
}
}
impl ProcessLocker {
/// Create a new instance for the specified file.
///
/// This simply creates the file if it does not exist.
pub fn new<P: AsRef<std::path::Path>>(lockfile: P) -> Result<Arc<Mutex<Self>>, Error> {
let file = std::fs::OpenOptions::new()
.create(true)
.read(true)
.write(true)
.open(lockfile)?;
Ok(Arc::new(Mutex::new(Self {
file,
exclusive: false,
writers: 0,
next_guard_id: 0,
shared_guard_list: HashMap::new(),
})))
}
fn try_lock(file: &std::fs::File, ltype: i32) -> Result<(), Error> {
let op = libc::flock {
l_type: ltype as i16,
l_whence: libc::SEEK_SET as i16,
l_start: 0,
l_len: 0,
l_pid: 0,
};
nix::fcntl::fcntl(file.as_raw_fd(), nix::fcntl::FcntlArg::F_SETLK(&op))?;
Ok(())
}
/// Try to acquire a shared lock
///
/// On success, this makes sure that no other process can get an exclusive lock for the file.
pub fn try_shared_lock(locker: Arc<Mutex<Self>>) -> Result<ProcessLockSharedGuard, Error> {
let mut data = locker.lock().unwrap();
if data.writers == 0 && !data.exclusive {
if let Err(err) = Self::try_lock(&data.file, libc::F_RDLCK) {
bail!("unable to get shared lock - {}", err);
}
}
data.writers += 1;
let guard = ProcessLockSharedGuard {
locker: locker.clone(),
guard_id: data.next_guard_id,
};
data.next_guard_id += 1;
let now = unsafe { libc::time(std::ptr::null_mut()) };
data.shared_guard_list.insert(guard.guard_id, now);
Ok(guard)
}
/// Get oldest shared lock timestamp
pub fn oldest_shared_lock(locker: Arc<Mutex<Self>>) -> Option<i64> {
let mut result = None;
let data = locker.lock().unwrap();
for v in data.shared_guard_list.values() {
result = match result {
None => Some(*v),
Some(x) => {
if x < *v {
Some(x)
} else {
Some(*v)
}
}
};
}
result
}
/// Try to acquire an exclusive lock
///
/// Makes sure that we are the only process holding locks (shared or exclusive) for this file.
pub fn try_exclusive_lock(
locker: Arc<Mutex<Self>>,
) -> Result<ProcessLockExclusiveGuard, Error> {
let mut data = locker.lock().unwrap();
if data.exclusive {
bail!("already locked exclusively");
}
if let Err(err) = Self::try_lock(&data.file, libc::F_WRLCK) {
bail!("unable to get exclusive lock - {}", err);
}
data.exclusive = true;
Ok(ProcessLockExclusiveGuard {
locker: locker.clone(),
})
}
}
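Usage note: a minimal sketch (lock file path and function name hypothetical); all associated functions take the Arc<Mutex<ProcessLocker>> handle returned by new().
use pbs_tools::process_locker::ProcessLocker;
fn lock_example() -> Result<(), anyhow::Error> {
    let locker = ProcessLocker::new("/run/example/.lock")?;
    {
        let _shared = ProcessLocker::try_shared_lock(locker.clone())?;
        // while any shared guard exists, no process can take the exclusive lock
        println!("oldest shared lock: {:?}", ProcessLocker::oldest_shared_lock(locker.clone()));
    } // shared guard dropped here, lock released
    let _exclusive = ProcessLocker::try_exclusive_lock(locker)?;
    Ok(())
}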

229
pbs-tools/src/stream.rs Normal file
View File

@ -0,0 +1,229 @@
//! Wrappers between async readers and streams.
use std::io::{self, Read};
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use anyhow::{Error, Result};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::sync::mpsc::Sender;
use futures::ready;
use futures::future::FutureExt;
use futures::stream::Stream;
use proxmox::io_format_err;
use proxmox::sys::error::io_err_other;
use proxmox_io::ByteBuffer;
use pbs_runtime::block_in_place;
/// Wrapper struct to convert a Reader into a Stream
pub struct WrappedReaderStream<R: Read + Unpin> {
reader: R,
buffer: Vec<u8>,
}
impl <R: Read + Unpin> WrappedReaderStream<R> {
pub fn new(reader: R) -> Self {
let mut buffer = Vec::with_capacity(64*1024);
unsafe { buffer.set_len(buffer.capacity()); }
Self { reader, buffer }
}
}
impl<R: Read + Unpin> Stream for WrappedReaderStream<R> {
type Item = Result<Vec<u8>, io::Error>;
fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
match block_in_place(|| this.reader.read(&mut this.buffer)) {
Ok(n) => {
if n == 0 {
// EOF
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(this.buffer[..n].to_vec())))
}
}
Err(err) => Poll::Ready(Some(Err(err))),
}
}
}
/// Wrapper struct to convert an AsyncReader into a Stream
pub struct AsyncReaderStream<R: AsyncRead + Unpin> {
reader: R,
buffer: Vec<u8>,
}
impl <R: AsyncRead + Unpin> AsyncReaderStream<R> {
pub fn new(reader: R) -> Self {
let mut buffer = Vec::with_capacity(64*1024);
unsafe { buffer.set_len(buffer.capacity()); }
Self { reader, buffer }
}
pub fn with_buffer_size(reader: R, buffer_size: usize) -> Self {
let mut buffer = Vec::with_capacity(buffer_size);
unsafe { buffer.set_len(buffer.capacity()); }
Self { reader, buffer }
}
}
impl<R: AsyncRead + Unpin> Stream for AsyncReaderStream<R> {
type Item = Result<Vec<u8>, io::Error>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
let mut read_buf = ReadBuf::new(&mut this.buffer);
match ready!(Pin::new(&mut this.reader).poll_read(cx, &mut read_buf)) {
Ok(()) => {
let n = read_buf.filled().len();
if n == 0 {
// EOF
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(this.buffer[..n].to_vec())))
}
}
Err(err) => Poll::Ready(Some(Err(err))),
}
}
}
#[cfg(test)]
mod test {
use std::io;
use anyhow::Error;
use futures::stream::TryStreamExt;
#[test]
fn test_wrapped_stream_reader() -> Result<(), Error> {
pbs_runtime::main(async {
run_wrapped_stream_reader_test().await
})
}
struct DummyReader(usize);
impl io::Read for DummyReader {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.0 += 1;
if self.0 >= 10 {
return Ok(0);
}
unsafe {
std::ptr::write_bytes(buf.as_mut_ptr(), 0, buf.len());
}
Ok(buf.len())
}
}
async fn run_wrapped_stream_reader_test() -> Result<(), Error> {
let mut reader = super::WrappedReaderStream::new(DummyReader(0));
while let Some(_data) = reader.try_next().await? {
// just waiting
}
Ok(())
}
}
/// Wrapper around tokio::sync::mpsc::Sender which implements AsyncWrite
pub struct AsyncChannelWriter {
sender: Option<Sender<Result<Vec<u8>, Error>>>,
buf: ByteBuffer,
state: WriterState,
}
type SendResult = io::Result<Sender<Result<Vec<u8>>>>;
enum WriterState {
Ready,
Sending(Pin<Box<dyn Future<Output = SendResult> + Send + 'static>>),
}
impl AsyncChannelWriter {
pub fn new(sender: Sender<Result<Vec<u8>, Error>>, buf_size: usize) -> Self {
Self {
sender: Some(sender),
buf: ByteBuffer::with_capacity(buf_size),
state: WriterState::Ready,
}
}
fn poll_write_impl(
&mut self,
cx: &mut Context,
buf: &[u8],
flush: bool,
) -> Poll<io::Result<usize>> {
loop {
match &mut self.state {
WriterState::Ready => {
if flush {
if self.buf.is_empty() {
return Poll::Ready(Ok(0));
}
} else {
let free_size = self.buf.free_size();
if free_size > buf.len() || self.buf.is_empty() {
let count = free_size.min(buf.len());
self.buf.get_free_mut_slice()[..count].copy_from_slice(&buf[..count]);
self.buf.add_size(count);
return Poll::Ready(Ok(count));
}
}
let sender = match self.sender.take() {
Some(sender) => sender,
None => return Poll::Ready(Err(io_err_other("no sender"))),
};
let data = self.buf.remove_data(self.buf.len()).to_vec();
let future = async move {
sender
.send(Ok(data))
.await
.map(move |_| sender)
.map_err(|err| io_format_err!("could not send: {}", err))
};
self.state = WriterState::Sending(future.boxed());
}
WriterState::Sending(ref mut future) => match ready!(future.as_mut().poll(cx)) {
Ok(sender) => {
self.sender = Some(sender);
self.state = WriterState::Ready;
}
Err(err) => return Poll::Ready(Err(err)),
},
}
}
}
}
impl AsyncWrite for AsyncChannelWriter {
fn poll_write(self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll<io::Result<usize>> {
let this = self.get_mut();
this.poll_write_impl(cx, buf, false)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
let this = self.get_mut();
match ready!(this.poll_write_impl(cx, &[], true)) {
Ok(_) => Poll::Ready(Ok(())),
Err(err) => Poll::Ready(Err(err)),
}
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
self.poll_flush(cx)
}
}
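Usage note: a hedged sketch (buffer size and payload hypothetical) writing through an AsyncChannelWriter and receiving the buffered chunks on the tokio mpsc receiver; data is only sent when the internal buffer fills up or on flush().
use anyhow::Error;
use tokio::io::AsyncWriteExt;
use pbs_tools::stream::AsyncChannelWriter;
async fn channel_writer_demo() -> Result<(), Error> {
    let (tx, mut rx) = tokio::sync::mpsc::channel::<Result<Vec<u8>, Error>>(16);
    let producer = tokio::spawn(async move {
        let mut writer = AsyncChannelWriter::new(tx, 64 * 1024);
        writer.write_all(b"hello world").await?;
        writer.flush().await?; // forces sending the buffered bytes over the channel
        Ok::<_, std::io::Error>(())
    });
    while let Some(chunk) = rx.recv().await {
        println!("received {} bytes", chunk?.len());
    }
    producer.await??;
    Ok(())
}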

92
pbs-tools/src/task.rs Normal file
View File

@ -0,0 +1,92 @@
use anyhow::{bail, Error};
/// Worker task abstraction
///
/// A worker task is a long-running task which usually logs its output into a separate file.
pub trait WorkerTaskContext: Send + Sync {
/// Test if there was a request to abort the task.
fn abort_requested(&self) -> bool;
/// If the task should be aborted, this should fail with a reasonable error message.
fn check_abort(&self) -> Result<(), Error> {
if self.abort_requested() {
bail!("abort requested - aborting task");
}
Ok(())
}
/// Test if there was a request to shutdown the server.
fn shutdown_requested(&self) -> bool;
/// This should fail with a reasonable error message if there was
/// a request to shutdown the server.
fn fail_on_shutdown(&self) -> Result<(), Error> {
if self.shutdown_requested() {
bail!("Server shutdown requested - aborting task");
}
Ok(())
}
/// Create a log message for this task.
fn log(&self, level: log::Level, message: &std::fmt::Arguments);
}
/// Convenience implementation:
impl<T: WorkerTaskContext + ?Sized> WorkerTaskContext for std::sync::Arc<T> {
fn abort_requested(&self) -> bool {
<T as WorkerTaskContext>::abort_requested(&*self)
}
fn check_abort(&self) -> Result<(), Error> {
<T as WorkerTaskContext>::check_abort(&*self)
}
fn shutdown_requested(&self) -> bool {
<T as WorkerTaskContext>::shutdown_requested(&*self)
}
fn fail_on_shutdown(&self) -> Result<(), Error> {
<T as WorkerTaskContext>::fail_on_shutdown(&*self)
}
fn log(&self, level: log::Level, message: &std::fmt::Arguments) {
<T as WorkerTaskContext>::log(&*self, level, message)
}
}
#[macro_export]
macro_rules! task_error {
($task:expr, $($fmt:tt)+) => {{
$crate::task::WorkerTaskContext::log(&*$task, log::Level::Error, &format_args!($($fmt)+))
}};
}
#[macro_export]
macro_rules! task_warn {
($task:expr, $($fmt:tt)+) => {{
$crate::task::WorkerTaskContext::log(&*$task, log::Level::Warn, &format_args!($($fmt)+))
}};
}
#[macro_export]
macro_rules! task_log {
($task:expr, $($fmt:tt)+) => {{
$crate::task::WorkerTaskContext::log(&*$task, log::Level::Info, &format_args!($($fmt)+))
}};
}
#[macro_export]
macro_rules! task_debug {
($task:expr, $($fmt:tt)+) => {{
$crate::task::WorkerTaskContext::log(&*$task, log::Level::Debug, &format_args!($($fmt)+))
}};
}
#[macro_export]
macro_rules! task_trace {
($task:expr, $($fmt:tt)+) => {{
$crate::task::WorkerTaskContext::log(&*$task, log::Level::Trace, &format_args!($($fmt)+))
}};
}
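Usage note: a hedged sketch of a trivial WorkerTaskContext implementation logging to stdout (type and function names hypothetical), driven through the exported task_log! macro; the macro assumes the log crate is available in the calling crate.
struct StdoutTask;
impl pbs_tools::task::WorkerTaskContext for StdoutTask {
    fn abort_requested(&self) -> bool { false }
    fn shutdown_requested(&self) -> bool { false }
    fn log(&self, level: log::Level, message: &std::fmt::Arguments) {
        println!("[{}] {}", level, message);
    }
}
fn run_step(task: &dyn pbs_tools::task::WorkerTaskContext) -> Result<(), anyhow::Error> {
    task.check_abort()?; // fails if an abort was requested
    task.fail_on_shutdown()?; // fails if a server shutdown was requested
    pbs_tools::task_log!(task, "processed {} chunks", 42);
    Ok(())
}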

View File

@ -0,0 +1,2 @@
pub mod tokio_writer_adapter;
pub use tokio_writer_adapter::TokioWriterAdapter;

View File

@ -0,0 +1,26 @@
use std::io::Write;
use tokio::task::block_in_place;
/// Wrapper around a writer which implements Write
///
/// Wraps each write with a 'block_in_place' so that any (blocking)
/// writer can be safely used in an async context inside a tokio runtime
pub struct TokioWriterAdapter<W: Write>(W);
impl<W: Write> TokioWriterAdapter<W> {
pub fn new(writer: W) -> Self {
Self(writer)
}
}
impl<W: Write> Write for TokioWriterAdapter<W> {
fn write(&mut self, buf: &[u8]) -> Result<usize, std::io::Error> {
block_in_place(|| self.0.write(buf))
}
fn flush(&mut self) -> Result<(), std::io::Error> {
block_in_place(|| self.0.flush())
}
}
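Usage note: a hedged sketch (file path hypothetical) wrapping a blocking std::fs::File so it can be written from async code; block_in_place assumes the multi-threaded tokio runtime.
use std::io::Write;
use pbs_tools::tokio::TokioWriterAdapter;
async fn write_report(data: &[u8]) -> Result<(), std::io::Error> {
    let file = std::fs::File::create("/tmp/report.txt")?;
    let mut writer = TokioWriterAdapter::new(file);
    writer.write_all(data)?; // each call is wrapped in block_in_place
    writer.flush()
}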

672
pbs-tools/src/zip.rs Normal file
View File

@ -0,0 +1,672 @@
//! ZIP Helper
//!
//! Provides an interface to create a ZIP file from ZipEntries.
//! For a more detailed description of the ZIP format, see:
//! https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT
use std::convert::TryInto;
use std::ffi::OsString;
use std::io;
use std::mem::size_of;
use std::os::unix::ffi::OsStrExt;
use std::path::{Component, Path, PathBuf};
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::SystemTime;
use anyhow::{format_err, Error, Result};
use endian_trait::Endian;
use futures::ready;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf};
use crc32fast::Hasher;
use proxmox_time::gmtime;
use crate::compression::{DeflateEncoder, Level};
const LOCAL_FH_SIG: u32 = 0x04034B50;
const LOCAL_FF_SIG: u32 = 0x08074B50;
const CENTRAL_DIRECTORY_FH_SIG: u32 = 0x02014B50;
const END_OF_CENTRAL_DIR: u32 = 0x06054B50;
const VERSION_NEEDED: u16 = 0x002d;
const VERSION_MADE_BY: u16 = 0x032d;
const ZIP64_EOCD_RECORD: u32 = 0x06064B50;
const ZIP64_EOCD_LOCATOR: u32 = 0x07064B50;
// bits for date:
// 0-4: day of the month (1-31)
// 5-8: month (1 = jan, etc.)
// 9-15: year offset from 1980
//
// bits for time:
// 0-4: second / 2
// 5-10: minute (0-59)
// 11-15: hour (0-23)
//
// see https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-filetimetodosdatetime
fn epoch_to_dos(epoch: i64) -> (u16, u16) {
let gmtime = match gmtime(epoch) {
Ok(gmtime) => gmtime,
Err(_) => return (0, 0),
};
let seconds = (gmtime.tm_sec / 2) & 0b11111;
let minutes = gmtime.tm_min & 0b111111;
let hours = gmtime.tm_hour & 0b11111;
let time: u16 = ((hours << 11) | (minutes << 5) | (seconds)) as u16;
let date: u16 = if gmtime.tm_year > (2108 - 1900) || gmtime.tm_year < (1980 - 1900) {
0
} else {
let day = gmtime.tm_mday & 0b11111;
let month = (gmtime.tm_mon + 1) & 0b1111;
let year = (gmtime.tm_year + 1900 - 1980) & 0b1111111;
((year << 9) | (month << 5) | (day)) as u16
};
(date, time)
}
#[derive(Endian)]
#[repr(C, packed)]
struct Zip64Field {
field_type: u16,
field_size: u16,
uncompressed_size: u64,
compressed_size: u64,
}
#[derive(Endian)]
#[repr(C, packed)]
struct Zip64FieldWithOffset {
field_type: u16,
field_size: u16,
uncompressed_size: u64,
compressed_size: u64,
offset: u64,
start_disk: u32,
}
#[derive(Endian)]
#[repr(C, packed)]
struct LocalFileHeader {
signature: u32,
version_needed: u16,
flags: u16,
compression: u16,
time: u16,
date: u16,
crc32: u32,
compressed_size: u32,
uncompressed_size: u32,
filename_len: u16,
extra_field_len: u16,
}
#[derive(Endian)]
#[repr(C, packed)]
struct LocalFileFooter {
signature: u32,
crc32: u32,
compressed_size: u64,
uncompressed_size: u64,
}
#[derive(Endian)]
#[repr(C, packed)]
struct CentralDirectoryFileHeader {
signature: u32,
version_made_by: u16,
version_needed: u16,
flags: u16,
compression: u16,
time: u16,
date: u16,
crc32: u32,
compressed_size: u32,
uncompressed_size: u32,
filename_len: u16,
extra_field_len: u16,
comment_len: u16,
start_disk: u16,
internal_flags: u16,
external_flags: u32,
offset: u32,
}
#[derive(Endian)]
#[repr(C, packed)]
struct EndOfCentralDir {
signature: u32,
disk_number: u16,
start_disk: u16,
disk_record_count: u16,
total_record_count: u16,
directory_size: u32,
directory_offset: u32,
comment_len: u16,
}
#[derive(Endian)]
#[repr(C, packed)]
struct Zip64EOCDRecord {
signature: u32,
field_size: u64,
version_made_by: u16,
version_needed: u16,
disk_number: u32,
disk_number_central_dir: u32,
disk_record_count: u64,
total_record_count: u64,
directory_size: u64,
directory_offset: u64,
}
#[derive(Endian)]
#[repr(C, packed)]
struct Zip64EOCDLocator {
signature: u32,
disk_number: u32,
offset: u64,
disk_count: u32,
}
async fn write_struct<E, T>(output: &mut T, data: E) -> io::Result<()>
where
T: AsyncWrite + ?Sized + Unpin,
E: Endian,
{
let data = data.to_le();
let data = unsafe {
std::slice::from_raw_parts(
&data as *const E as *const u8,
core::mem::size_of_val(&data),
)
};
output.write_all(data).await
}
/// Represents an Entry in a ZIP File
///
/// used to add to a ZipEncoder
pub struct ZipEntry {
filename: OsString,
mtime: i64,
mode: u16,
crc32: u32,
uncompressed_size: u64,
compressed_size: u64,
offset: u64,
is_file: bool,
}
impl ZipEntry {
/// Creates a new ZipEntry
///
/// if is_file is false the path will contain a trailing separator,
/// so that the zip file understands that it is a directory
pub fn new<P: AsRef<Path>>(path: P, mtime: i64, mode: u16, is_file: bool) -> Self {
let mut relpath = PathBuf::new();
for comp in path.as_ref().components() {
if let Component::Normal(_) = comp {
relpath.push(comp);
}
}
if !is_file {
relpath.push(""); // adds trailing slash
}
Self {
filename: relpath.into(),
crc32: 0,
mtime,
mode,
uncompressed_size: 0,
compressed_size: 0,
offset: 0,
is_file,
}
}
async fn write_local_header<W>(&self, mut buf: &mut W) -> io::Result<usize>
where
W: AsyncWrite + Unpin + ?Sized,
{
let filename = self.filename.as_bytes();
let filename_len = filename.len();
let header_size = size_of::<LocalFileHeader>();
let zip_field_size = size_of::<Zip64Field>();
let size: usize = header_size + filename_len + zip_field_size;
let (date, time) = epoch_to_dos(self.mtime);
write_struct(
&mut buf,
LocalFileHeader {
signature: LOCAL_FH_SIG,
version_needed: 0x2d,
flags: 1 << 3,
compression: 0x8,
time,
date,
crc32: 0,
compressed_size: 0xFFFFFFFF,
uncompressed_size: 0xFFFFFFFF,
filename_len: filename_len as u16,
extra_field_len: zip_field_size as u16,
},
)
.await?;
buf.write_all(filename).await?;
write_struct(
&mut buf,
Zip64Field {
field_type: 0x0001,
field_size: 2 * 8,
uncompressed_size: 0,
compressed_size: 0,
},
)
.await?;
Ok(size)
}
async fn write_data_descriptor<W: AsyncWrite + Unpin + ?Sized>(
&self,
mut buf: &mut W,
) -> io::Result<usize> {
let size = size_of::<LocalFileFooter>();
write_struct(
&mut buf,
LocalFileFooter {
signature: LOCAL_FF_SIG,
crc32: self.crc32,
compressed_size: self.compressed_size,
uncompressed_size: self.uncompressed_size,
},
)
.await?;
Ok(size)
}
async fn write_central_directory_header<W: AsyncWrite + Unpin + ?Sized>(
&self,
mut buf: &mut W,
) -> io::Result<usize> {
let filename = self.filename.as_bytes();
let filename_len = filename.len();
let header_size = size_of::<CentralDirectoryFileHeader>();
let zip_field_size = size_of::<Zip64FieldWithOffset>();
let mut size: usize = header_size + filename_len;
let (date, time) = epoch_to_dos(self.mtime);
let (compressed_size, uncompressed_size, offset, need_zip64) = if self.compressed_size
>= (u32::MAX as u64)
|| self.uncompressed_size >= (u32::MAX as u64)
|| self.offset >= (u32::MAX as u64)
{
size += zip_field_size;
(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, true)
} else {
(
self.compressed_size as u32,
self.uncompressed_size as u32,
self.offset as u32,
false,
)
};
write_struct(
&mut buf,
CentralDirectoryFileHeader {
signature: CENTRAL_DIRECTORY_FH_SIG,
version_made_by: VERSION_MADE_BY,
version_needed: VERSION_NEEDED,
flags: 1 << 3,
compression: 0x8,
time,
date,
crc32: self.crc32,
compressed_size,
uncompressed_size,
filename_len: filename_len as u16,
extra_field_len: if need_zip64 { zip_field_size as u16 } else { 0 },
comment_len: 0,
start_disk: 0,
internal_flags: 0,
external_flags: (self.mode as u32) << 16 | (!self.is_file as u32) << 4,
offset,
},
)
.await?;
buf.write_all(filename).await?;
if need_zip64 {
write_struct(
&mut buf,
Zip64FieldWithOffset {
field_type: 1,
field_size: 3 * 8 + 4,
uncompressed_size: self.uncompressed_size,
compressed_size: self.compressed_size,
offset: self.offset,
start_disk: 0,
},
)
.await?;
}
Ok(size)
}
}
// Wraps an AsyncRead and calculates the CRC32 hash of all data read through it
struct HashWrapper<R> {
inner: R,
hasher: Hasher,
}
impl<R> HashWrapper<R> {
fn new(inner: R) -> Self {
Self {
inner,
hasher: Hasher::new(),
}
}
// consumes self and returns the hash and the reader
fn finish(self) -> (u32, R) {
let crc32 = self.hasher.finalize();
(crc32, self.inner)
}
}
impl<R> AsyncRead for HashWrapper<R>
where
R: AsyncRead + Unpin,
{
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<Result<(), io::Error>> {
let this = self.get_mut();
let old_len = buf.filled().len();
ready!(Pin::new(&mut this.inner).poll_read(cx, buf))?;
let new_len = buf.filled().len();
if new_len > old_len {
this.hasher.update(&buf.filled()[old_len..new_len]);
}
Poll::Ready(Ok(()))
}
}
/// Wraps a writer that implements AsyncWrite for creating a ZIP archive
///
/// This will create a ZIP archive on the fly with files added with
/// 'add_entry'. To finish the archive, call 'finish'.
/// Example:
/// ```no_run
/// use anyhow::{Error, Result};
/// use tokio::fs::File;
///
/// use pbs_tools::zip::{ZipEncoder, ZipEntry};
///
/// #[tokio::main]
/// async fn main() -> Result<(), Error> {
/// let target = File::create("foo.zip").await?;
/// let mut source = File::open("foo.txt").await?;
///
/// let mut zip = ZipEncoder::new(target);
/// zip.add_entry(ZipEntry::new(
/// "foo.txt",
/// 0,
/// 0o100755,
/// true,
/// ), Some(source)).await?;
///
/// zip.finish().await?;
///
/// Ok(())
/// }
/// ```
pub struct ZipEncoder<W>
where
W: AsyncWrite + Unpin,
{
byte_count: usize,
files: Vec<ZipEntry>,
target: Option<W>,
}
impl<W: AsyncWrite + Unpin> ZipEncoder<W> {
pub fn new(target: W) -> Self {
Self {
byte_count: 0,
files: Vec::new(),
target: Some(target),
}
}
pub async fn add_entry<R: AsyncRead + Unpin>(
&mut self,
mut entry: ZipEntry,
content: Option<R>,
) -> Result<(), Error> {
let mut target = self
.target
.take()
.ok_or_else(|| format_err!("had no target during add entry"))?;
entry.offset = self.byte_count.try_into()?;
self.byte_count += entry.write_local_header(&mut target).await?;
if let Some(content) = content {
let mut reader = HashWrapper::new(content);
let mut enc = DeflateEncoder::with_quality(target, Level::Fastest);
enc.compress(&mut reader).await?;
let total_in = enc.total_in();
let total_out = enc.total_out();
target = enc.into_inner();
let (crc32, _reader) = reader.finish();
self.byte_count += total_out as usize;
entry.compressed_size = total_out;
entry.uncompressed_size = total_in;
entry.crc32 = crc32;
}
self.byte_count += entry.write_data_descriptor(&mut target).await?;
self.target = Some(target);
self.files.push(entry);
Ok(())
}
async fn write_eocd(
&mut self,
central_dir_size: usize,
central_dir_offset: usize,
) -> Result<(), Error> {
let entrycount = self.files.len();
let mut target = self
.target
.take()
.ok_or_else(|| format_err!("had no target during write_eocd"))?;
let mut count = entrycount as u16;
let mut directory_size = central_dir_size as u32;
let mut directory_offset = central_dir_offset as u32;
if central_dir_size > u32::MAX as usize
|| central_dir_offset > u32::MAX as usize
|| entrycount > u16::MAX as usize
{
count = 0xFFFF;
directory_size = 0xFFFFFFFF;
directory_offset = 0xFFFFFFFF;
write_struct(
&mut target,
Zip64EOCDRecord {
signature: ZIP64_EOCD_RECORD,
field_size: 44,
version_made_by: VERSION_MADE_BY,
version_needed: VERSION_NEEDED,
disk_number: 0,
disk_number_central_dir: 0,
disk_record_count: entrycount.try_into()?,
total_record_count: entrycount.try_into()?,
directory_size: central_dir_size.try_into()?,
directory_offset: central_dir_offset.try_into()?,
},
)
.await?;
let locator_offset = central_dir_offset + central_dir_size;
write_struct(
&mut target,
Zip64EOCDLocator {
signature: ZIP64_EOCD_LOCATOR,
disk_number: 0,
offset: locator_offset.try_into()?,
disk_count: 1,
},
)
.await?;
}
write_struct(
&mut target,
EndOfCentralDir {
signature: END_OF_CENTRAL_DIR,
disk_number: 0,
start_disk: 0,
disk_record_count: count,
total_record_count: count,
directory_size,
directory_offset,
comment_len: 0,
},
)
.await?;
self.target = Some(target);
Ok(())
}
pub async fn finish(&mut self) -> Result<(), Error> {
let mut target = self
.target
.take()
.ok_or_else(|| format_err!("had no target during finish"))?;
let central_dir_offset = self.byte_count;
let mut central_dir_size = 0;
for file in &self.files {
central_dir_size += file.write_central_directory_header(&mut target).await?;
}
self.target = Some(target);
self.write_eocd(central_dir_size, central_dir_offset)
.await?;
self.target
.take()
.ok_or_else(|| format_err!("had no target for flush"))?
.flush()
.await?;
Ok(())
}
}
/// Zip a local directory and write encoded data to target. "source" has to point to a valid
/// directory; its name will be the root of the zip file - e.g.:
/// source:
/// /foo/bar
/// zip file:
/// /bar/file1
/// /bar/dir1
/// /bar/dir1/file2
/// ...
/// ...except if "source" is the root directory
pub async fn zip_directory<W>(target: W, source: &Path) -> Result<(), Error>
where
W: AsyncWrite + Unpin + Send,
{
use walkdir::WalkDir;
use std::os::unix::fs::MetadataExt;
let base_path = source.parent().unwrap_or_else(|| Path::new("/"));
let mut encoder = ZipEncoder::new(target);
for entry in WalkDir::new(&source).into_iter() {
match entry {
Ok(entry) => {
let entry_path = entry.path().to_owned();
let encoder = &mut encoder;
if let Err(err) = async move {
let entry_path_no_base = entry.path().strip_prefix(base_path)?;
let metadata = entry.metadata()?;
let mtime = match metadata.modified().unwrap_or_else(|_| SystemTime::now()).duration_since(SystemTime::UNIX_EPOCH) {
Ok(dur) => dur.as_secs() as i64,
Err(time_error) => -(time_error.duration().as_secs() as i64)
};
let mode = metadata.mode() as u16;
if entry.file_type().is_file() {
let file = tokio::fs::File::open(entry.path()).await?;
let ze = ZipEntry::new(
&entry_path_no_base,
mtime,
mode,
true,
);
encoder.add_entry(ze, Some(file)).await?;
} else if entry.file_type().is_dir() {
let ze = ZipEntry::new(
&entry_path_no_base,
mtime,
mode,
false,
);
let content: Option<tokio::fs::File> = None;
encoder.add_entry(ze, content).await?;
}
// ignore other file types
let ok: Result<(), Error> = Ok(());
ok
}
.await
{
eprintln!(
"zip: error encoding file or directory '{}': {}",
entry_path.display(),
err
);
}
}
Err(err) => {
eprintln!("zip: error reading directory entry: {}", err);
}
}
}
encoder.finish().await
}

View File

@ -22,9 +22,8 @@ zstd = { version = "0.6", features = [ "bindgen" ] }
pathpatterns = "0.1.2"
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
proxmox = { version = "0.15.3", features = [ "sortable-macro" ] }
proxmox-async = "0.2"
proxmox-router = { version = "1.1", features = [ "cli" ] }
proxmox = { version = "0.14.0", features = [ "sortable-macro" ] }
proxmox-router = { version = "1", features = [ "cli" ] }
proxmox-schema = { version = "1", features = [ "api-macro" ] }
proxmox-time = "1"
@ -34,4 +33,5 @@ pbs-config = { path = "../pbs-config" }
pbs-client = { path = "../pbs-client" }
pbs-datastore = { path = "../pbs-datastore" }
pbs-fuse-loop = { path = "../pbs-fuse-loop" }
pbs-runtime = { path = "../pbs-runtime" }
pbs-tools = { path = "../pbs-tools" }

View File

@ -7,8 +7,7 @@ use serde_json::Value;
use proxmox::sys::linux::tty;
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
use proxmox_router::cli::{
complete_file_name, format_and_print_result_full, get_output_format,
CliCommand, CliCommandMap, ColumnConfig,
format_and_print_result_full, get_output_format, CliCommand, CliCommandMap, ColumnConfig,
OUTPUT_FORMAT,
};
use proxmox_schema::{api, ApiType, ReturnType};
@ -315,7 +314,7 @@ fn import_master_pubkey(path: String) -> Result<(), Error> {
let target_path = place_default_master_pubkey()?;
replace_file(&target_path, &pem_data, CreateOptions::new(), true)?;
replace_file(&target_path, &pem_data, CreateOptions::new())?;
println!("Imported public master key to {:?}", target_path);
@ -348,7 +347,7 @@ fn create_master_key() -> Result<(), Error> {
let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
let filename_pub = "master-public.pem";
println!("Writing public master key to {}", filename_pub);
replace_file(filename_pub, pub_key.as_slice(), CreateOptions::new(), true)?;
replace_file(filename_pub, pub_key.as_slice(), CreateOptions::new())?;
let cipher = openssl::symm::Cipher::aes_256_cbc();
let priv_key: Vec<u8> =
@ -356,7 +355,7 @@ fn create_master_key() -> Result<(), Error> {
let filename_priv = "master-private.pem";
println!("Writing private master key to {}", filename_priv);
replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new(), true)?;
replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new())?;
Ok(())
}
@ -452,35 +451,35 @@ fn paper_key(
pub fn cli() -> CliCommandMap {
let key_create_cmd_def = CliCommand::new(&API_METHOD_CREATE)
.arg_param(&["path"])
.completion_cb("path", complete_file_name);
.completion_cb("path", pbs_tools::fs::complete_file_name);
let key_import_with_master_key_cmd_def = CliCommand::new(&API_METHOD_IMPORT_WITH_MASTER_KEY)
.arg_param(&["master-keyfile"])
.completion_cb("master-keyfile", complete_file_name)
.completion_cb("master-keyfile", pbs_tools::fs::complete_file_name)
.arg_param(&["encrypted-keyfile"])
.completion_cb("encrypted-keyfile", complete_file_name)
.completion_cb("encrypted-keyfile", pbs_tools::fs::complete_file_name)
.arg_param(&["path"])
.completion_cb("path", complete_file_name);
.completion_cb("path", pbs_tools::fs::complete_file_name);
let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_CHANGE_PASSPHRASE)
.arg_param(&["path"])
.completion_cb("path", complete_file_name);
.completion_cb("path", pbs_tools::fs::complete_file_name);
let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_CREATE_MASTER_KEY);
let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_IMPORT_MASTER_PUBKEY)
.arg_param(&["path"])
.completion_cb("path", complete_file_name);
.completion_cb("path", pbs_tools::fs::complete_file_name);
let key_show_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_SHOW_MASTER_PUBKEY)
.arg_param(&["path"])
.completion_cb("path", complete_file_name);
.completion_cb("path", pbs_tools::fs::complete_file_name);
let key_show_cmd_def = CliCommand::new(&API_METHOD_SHOW_KEY)
.arg_param(&["path"])
.completion_cb("path", complete_file_name);
.completion_cb("path", pbs_tools::fs::complete_file_name);
let paper_key_cmd_def = CliCommand::new(&API_METHOD_PAPER_KEY)
.arg_param(&["path"])
.completion_cb("path", complete_file_name);
.completion_cb("path", pbs_tools::fs::complete_file_name);
CliCommandMap::new()
.insert("create", key_create_cmd_def)

View File

@ -17,15 +17,11 @@ use proxmox::tools::fs::{file_get_json, replace_file, CreateOptions, image_size}
use proxmox_router::{ApiMethod, RpcEnvironment, cli::*};
use proxmox_schema::api;
use proxmox_time::{strftime_local, epoch_i64};
use proxmox_async::blocking::TokioWriterAdapter;
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
use pbs_api_types::{
BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA,
TRAFFIC_CONTROL_BURST_SCHEMA, TRAFFIC_CONTROL_RATE_SCHEMA,
Authid, CryptMode, Fingerprint, GroupListItem, HumanByte,
PruneListItem, PruneOptions, RateLimitConfig, SnapshotListItem,
StorageStatus,
BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, Authid, CryptMode, GroupListItem,
PruneListItem, SnapshotListItem, StorageStatus, Fingerprint, PruneOptions,
};
use pbs_client::{
BACKUP_SOURCE_SCHEMA,
@ -49,7 +45,7 @@ use pbs_client::tools::{
complete_archive_name, complete_auth_id, complete_backup_group, complete_backup_snapshot,
complete_backup_source, complete_chunk_size, complete_group_or_snapshot,
complete_img_archive_name, complete_pxar_archive_name, complete_repository, connect,
connect_rate_limited, extract_repository_from_value,
extract_repository_from_value,
key_source::{
crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
KEYFILE_SCHEMA, MASTER_PUBKEY_FD_SCHEMA, MASTER_PUBKEY_FILE_SCHEMA,
@ -69,6 +65,7 @@ use pbs_datastore::manifest::{
};
use pbs_datastore::read_chunk::AsyncReadChunk;
use pbs_tools::sync::StdChannelWriter;
use pbs_tools::tokio::TokioWriterAdapter;
use pbs_tools::json;
use pbs_tools::crypt_config::CryptConfig;
@ -129,7 +126,7 @@ fn record_repository(repo: &BackupRepository) {
let new_data = json!(map);
let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new(), false);
let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
}
async fn api_datastore_list_snapshots(
@ -487,7 +484,7 @@ fn spawn_catalog_upload(
encrypt: bool,
) -> Result<CatalogUploadResult, Error> {
let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // allow to buffer 10 writes
let catalog_stream = proxmox_async::blocking::StdChannelStream(catalog_rx);
let catalog_stream = pbs_tools::blocking::StdChannelStream(catalog_rx);
let catalog_chunk_size = 512*1024;
let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));
@ -585,14 +582,6 @@ fn spawn_catalog_upload(
schema: CHUNK_SIZE_SCHEMA,
optional: true,
},
rate: {
schema: TRAFFIC_CONTROL_RATE_SCHEMA,
optional: true,
},
burst: {
schema: TRAFFIC_CONTROL_BURST_SCHEMA,
optional: true,
},
"exclude": {
type: Array,
description: "List of paths or patterns for matching files to exclude.",
@ -641,17 +630,6 @@ async fn create_backup(
verify_chunk_size(size)?;
}
let rate = match param["rate"].as_str() {
Some(s) => Some(s.parse::<HumanByte>()?),
None => None,
};
let burst = match param["burst"].as_str() {
Some(s) => Some(s.parse::<HumanByte>()?),
None => None,
};
let rate_limit = RateLimitConfig::with_same_inout(rate, burst);
let crypto = crypto_parameters(&param)?;
let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());
@ -746,7 +724,7 @@ async fn create_backup(
let backup_time = backup_time_opt.unwrap_or_else(epoch_i64);
let client = connect_rate_limited(&repo, rate_limit)?;
let client = connect(&repo)?;
record_repository(&repo);
println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time)?);
@ -1063,14 +1041,6 @@ We do not extract '.pxar' archives when writing to standard output.
"###
},
rate: {
schema: TRAFFIC_CONTROL_RATE_SCHEMA,
optional: true,
},
burst: {
schema: TRAFFIC_CONTROL_BURST_SCHEMA,
optional: true,
},
"allow-existing-dirs": {
type: Boolean,
description: "Do not fail if directories already exists.",
@ -1101,18 +1071,8 @@ async fn restore(param: Value) -> Result<Value, Error> {
let archive_name = json::required_string_param(&param, "archive-name")?;
let rate = match param["rate"].as_str() {
Some(s) => Some(s.parse::<HumanByte>()?),
None => None,
};
let burst = match param["burst"].as_str() {
Some(s) => Some(s.parse::<HumanByte>()?),
None => None,
};
let client = connect(&repo)?;
let rate_limit = RateLimitConfig::with_same_inout(rate, burst);
let client = connect_rate_limited(&repo, rate_limit)?;
record_repository(&repo);
let path = json::required_string_param(&param, "snapshot")?;
@ -1172,7 +1132,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
if archive_name == MANIFEST_BLOB_NAME {
if let Some(target) = target {
replace_file(target, &backup_index_data, CreateOptions::new(), false)?;
replace_file(target, &backup_index_data, CreateOptions::new())?;
} else {
let stdout = std::io::stdout();
let mut writer = stdout.lock();
@ -1469,13 +1429,13 @@ fn main() {
.arg_param(&["backupspec"])
.completion_cb("repository", complete_repository)
.completion_cb("backupspec", complete_backup_source)
.completion_cb("keyfile", complete_file_name)
.completion_cb("master-pubkey-file", complete_file_name)
.completion_cb("keyfile", pbs_tools::fs::complete_file_name)
.completion_cb("master-pubkey-file", pbs_tools::fs::complete_file_name)
.completion_cb("chunk-size", complete_chunk_size);
let benchmark_cmd_def = CliCommand::new(&API_METHOD_BENCHMARK)
.completion_cb("repository", complete_repository)
.completion_cb("keyfile", complete_file_name);
.completion_cb("keyfile", pbs_tools::fs::complete_file_name);
let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
.completion_cb("repository", complete_repository);
@ -1488,7 +1448,7 @@ fn main() {
.completion_cb("repository", complete_repository)
.completion_cb("snapshot", complete_group_or_snapshot)
.completion_cb("archive-name", complete_archive_name)
.completion_cb("target", complete_file_name);
.completion_cb("target", pbs_tools::fs::complete_file_name);
let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
.arg_param(&["group"])
@ -1541,6 +1501,6 @@ fn main() {
let rpcenv = CliEnvironment::new();
run_cli_command(cmd_def, rpcenv, Some(|future| {
proxmox_async::runtime::main(future)
pbs_runtime::main(future)
}));
}

View File

@ -94,7 +94,7 @@ pub fn mount_cmd_def() -> CliCommand {
.completion_cb("repository", complete_repository)
.completion_cb("snapshot", complete_group_or_snapshot)
.completion_cb("archive-name", complete_pxar_archive_name)
.completion_cb("target", complete_file_name)
.completion_cb("target", pbs_tools::fs::complete_file_name)
}
pub fn map_cmd_def() -> CliCommand {
@ -135,7 +135,7 @@ fn mount(
if verbose {
// This will stay in foreground with debug output enabled as None is
// passed for the RawFd.
return proxmox_async::runtime::main(mount_do(param, None));
return pbs_runtime::main(mount_do(param, None));
}
// Process should be daemonized.
@ -151,7 +151,7 @@ fn mount(
Ok(ForkResult::Child) => {
drop(pr);
nix::unistd::setsid().unwrap();
proxmox_async::runtime::main(mount_do(param, Some(pw)))
pbs_runtime::main(mount_do(param, Some(pw)))
}
Err(_) => bail!("failed to daemonize process"),
}

Some files were not shown because too many files have changed in this diff.