Compare commits

...

398 Commits

Author SHA1 Message Date
96f35520a0 bump version to 1.0.5-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-25 15:30:06 +01:00
490560e0c6 restore: print to STDERR
else restoring to STDOUT is broken..

Reported-by: Dominic Jäger <d.jaeger@proxmox.com>

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-25 14:38:02 +01:00
52f53d8280 control: update versions 2020-11-25 10:35:51 +01:00
27b8a3f671 bump version to 1.0.4-1 2020-11-25 08:03:11 +01:00
abf9b6da42 docs: fix renamed commands 2020-11-25 08:03:11 +01:00
0c9209b04c cli: rename command "upload-log" to "snapshot upload-log" 2020-11-25 07:57:39 +01:00
edebd52374 cli: rename command "forget" to "snapshot forget" 2020-11-25 07:57:39 +01:00
61205f00fb cli: rename command "files" to "snapshot files" 2020-11-25 07:57:39 +01:00
a303e00289 fingerprint: add new() method 2020-11-25 07:57:39 +01:00
af9f72e9d8 fingerprint: add bytes() accessor
needed for libproxmox-backup-qemu0

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-25 06:34:34 +01:00
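The two fingerprint commits above add a constructor and a raw-byte accessor. A minimal sketch of what that API could look like; the private field name and the 32-byte digest size are assumptions, only the method names new() and bytes() come from the commit subjects:

```rust
/// Sketch of the fingerprint type extended above (field name and digest
/// size are assumptions, not the actual implementation).
pub struct Fingerprint {
    bytes: [u8; 32],
}

impl Fingerprint {
    /// new(): construct a fingerprint from a raw digest
    pub fn new(bytes: [u8; 32]) -> Self {
        Self { bytes }
    }

    /// bytes(): expose the raw digest, e.g. for libproxmox-backup-qemu0
    pub fn bytes(&self) -> &[u8; 32] {
        &self.bytes
    }
}
```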
5176346b30 ui: fix broken gettext use
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-25 00:21:17 +01:00
731eeef25b cli: use new alias feature for "snapshots"
Now maps to "snapshot list".
2020-11-24 13:26:43 +01:00
a65e3e4bc0 client: add 'snapshot notes show/update' command
to show and update snapshot notes from the cli

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-24 11:44:19 +01:00
027eb2bbe6 bump version to 1.0.3-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-24 08:56:18 +01:00
6982a54701 gui: add snapshot/file fingerprint tooltip
display short key ID, like backend's Display trait.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 08:44:55 +01:00
035c40e638 list_snapshots: return manifest fingerprint
for display in clients.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 08:44:55 +01:00
79c535955d refactor BackupInfo -> SnapshotListItem helper
before adding more fields to the tuple, let's just create the struct
inside the match arms to improve readability.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 08:44:55 +01:00
8b7f8d3f3d expose previous backup time in backup env
and use this information to add more information to client backup log
and guide the download manifest decision.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 08:44:55 +01:00
866c859a1e bump version to 1.0.2-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-24 08:33:20 +01:00
23e4e90540 verification: fix message in notification mail
the errors Vec can contain failed groups as well (e.g., if a group has
no or an invalid owner).

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 08:33:20 +01:00
a4fa3fc241 verification job: log failed dirs
else users have to manually search through a potentially very long task
log to find the entries that are different.. this is the same summary
printed at the end of a manual verify task.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 08:33:20 +01:00
81d10c3b37 cleanup: remove dead code 2020-11-24 08:03:00 +01:00
f1e2904150 paperkey: refactor common code
from formatting functions to main function, and pass along the key data
lines instead of the full string.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 07:57:21 +01:00
23f9503a31 client: check fingerprint after downloading manifest
this is stricter than the check that happened on manifest load, as it
also fails if the manifest is signed but we don't have a key available.

add some additional output at the start of a backup to indicate whether
a previous manifest is available to base the backup on.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 07:55:12 +01:00
a0ef68b93c manifest: check fingerprint when loading with key
otherwise loading will run into the signature mismatch which is
technically true, but not the complete picture in this case.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 07:49:51 +01:00
6b127e6ea0 fix #3139: add key fingerprint to manifest
if the manifest is signed or the contained archives/blobs are encrypted.
Stored in the 'unprotected' area, since there is already a strong binding
between key and manifest via the signature, and this avoids breaking
backwards compatibility for a simple usability improvement.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 07:45:11 +01:00
5e17dbf2bb cli: cleanup 'key show' - use format_and_print_result_full
We now expose all key derivation functions on the CLI, so users can
choose between scrypt and pbkdf2.
2020-11-24 07:32:34 +01:00
dfb04575ad client: add 'key show' command
for (pretty-)printing a keyfile.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-23 13:15:29 +01:00
6f2626ae19 client: print key fingerprint and master key
for operations where it makes sense.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-23 13:11:26 +01:00
37e60ddcde key: add fingerprint to key config
and set/generate it on
- key creation
- key passphrase change
- key decryption if not already set
- key encryption with master key

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-23 13:03:46 +01:00
05cdc05347 crypt config: add fingerprint mechanism
by computing the ID digest of a hash of a static string.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-23 13:03:16 +01:00
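A rough illustration of the mechanism described above; the static input string and the exact digest construction are assumptions, the real code derives the ID digest via the crypt config:

```rust
use openssl::sha;

/// Sketch: derive a stable key fingerprint by hashing a fixed, well-known
/// string together with the key material. Only an approximation of the
/// actual crypt-config based implementation.
fn compute_fingerprint(key: &[u8; 32]) -> [u8; 32] {
    let mut hasher = sha::Sha256::new();
    hasher.update(b"fingerprint input: static string");
    hasher.update(key);
    hasher.finish()
}
```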
6364115b4b OnlineHelpInfo.js problems
Anybody know why I always get the following diff:
2020-11-23 12:57:41 +01:00
2133cd9103 update debian/control 2020-11-23 12:13:58 +01:00
01f84fcce1 ui: datastore content: use our keep field for group pruning
sets some defaults and provides the clear trigger, so less code and
slightly nicer UX.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-21 19:52:03 +01:00
08b3823025 bump dependency on proxmox to 0.7.1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-20 17:38:34 +01:00
968a0ab261 fix systemd-encoded upid strings in http client
since we systemd-encode parts of the upid string, and those can contain
characters that are invalid in urls (e.g. '\'), we have to percent encode
those

add a 'percent_encode_component' helper, so that we can maybe change
the AsciiSet for all uses at the same time

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-19 11:01:19 +01:00
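A minimal sketch of such a helper, using the percent-encoding crate; the chosen AsciiSet is an assumption, the point is that all call sites share one place where it can be changed:

```rust
use percent_encoding::{utf8_percent_encode, NON_ALPHANUMERIC};

/// Sketch: percent-encode a single URL component so that characters such
/// as '\' produced by systemd escaping survive inside a request path.
pub fn percent_encode_component(comp: &str) -> String {
    utf8_percent_encode(comp, NON_ALPHANUMERIC).to_string()
}
```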
21b552848a prune sim: make numberfields more similar to PBS's
by creating a new class that adds a clear trigger and also uses the
clear-trigger image. Code was taken from the one in PBS's prune window,
but we have default values here, so a bit of adapting was necessary. For
example, we don't want to reset to the original value (which might have
been one of the defaults) when clearing, but always to 'null'.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-19 09:47:51 +01:00
fd19256470 gc: treat .bad files like regular chunks
Simplify the phase 2 code by treating .bad files just like regular
chunks, with the exception of stat logging.

To facilitate, we need to touch .bad files in phase 1. We only do this
under the condition that 1) the original chunk is missing (as before),
and 2) the original chunk is still referenced somewhere (since the code
lives in the error handler for a failed chunk touch, it only gets called
for chunks we expect to be there, i.e. ones that are referenced).

Untouched they will then be cleaned up after 24 hours (or after the last
longer-running task finishes).

Reason 2) is also a fix for .bad files not being cleaned up at all if
the original is no longer referenced anywhere (e.g. a user deleting all
snapshots after seeing some corrupt chunks appear).

cond_touch_path is introduced to touch arbitrary paths in the chunk
store with the same logic as touching chunks.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-11-18 14:04:49 +01:00
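A hedged sketch of what a cond_touch_path-style helper could look like, using the filetime crate; the exact error handling in the chunk store is an assumption:

```rust
use std::io;
use std::path::Path;

use filetime::{set_file_times, FileTime};

/// Sketch: update atime/mtime of an arbitrary path in the chunk store with
/// the same "touch" semantics used for chunks; a missing path is reported
/// as Ok(false) instead of being treated as a hard error.
fn cond_touch_path(path: &Path) -> io::Result<bool> {
    let now = FileTime::now();
    match set_file_times(path, now, now) {
        Ok(()) => Ok(true),
        Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(false),
        Err(err) => Err(err),
    }
}
```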
1ed022576c api: include store in invalid owner errors
since a group might exist in plenty of stores

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-18 11:11:24 +01:00
f6aa7b38bf drop now unused BackupInfo::list_backups
all global backup listing now happens via BackupGroup

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-18 11:11:21 +01:00
fdfcb74d67 api: filter snapshot counts
unprivileged users should only see the counts related to their part of
the datastore.

while we're at it, switch to a list groups, filter groups, count
snapshots approach (like list_snapshots) to speed up calls to this
endpoint when many unprivileged users share a datastore.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-18 11:05:50 +01:00
98afc7b152 api: make expensive parts of datastore status opt-in
used in the PBS GUI, but also for PVE usage queries which don't need all
the extra expensive information..

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-18 11:05:47 +01:00
0d08fceeb9 improve group/snapshot listing
by listing groups first, then filtering, then listing group snapshots.

this cuts down the number of openat/getdirents calls for users that just
have a partial view of the datastore.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-18 10:37:04 +01:00
3c945d73c2 client/http_client: add put method
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-16 16:59:14 +01:00
58fcbf5ab7 client: expose all-file-systems option
Useful to avoid the need for a long (and possibly changing) list of include-dev
options in certain situations, e.g. nested ZFS file systems. The option is
already implemented and seems to work as expected. The checks for virtual
filesystems are not affected by this option.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-16 16:59:14 +01:00
3a3f31c947 ui: datastores: hide "no datastore" box by default
avoids showing it during store load; at that point we do not know yet
whether there are no datastores, and a loading mask is already shown.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-16 16:59:14 +01:00
8fc63287df ui: improve comment behaviour for datastore Summary
when we could not load the config (e.g. missing permissions)
show the comment from the global datastore-list

also show a messagebox for a load error instead of setting
the text of the comment box

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-16 10:39:34 +01:00
172473e4de ui: DataStoreList: show message when there are no datastores
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-16 10:39:34 +01:00
76f549debb ui: DataStoreList: remove datastores also from hash
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-16 10:39:34 +01:00
c9097ff801 pxar: avoid including archive root's exclude patterns in .pxarexclude-cli
The patterns from the archive root's .pxarexclude file are already present in
self.patterns when encode_pxarexclude_cli is called. Pass along the number of
CLI patterns and slice accordingly.

Suggested-By: Wolfgang Bumiller <w.bumiller@proxmox.com>
Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-12 13:05:09 +01:00
fb01fd3af6 visibility cleanups
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-12 11:53:50 +01:00
fa4bcbcad0 pxar: only generate .pxarexclude-cli if there were CLI parameters
previously a .pxarexclude entry in the root of the archive caused the file to
be generated as well, because the patterns are read before calling
generate_directory_file_list and within the function it wasn't possible to
distinguish between a pattern coming from the CLI and a pattern coming from
archive/root/.pxarexclude

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-12 11:18:08 +01:00
189cdb7427 pxar: include .pxarexclude files in the archive
The documentation states:
.pxarexclude files are treated as regular files and will be included in the
backup archive.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-12 11:18:06 +01:00
874bd5454d pxar: fix anchored exclusion at archive root
There is no leading slash in an entry's full_path, causing an anchored
exclude at the root level to fail, e.g. having "/name" as the content of the
file archive/root/.pxarexclude didn't match the file archive/root/name

Fix this by prepending a leading slash before matching.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-12 11:18:04 +01:00
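The fix amounts to anchoring the entry path before matching; a minimal sketch with illustrative names, the actual matcher is omitted:

```rust
/// Sketch: ensure an entry's path is anchored at the archive root, so an
/// exclude pattern like "/name" matches the file archive/root/name.
fn anchored_path(full_path: &str) -> String {
    if full_path.starts_with('/') {
        full_path.to_owned()
    } else {
        format!("/{}", full_path)
    }
}
```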
b649887e9a remove unused function
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-12 11:15:15 +01:00
8c62c15f56 followup: whitespace cleanup
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-12 11:02:45 +01:00
51ac17b56e api: apt/versions: fix running_kernel string for unknown package case
Signed-off-by: Mira Limbeck <m.limbeck@proxmox.com>
2020-11-12 11:02:20 +01:00
fc5a012068 manager: versions: non-verbose should actually print server pkg info
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-12 10:28:03 +01:00
5e293f1315 apt: use typed response for get_versions
...and cleanup get_versions for manager CLI.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-11-12 10:15:32 +01:00
87367decf2 ui: tell ESLint to be strict in check target
the .lint-incremental target, which is implicitly used by the install
target, is still more forgiving to allow faster "change, build, test"
iteration when developing.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-12 09:54:39 +01:00
f792220dd4 d/control: update for new pin-project dependency
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-12 09:54:39 +01:00
97030c9407 cleanup clippy leftovers
this used to contain a pointer cast, now it doesn't

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-12 09:43:38 +01:00
5d1d0f5d6c use pin-project to remove more unsafe blocks
we already have it in our dependency tree, so use it

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-12 09:43:38 +01:00
294466ee61 manager: versions: unify printing
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 18:30:33 +01:00
c100fe9108 add versions command to proxmox-backup-manager
Add the versions command to proxmox-backup-manager with a similar output
to pveversion [-v]. It prints the packages line by line with only the
package name, followed by the version and, for proxmox-backup and
proxmox-backup-server, some additional information (running kernel,
running version).

In addition it supports the optional output-format parameter which can
be used to print the complete data in either json, json-pretty or text
format. If output-format is specified, the --verbose parameter is
ignored and the detailed list of packages is printed.

With the addition of the versions command, the report is extended as
well.

Signed-off-by: Mira Limbeck <m.limbeck@proxmox.com>
2020-11-11 18:30:33 +01:00
e754da3ac2 api: versions: add version also in server package unknown case
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 18:30:33 +01:00
bc1e52bc38 api: versions: rust fmt cleanups
line length limit is 100

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 18:30:33 +01:00
6f0073bbb5 api: apt update info: do not serialize extra info if none
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 18:30:33 +01:00
2decf85d6e add extra_info field to APTUpdateInfo
Add an optional string field to APTUpdateInfo which can be used for
extra information.

This is used for passing running kernel and running version information
in the versions API call together with proxmox-backup and
proxmox-backup-server.

Signed-off-by: Mira Limbeck <m.limbeck@proxmox.com>
2020-11-11 16:39:11 +01:00
1d8f849457 api2/node/syslog: use 'real_service_name' here also
for now this only does the 'postfix' -> 'postfix@-' conversion,
fixes the issue that we only showed the 'postfix' service syslog
(which is rather empty in a default setup) instead of the instance one

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-11 16:36:42 +01:00
beb07279b6 log source of encryption key
This patch prints the source of the encryption key when running
operations with proxmox-backup-client.

Signed-off-by: Stoiko Ivanov <s.ivanov@proxmox.com>
2020-11-11 16:35:20 +01:00
8c6854c8fd inform user when using default encryption key
Currently if you generate a default encryption key:
`proxmox-backup-client key create --kdf none`

all backup operations which don't explicitly disable encryption will be
encrypted with this key.

I found it quite surprising that my backups were all encrypted without
my explicitly specifying either a key or an encryption mode.

This patch informs the user when the default key is used (and no
crypt-mode is provided explicitly)

Signed-off-by: Stoiko Ivanov <s.ivanov@proxmox.com>
2020-11-11 16:35:20 +01:00
57f472d9bb report: use '$' instead of '#' for showing commands
since some files can contain the '#' character for comments (e.g.,
/etc/hosts).

Signed-off-by: Oguz Bektas <o.bektas@proxmox.com>
2020-11-11 16:19:37 +01:00
94ffca10a2 report: fix grammar error
Signed-off-by: Oguz Bektas <o.bektas@proxmox.com>
2020-11-11 16:19:33 +01:00
0a274ab0a0 ui: UserView: render name as 'Firstname Lastname'
instead of only the firstname

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-11 14:09:40 +01:00
c0026563b0 make user properties deletable
by using our usual pattern for the update call

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-11 14:09:40 +01:00
e411924c7c rest: check for disabled token (user)
when authenticating a token, and not just when authenticating a
user/ticket.

Reported-By: Dominik Jäger <d.jaeger@proxmox.com>

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-11 12:21:29 +01:00
709c15abaa bump version to 1.0.1-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 10:21:30 +01:00
b404e4d930 d/control: check in new dependencies to generated control
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 10:21:30 +01:00
f507580c3f docs: faq: fix first releases
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 10:14:01 +01:00
291b786076 docs: fix prune retention example
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 10:14:01 +01:00
06c9059dac daemon: rename method, endless loop, bail on exec error
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 10:14:01 +01:00
d7c6ad60dd daemon: add hack for sd_notify
sd_notify is not synchronous, iow. it only waits until the message
reaches the queue not until it is processed by systemd

when the process that sent such a message exits before systemd could
process it, it cannot be associated to the correct pid

so in case of reloading, we send a message with 'MAINPID=<newpid>'
to signal that it will change. if now the old process exits before
systemd knows this, it will not accept the 'READY=1' message from the
child, since it rejects the MAINPID change

since there is no (AFAICS) library interface to check the unit status,
we use 'systemctl is-active <SERVICE_NAME>' to check the state until
it is not 'reloading' anymore.

on newer systemd versions, there is 'sd_notify_barrier' which would
allow us to wait for systemd to have all messages from the current
pid to be processed before acknowledging to the child, but on buster
the systemd version is too old...

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-11 09:43:00 +01:00
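A simplified sketch of the polling workaround described above; the polling interval and iteration cap are assumptions:

```rust
use std::process::Command;
use std::thread::sleep;
use std::time::Duration;

/// Sketch: since sd_notify only queues the message, poll
/// `systemctl is-active <service>` until the unit leaves the
/// "reloading" state before treating the reload as complete.
fn wait_until_not_reloading(service: &str) -> std::io::Result<()> {
    for _ in 0..60 {
        let output = Command::new("systemctl")
            .args(&["is-active", service])
            .output()?;
        let state = String::from_utf8_lossy(&output.stdout);
        if state.trim() != "reloading" {
            break;
        }
        sleep(Duration::from_secs(1));
    }
    Ok(())
}
```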
0a0ba0785b prune sim: avoid colon to separate keep desc from count
hack for space issues for monthly keeps and >9 counts

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 08:20:13 +01:00
6ed79592f2 prune sim: make backup schedule a form, bind update button to its validity
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 08:11:46 +01:00
4c75ee3471 prune sim: do not use unnecessary variable, declare inline
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 08:11:16 +01:00
6f997da8cd prune sim: set min-height for calendar day cells
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 08:10:43 +01:00
03e40aa4ee ui: datastore add: set default schedule
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 07:49:01 +01:00
be1d6cbcc6 ui: shorten automatic ID length a bit
Without hyphens, we had 20 hex digits, so ~80 bit which is probably overkill.
Use 12 (13 with hyphen), this is still 48 bit.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 07:40:23 +01:00
ffaca016ad ui: datastore summary: drop removed bytes display
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 07:27:21 +01:00
71f82a98d7 d/control: add missing dependencies for non ISO installations
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 07:26:05 +01:00
deef6fbc0c cargo: extend authors list
this was mostly selected by executing

and adding those with more than a handful of commits, so no hard
feelings here, this was definitely also a team effort to get stuff
polished!

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 14:47:48 +01:00
4ac529141f bump version to 1.0.0-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 14:47:48 +01:00
a108a2e967 ui: drop debug beta code
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 14:47:48 +01:00
ff7a29104c postinst: fix version check for remote.cfg cleanup
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-10 14:35:37 +01:00
240b2ffb9b ui: improve activeTab selection from fragment and state
handle invalid fragments for tabs, as well as not rendered tabpanels

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-10 14:21:54 +01:00
a86e703661 tools::runtime: pin_mut instead of unsafe block
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-10 14:18:45 +01:00
1ecf4e6d20 async_io: require Unpin for EitherStream and HyperAccept
We use it with Unpin types and this way we get rid of a lot
of `unsafe` blocks.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-10 14:18:45 +01:00
9f9a661b1a verify: cleanup logging order/messages
otherwise we end up printing warnings before the start message..

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-10 14:11:36 +01:00
1b1cab8321 verify: log/warn on invalid owner
in order to trigger a notification/make the problem more visible than
just in syslog.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-10 14:11:36 +01:00
f4f9a503de ui: add missing panel help buttons
add missing help buttons (question mark, top right) so that we are
consistent and each panel has it.

I chose the IMHO most fitting sections.

Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
2020-11-10 13:53:21 +01:00
a4971d5f90 docs: add ref for sysadmin host admin section
Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
2020-11-10 13:53:21 +01:00
477ebe6b78 docs: user management: avoid some inconsistencies
The space between '--' and 'path' in two of the commands was wrong. The other
changes make the names of the store and token consistent with the rest of the
section and should improve readability.

Also add the Datastore.Verify permission in the output of the command:
proxmox-backup-manager user permissions john@pbs --path /datastore/store1
A DatastoreAdmin now has this permission and that's what john@pbs is in the
example.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-10 13:47:52 +01:00
38efbfc148 ui: app: fix fixme
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 13:38:30 +01:00
10052ea644 remote.cfg: rename userid to 'auth-id'
and fixup config file on upgrades accordingly

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-10 13:25:24 +01:00
b57619ea29 ui: datastores sync: future proof and move local store column in front
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 13:22:54 +01:00
445b0043b2 ui: show (local)datastore column only in global sync/verifyview
it's rather hacky, but our cbind mixin does not support columns (yet).
if it does sometime in the future, we could use that instead

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-10 13:14:47 +01:00
8b62cbe752 docs: update package repositories
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 13:14:04 +01:00
81f99362d9 docs: installation: don't mention ext3 as an option anymore
Support for ext3 was removed by commit 0abf0d3683b74421eca24ba61d1d4e100d35211a
in pve-installer.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-10 13:13:44 +01:00
414c23facb fix #3060: improve get_owner error handling
log invalid owners to system log, and continue with next group just as
if permission checks fail for the following operations:
- verify store with limited permissions
- list store groups
- list store snapshots

all other call sites either handle it correctly already (sync/pull), or
operate on a single group/snapshot and can bubble up the error.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-10 12:58:44 +01:00
c5608cf86c encryption: add best practice for storing master key
Further clarify that the paperkey should be a last resort
recovery option, after a password manager and usb drive.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-10 12:51:30 +01:00
5d08c750ef HttpsConnector: include destination on connect errors
for more useful log output
old:
Nov 10 11:50:51 foo pvestatd[3378]: proxmox-backup-client failed: Error: error trying to connect: tcp connect error: No route to host (os error 113)
new:
Nov 10 11:55:21 foo pvestatd[3378]: proxmox-backup-client failed: Error: error trying to connect: error connecting to https://thebackuphost:8007/ - tcp connect error: No route to host (os error 113)

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-10 11:58:19 +01:00
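A small sketch of attaching the destination to connect errors with anyhow's context; names and types are illustrative, not the actual HttpsConnector code:

```rust
use anyhow::{Context, Result};
use tokio::net::TcpStream;

/// Sketch: wrap the low-level connect error with the destination so the
/// log line shows which host could not be reached.
async fn connect_to(host: &str, port: u16) -> Result<TcpStream> {
    TcpStream::connect((host, port))
        .await
        .with_context(|| format!("error connecting to https://{}:{}/", host, port))
}
```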
f3fde36beb client: error context when building HttpClient
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-10 11:58:19 +01:00
0c83e8891e ui: fix task description 2020-11-10 11:53:39 +01:00
133de2dd1f ui: add/fix help buttons
added a few more help buttons where appropriate:

* GC and Prune schedule windows
* Create Directory window
* API Tokens, link directly to token section
* verify jobs window

Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
2020-11-10 11:51:03 +01:00
c8219747f0 ui: add all online help refs found in docs
recommit the onlinehelp after the scanrefs script has been adapted and
the docs are up to date

Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
2020-11-10 11:50:56 +01:00
0247f794e9 docs: add network management reference
needed in order for the help button in the network edit window to work.

Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
2020-11-10 11:50:17 +01:00
710f787c41 docs: add maintenance chapter prefix to verification ref
Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
2020-11-10 11:50:12 +01:00
d8916a326c scanrefs: only scan docs, not JS files
This is a temporary hack until we find a sensible way to scan the
proxmox-widget-toolkit JS files as well.

Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
2020-11-10 11:50:09 +01:00
924d6d4072 prune sim: show count for rule
and rename 'all zero' to 'keep-all' to make it consistent with the prune dialog
in PBS.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-10 11:47:37 +01:00
984ac33d5c ui: subscription: usage chart: render date as ISO 8601
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 11:46:22 +01:00
0a4dfd63c9 ui: usage graph: show axis and set maximum
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 11:46:05 +01:00
a6e746f652 ui: datastore list summary: add more padding between elements
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 11:46:05 +01:00
30f73fa2e0 fix bug #3060: continue sync if we cannot acquire the group lock 2020-11-10 11:29:36 +01:00
9f0ee346e9 ui: Datastores Summary: change layout and chart
changes the layout to look a little bit more like the statistics panel
we have for Ceph in PVE, while changing to the UsageChart and adding
some more datastore info (from the last garbage collection)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-10 10:43:07 +01:00
48d6dede4a ui: refactor calculate_dedup_factor
so that we can reuse this

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-10 10:43:07 +01:00
8432e4a655 ui: add panel/UsageChart
heavily inspired by pveRunningChart, but without the dynamic adding
of data, and specific to datastore usage

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-10 10:43:07 +01:00
b35eb0a175 api2/status/datastore-usage: add gc-status and history start and delta
so that we can show more info and calculate the points in time for the
history

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-10 10:43:07 +01:00
c3a1b34ed3 ui: subscription: add more button icons, small UX fix
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 10:42:45 +01:00
bb26843cd6 ui/docs: add get help onlineHelp
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 10:35:35 +01:00
ee0ab12dd0 ui: move disks/directory stuff to tab panel
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 10:15:44 +01:00
d5f7755467 docs: online help scanner: also include help tool links
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 10:15:08 +01:00
5c64e83b1e ui: datastore: set onlineHelp for changing group owner
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 09:53:05 +01:00
0f6f99b4ec ui: prune: set onlineHelp
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 09:51:30 +01:00
f668862ae0 ui: prune: add clear-trigger to keep fields
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 09:51:20 +01:00
c960d2b501 bail if mount point already exists for directories
similar to what we do for zfs. By bailing before partitioning, the disk is
still considered unused after a failed attempt.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-10 09:25:58 +01:00
f5d9f2534b mount zpools created via API under /mnt/datastore
as we do for other file systems

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-10 09:25:58 +01:00
9a3ddcea33 ui: utils: eslint format fixes
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 09:24:35 +01:00
030464d3a9 docs: s/DataStore/Datastore/
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 09:24:13 +01:00
3f30b32c2e ui: prune: show count for rule
Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-10 09:24:13 +01:00
5eafe6aabc ui: prune: show which rule keeps backup
and adjust layout so the description fits.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-10 09:24:13 +01:00
2c9f274efa ui: add help tool to user and remote config 2020-11-10 09:23:22 +01:00
31112c79ac ui: add help tool to datastore panel 2020-11-10 09:15:12 +01:00
d89f91b538 ui: acl editor: disallow path editing for datastore permission views
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 08:19:17 +01:00
a6310ec294 ui: fix widget height in dashboard 2020-11-10 08:12:35 +01:00
98d9323534 ui: add link to www.proxmox.com for subscription plans 2020-11-10 08:07:49 +01:00
09f1f28800 ui: ACL view: fix path filtering
and add some comments about actual behavior of those config
properties..

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 07:33:20 +01:00
e1da9ca4bb ui: datastore dashboard: use gauge for usage, rework layout a bit
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 19:26:48 +01:00
625c7bfc0b ui: task summary: enable grid mouse track over
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 19:25:43 +01:00
d9503950e3 ui: task summary: add pointer cursor if clickable
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 18:09:05 +01:00
376e927980 ui: datastore summary: increase usage graph height
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 17:55:59 +01:00
5204cbcf0f ui: datastore summary: add line chart icon to full-estimation
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 17:48:53 +01:00
e373dcc564 ui: datastore/content: improve action button layout
Fix font-size to 14px to improve font-awesome rendering, add some
slight margin between the buttons so that they are not glued
together, add a slight text-shadow on mouse over.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 17:45:08 +01:00
137a6ebcad apt: allow changelog retrieval from enterprise repo
If a package is or will be installed from the enterprise repo, retrieve
the changelog from there as well (securely via HTTPS and authenticated
with the subscription key).

Extends the get_string method to take additional headers, in this case
used for 'Authorization'. Hyper does not have built-in basic auth
support AFAICT but it's simple enough to just build the header manually.

Take the opportunity and also set the User-Agent sensibly for GET
requests, just like for POST.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-11-09 17:28:58 +01:00
ed1329ecf7 ui: make Datastore clickable again
by showing the previously added pbsDataStores panel

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
2371c1e371 ui: add Panels necessary for Datastores Overview
a panel for a single datastore that gets updated from an external caller
shows the usage, estimated full date, history and task summary grid

a panel that dynamically generates the panel above for each datastore

and a tabpanel that includes the panel above, as well as a global
syncview, verifiyview and aclview

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
63c07d950c ui: TaskSummary: handle less defined parameters of tasks
this makes it a little easier to provide good data, without
hardcoding all types in the source object

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
a3cdb19e33 ui: TaskSummary: add subPanelModal and datastore parameters
in preparation for the per-datastore grid

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
4623cd6497 ui: TaskSummary: move state/types/titles out of the controller
it seems that under certain circumstances, extjs does not initialize
or remove the content from objects in controllers

move it to the view, where they always exist

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
ab81bb13ad ui: make Sync/VerifyView and Edit usable without datastore
we want to use this panel again for a 'global' overview, without
any datastore preselected, so we have to handle that, and add
a datastore selector in the edit window

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
616650a198 ui: Utils: add parse_datastore_worker_id
to parse the datastore out of a worker_id
for this we need some regexes that are the same as in the backend

for now we only parse out the datastore, but we can extend this
in the future to parse relevant info (e.g. remote for syncs,
id/type for backups)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
78763d21b1 ui: refactor render_size_usage to Utils
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
f2d6324958 ui: refactor render_estimate
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
6e880f19cc api2/node/tasks: add check_job_store and use it
to easily check the store of a worker_id
this fixes the issue that one could not filter by type 'syncjob' and
datastore simultaneously

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
64623f329e ui: recommit onlinehelp
now that the last commit fixed the title generation

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 16:36:00 +01:00
407f3fb994 scanrefs: remove term prefix from title
It can happen that a title is defined as a term in the following way:
:term:`My title`

This patch checks for it and strips the leading part and the last `.

Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
2020-11-09 16:35:29 +01:00
0eb0c4bd63 proxy: fix log message for auth log rotation
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 16:34:03 +01:00
82422c115a ui: admin/summary: add versions button/window
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 16:33:22 +01:00
ed2beb334d api: node/apt: add versions call
very basic, based on API/concepts of PVE one.

Still missing: adding an extra_info string option to APTUpdateInfo
and passing along the running kernel/PBS version there.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 16:31:56 +01:00
f3b4820d06 www: show more ACLs in datastore panel
since just the ACLs defined on the exact datastore path don't give
anywhere near a complete picture of who has access to it.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-09 15:19:15 +01:00
8f7cd96df4 installation: minor wording fix
very minor but worthwhile edits

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-09 15:18:44 +01:00
4accbc5853 backup-client: encryption: discuss paperkey command
adds a paragraph to the encryption section about
encoding the master key into a qr code for printing

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-09 15:18:44 +01:00
2791318ff1 fix bug #3121: forbid removing used remotes 2020-11-09 12:48:29 +01:00
47208b4147 pxar: log when skipping mount points
Clippy complains about the number of parameters we have for
create_archive and it really does need to be made somewhat
less awkward and more usable. For now we just log to stderr
as we previously did. Added todo-comments for this.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-09 12:43:16 +01:00
b783591fb5 ui: datastore content: ensure action column is wide enough
with the "change owner" action added we now need more than the
default of 100 px, so increase to 120 px for now.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 12:31:14 +01:00
9dd6175808 ui: token selector: use same layout as auth id selector
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 12:24:54 +01:00
5e8b97178e ui: auth/token selector: tell ExtJS we injected data into the store
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 12:21:02 +01:00
38260cddf5 tools apt: include package name in filter data
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 08:55:08 +01:00
80b0423d54 bump version to 0.9.7-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 07:37:05 +01:00
b690bb69eb prune sim: align documentation style with sphinx/alabaster ones
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-08 14:02:27 +01:00
8a40e22691 docs: scroll navigation to current active section
Add a custom JavaScript file to all HTML rendered docs output.

For now it only hosts a small code snipped which gets the current
active section link and bring it into view.
Needs to be triggered after DOM is initially loaded (which is still
before *all* resources like images, iframes, ... are necessarily
loaded), else the query cannot work.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-08 13:29:09 +01:00
f5c6a2c956 prune sim: slight layout adaptions
add some margin to the calendar table, to not make it seem glued to
the left and top; this follows what ExtJS does in general.

Further, adapt layout flex so that docs has 2/5 and calendar has 3/5
of space on small screens (e.g., 720p), which makes it look much better
there.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-08 13:24:27 +01:00
6d5803399b ui: add some onlineHelp reference uses for pruning
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 16:03:07 +01:00
3896f80cb3 docs: expand prune section, mention simulator, add onlineHelp refs
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 15:51:09 +01:00
60d2a6157a prune sim: make prune options panel scrollable
Else it's cutoff on 720p resolution

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 14:33:15 +01:00
b83b12cf80 prune sim: add daily 00:00 as predefined schedule in selector
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 14:08:41 +01:00
86847f487b prune sim: allow simulating up to 5 years
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 14:08:41 +01:00
1b03910dea prune sim: spell out PBS, add some flex to layout
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 14:08:41 +01:00
435a6c5e0a prune sim: fine tune calendar layout/style
Avoid black on white, too much contrast hurts the eye, use a dark grey
instead.

Highlight Sundays, and show month boundaries explicitly with strong
dashed border.

Factor out some manual set styles to classes and use them instead,
decoupling logic and styling a bit more.

Use span elements for plain text stuff, which should not be a block
(e.g., div) element.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 14:08:41 +01:00
1f4befe136 prune sim: enable calendar by default
it has a really good non-intrusive layout now, so show its glory by
default.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 13:36:58 +01:00
7f0f366675 prune sim: do not continue with reload if we caught an exception
as we then try to dereference hours which is null, for example.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 13:35:58 +01:00
362e69610c prune sim: set update button handler directly
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 13:35:26 +01:00
bad26df102 prune sim: factor out toggling color, and default to true
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 13:34:20 +01:00
790627b4bf prune sim: avoid unnecessary viewmodel formula
we set a reference on the checkbox, so we get this for free

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 13:33:08 +01:00
6de14a55ed prune sim: fix numberfield spinner scroll with firefox
copied over from widget toolkit

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 13:32:04 +01:00
8b24c6880a prune sim: eslint fixes, do not define console
really not required nowadays, and we do not use it anyway here..

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 13:31:14 +01:00
5174956548 prune sim: improve documentation layout
Better line height, some margin on the edges, and max width to avoid
very long lines on wide displays.

Avoid too much contrast from black on white, use a very dark grey
instead.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 13:28:50 +01:00
d669a739b2 ui: datastore: backup owner change: fix layout
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 19:48:08 +01:00
c7fa61619e ui: move backup group owner changer into window folder
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 19:47:45 +01:00
009a04f8d0 ui: auth-id selector: validity, code-style and layout fixes
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 19:46:08 +01:00
0953044cfb ui: use AuthidSelector for selecting new owner
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-06 19:06:35 +01:00
d923671a7b ui: use AuthidSelector for sync job owner
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-06 19:06:34 +01:00
db8a606707 proxmox-backup-proxy: remove unnecessary alias
the basedir is already /usr/share/javascript/proxmox-backup/
so adding a subdir of that as alias is not needed

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-06 18:08:18 +01:00
b614b29bea ui: datastore: add option view tab
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 17:52:15 +01:00
65595e169f ui: add NotifyOptions edit window
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 17:52:15 +01:00
10db4717f1 docs: maintenance: document notifications
can surely be improved, just to have anything..

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 17:52:15 +01:00
1d9d2f0f7c ui: utils: add property format string helpers from PVE
slightly adapted, i.e., the delete_if_default helper always sets the
delete property to an array if not existing.

Also, filtering out undefined values when printing properties.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 17:52:15 +01:00
ad53c1d6dd api: datastore: allow to set "verify-new" option over API
Until now, one could only set this by editing the configuration file
manually.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 17:24:14 +01:00
beeadb8a4b Remove reference to backup@pam 2020-11-06 16:32:35 +01:00
b997524912 Add screenshots
For:
- api tokens
- new user management interface
- updated server administration

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-06 16:30:59 +01:00
cc4a9d250a maintenance: add verification and prune to section
Includes new screen shots of interface

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-06 16:29:59 +01:00
6227b9bab0 Update where to find certain items since GUI update
- Sync jobs in datastore
- "User management" is now section of Access Control

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-06 16:28:47 +01:00
f608e74c8b datastore: description of new datastore view
- Add screenshots from new datastore view
- Add description of comment field in create datastore window
- Add description of each tab in the datastore panel
- Update instructions to add datastore from GUI

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-06 16:28:16 +01:00
08379a21d1 backup-client: add section on change-owner command
Add section "Changing the Owner of a Backup Group"

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-06 16:27:20 +01:00
8f1d972149 installation & gui: Formatting fixup
Fix some minor formatting errors in the docs

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-06 16:26:09 +01:00
b59c308219 Vec::new is Vec's default default
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-06 14:55:34 +01:00
0224c3c273 client: properly complete new-owner
with remote Authids, not local Userids.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-06 14:54:08 +01:00
f0609851fc www: add AuthidSelector
similar to TokenSelector, but with different fields / mapping of data.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-06 13:06:16 +01:00
dbd45a72c3 tasks: allow access to job tasks
if the user/token could have either configured/manually executed the
task, but it was either executed via the schedule (root@pam) or
another user/token.

without this change, semi-privileged users (that cannot read all tasks
globally, but are DatastoreAdmin) could schedule jobs, but not read
their logs once the schedule executes them. it also makes sense for
multiple such users to see each other's manually executed jobs, as long as
the privilege level on the datastore (or remote/remote_store/local
store) itself is sufficient.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-06 12:56:06 +01:00
4c979d5450 verify: allow unprivileged access to admin API
which is the one used by the GUI.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-06 12:41:41 +01:00
35c80d696f verify: fix unprivileged verification jobs
since the store is not a path parameter, we need to do manual instead of
schema checks. also dropping Datastore.Backup here

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-06 12:39:06 +01:00
6823fdc7f9 ui: improve prune simulator layout 2020-11-06 12:12:59 +01:00
3323798b54 include prune simulator in build
Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
Signed-off-by: Dietmar Maurer <dietmar@proxmox.com>
2020-11-06 09:59:24 +01:00
67fd09791f create prune simulator
A stand-alone ExtJS app that allows experimenting with different backup
schedules and prune parameters.

The HTML for the documentation was taken from the PBS docs and adapted to the
context of the simulator.

For performance reasons, the week table does not use
subcomponents, but raw HTML.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-06 09:13:43 +01:00
1b37ebf6f6 ui: require owner for sync jobs 2020-11-06 08:48:07 +01:00
043406d662 ui: use pbsUserSelector for BackupGroupChangeOwner 2020-11-06 08:48:07 +01:00
61db0851d6 gui: Add button for changing backup group owner
Extension of fix #2847

Adds an action button to the datastore content view,
to change the owner of a backup.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-06 08:48:00 +01:00
ad54df3178 get rid of backup@pam 2020-11-06 08:39:30 +01:00
71103afd69 fixup: actually commit all changes..
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 08:24:30 +01:00
6465d809cd ui: move datastore related files into own folder
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 08:11:22 +01:00
ae8635c307 www: add remote store selector
(hopefully) improved upon NFS export selection in PVE

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-05 12:56:20 +01:00
e0100d618e api: refactor remote client and add remote scan
to allow on-demand scanning of remote datastores accessible for the
configured remote user.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-05 12:56:20 +01:00
455e5f7110 types: extract DataStoreListItem
for reuse in remote scan API call

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-05 12:56:20 +01:00
c26c9390ff config: make notify a property string
For example "gc=never,verify=always,sync=error".
2020-11-05 11:35:14 +01:00
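A hypothetical datastore.cfg excerpt showing such a notify property string; the surrounding keys and the section-config layout here are assumptions, only the example value comes from the commit message:

```
datastore: store1
	path /mnt/datastore/store1
	notify gc=never,verify=always,sync=error
```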
9e45e03aef tools/daemon: fix reload with open connections
instead of await'ing the result of 'create_service' directly,
poll it together with the shutdown_future

if we reached that, fork_restart the new daemon, and await
the open future from 'create_service'

this way the old process still handles open connections until they finish,
while we already start a new process that handles new incoming connections

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-05 11:14:56 +01:00
e144810d73 pxar: more concise EOF handling
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-05 10:32:48 +01:00
3c2dd8ad05 pxar/create: handle ErrorKind::Interrupted for file reads
they are not an error and we should retry the read

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-05 10:27:36 +01:00
91e3b38da4 pxar/create: fix endless loop for shrinking files
when a file shrunk during backup, we endlessly looped, reading/copying 0 bytes.
we already have code that handles shrunk files, but we forgot to
break from the read loop

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-05 10:27:30 +01:00
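The two pxar/create fixes above both concern the file-read loop; a self-contained sketch of a loop that retries on ErrorKind::Interrupted and breaks out when the file shrunk, with an illustrative buffer size and names not taken from the actual code:

```rust
use std::io::{ErrorKind, Read, Result, Write};

/// Sketch: copy up to `expected` bytes, retrying interrupted reads and
/// breaking out early when the source file shrunk during backup instead
/// of looping forever on 0-byte reads.
fn copy_file_data<R: Read, W: Write>(
    reader: &mut R,
    writer: &mut W,
    expected: u64,
) -> Result<u64> {
    let mut buf = [0u8; 64 * 1024];
    let mut copied = 0u64;
    while copied < expected {
        let got = match reader.read(&mut buf) {
            Ok(0) => break, // file shrunk during backup: stop instead of looping
            Ok(n) => n,
            Err(err) if err.kind() == ErrorKind::Interrupted => continue, // not an error: retry
            Err(err) => return Err(err),
        };
        writer.write_all(&buf[..got])?;
        copied += got as u64;
    }
    Ok(copied)
}
```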
9d79cec4d5 bump version to 0.9.6-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 19:13:04 +01:00
4935681cf4 ui: sync jobs: add tooltip for remove vanished
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 19:07:07 +01:00
669fa672d9 ui: sync jobs: reorder fields
group local ones together on the left side, and source + schedule
on the right side.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 19:05:48 +01:00
a797583535 ui: sync jobs: fix originalValue of owner and improve label
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 19:04:42 +01:00
54ed1b2a71 ui: sync jobs: only set default schedule when creating new jobs
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 19:04:06 +01:00
8e12e86f0b ui: add shell panel under administration
some users prefer an inline console;
we still have the pop-out console in 'Administration'

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-04 18:16:49 +01:00
fe7bdc9d29 proxy: also rotate auth.log file
no need for triggering re-open here, we always re-open that file.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 17:26:34 +01:00
546b6a23df proxy: logrotate: do not serialize sending async log-reopen commands
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 17:26:34 +01:00
4fdf13f95f api: factor out auth logger and use for all API authentication failures
we have information here not available in the access log, especially
if the /api2/extjs formatter is used, which encapsulates errors in a
200 response.

So keep the auth log for now, but extend its use from create-ticket
calls to all authentication failures for API calls; this ensures one
can also fail2ban tokens.

Do that logging in a central place, which makes it simple but means
that we do not have the user ID information available to include in
the log.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 17:26:34 +01:00
385681c9ab worker task: fix passing upid to send command
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 17:16:55 +01:00
be99df2767 log rotate: only add .zst to new file after second rotation
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 17:16:55 +01:00
30200b5c4a ui: fix task description for log rotate
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 14:20:44 +01:00
f47c1d3a2f proxy: use new datastore notify settings 2020-11-04 11:54:29 +01:00
6e545d0058 config: allow to configure who receives job notify emails 2020-11-04 11:54:29 +01:00
84006f98b2 ui: SyncJobEdit: fix sending 'delete' values on SyncJob creation
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-04 11:39:52 +01:00
42ca9e918a sync: improve log format 2020-11-04 09:10:56 +01:00
ea93bea7bf proxy: log if there are too many open connections 2020-11-04 08:49:35 +01:00
0081903f7c fix bug #2870: use updated tickets 2020-11-04 08:20:36 +01:00
c53797f627 ui: set default deduplication factor to 1.0 2020-11-04 07:12:55 +01:00
e1d367df47 proxy: use env PROXMOX_DEBUG to enable/disable debug output
We only print early connection errors when this env var is set.
2020-11-04 06:55:57 +01:00
71f413cd27 cleanup: use Arc to count open connections 2020-11-04 06:35:44 +01:00
48aa2b93b7 fix #3106: correctly queue incoming connections 2020-11-04 06:24:42 +01:00
641862ddad bump version to 0.9.5-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-03 17:41:26 +01:00
2f08ee1fe3 report: add more commands/files to check
add all of our configuration files in /etc/proxmox-backup/; further,
call some ZFS tools to get that status.

Also, use the subscription command from manager, as we often require
more info than the status. Also, adapt formatting a bit.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-03 17:33:16 +01:00
93f077c5cf report: avoid lazy_static for command/files/.. definitions
those are not in a hot code path, and it is not really much work to
build them on the go..

It may not matter much, but it is unnecessary. Rust will probably
inline most of it anyway..

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-03 17:27:16 +01:00
941342f70e manager: report: call method directly, avoid HTTPS request
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-03 17:23:43 +01:00
9a556c8a30 manager: add report cli command
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
2020-11-03 15:16:42 +01:00
46dce62be6 report: add webui button for system report
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
2020-11-03 15:16:42 +01:00
b0ef9631e6 report: add api endpoint and function to generate report
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
2020-11-03 15:16:42 +01:00
fb0d9833af ui: task filter: add button icons
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-03 14:49:04 +01:00
bfe4b7d782 ui: task filter: reorder to avoid wasting vertical space
Includes some eslint fixes and label changes as well; it was too much
work to split those out into their own commit.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-03 14:48:20 +01:00
185dab7678 ui: add panel/Tasks and use it for the node tasks
this is a panel that is heavily inspired by widget-toolkit's
node/Tasks panel, but is adapted to use the extended api calls of
PBS (e.g. the since/until filter)

it has a 'filter' panel (like PMG's log tracker gui), but it is collapsible

if we extend the api calls of the other projects, we can merge this
back into the widget-toolkit one and use that

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-03 11:35:21 +01:00
c1fa057cce api2/node/tasks: add optional until filter
so that users can select specific time ranges with 'since' and 'until'
(e.g. a single day)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-03 11:35:21 +01:00
f66565203a api2/status: remove list_task api call
we do not need it anymore, we can do everything with nodes/NODE/tasks
instead

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-03 11:35:21 +01:00
a2a7dd1535 api2/node/tasks: add optional since/typefilter/statusfilter
and change all users of the /status/tasks api call to this

with this change we can now delete the /status/tasks api call

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-03 11:35:21 +01:00
e7dd169fdf api2/node/tasks: change limit behaviour when it is 0
instead of returning 0 elements (which does not really make sense anyway),
change it so that there is no limit anymore (besides usize::MAX)

this is technically a breaking change for the api, but I guess
no one is using limit=0 for anything sensible anyway

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-03 11:35:21 +01:00
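A small sketch of the new semantics, assuming a plain Option<u64> limit parameter (illustrative, not the actual API type): limit=0 or a missing limit now means "return everything" instead of "return nothing".

    // Treat limit=0 (and a missing limit) as "no limit", i.e. usize::MAX.
    fn effective_limit(limit: Option<u64>) -> usize {
        match limit {
            Some(0) | None => usize::MAX,
            Some(n) => n as usize,
        }
    }

    fn main() {
        let tasks: Vec<u32> = (1..=5).collect();
        // limit=0 no longer hides everything:
        assert_eq!(tasks.iter().take(effective_limit(Some(0))).count(), 5);
        // a real limit still applies:
        assert_eq!(tasks.iter().take(effective_limit(Some(2))).count(), 2);
    }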
fa31f4c54c server/worker_task: add tasktype to return the api type of a taskstate
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-03 11:35:21 +01:00
038ee59960 cleanup: use const_regex, use BACKUP_ID_REGEX for api too 2020-11-03 06:36:50 +01:00
e1c1533790 fix #3039: use the same ID regex for info and api
in the api we use PROXMOX_SAFE_ID_REGEX for backup ids, but here
(where we use it to list them) we use a local regex

since the first is a superset of the one used here, simply extend
the local one

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-03 06:25:06 +01:00
9de7c71a81 docs: extend managing remotes
with information about required privileges and limitations

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-02 21:13:24 +01:00
aa64e06540 sync: add access check tests
should cover all the current scenarios. Remote server-side checks can't
be meaningfully unit-tested, but they are simple enough that they
should hopefully never break.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-02 21:13:24 +01:00
18077ac633 user.cfg/user info: add test constructors
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-02 21:13:24 +01:00
a71a009313 proxy: drop now unused UPID import
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 21:08:38 +01:00
b6ba5acd29 proxmox-backup-proxy: use only jobstate for garbage_collection schedule
in case the garbage_collection errors out, we never set the in-memory
state, so if it failed, the last 'good' start time was still used
for the schedule

this could lead to the job running every minute instead of on the
correct schedule

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-02 21:08:38 +01:00
4fdf5ddf5b api2/admin/datastore: start the garbage_collection task with our helper
instead of manually, this has the advantage that we now set
the jobstate correctly and can return with an error if it is
currently running (instead of failing in the task)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-02 21:08:38 +01:00
c724f65805 server/gc_job: add 'to_stdout'
we will use this for the manual api call

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-02 21:08:38 +01:00
79c9bf55b9 backup/{dynamic, fixed}_index: improve error message for small index files
index files that were smaller than their respective header size
would fail with

"failed to fill whole buffer"

instead, now check explicitly for the size and fail with
"index too small (size)"

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-02 21:08:38 +01:00
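A sketch of such an explicit size check, assuming the anyhow-style bail! macro referenced elsewhere in this log; the function name and header_size parameter are illustrative:

    use anyhow::{bail, Error};

    // Fail with a descriptive error when the index file cannot even hold its
    // header, instead of hitting "failed to fill whole buffer" on a short read.
    fn open_index(path: &std::path::Path, header_size: u64) -> Result<std::fs::File, Error> {
        let file = std::fs::File::open(path)?;
        let size = file.metadata()?.len();
        if size < header_size {
            bail!("index too small ({})", size);
        }
        Ok(file)
    }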
788d82d9b7 gc: mark_used_chunks: reduce implementation noise
try to reduce some unnecessary lines, make match arms more precise so
one can see faster what's actually happening.

Also, avoid
> return Err(format_err!(...))
stuff, just use bail!()

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 21:08:38 +01:00
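For illustration, the two equivalent forms the commit message contrasts, using the anyhow macros it names (the chunk-check body is made up):

    use anyhow::{bail, format_err, Error};

    // Verbose form the cleanup wants to avoid:
    fn check_chunk_verbose(len: usize) -> Result<(), Error> {
        if len == 0 {
            return Err(format_err!("empty chunk"));
        }
        Ok(())
    }

    // Equivalent, shorter form using bail!():
    fn check_chunk(len: usize) -> Result<(), Error> {
        if len == 0 {
            bail!("empty chunk");
        }
        Ok(())
    }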
2f0b92352d garbage collect: improve index error messages
so that in case of a broken index file, the user knows which one it is

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-02 20:08:50 +01:00
b7f2be5137 log rotate task: make task archive limits be binary based
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 19:53:30 +01:00
72aa1834dc log rotate task: adapt internal jobstate ID, set worker one to None for now
as we have only one logrotate task currently..

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 19:53:30 +01:00
fe4cc5b1a1 server: implement access log rotation with re-open via command socket
re-use the future we already have for task log rotation to trigger
it.

Move the FileLogger in ApiConfig into an Arc, so that we can actually
update it and have the REST server use the new one.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 19:53:30 +01:00
04b053d87e server: write main daemon PID to run directory
so that we can easily get the main PID of the most recently launched
daemon. Will be used to get the control socket of that one for access
log rotation in a future patch

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 19:50:24 +01:00
b469011fd1 command socket: make create_control_socket private
this is internal for now; use the commando socket struct
implementation, and ideally not a new one but the existing ones
created in the proxy and api daemons.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 19:50:24 +01:00
a68768cf31 server: use generalized commando socket for worker tasks commands
Allows extending the use of that socket in the future, e.g., for log
rotate re-open signaling.

To reflect this we use a more general name, and change the commands
to a clearer namespace.

Both are actually somewhat of a breaking change, but the single real
world issue it should be able to cause is that one won't be able to
stop tasks from older daemons, which still use the older abstract
socket name format.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 19:48:04 +01:00
f3df613cb7 server: add CommandoSocket where multiple users can register commands
This is a preparatory step to replace the task control socket with it
and provide a "reopen log file" command for the rest server.

Kept it simple by disallowing registering new commands after the
socket gets spawned; this avoids the need for locking.

If we really need that, we can always wrap it in an Arc<RWLock<..>> or
something like that, or even nicer, register at compile time.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 19:32:22 +01:00
056ee78567 config: network: use error message when parsing netmask failed
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 19:32:22 +01:00
3cd529ea51 tools: file logger: avoid some possible unwraps in log method
writing to a file can explode quite easily.
Time formatting to rfc3339 should be more robust, but it has a few
conditions where it could fail, so catch that too (and only really
do it if required).

The writes to stdout are left as is; stdout normally is redirected to
the journal, which is in memory and thus breaks later than most stuff,
and at that point we probably do not care anymore anyway.

It could make sense to actually return a result here..

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 19:32:22 +01:00
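A sketch of a panic-free log method along those lines; the FileLogger name comes from this series, but the body (plain unix timestamps instead of rfc3339, ignoring write errors) is only illustrative:

    use std::io::Write;

    struct FileLogger {
        file: std::fs::File,
    }

    impl FileLogger {
        fn log(&mut self, msg: &str) {
            // Only format the timestamp when it is actually going to be written,
            // and fall back to a plain line if that fails.
            let line = match std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
            {
                Ok(t) => format!("{}: {}\n", t.as_secs(), msg),
                Err(_) => format!("{}\n", msg),
            };
            // Writing can easily fail (disk full, file rotated away, ...);
            // swallow the error instead of unwrap()ing and taking the daemon down.
            let _ = self.file.write_all(line.as_bytes());
        }
    }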
3aade17125 tools: log rotate: compressing rotated files
We always renamed the last one to a file without a compression
extension, even if it was .zst previously. So always add the correct
ending to the new last one, if compress was true.

Further, we cannot detect whether compression is required if we
already rotated (renamed) it to the file name with .zst included.

So check on rotation itself if it would be a "no .zst" -> ".zst"
transition, and call compress there.

it really should be OK now *knocking on wood*

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 18:35:13 +01:00
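The resulting naming rule can be summed up in a short sketch; the numbering scheme shown is illustrative, the point being that the ".zst" suffix is only attached once a rotated file is actually compressed, i.e. from the second rotation on:

    // The most recent rotated file stays uncompressed (e.g. access.log.1);
    // on later rotations it is compressed and gains the .zst ending.
    fn rotated_name(base: &str, n: usize, compress: bool) -> String {
        if compress && n >= 2 {
            format!("{}.{}.zst", base, n)
        } else {
            format!("{}.{}", base, n)
        }
    }

    fn main() {
        assert_eq!(rotated_name("access.log", 1, true), "access.log.1");
        assert_eq!(rotated_name("access.log", 2, true), "access.log.2.zst");
        assert_eq!(rotated_name("access.log", 2, false), "access.log.2");
    }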
1dc2fe20dd tools: log rotate: fix file ending for compressed files
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 18:35:13 +01:00
645a47ff6e config: support netmask when parsing interfaces file 2020-11-02 14:32:35 +01:00
b1456a8ea7 ui: fix verificationjob task description 2020-11-02 10:15:52 +01:00
a9fcbec9dc file logger: allow reopening file
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 10:03:10 +01:00
346a488e35 pull out /run and /var/log directory constants to buildcfg
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 10:03:10 +01:00
3066f56481 notify: add link to server GUI 2020-11-02 09:12:14 +01:00
07ca4e3609 gc: remove extra empty lines in email notification template 2020-11-02 09:12:14 +01:00
dcd75edb72 ui: fix dashboard subscription
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 08:08:44 +01:00
59af9ca98e sync: allow sync for non-superusers
by requiring
- Datastore.Backup permission for target datastore
- Remote.Read permission for source remote/datastore
- Datastore.Prune if vanished snapshots should be removed
- Datastore.Modify if another user should own the freshly synced
snapshots

reading a sync job entry only requires knowing about both the source
remote and the target datastore.

note that this does not affect the Authid used to authenticate with the
remote, which of course also needs permissions to access the source
datastore.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-02 07:10:12 +01:00
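A condensed sketch of those checks; the privilege flag names and the function signature are illustrative only, not the actual access-control API:

    // Illustrative privilege bits, not the real constants.
    const DATASTORE_BACKUP: u64 = 1 << 0;
    const DATASTORE_PRUNE: u64 = 1 << 1;
    const DATASTORE_MODIFY: u64 = 1 << 2;
    const REMOTE_READ: u64 = 1 << 3;

    fn sync_allowed(
        store_privs: u64,
        remote_privs: u64,
        remove_vanished: bool,
        owner_differs: bool,
    ) -> bool {
        store_privs & DATASTORE_BACKUP != 0
            && remote_privs & REMOTE_READ != 0
            && (!remove_vanished || store_privs & DATASTORE_PRUNE != 0)
            && (!owner_differs || store_privs & DATASTORE_MODIFY != 0)
    }

    fn main() {
        // Backup on the target plus Remote.Read is enough for a plain sync...
        assert!(sync_allowed(DATASTORE_BACKUP, REMOTE_READ, false, false));
        // ...but removing vanished snapshots additionally needs Datastore.Prune.
        assert!(!sync_allowed(DATASTORE_BACKUP, REMOTE_READ, true, false));
    }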
f1694b062d fix #2864: add owner option to sync
instead of hard-coding 'backup@pam'. this allows a bit more flexibility
(e.g., syncing to a datastore that can directly be used as restore
source) without overly complicating things.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-02 07:08:05 +01:00
fa7aceeb15 manager: subscription commands s/delete/remove/
no idea why I added it as "delete", for all other such operations we
use the "remove" sub-command...

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-01 13:19:30 +01:00
0e16f57e37 apt: sort packages for update notification mail
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 22:58:52 +01:00
bc00289bce add daily update and maintenance task
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 22:51:26 +01:00
86d602457a api: apt: implement support to send notification email on new updates
again, base idea copied off PVE, but we save the information about
which pending version we already sent a mail for in a separate
object, to keep the api return type APTUpdateInfo clean.

This also makes a few things a bit easier, as we can update the
package status without saving/restoring the notify information.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 22:51:26 +01:00
33508b1237 api: implement apt pkg cache
based on the idea of PVE

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 21:42:49 +01:00
b282557563 api: apt: factor out and improve calling apt update
apt changes some of its state/cache most of the time even if it
errors out, so we actually want to print both stderr and stdout.

Further, only warn if its exit code is non-zero, for the same
rationale: it may make updates available even if it errors (e.g.,
because a future pbs-enterprise repo is additionally configured but
not accessible).

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 21:31:59 +01:00
e6513bd5de api/tools: split out apt helpers from api to own module
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 21:31:36 +01:00
5911f74096 api types: derive Debug for APTUpdateInfo
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 21:31:36 +01:00
0bb74e54b1 worker task: drop debug prints
they are not useful anymore, rather noisy

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 21:31:36 +01:00
f254a27071 tools: do not unnecessarily prefix module path
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 21:31:36 +01:00
d0abba3397 trivial: fix typo in comment
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 21:31:36 +01:00
54adea366c ui: ACL view: do not save grid state
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 11:36:48 +01:00
ba2e4b15da ui: improve ACL view layout
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 11:33:31 +01:00
0ccdd1b6a4 ui: bump sync/verify grid stateid
so that people get the improved view by default

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 10:58:57 +01:00
fb66c85363 ui: improve sync job view layout
Avoid overuse of flex; that is as bad as making everything a fixed width.

In spirit similar to the previous commit for the verify panel, see
that for some rationale.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 10:56:51 +01:00
aae4c30ceb ui: improve verify job view layout, show job-id
Avoid overuse of flex; that is as bad as making everything a fixed width.

* Set date-time fields to 150 px as they are fixed width text.
* Duration is at most 3 units, so it can be made fixed too.
* Schedule is flex with lower and upper limits; this is useful as
  it's a field which can be both quite short (daily) or long
  (mon..fri *-10..12-1..7 02:00/30:30)
* Status and comment are flex; this way we always get a filled grid

Move status after the last-verify date and duration fields; this
increases information density at the left of the grid - reducing the
need for eye movement - and also groups together the "information
about last job" more nicely.

Show the job-id by default even if it is auto-generated when adding
over the gui, as it can help find the respective job faster when
getting a mail with an error.

Reported-by: Dietmar Maurer <dietmar@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 10:56:51 +01:00
0656344ae4 ui: administration: set icons for tabs
orient on PVE; the ones for Updates and ServerStatus should be
self-explanatory.

Services is named "System" in PVE, but reusing that cogs icon makes
similar sense here too, and seems in line with the search results of a
"service icons" query.

Syslog is the same as our general log icon, but as we normally also
use this for worker task logs and that is present here too, I
changed the worker task log icon to the alternative list, which
resembles a task view window - so IMO even better than before.

Sync that change also into the always present tasks button at the top
right.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 09:11:11 +01:00
1143f6ca93 cleanup: fix wording in GC status emails 2020-10-31 07:56:42 +01:00
90e94aa280 docs: client: avoid that repo gets detected as email address
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 17:08:08 +01:00
c0af05e143 docs: fixup bad RST table format
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 17:05:49 +01:00
4aef06f1b6 docs: add token example to client, and reformat a bit
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 17:01:22 +01:00
034cf70b72 docs: add API tokens to documentation
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 16:46:19 +01:00
8b600f9965 api: replace auth_id with auth-id
in parameters, and fix up the completion for the ACL update parameter.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 16:46:19 +01:00
e4e280183e privs: add some more comments explaining privileges
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 16:42:30 +01:00
2fc45a97a9 privs: remove PRIV_REMOVE_PRUNE
it's not used anywhere, and not needed either until the day we might
implement push syncs.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 16:42:26 +01:00
b7ce2e575f verify jobs: add permissions
equivalent to verifying a whole datastore, except for reading job
(entries), which is accessible to regular Datastore.Audit/Backup users
as well.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 16:36:52 +01:00
09f6a24078 verify: introduce & use new Datastore.Verify privilege
for verifying a whole datastore. Datastore.Backup now allows verifying
only backups owned by the triggering user.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 16:36:52 +01:00
b728a69e7d privs: use Datastore.Modify|Backup to set backup notes
Datastore.Backup is limited to owned groups, as usual.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 16:36:52 +01:00
1401f4be5f privs: allow reading notes with Datastore.Audit
they are returned when reading the manifest, which just requires
Datastore.Audit as well. Datastore.Read is for reading backup contents,
not metadata.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 16:36:52 +01:00
fdb4416bae ui: permission path selector: cbind typeAhead to editable
ExtJS throws an exception if 'typeAhead' is true but 'editable' is
false.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 16:31:53 +01:00
abe1edfc95 update d/control
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-10-30 16:11:50 +01:00
e4a864bd21 impl From<Authid> for Userid
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-10-30 15:19:07 +01:00
7a7368ee08 bump proxmox dependency to 0.7.0 for totp updates
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-10-30 15:19:07 +01:00
e707fd2b3b ui: Utils: add product specific task descriptions
and sort them alphabetically

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-10-30 14:05:17 +01:00
625a56b75e server/rest: accept also = as token separator
Like we do in Proxmox VE

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 13:34:26 +01:00
6d8a1ac9e4 server/rest: use constants for HTTP headers
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 13:33:36 +01:00
362739054e api tokens: add authorization method
and properly decode secret (which is a no-op with the current scheme).

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 13:15:14 +01:00
2762481cc8 proxmox-backup-manager: add subscription commands
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 13:03:58 +01:00
652506e6b8 api: define subscription module and methods as public
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 13:03:58 +01:00
926d253126 api: define subscription key schema and use it
nicer to have the correct regex checked in parameter verification
already

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 12:57:14 +01:00
1cd951c93e proxy: fix warnings
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 12:49:43 +01:00
3b707fbb8f proxy: split out code to run garbage collection job 2020-10-30 11:01:45 +01:00
b15751bf55 check_schedule cleanup: use &str instead of String
This way we can avoid many clone() calls.
2020-10-30 09:49:50 +01:00
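The point of the refactor in one tiny sketch; the signature here is illustrative, not the real check_schedule:

    // Accepting &str lets callers pass borrowed data without clone().
    fn check_schedule(worker_type: &str, event_str: &str, id: &str) -> bool {
        // real logic would parse the calendar event and compare timestamps
        !worker_type.is_empty() && !event_str.is_empty() && !id.is_empty()
    }

    fn main() {
        let schedule = String::from("daily");
        let job_id = String::from("store1");
        // no .clone() needed, just borrow the owned Strings:
        assert!(check_schedule("garbage_collection", &schedule, &job_id));
    }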
82c05b41fa proxy: extract commonly used logic for scheduling into new function
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
2020-10-30 09:49:50 +01:00
b8d9079835 proxy: move prune logic into new file
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
2020-10-30 09:49:50 +01:00
f8a682a873 ui: user menu: allow changing language while logged in
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 09:46:04 +01:00
b03a19b6e8 bump version to 0.9.4-2
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 20:25:37 +01:00
603a6bd183 d/postinst: followup: grep and sed use different regex escaping ..
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 20:25:37 +01:00
83b039af35 d/postinst: make more resilient
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 19:58:41 +01:00
c9299e76fc bump version to 0.9.3-2
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 17:20:04 +01:00
2f1a46f748 ui: move user, token and permissions into an access control tab panel
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 16:47:18 +01:00
2b38dfb456 d/control: update
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 16:18:40 +01:00
f487a622ce ui: datastore summary: handle missing snapshots of a type
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 15:52:53 +01:00
906ef6c5bd api2/access/user: fix return type schema
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-10-29 15:20:10 +01:00
ea1853a17b api2/access/user: drop Option, treat empty Vec as None
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-10-29 15:17:54 +01:00
221177ba41 fixup hardcoded paths
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-10-29 15:15:17 +01:00
184a37635b gui: add API token ACLs
and the needed API token selector.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
b2da7fbd1c acls: allow viewing/editing user's token ACLs
even for otherwise unprivileged users.

since effective privileges of an API token are always intersected with
those of their owning user, this does not allow an unprivileged user to
elevate their privileges in practice, but avoids the need to involve a
privileged user to deploy API tokens.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
7fe76d3491 gui: add API token UI
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
e6b5bf69a3 gui: add permissions button to user view
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
4615325f9e manager: add user permissions command
useful for debugging complex ACL setups.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
2156dec5a9 manager: add token commands
to generate, list and delete tokens. adding them to ACLs already works
out of the box.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
16245d540c tasks: allow unpriv users to read their tokens' tasks
and tighten down the return schema while we're at it.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
bff8557298 owner checks: handle backups owned by API tokens
a user should be allowed to read/list/overwrite backups owned by their
own tokens, but a token should not be able to read/list/overwrite
backups owned by their owning user.

when changing ownership of a backup group, a user should be able to
transfer ownership to/from their own tokens if the backup is owned by
them (or one of their tokens).

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
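A sketch of that asymmetric rule, assuming a simplified Authid made of a user plus an optional token name (the real type is richer):

    #[derive(PartialEq)]
    struct Authid {
        user: String,
        token: Option<String>,
    }

    // A user may access backups owned by their own tokens, but a token may not
    // access backups owned by its user (or by sibling tokens).
    fn owner_check(auth: &Authid, owner: &Authid) -> bool {
        auth == owner || (auth.token.is_none() && owner.user == auth.user)
    }

    fn main() {
        let user = Authid { user: "mike@pbs".into(), token: None };
        let token = Authid { user: "mike@pbs".into(), token: Some("t1".into()) };
        assert!(owner_check(&user, &token));  // user -> own token's backups: ok
        assert!(!owner_check(&token, &user)); // token -> user's backups: denied
    }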
34aa8e13b6 client/remote: allow using ApiToken + secret
in place of user + password.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
babab85b56 api: add permissions endpoint
and adapt privilege calculation to return propagate flag

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
6746bbb1a2 api: allow listing users + tokens
since it's not possible to extend existing structs, UserWithTokens
duplicates most of user::User.. to avoid duplicating user::ApiToken as
well, this returns full API token IDs, not just the token name part.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
942078c40b api: add API token endpoints
beneath the user endpoint.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
c30816c1f8 REST: extract and handle API tokens
and refactor handling of headers in the REST server while we're at it.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
e6dc35acb8 replace Userid with Authid
in most generic places. this is accompanied by a change in
RpcEnvironment to purposefully break existing call sites.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:11:39 +01:00
e10c5c74f6 bump proxmox dependency to 0.6.0 for api tokens and tfa
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-10-29 15:11:39 +01:00
f8adf8f83f config: add token.shadow file
containing pairs of token ids and hashed secret values.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:11:39 +01:00
e0538349e2 api: add Authid as wrapper around Userid
with an optional Tokenname, appended with '!' as delimiter in the string
representation like for PVE.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:11:39 +01:00
0903403ce7 bump version to 0.9.3-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 14:58:21 +01:00
b6563f48ad GC: improve task logs
Make it clearer that removed files are chunks (not indexes or
something like that; the user cannot know that we do not touch those here)

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 14:47:39 +01:00
932390bd46 GC: fix logging leftover bad chunks
fixes commit b4fb262335, which copied
over the "Removed bad files:" block, but only adapted the log text,
not the actual variable.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 14:40:29 +01:00
6b7688aa98 ui: datastore: fix sync/verify job removal prompt
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 14:34:31 +01:00
ab0cf7e6a1 ui: drop id field from verify/sync add window
the config is shared between multiple datastores with the ID as, well,
the unique ID, but we only show those of a single datastore.

So if a user adds a new one with a fixed ID "12345" but a job with
that ID already exists on another store, they get an error about
duplicate IDs, but cannot tell why, as that duplicate job is not visible
(filtered away)

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 14:22:43 +01:00
264779e704 server/worker_task: simplify task log writing
instead of pre-rotating 1000 tasks
(which resulted in 2 writes each time an active worker finished),
simply append finished tasks to the archive (which will be rotated)

the page cache should be good enough so that we can get the task logs fast

since existing installations might have an 'index' file, we
still have to read tasks from there, but only if it exists

this simplifies the TaskListInfoIterator a good amount

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-10-29 12:41:20 +01:00
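For illustration, appending a finished task line to the archive is just an append-mode write; the path and the line format here are placeholders:

    use std::io::Write;

    fn append_finished_task(archive: &std::path::Path, line: &str) -> std::io::Result<()> {
        let mut file = std::fs::OpenOptions::new()
            .create(true)
            .append(true)
            .open(archive)?;
        writeln!(file, "{}", line)
    }

    fn main() -> std::io::Result<()> {
        append_finished_task(std::path::Path::new("/tmp/task-archive"), "UPID:example: OK")
    }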
7f3d91003c worker task: remove debug print, faster modulo
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 12:35:33 +01:00
14e0862509 api: datastore status: introduce proper structs and restore compatibility
by moving the properties of the storage status out again to the top
level object

also introduce proper structs for the types used, to get type-safety
and better documentation for the api calls

this changes the backup counts from an array of [groups,snapshots] to
an object/struct with { groups, snapshots } and includes 'other' types
(though we do not have any at the moment)

this way it is better documented

this also adapts the ui code to cope with the api changes

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-10-29 12:31:27 +01:00
9e733dae48 send sync job status emails 2020-10-29 12:22:50 +01:00
bfea476be2 schedule_datastore_sync_jobs: remove unnecessary clone() 2020-10-29 12:22:41 +01:00
385cf2bd9d send_job_status_mail: correctly escape html characters 2020-10-29 11:22:08 +01:00
d6373f3525 garbage_collection: log deduplication factor 2020-10-29 11:13:01 +01:00
01f37e01c3 ui: datastore: use pointer cursor for edit notes
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 10:45:37 +01:00
b4fb262335 garbage_collection: log bad chunks (still_bad value) 2020-10-29 10:24:31 +01:00
5499bd3dee fix #2998: encode mtime as i64 instead of u64
saves a file's mtime as i64 instead of u64, which enables backup of
files with negative mtime

catalog_decode_i64 is compatible with encoded u64 values (if < 2^63)
but not the reverse, so all "old" catalogs can be read with the new
decoder, but catalogs that contain negative mtimes will decode wrongly
on older clients

also remove the arbitrary maximum value of 2^63 - 1 for
encode_u64 (we just use up to 10 bytes now) and correctly
decode them, and update the comments accordingly

also adds tests for i64 encode/decode and for compatibility between
u64 encode and i64 decode

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-10-29 08:51:10 +01:00
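The compatibility claim rests on a simple two's-complement invariant, sketched here independently of the actual catalog varint format:

    fn main() {
        // Any u64 below 2^63 survives the round trip through i64 unchanged,
        // so old unsigned-encoded mtimes decode correctly with the new decoder.
        let old_mtime: u64 = 1_600_000_000;
        assert!(old_mtime < (1u64 << 63));
        assert_eq!((old_mtime as i64) as u64, old_mtime);

        // Negative mtimes are representable now, but have no valid encoding
        // under the old unsigned scheme, so old clients decode them wrongly.
        let negative_mtime: i64 = -1;
        assert!(negative_mtime < 0);
    }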
d771a608f5 verify: directly pass manifest to filter function
In order to avoid loading the manifest twice during verify.
2020-10-29 07:59:19 +01:00
227a39b34b bump version to 0.9.2-2
re-use the changelog as this was not released publicly and it's just
a small fix

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-28 23:05:58 +01:00
f9beae9cc9 client: adapt to changed datastore status return schema
fixes commit 16f9f244cf, which extended
the return schema of the status API but did not adapt the client
status command to that.

Simply define our own tiny return schema and use that.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-28 22:59:40 +01:00
198 changed files with 11511 additions and 2966 deletions

View File

@ -1,7 +1,16 @@
[package]
name = "proxmox-backup"
version = "0.9.2"
authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
version = "1.0.5"
authors = [
"Dietmar Maurer <dietmar@proxmox.com>",
"Dominik Csapak <d.csapak@proxmox.com>",
"Christian Ebner <c.ebner@proxmox.com>",
"Fabian Grünbichler <f.gruenbichler@proxmox.com>",
"Stefan Reiter <s.reiter@proxmox.com>",
"Thomas Lamprecht <t.lamprecht@proxmox.com>",
"Wolfgang Bumiller <w.bumiller@proxmox.com>",
"Proxmox Support Team <support@proxmox.com>",
]
edition = "2018"
license = "AGPL-3"
description = "Proxmox Backup"
@ -37,8 +46,9 @@ pam = "0.7"
pam-sys = "0.5"
percent-encoding = "2.1"
pin-utils = "0.1.0"
pin-project = "0.4"
pathpatterns = "0.1.2"
proxmox = { version = "0.5.0", features = [ "sortable-macro", "api-macro", "websocket" ] }
proxmox = { version = "0.7.2", features = [ "sortable-macro", "api-macro", "websocket" ] }
#proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
proxmox-fuse = "0.1.0"

View File

@ -19,7 +19,8 @@ USR_SBIN := \
SERVICE_BIN := \
proxmox-backup-api \
proxmox-backup-banner \
proxmox-backup-proxy
proxmox-backup-proxy \
proxmox-daily-update \
ifeq ($(BUILD_MODE), release)
CARGO_BUILD_ARGS += --release

233
debian/changelog vendored
View File

@ -1,4 +1,233 @@
rust-proxmox-backup (0.9.2-1) unstable; urgency=medium
rust-proxmox-backup (1.0.5-1) unstable; urgency=medium
* client: restore: print meta information exclusively to standard error
-- Proxmox Support Team <support@proxmox.com> Wed, 25 Nov 2020 15:29:58 +0100
rust-proxmox-backup (1.0.4-1) unstable; urgency=medium
* fingerprint: add bytes() accessor
* ui: fix broken gettext use
* cli: move more commands into "snapshot" sub-command
-- Proxmox Support Team <support@proxmox.com> Wed, 25 Nov 2020 06:37:41 +0100
rust-proxmox-backup (1.0.3-1) unstable; urgency=medium
* client: inform user when automatically using the default encryption key
* ui: UserView: render name as 'Firstname Lastname'
* proxmox-backup-manager: add versions command
* pxar: fix anchored exclusion at archive root
* pxar: include .pxarexclude files in the archive
* client: expose all-file-systems option
* api: make expensive parts of datastore status opt-in
* api: include datastore ID in invalid owner errors
* garbage collection: treat .bad files like regular chunks to ensure they
are removed if not referenced anymore
* client: fix issues with encoded UPID strings
* encryption: add fingerprint to key config
* client: add 'key show' command
* fix #3139: add key fingerprint to backup snapshot manifest and check it
when loading with a key
* ui: add snapshot/file fingerprint tooltip
-- Proxmox Support Team <support@proxmox.com> Tue, 24 Nov 2020 08:55:47 +0100
rust-proxmox-backup (1.0.1-1) unstable; urgency=medium
* ui: datastore summary: drop 'removed bytes' display
* ui: datastore add: set default schedule
* prune sim: make backup schedule a form, bind update button to its validity
* daemon: add workaround for race in reloading and systemd 'ready' notification
-- Proxmox Support Team <support@proxmox.com> Wed, 11 Nov 2020 10:18:12 +0100
rust-proxmox-backup (1.0.0-1) unstable; urgency=medium
* fix #3121: forbid removing used remotes
* docs: backup-client: encryption: discuss paperkey command
* pxar: log when skipping mount points
* ui: show also parent ACLs which affect a datastore in its panel
* api: node/apt: add versions call
* ui: make Datastore a selectable panel again. Show a datastore summary
list, and provide unfiltered access to all sync and verify jobs.
* ui: add help tool-button to various panels
* ui: set various onlineHelp buttons
* zfs: mount new zpools created via API under /mnt/datastore/<id>
* ui: move disks/directory views to its own tab panel
* fix #3060: continue sync if we cannot acquire the group lock
* HttpsConnector: include destination on connect errors
* fix #3060: improve get_owner error handling
* remote.cfg: rename userid to 'auth-id'
* verify: log/warn on invalid owner
-- Proxmox Support Team <support@proxmox.com> Tue, 10 Nov 2020 14:36:13 +0100
rust-proxmox-backup (0.9.7-1) unstable; urgency=medium
* ui: add remote store selector
* tools/daemon: fix reload with open connections
* pxar/create: fix endless loop for shrinking files
* pxar/create: handle ErrorKind::Interrupted for file reads
* ui: add action-button for changing backup group owner
* docs: add interactive prune simulator
* verify: fix unprivileged verification jobs
* tasks: allow access to job tasks
* drop internal 'backup@pam' owner, sync jobs need to set an explicit owner
* api: datastore: allow to set "verify-new" option over API
* ui: datastore: add Options tab, allowing one to change per-datastore
notification and verify-new options
* docs: scroll navigation bar to current active section
-- Proxmox Support Team <support@proxmox.com> Mon, 09 Nov 2020 07:36:58 +0100
rust-proxmox-backup (0.9.6-1) unstable; urgency=medium
* fix #3106: improve queueing new incoming connections
* fix #2870: sync: ensure an updated ticket is used, if available
* proxy: log if there are too many open connections
* ui: SyncJobEdit: fix sending 'delete' values on SyncJob creation
* datastore config: allow to configure who receives job notify emails
* ui: fix task description for log rotate
* proxy: also rotate auth.log file
* ui: add shell panel under administration
* ui: sync jobs: only set default schedule when creating new jobs and some
other small fixes
-- Proxmox Support Team <support@proxmox.com> Wed, 04 Nov 2020 19:12:57 +0100
rust-proxmox-backup (0.9.5-1) unstable; urgency=medium
* ui: user menu: allow one to change the language while staying logged in
* proxmox-backup-manager: add subscription commands
* server/rest: also accept = as token separator
* privs: allow reading snapshot notes with Datastore.Audit
* privs: enforce Datastore.Modify|Backup to set backup notes
* verify: introduce and use new Datastore.Verify privilege
* docs: add API tokens to documentation
* ui: various smaller layout and icon improvements
* api: implement apt pkg cache for caching pending updates
* api: apt: implement support to send notification email on new updates
* add daily update and maintenance task
* fix #2864: add owner option to sync
* sync: allow sync for non-superusers under special conditions
* config: support deprecated netmask when parsing interfaces file
* server: implement access log rotation with re-open via command socket
* garbage collect: improve index error messages
* fix #3039: use the same ID regex for info and api
* ui: administration: allow extensive filtering of the worker task
* report: add api endpoint and function to generate report
-- Proxmox Support Team <support@proxmox.com> Tue, 03 Nov 2020 17:41:17 +0100
rust-proxmox-backup (0.9.4-2) unstable; urgency=medium
* make postinst (update) script more resilient
-- Proxmox Support Team <support@proxmox.com> Thu, 29 Oct 2020 20:09:30 +0100
rust-proxmox-backup (0.9.4-1) unstable; urgency=medium
* implement API-token
* client/remote: allow using API-token + secret
* ui/cli: implement API-token management interface and commands
* ui: add widget to view the effective permissions of a user or token
* ui: datastore summary: handle error when having zero snapshots of any type
* ui: move user, token and permissions into an access control tab panel
-- Proxmox Support Team <support@proxmox.com> Thu, 29 Oct 2020 17:19:13 +0100
rust-proxmox-backup (0.9.3-1) unstable; urgency=medium
* fix #2998: encode mtime as i64 instead of u64
* GC: log the number of leftover bad chunks we could not yet cleanup, as no
valid one replaced them. Also log deduplication factor.
* send sync job status emails
* api: datastore status: introduce proper structs and restore compatibility
to 0.9.1
* ui: drop id field from verify/sync add window, they are now seen as internal
-- Proxmox Support Team <support@proxmox.com> Thu, 29 Oct 2020 14:58:13 +0100
rust-proxmox-backup (0.9.2-2) unstable; urgency=medium
* rework server web-interface, move more datastore related panels as tabs
inside the datastore view
@ -76,7 +305,7 @@ rust-proxmox-backup (0.9.2-1) unstable; urgency=medium
* ui: datastore: show snapshot manifest comment and allow to edit them
-- Proxmox Support Team <support@proxmox.com> Wed, 28 Oct 2020 21:27:02 +0100
-- Proxmox Support Team <support@proxmox.com> Wed, 28 Oct 2020 23:05:41 +0100
rust-proxmox-backup (0.9.1-1) unstable; urgency=medium

14
debian/control vendored
View File

@ -33,11 +33,12 @@ Build-Depends: debhelper (>= 11),
librust-pam-sys-0.5+default-dev,
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
librust-percent-encoding-2+default-dev (>= 2.1-~~),
librust-pin-project-0.4+default-dev,
librust-pin-utils-0.1+default-dev,
librust-proxmox-0.5+api-macro-dev,
librust-proxmox-0.5+default-dev,
librust-proxmox-0.5+sortable-macro-dev,
librust-proxmox-0.5+websocket-dev,
librust-proxmox-0.7+api-macro-dev (>= 0.7.2-~~),
librust-proxmox-0.7+default-dev (>= 0.7.2-~~),
librust-proxmox-0.7+sortable-macro-dev (>= 0.7.2-~~),
librust-proxmox-0.7+websocket-dev (>= 0.7.2-~~),
librust-proxmox-fuse-0.1+default-dev,
librust-pxar-0.6+default-dev (>= 0.6.1-~~),
librust-pxar-0.6+futures-io-dev (>= 0.6.1-~~),
@ -78,7 +79,7 @@ Build-Depends: debhelper (>= 11),
uuid-dev,
debhelper (>= 12~),
bash-completion,
pve-eslint,
pve-eslint (>= 7.12.1-1),
python3-docutils,
python3-pygments,
rsync,
@ -104,7 +105,9 @@ Depends: fonts-font-awesome,
libjs-extjs (>= 6.0.1),
libzstd1 (>= 1.3.8),
lvm2,
openssh-server,
pbs-i18n,
postfix | mail-transport-agent,
proxmox-backup-docs,
proxmox-mini-journalreader,
proxmox-widget-toolkit (>= 2.3-6),
@ -113,6 +116,7 @@ Depends: fonts-font-awesome,
${misc:Depends},
${shlibs:Depends},
Recommends: zfsutils-linux,
ifupdown2,
Description: Proxmox Backup Server daemon with tools and GUI
This package contains the Proxmox Backup Server daemons and related
tools. This includes a web-based graphical user interface.

3
debian/control.in vendored
View File

@ -4,7 +4,9 @@ Depends: fonts-font-awesome,
libjs-extjs (>= 6.0.1),
libzstd1 (>= 1.3.8),
lvm2,
openssh-server,
pbs-i18n,
postfix | mail-transport-agent,
proxmox-backup-docs,
proxmox-mini-journalreader,
proxmox-widget-toolkit (>= 2.3-6),
@ -13,6 +15,7 @@ Depends: fonts-font-awesome,
${misc:Depends},
${shlibs:Depends},
Recommends: zfsutils-linux,
ifupdown2,
Description: Proxmox Backup Server daemon with tools and GUI
This package contains the Proxmox Backup Server daemons and related
tools. This includes a web-based graphical user interface.

View File

@ -14,7 +14,7 @@ section = "admin"
build_depends = [
"debhelper (>= 12~)",
"bash-completion",
"pve-eslint",
"pve-eslint (>= 7.12.1-1)",
"python3-docutils",
"python3-pygments",
"rsync",

View File

@ -1,2 +1,2 @@
proxmox-backup-server: package-installs-apt-sources etc/apt/sources.list.d/pbstest-beta.list
proxmox-backup-server: package-installs-apt-sources etc/apt/sources.list.d/pbs-enterprise.list
proxmox-backup-server: systemd-service-file-refers-to-unusual-wantedby-target lib/systemd/system/proxmox-backup-banner.service getty.target

27
debian/postinst vendored
View File

@ -15,12 +15,33 @@ case "$1" in
fi
deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
flock -w 30 /etc/proxmox-backup/.datastore.lck sed -i '/^\s\+verify-schedule /d' /etc/proxmox-backup/datastore.cfg
# FIXME: Remove with 1.1
if test -n "$2"; then
if dpkg --compare-versions "$2" 'lt' '0.9.4-1'; then
if grep -s -q -P -e '^\s+verify-schedule ' /etc/proxmox-backup/datastore.cfg; then
echo "NOTE: drop all verify schedules from datastore config."
echo "You can now add more flexible verify jobs"
flock -w 30 /etc/proxmox-backup/.datastore.lck \
sed -i '/^\s\+verify-schedule /d' /etc/proxmox-backup/datastore.cfg || true
fi
fi
if dpkg --compare-versions "$2" 'le' '0.9.5-1'; then
chown --quiet backup:backup /var/log/proxmox-backup/api/auth.log || true
fi
if dpkg --compare-versions "$2" 'le' '0.9.7-1'; then
if [ -e /etc/proxmox-backup/remote.cfg ]; then
echo "NOTE: Switching over remote.cfg to new field names.."
flock -w 30 /etc/proxmox-backup/.remote.lck \
sed -i \
-e 's/^\s\+userid /\tauth-id /g' \
/etc/proxmox-backup/remote.cfg || true
fi
fi
fi
# FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
echo "Fixing up termproxy user id in task log..."
flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active
flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active || true
fi
;;

3
debian/prerm vendored
View File

@ -6,5 +6,6 @@ set -e
# modeled after dh_systemd_start output
if [ -d /run/systemd/system ] && [ "$1" = remove ]; then
deb-systemd-invoke stop 'proxmox-backup-banner.service' 'proxmox-backup-proxy.service' 'proxmox-backup.service' >/dev/null || true
deb-systemd-invoke stop 'proxmox-backup-banner.service' 'proxmox-backup-proxy.service' \
'proxmox-backup.service' 'proxmox-backup-daily-update.timer' >/dev/null || true
fi

View File

@ -1 +1,2 @@
/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/html/proxmox-backup.pdf
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/prune-simulator/extjs

View File

@ -1,10 +1,13 @@
etc/proxmox-backup-proxy.service /lib/systemd/system/
etc/proxmox-backup.service /lib/systemd/system/
etc/proxmox-backup-banner.service /lib/systemd/system/
etc/pbstest-beta.list /etc/apt/sources.list.d/
etc/proxmox-backup-daily-update.service /lib/systemd/system/
etc/proxmox-backup-daily-update.timer /lib/systemd/system/
etc/pbs-enterprise.list /etc/apt/sources.list.d/
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-api
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-daily-update
usr/sbin/proxmox-backup-manager
usr/share/javascript/proxmox-backup/index.hbs
usr/share/javascript/proxmox-backup/css/ext6-pbs.css

View File

@ -0,0 +1 @@
rm_conffile /etc/apt/sources.list.d/pbstest-beta.list 1.0.0~ proxmox-backup-server

1
debian/rules vendored
View File

@ -38,6 +38,7 @@ override_dh_auto_install:
LIBDIR=/usr/lib/$(DEB_HOST_MULTIARCH)
override_dh_installsystemd:
dh_installsystemd -pproxmox-backup-server proxmox-backup-daily-update.timer
# note: we start/try-reload-restart services manually in postinst
dh_installsystemd --no-start --no-restart-after-upgrade

View File

@ -14,6 +14,11 @@ MANUAL_PAGES := \
proxmox-backup-client.1 \
proxmox-backup-manager.1
PRUNE_SIMULATOR_FILES := \
prune-simulator/index.html \
prune-simulator/documentation.html \
prune-simulator/clear-trigger.png \
prune-simulator/prune-simulator.js
# Sphinx documentation setup
SPHINXOPTS =
@ -74,10 +79,11 @@ onlinehelpinfo:
@echo "Build finished. OnlineHelpInfo.js is in $(BUILDDIR)/scanrefs."
.PHONY: html
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES}
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
cp images/proxmox-logo.svg $(BUILDDIR)/html/_static/
cp custom.css $(BUILDDIR)/html/_static/
install -m 0644 custom.js custom.css images/proxmox-logo.svg $(BUILDDIR)/html/_static/
install -dm 0755 $(BUILDDIR)/html/prune-simulator
install -m 0644 ${PRUNE_SIMULATOR_FILES} $(BUILDDIR)/html/prune-simulator
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

View File

@ -44,7 +44,7 @@ def scan_extjs_files(wwwdir="../www"): # a bit rough i know, but we can optimize
js_files.append(os.path.join(root, filename))
for js_file in js_files:
fd = open(js_file).read()
allmatch = re.findall("onlineHelp:\s*[\'\"](.*?)[\'\"]", fd, re.M)
allmatch = re.findall("(?:onlineHelp:|get_help_tool\s*\()\s*[\'\"](.*?)[\'\"]", fd, re.M)
for match in allmatch:
anchor = match
anchor = re.sub('_', '-', anchor) # normalize labels
@ -73,7 +73,9 @@ class ReflabelMapper(Builder):
'link': '/docs/index.html',
'title': 'Proxmox Backup Server Documentation Index',
}
self.env.used_anchors = scan_extjs_files()
# Disabled until we find a sensible way to scan proxmox-widget-toolkit
# as well
#self.env.used_anchors = scan_extjs_files()
if not os.path.isdir(self.outdir):
os.mkdir(self.outdir)
@ -93,6 +95,9 @@ class ReflabelMapper(Builder):
logger.info('traversing section {}'.format(title.astext()))
ref_name = getattr(title, 'rawsource', title.astext())
if (ref_name[:7] == ':term:`'):
ref_name = ref_name[7:-1]
self.env.online_help[labelid] = {'link': '', 'title': ''}
self.env.online_help[labelid]['link'] = "/docs/" + os.path.basename(filename_html) + "#{}".format(labelid)
self.env.online_help[labelid]['title'] = ref_name
@ -112,15 +117,18 @@ class ReflabelMapper(Builder):
def validate_anchors(self):
#pprint(self.env.online_help)
to_remove = []
for anchor in self.env.used_anchors:
if anchor not in self.env.online_help:
logger.info("[-] anchor {} is missing from onlinehelp!".format(anchor))
for anchor in self.env.online_help:
if anchor not in self.env.used_anchors and anchor != 'pbs_documentation_index':
logger.info("[*] anchor {} not used! deleting...".format(anchor))
to_remove.append(anchor)
for anchor in to_remove:
self.env.online_help.pop(anchor, None)
# Disabled until we find a sensible way to scan proxmox-widget-toolkit
# as well
#for anchor in self.env.used_anchors:
# if anchor not in self.env.online_help:
# logger.info("[-] anchor {} is missing from onlinehelp!".format(anchor))
#for anchor in self.env.online_help:
# if anchor not in self.env.used_anchors and anchor != 'pbs_documentation_index':
# logger.info("[*] anchor {} not used! deleting...".format(anchor))
# to_remove.append(anchor)
#for anchor in to_remove:
# self.env.online_help.pop(anchor, None)
return
def finish(self):

View File

@ -12,31 +12,31 @@ on the backup server.
[[username@]server[:port]:]datastore
The default value for ``username`` is ``root@pam``. If no server is specified,
The default value for ``username`` is ``root@pam``. If no server is specified,
the default is the local host (``localhost``).
You can specify a port if your backup server is only reachable on a different
port (e.g. with NAT and port forwarding).
Note that if the server is an IPv6 address, you have to write it with
square brackets (e.g. [fe80::01]).
Note that if the server is an IPv6 address, you have to write it with square
brackets (for example, `[fe80::01]`).
You can pass the repository with the ``--repository`` command
line option, or by setting the ``PBS_REPOSITORY`` environment
variable.
You can pass the repository with the ``--repository`` command line option, or
by setting the ``PBS_REPOSITORY`` environment variable.
Here some examples of valid repositories and the real values
================================ ============ ================== ===========
Example User Host:Port Datastore
================================ ============ ================== ===========
mydatastore ``root@pam`` localhost:8007 mydatastore
myhostname:mydatastore ``root@pam`` myhostname:8007 mydatastore
user@pbs@myhostname:mydatastore ``user@pbs`` myhostname:8007 mydatastore
192.168.55.55:1234:mydatastore ``root@pam`` 192.168.55.55:1234 mydatastore
[ff80::51]:mydatastore ``root@pam`` [ff80::51]:8007 mydatastore
[ff80::51]:1234:mydatastore ``root@pam`` [ff80::51]:1234 mydatastore
================================ ============ ================== ===========
================================ ================== ================== ===========
Example User Host:Port Datastore
================================ ================== ================== ===========
mydatastore ``root@pam`` localhost:8007 mydatastore
myhostname:mydatastore ``root@pam`` myhostname:8007 mydatastore
user@pbs@myhostname:mydatastore ``user@pbs`` myhostname:8007 mydatastore
user\@pbs!token@host:store ``user@pbs!token`` myhostname:8007 mydatastore
192.168.55.55:1234:mydatastore ``root@pam`` 192.168.55.55:1234 mydatastore
[ff80::51]:mydatastore ``root@pam`` [ff80::51]:8007 mydatastore
[ff80::51]:1234:mydatastore ``root@pam`` [ff80::51]:1234 mydatastore
================================ ================== ================== ===========
Environment Variables
---------------------
@ -45,16 +45,16 @@ Environment Variables
The default backup repository.
``PBS_PASSWORD``
When set, this value is used for the password required for the
backup server.
When set, this value is used for the password required for the backup server.
You can also set this to an API token secret.
``PBS_ENCRYPTION_PASSWORD``
When set, this value is used to access the secret encryption key (if
protected by password).
``PBS_FINGERPRINT`` When set, this value is used to verify the server
certificate (only used if the system CA certificates cannot
validate the certificate).
certificate (only used if the system CA certificates cannot validate the
certificate).
Output Format
@ -365,9 +365,22 @@ To set up a master key:
backed up. It can happen, for example, that you back up an entire system, using
a key on that system. If the system then becomes inaccessible for any reason
and needs to be restored, this will not be possible as the encryption key will be
lost along with the broken system. In preparation for the worst case scenario,
you should consider keeping a paper copy of this key locked away in
a safe place.
lost along with the broken system.
It is recommended that you keep your master key safe, but easily accessible, in
order to allow quick disaster recovery. For this reason, the best place to store it
is in your password manager, where it is immediately recoverable. As a backup to
this, you should also save the key to a USB drive and store that in a secure
place. This way, it is detached from any system, but is still easy to recover
from, in case of emergency. Finally, in preparation for the worst case scenario,
you should also consider keeping a paper copy of your master key locked away in
a safe place. The ``paperkey`` subcommand can be used to create a QR encoded
version of your master key. The following command sends the output of the
``paperkey`` command to a text file, for easy printing.
.. code-block:: console
proxmox-backup-client key paperkey --output-format text > qrkey.txt
Restoring Data
@ -379,11 +392,11 @@ periodic recovery tests to ensure that you can access the data in
case of problems.
First, you need to find the snapshot which you want to restore. The snapshot
command provides a list of all the snapshots on the server:
list command provides a list of all the snapshots on the server:
.. code-block:: console
# proxmox-backup-client snapshots
# proxmox-backup-client snapshot list
┌────────────────────────────────┬─────────────┬────────────────────────────────────┐
│ snapshot │ size │ files │
╞════════════════════════════════╪═════════════╪════════════════════════════════════╡
@ -535,6 +548,29 @@ To remove the ticket, issue a logout:
# proxmox-backup-client logout
.. _changing-backup-owner:
Changing the Owner of a Backup Group
------------------------------------
By default, the owner of a backup group is the user which was used to originally
create that backup group (or in the case of sync jobs, ``root@pam``). This
means that if a user ``mike@pbs`` created a backup, another user ``john@pbs``
can not be used to create backups in that same backup group. In case you want
to change the owner of a backup, you can do so with the below command, using a
user that has ``Datastore.Modify`` privileges on the datastore.
.. code-block:: console
# proxmox-backup-client change-owner vm/103 john@pbs
This can also be done from within the web interface, by navigating to the
`Content` section of the datastore that contains the backup group and
selecting the user icon under the `Actions` column. Common cases for this could
be to change the owner of a sync job from ``root@pam``, or to repurpose a
backup group.
.. _backup-pruning:
Pruning and Removing Backups
@ -545,7 +581,7 @@ command:
.. code-block:: console
# proxmox-backup-client forget <snapshot>
# proxmox-backup-client snapshot forget <snapshot>
.. caution:: This command removes all archives in this backup

View File

@ -171,6 +171,7 @@ html_theme_options = {
'extra_nav_links': {
'Proxmox Homepage': 'https://proxmox.com',
'PDF': 'proxmox-backup.pdf',
'Prune Simulator' : 'prune-simulator/index.html',
},
'sidebar_width': '320px',
@ -228,6 +229,10 @@ html_favicon = 'images/favicon.ico'
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_js_files = [
'custom.js',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.

7
docs/custom.js Normal file
View File

@ -0,0 +1,7 @@
window.addEventListener('DOMContentLoaded', (event) => {
let activeSection = document.querySelector("a.current");
if (activeSection) {
// https://developer.mozilla.org/en-US/docs/Web/API/Element/scrollIntoView
activeSection.scrollIntoView({ block: 'center' });
}
});

View File

@ -27,7 +27,7 @@ How long will my Proxmox Backup Server version be supported?
+-----------------------+--------------------+---------------+------------+--------------------+
|Proxmox Backup Version | Debian Version | First Release | Debian EOL | Proxmox Backup EOL |
+=======================+====================+===============+============+====================+
|Proxmox Backup 1.x | Debian 10 (Buster) | tba | tba | tba |
|Proxmox Backup 1.x | Debian 10 (Buster) | 2020-11 | tba | tba |
+-----------------------+--------------------+---------------+------------+--------------------+

View File

@ -4,7 +4,7 @@ Graphical User Interface
Proxmox Backup Server offers an integrated, web-based interface to manage the
server. This means that you can carry out all administration tasks through your
web browser, and that you don't have to worry about installing extra management
tools. The web interface also provides a built in console, so if you prefer the
tools. The web interface also provides a built-in console, so if you prefer the
command line or need some extra control, you have this option.
The web interface can be accessed via https://youripaddress:8007. The default
@ -28,7 +28,6 @@ Login
-----
.. image:: images/screenshots/pbs-gui-login-window.png
:width: 250
:align: right
:alt: PBS login window
@ -44,14 +43,13 @@ GUI Overview
------------
.. image:: images/screenshots/pbs-gui-dashboard.png
:width: 250
:align: right
:alt: PBS GUI Dashboard
The Proxmox Backup Server web interface consists of 3 main sections:
* **Header**: At the top. This shows version information, and contains buttons to view
documentation, monitor running tasks, and logout.
documentation, monitor running tasks, set the language and logout.
* **Sidebar**: On the left. This contains the configuration options for
the server.
* **Configuration Panel**: In the center. This contains the control interface for the
@ -79,18 +77,17 @@ Configuration
The Configuration section contains some system configuration options, such as
time and network configuration. It also contains the following subsections:
* **User Management**: Add users and manage accounts
* **Permissions**: Manage permissions for various users
* **Access Control**: Add and manage users, API tokens, and the permissions
associated with these items
* **Remotes**: Add, edit and remove remotes (see :term:`Remote`)
* **Sync Jobs**: Manage and run sync jobs to remotes
* **Subscription**: Upload a subscription key and view subscription status
* **Subscription**: Upload a subscription key, view subscription status and
access a text-based system report.
Administration
^^^^^^^^^^^^^^
.. image:: images/screenshots/pbs-gui-administration-serverstatus.png
:width: 250
:align: right
:alt: Administration: Server Status overview
@ -105,7 +102,6 @@ tasks and information. These are:
* **Tasks**: Task history with multiple filter options
.. image:: images/screenshots/pbs-gui-disks.png
:width: 250
:align: right
:alt: Administration: Disks
@ -120,16 +116,21 @@ The administration menu item also contains a disk management subsection:
Datastore
^^^^^^^^^
.. image:: images/screenshots/pbs-gui-datastore.png
:width: 250
.. image:: images/screenshots/pbs-gui-datastore-summary.png
:align: right
:alt: Datastore Configuration
The Datastore section provides an interface for creating and managing
datastores. It contains a subsection for each datastore on the system, in
which you can use the top panel to view:
The Datastore section contains interfaces for creating and managing
datastores. It contains a button to create a new datastore on the server, as
well as a subsection for each datastore on the system, in which you can use the
top panel to view:
* **Summary**: Access a range of datastore usage statistics
* **Content**: Information on the datastore's backup groups and their respective
contents
* **Statistics**: Usage statistics for the datastore
* **Permissions**: View and manage permissions for the datastore
* **Prune & GC**: Schedule :ref:`pruning <backup-pruning>` and :ref:`garbage
collection <garbage-collection>` operations, and run garbage collection
manually
* **Sync Jobs**: Create, manage and run :ref:`syncjobs` from remote servers
* **Verify Jobs**: Create, manage and run :ref:`maintenance_verification` jobs on the
datastore

View File

@ -9,7 +9,7 @@ Debian_ from the provided package repository.
.. include:: package-repositories.rst
Server installation
Server Installation
-------------------
The backup server stores the actual backed up data and provides a web based GUI
@ -37,22 +37,21 @@ Download the ISO from |DOWNLOADS|.
It includes the following:
* The `Proxmox Backup`_ server installer, which partitions the local
disk(s) with ext4, ext3, xfs or ZFS, and installs the operating
system
disk(s) with ext4, xfs or ZFS, and installs the operating system
* Complete operating system (Debian Linux, 64-bit)
* Our Linux kernel with ZFS support
* Proxmox Linux kernel with ZFS support
* Complete tool-set to administer backups and all necessary resources
* Web based GUI management interface
* Web based management interface
.. note:: During the installation process, the complete server
is used by default and all existing data is removed.
Install `Proxmox Backup`_ server on Debian
Install `Proxmox Backup`_ Server on Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox ships as a set of Debian packages which can be installed on top of a
@ -84,11 +83,11 @@ support, and a set of common and useful packages.
when LVM_ or ZFS_ is used. The network configuration is completely up to you
as well.
.. note:: You can access the web interface of the Proxmox Backup Server with
your web browser, using HTTPS on port 8007. For example at
``https://<ip-or-dns-name>:8007``
.. Note:: You can access the web interface of the Proxmox Backup Server with
your web browser, using HTTPS on port 8007. For example at
``https://<ip-or-dns-name>:8007``
Install Proxmox Backup server on `Proxmox VE`_
Install Proxmox Backup Server on `Proxmox VE`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
After configuring the
@ -104,14 +103,14 @@ After configuring the
server to store backups. Should the hypervisor server fail, you can
still access the backups.
.. note::
You can access the web interface of the Proxmox Backup Server with your web
browser, using HTTPS on port 8007. For example at ``https://<ip-or-dns-name>:8007``
.. Note:: You can access the web interface of the Proxmox Backup Server with
your web browser, using HTTPS on port 8007. For example at
``https://<ip-or-dns-name>:8007``
Client installation
Client Installation
-------------------
Install `Proxmox Backup`_ client on Debian
Install `Proxmox Backup`_ Client on Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox ships as a set of Debian packages to be installed on

View File

@ -127,8 +127,7 @@ language.
-- `The Rust Programming Language <https://doc.rust-lang.org/book/ch00-00-introduction.html>`_
.. todo:: further explain the software stack
.. _get_help:
Getting Help
------------

View File

@ -1,13 +1,184 @@
Maintenance Tasks
=================
.. _maintenance_pruning:
Pruning
-------
Prune lets you specify which backup snapshots you want to keep. The
following retention options are available:
``keep-last <N>``
Keep the last ``<N>`` backup snapshots.
``keep-hourly <N>``
Keep backups for the last ``<N>`` hours. If there is more than one
backup for a single hour, only the latest is kept.
``keep-daily <N>``
Keep backups for the last ``<N>`` days. If there is more than one
backup for a single day, only the latest is kept.
``keep-weekly <N>``
Keep backups for the last ``<N>`` weeks. If there is more than one
backup for a single week, only the latest is kept.
.. note:: Weeks start on Monday and end on Sunday. The software
uses the `ISO week date`_ system and handles weeks at
the end of the year correctly.
``keep-monthly <N>``
Keep backups for the last ``<N>`` months. If there is more than one
backup for a single month, only the latest is kept.
``keep-yearly <N>``
Keep backups for the last ``<N>`` years. If there is more than one
backup for a single year, only the latest is kept.
The retention options are processed in the order given above. Each option
only covers backups within its time period. The next option does not take care
of already covered backups. It will only consider older backups.
Unfinished and incomplete backups will be removed by the prune command unless
they are newer than the last successful backup. In this case, the last failed
backup is retained.
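As a sketch of how these options are combined on the command line (assuming the
client-side ``prune`` command described in :ref:`backup-pruning`; the group name
and values below are examples only):
.. code-block:: console
# proxmox-backup-client prune host/backup-client --keep-last 3 --keep-daily 13 --keep-weekly 8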
Prune Simulator
^^^^^^^^^^^^^^^
You can use the built-in `prune simulator <prune-simulator/index.html>`_
to explore the effect of different retention options with various backup
schedules.
Manual Pruning
^^^^^^^^^^^^^^
.. image:: images/screenshots/pbs-gui-datastore-content-prune-group.png
:target: _images/pbs-gui-datastore-content-prune-group.png
:align: right
:alt: Prune and garbage collection options
To access pruning functionality for a specific backup group, you can use the
prune command line option discussed in :ref:`backup-pruning`, or navigate to
the **Content** tab of the datastore and click the scissors icon in the
**Actions** column of the relevant backup group.
Prune Schedules
^^^^^^^^^^^^^^^
To prune on a datastore level, scheduling options can be found under the
**Prune & GC** tab of the datastore. Here you can set retention settings and
edit the interval at which pruning takes place.
.. image:: images/screenshots/pbs-gui-datastore-prunegc.png
:target: _images/pbs-gui-datastore-prunegc.png
:align: right
:alt: Prune and garbage collection options
Retention Settings Example
^^^^^^^^^^^^^^^^^^^^^^^^^^
The backup frequency and retention of old backups may depend on how often data
changes, and how important an older state may be, in a specific workload.
When backups act as a company's document archive, there may also be legal
requirements for how long backup snapshots must be kept.
For this example, we assume that you are doing daily backups, have a retention
period of 10 years, and want the spacing between retained backups to gradually
grow for older backups.
- **keep-last:** ``3`` - even if only daily backups are taken, an admin may want
to create an extra one just before or after a big upgrade. Setting keep-last
ensures this.
- **keep-hourly:** not set - for daily backups this is not relevant; extra manual
backups are already covered by keep-last.
- **keep-daily:** ``13`` - together with keep-last, which covers at least one
day, this ensures that you have at least two weeks of backups.
- **keep-weekly:** ``8`` - ensures that you have at least two full months of
weekly backups.
- **keep-monthly:** ``11`` - together with the previous keep settings, this
ensures that you have at least a year of monthly backups.
- **keep-yearly:** ``9`` - this is for the long term archive. As you covered the
current year with the previous options, you would set this to nine for the
remaining ones, giving you a total of at least 10 years of coverage.
We recommend that you use a higher retention period than is minimally required
by your environment; you can always reduce it if you find it is unnecessarily
high, but you cannot recreate backup snapshots from the past.
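As a sketch, the equivalent retention settings could also be stored as
datastore-level prune options (assuming the ``datastore update`` subcommand of
``proxmox-backup-manager`` accepts the keep options; ``store1`` is a
placeholder):
.. code-block:: console
# proxmox-backup-manager datastore update store1 --keep-last 3 --keep-daily 13 --keep-weekly 8 --keep-monthly 11 --keep-yearly 9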
.. _maintenance_gc:
Garbage Collection
------------------
You can monitor and run :ref:`garbage collection <garbage-collection>` on the
Proxmox Backup Server using the ``garbage-collection`` subcommand of
``proxmox-backup-manager``. You can use the ``start`` subcommand to manually start garbage
collection on an entire datastore and the ``status`` subcommand to see
attributes relating to the :ref:`garbage collection <garbage-collection>`.
``proxmox-backup-manager``. You can use the ``start`` subcommand to manually
start garbage collection on an entire datastore and the ``status`` subcommand to
see attributes relating to the :ref:`garbage collection <garbage-collection>`.
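For example, with ``store1`` as a placeholder datastore name:
.. code-block:: console
# proxmox-backup-manager garbage-collection start store1
# proxmox-backup-manager garbage-collection status store1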
.. todo:: Add section on verification
This functionality can also be accessed in the GUI, by navigating to **Prune &
GC** from the top panel. From here, you can edit the schedule at which garbage
collection runs and manually start the operation.
.. _maintenance_verification:
Verification
------------
.. image:: images/screenshots/pbs-gui-datastore-verifyjob-add.png
:target: _images/pbs-gui-datastore-verifyjob-add.png
:align: right
:alt: Adding a verify job
Proxmox Backup offers various verification options to ensure that backup data is
intact. Verification is generally carried out through the creation of verify
jobs. These are scheduled tasks that run verification at a given interval (see
:ref:`calendar-events`). With these, you can set whether already verified
snapshots are ignored, as well as set a time period, after which verified snapshots
are checked again. The interface for creating verify jobs can be found under the
**Verify Jobs** tab of the datastore.
.. Note:: It is recommended that you reverify all backups at least monthly, even
if a previous verification was successful. This is because physical drives
are susceptible to damage over time, which can cause an old, working backup
to become corrupted in a process known as `bit rot/data degradation
<https://en.wikipedia.org/wiki/Data_degradation>`_. It is good practice to
have a regularly recurring (hourly/daily) verification job, which checks new
and expired backups, then another weekly/monthly job that will reverify
everything. This way, there will be no surprises when it comes to restoring
data.
Aside from using verify jobs, you can also run verification manually on entire
datastores, backup groups, or snapshots. To do this, navigate to the **Content**
tab of the datastore and either click *Verify All*, or select the *V.* icon from
the *Actions* column in the table.
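From the command line, a one-off verification of a whole datastore could look
like the following sketch (assuming a ``verify`` subcommand of
``proxmox-backup-manager``; ``store1`` is a placeholder):
.. code-block:: console
# proxmox-backup-manager verify store1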
.. _maintenance_notification:
Notifications
-------------
Proxmox Backup Server can send you notification emails about the results of
automatically scheduled verification, garbage-collection and synchronization
tasks. By default, notifications are sent to the email address configured for
the `root@pam` user. You can configure a different notification user for each
datastore.
You can also change the level of notification received per task type; the
following options are available:
* Always: send a notification for any scheduled task, independent of the
outcome
* Errors: send a notification for any scheduled task resulting in an error
* Never: do not send any notification at all

View File

@ -59,13 +59,13 @@ Sync Jobs
:alt: Add a Sync Job
Sync jobs are configured to pull the contents of a datastore on a **Remote** to
a local datastore. You can manage sync jobs under **Configuration -> Sync Jobs**
in the web interface, or using the ``proxmox-backup-manager sync-job`` command.
The configuration information for sync jobs is stored at
``/etc/proxmox-backup/sync.cfg``. To create a new sync job, click the add button
in the GUI, or use the ``create`` subcommand. After creating a sync job, you can
either start it manually on the GUI or provide it with a schedule (see
:ref:`calendar-events`) to run regularly.
a local datastore. You can manage sync jobs in the web interface, from the
**Sync Jobs** tab of the datastore which you'd like to set one up for, or using
the ``proxmox-backup-manager sync-job`` command. The configuration information
for sync jobs is stored at ``/etc/proxmox-backup/sync.cfg``. To create a new
sync job, click the add button in the GUI, or use the ``create`` subcommand.
After creating a sync job, you can either start it manually from the GUI or
provide it with a schedule (see :ref:`calendar-events`) to run regularly.
.. code-block:: console
@ -79,4 +79,17 @@ either start it manually on the GUI or provide it with a schedule (see
└────────────┴───────┴────────┴──────────────┴───────────┴─────────┘
# proxmox-backup-manager sync-job remove pbs2-local
For setting up sync jobs, the configuring user needs the following permissions:
#. ``Remote.Read`` on the ``/remote/{remote}/{remote-store}`` path
#. at least ``Datastore.Backup`` on the local target datastore (``/datastore/{store}``)
If the ``remove-vanished`` option is set, ``Datastore.Prune`` is required on
the local datastore as well. If the ``owner`` option is not set (defaulting to
``root@pam``) or set to something other than the configuring user,
``Datastore.Modify`` is required as well.
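As a sketch, the required ACL entries could be created like this (the auth-id
``sync@pbs``, the remote ``pbs2`` and the datastore names are hypothetical; the
roles used here include the privileges listed above):
.. code-block:: console
# proxmox-backup-manager acl update /remote/pbs2/remote-store RemoteSyncOperator --auth-id sync@pbs
# proxmox-backup-manager acl update /datastore/local-store DatastoreBackup --auth-id sync@pbs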
.. note:: A sync job can only sync backup groups that the configured remote's
user/API token can read. If a remote is configured with a user/API token that
only has ``Datastore.Backup`` privileges, only the limited set of accessible
snapshots owned by that user/API token can be synced.

View File

@ -1,3 +1,5 @@
.. _sysadmin_network_configuration:
Network Management
==================

View File

@ -26,11 +26,8 @@ update``.
.. FIXME for 7.0: change security update suite to bullseye-security
In addition, you need a package repository from Proxmox to get Proxmox Backup updates.
During the Proxmox Backup beta phase, only one repository (pbstest) will be
available. Once released, an Enterprise repository for production use and a
no-subscription repository will be provided.
In addition, you need a package repository from Proxmox to get Proxmox Backup
updates.
SecureApt
~~~~~~~~~
@ -72,68 +69,63 @@ Here, the output should be:
f3f6c5a3a67baf38ad178e5ff1ee270c /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
.. comment
`Proxmox Backup`_ Enterprise Repository
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
`Proxmox Backup`_ Enterprise Repository
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This will be the default, stable, and recommended repository. It is available for
all `Proxmox Backup`_ subscription users. It contains the most stable packages,
and is suitable for production use. The ``pbs-enterprise`` repository is
enabled by default:
This will be the default, stable, and recommended repository. It is available for
all `Proxmox Backup`_ subscription users. It contains the most stable packages,
and is suitable for production use. The ``pbs-enterprise`` repository is
enabled by default:
.. note:: During the Proxmox Backup beta phase only one repository (pbstest)
will be available.
.. code-block:: sources.list
:caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
.. code-block:: sources.list
:caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
To never miss important security fixes, the superuser (``root@pam`` user) is
notified via email about new packages as soon as they are available. The
change-log and details of each package can be viewed in the GUI (if available).
To never miss important security fixes, the superuser (``root@pam`` user) is
notified via email about new packages as soon as they are available. The
change-log and details of each package can be viewed in the GUI (if available).
Please note that you need a valid subscription key to access this
repository. More information regarding subscription levels and pricing can be
found at https://www.proxmox.com/en/proxmox-backup/pricing.
Please note that you need a valid subscription key to access this
repository. More information regarding subscription levels and pricing can be
found at https://www.proxmox.com/en/proxmox-backup-server/pricing
.. note:: You can disable this repository by commenting out the above
line using a `#` (at the start of the line). This prevents error
messages if you do not have a subscription key. Please configure the
``pbs-no-subscription`` repository in that case.
.. note:: You can disable this repository by commenting out the above line
using a `#` (at the start of the line). This prevents error messages if you do
not have a subscription key. Please configure the ``pbs-no-subscription``
repository in that case.
`Proxmox Backup`_ No-Subscription Repository
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
`Proxmox Backup`_ No-Subscription Repository
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As the name suggests, you do not need a subscription key to access
this repository. It can be used for testing and non-production
use. It is not recommended to use it on production servers, because these
packages are not always heavily tested and validated.
As the name suggests, you do not need a subscription key to access
this repository. It can be used for testing and non-production
use. It is not recommended to use it on production servers, because these
packages are not always heavily tested and validated.
We recommend to configure this repository in ``/etc/apt/sources.list``.
We recommend to configure this repository in ``/etc/apt/sources.list``.
.. code-block:: sources.list
:caption: File: ``/etc/apt/sources.list``
.. code-block:: sources.list
:caption: File: ``/etc/apt/sources.list``
deb http://ftp.debian.org/debian buster main contrib
deb http://ftp.debian.org/debian buster-updates main contrib
deb http://ftp.debian.org/debian buster main contrib
deb http://ftp.debian.org/debian buster-updates main contrib
# PBS pbs-no-subscription repository provided by proxmox.com,
# NOT recommended for production use
deb http://download.proxmox.com/debian/pbs buster pbs-no-subscription
# PBS pbs-no-subscription repository provided by proxmox.com,
# NOT recommended for production use
deb http://download.proxmox.com/debian/pbs buster pbs-no-subscription
# security updates
deb http://security.debian.org/debian-security buster/updates main contrib
# security updates
deb http://security.debian.org/debian-security buster/updates main contrib
`Proxmox Backup`_ Beta Repository
`Proxmox Backup`_ Test Repository
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
During the public beta, there is a repository called ``pbstest``. This one
contains the latest packages and is heavily used by developers to test new
features.
This repository contains the latest packages and is heavily used by developers
to test new features.
.. .. warning:: the ``pbstest`` repository should (as the name implies)
only be used to test new features or bug fixes.
@ -145,7 +137,3 @@ You can access this repository by adding the following line to
:caption: sources.list entry for ``pbstest``
deb http://download.proxmox.com/debian/pbs buster pbstest
If you installed Proxmox Backup Server from the official beta ISO, you should
have this repository already configured in
``/etc/apt/sources.list.d/pbstest-beta.list``

View File

@ -0,0 +1,102 @@
<!DOCTYPE html>
<html>
<head>
<style>
/* similar to sphinx alabaster theme ones */
body {
max-width: 90ch;
margin-left: 2ch;
margin-right: 2ch;
line-height: 1.4em;
/* avoid the very high contrast of black on white, tone it down a bit */
color: #3E4349;
hyphens: auto;
text-align: left;
font-family: 'Open Sans', sans-serif;
font-size: 17px;
}
h1, h2, h3 {
font-family: Lato, sans-serif;
font-size: 150%;
line-height:1.2
}
tt, code {
background-color: #ecf0f3;
color: #222;
}
pre, tt, code {
font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
font-size: 0.9em;
}
div.note {
background-color: #EEE;
border: 1px solid #CCC;
margin: 10px 0;
padding: 0px 20px;
}
p.note-title {
font-weight: bolder;
padding: 0;
margin: 10px 0 0 0;
}
div.note > p.last {
margin: 5px 0 10px 0;
}
</style>
</head>
<body>
<p>A simulator to experiment with different backup schedules and prune
options.</p>
<h3>Schedule</h3>
<p>Select weekdays with the combobox and input hour and minute
specification separated by a colon, i.e. <code>HOUR:MINUTE</code>. Each of
<code>HOUR</code> and <code>MINUTE</code> can be either a single value or
one of the following:</p>
<ul class="simple">
<li>a comma-separated list: e.g., <code>01,02,03</code></li>
<li>a range: e.g., <code>01..10</code></li>
<li>a repetition: e.g., <code>05/10</code> (means starting at <code>5</code> every <code>10</code>)</li>
<li>a combination of the above: e.g., <code>01,05..10,12/02</code></li>
<li>a <code>*</code> for every possible value</li>
</ul>
<h3>Pruning</h3>
<p>Prune lets you systematically delete older backups, retaining backups for
the last given number of time intervals. The following retention options are
available:</p>
<dl class="docutils">
<dt><code class="docutils literal notranslate"><span class="pre">keep-last</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> backup snapshots.</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-hourly</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> hours. If there is more than one
backup for a single hour, only the latest is kept.</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-daily</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> days. If there is more than one
backup for a single day, only the latest is kept.</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-weekly</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> weeks. If there is more than one
backup for a single week, only the latest is kept.
<div class="last admonition note">
<p class="note-title">Note:</p>
<p class="last">Weeks start on Monday and end on Sunday. The software
uses the <a class="reference external" href="https://en.wikipedia.org/wiki/ISO_week_date">ISO week date</a> system and handles weeks at
the end of the year correctly.</p>
</div>
</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-monthly</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> months. If there is more than one
backup for a single month, only the latest is kept.</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-yearly</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> years. If there is more than one
backup for a single year, only the latest is kept.</dd>
</dl>
<p>The retention options are processed in the order given above. Each option
only covers backups within its time period. The next option does not take care
of already covered backups. It will only consider older backups.</p>
<p>For example, in a week covered by <code>keep-weekly</code>, one backup is
kept while all others are removed; <code>keep-monthly</code> then does not
consider backups from that week anymore, even if part of the week is part of
an earlier month.</p>
</body>
</html>

View File

@ -0,0 +1,45 @@
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no">
<title>PBS Prune Simulator</title>
<link rel="stylesheet" type="text/css" href="extjs/theme-crisp/resources/theme-crisp-all.css">
<style>
.cal {
margin: 5px;
}
.cal-day {
vertical-align: top;
width: 150px;
height: 75px; /* this is like min-height when used in tables */
border: #939393 1px solid;
color: #454545;
}
.cal-day-date {
border-bottom: #444 1px solid;
color: #000;
}
.strikethrough {
text-decoration: line-through;
}
.black {
color: #000;
}
.sun {
background-color: #ededed;
}
.first-of-month {
border-right: dashed black 4px;
}
.clear-trigger {
background-image: url(./clear-trigger.png);
}
</style>
<script type="text/javascript" src="extjs/ext-all.js"></script>
<script type="text/javascript" src="prune-simulator.js"></script>
</head>
<body></body>
</html>

View File

@ -0,0 +1,790 @@
// FIXME: HACK! Makes scrolling in number spinner work again. fixed in ExtJS >= 6.1
if (Ext.isFirefox) {
Ext.$eventNameMap.DOMMouseScroll = 'DOMMouseScroll';
}
Ext.onReady(function() {
const NOW = new Date();
const COLORS = {
'keep-last': 'orange',
'keep-hourly': 'purple',
'keep-daily': 'yellow',
'keep-weekly': 'green',
'keep-monthly': 'blue',
'keep-yearly': 'red',
'all zero': 'white',
};
const TEXT_COLORS = {
'keep-last': 'black',
'keep-hourly': 'white',
'keep-daily': 'black',
'keep-weekly': 'white',
'keep-monthly': 'white',
'keep-yearly': 'white',
'all zero': 'black',
};
Ext.define('PBS.prunesimulator.Documentation', {
extend: 'Ext.Panel',
alias: 'widget.prunesimulatorDocumentation',
html: '<iframe style="width:100%;height:100%;border:0px;" src="./documentation.html"/>',
});
Ext.define('PBS.prunesimulator.CalendarEvent', {
extend: 'Ext.form.field.ComboBox',
alias: 'widget.prunesimulatorCalendarEvent',
editable: true,
displayField: 'text',
valueField: 'value',
queryMode: 'local',
store: {
field: ['value', 'text'],
data: [
{ value: '0/2:00', text: "Every two hours" },
{ value: '0/6:00', text: "Every six hours" },
{ value: '2,22:30', text: "At 02:30 and 22:30" },
{ value: '00:00', text: "At 00:00" },
{ value: '08..17:00/30', text: "From 08:00 to 17:30 every 30 minutes" },
{ value: 'HOUR:MINUTE', text: "Custom schedule" },
],
},
tpl: [
'<ul class="x-list-plain"><tpl for=".">',
'<li role="option" class="x-boundlist-item">{text}</li>',
'</tpl></ul>',
],
displayTpl: [
'<tpl for=".">',
'{value}',
'</tpl>',
],
});
Ext.define('PBS.prunesimulator.DayOfWeekSelector', {
extend: 'Ext.form.field.ComboBox',
alias: 'widget.prunesimulatorDayOfWeekSelector',
editable: false,
displayField: 'text',
valueField: 'value',
queryMode: 'local',
store: {
field: ['value', 'text'],
data: [
{ value: 'mon', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[1]) },
{ value: 'tue', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[2]) },
{ value: 'wed', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[3]) },
{ value: 'thu', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[4]) },
{ value: 'fri', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[5]) },
{ value: 'sat', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[6]) },
{ value: 'sun', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[0]) },
],
},
});
Ext.define('pbs-prune-list', {
extend: 'Ext.data.Model',
fields: [
{
name: 'backuptime',
type: 'date',
dateFormat: 'timestamp',
},
{
name: 'mark',
type: 'string',
},
{
name: 'keepName',
type: 'string',
},
],
});
Ext.define('PBS.prunesimulator.PruneList', {
extend: 'Ext.panel.Panel',
alias: 'widget.prunesimulatorPruneList',
initComponent: function() {
let me = this;
if (!me.store) {
throw "no store specified";
}
me.items = [
{
xtype: 'grid',
store: me.store,
border: false,
columns: [
{
header: 'Backup Time',
dataIndex: 'backuptime',
renderer: function(value, metaData, record) {
let text = Ext.Date.format(value, 'Y-m-d H:i:s');
if (record.data.mark === 'keep') {
if (me.useColors) {
let bgColor = COLORS[record.data.keepName];
let textColor = TEXT_COLORS[record.data.keepName];
return '<div style="background-color: ' + bgColor + '; ' +
'color: ' + textColor + ';">' + text + '</div>';
} else {
return text;
}
} else {
return '<div style="text-decoration: line-through;">' + text + '</div>';
}
},
flex: 1,
sortable: false,
},
{
header: 'Keep (reason)',
dataIndex: 'mark',
renderer: function(value, metaData, record) {
if (record.data.mark === 'keep') {
if (record.data.keepCount) {
return 'keep (' + record.data.keepName +
': ' + record.data.keepCount + ')';
} else {
return 'keep (' + record.data.keepName + ')';
}
} else {
return value;
}
},
width: 200,
sortable: false,
},
],
},
];
me.callParent();
},
});
Ext.define('PBS.prunesimulator.WeekTable', {
extend: 'Ext.panel.Panel',
alias: 'widget.prunesimulatorWeekTable',
reload: function() {
let me = this;
let backups = me.store.data.items;
let html = '<table class="cal">';
let now = new Date(NOW.getTime());
let skip = 7 - parseInt(Ext.Date.format(now, 'N'), 10);
let tableStartDate = Ext.Date.add(now, Ext.Date.DAY, skip);
let bIndex = 0;
for (let i = 0; bIndex < backups.length; i++) {
html += '<tr>';
for (let j = 0; j < 7; j++) {
let date = Ext.Date.subtract(tableStartDate, Ext.Date.DAY, j + 7 * i);
let currentDay = Ext.Date.format(date, 'd/m/Y');
let dayOfWeekCls = Ext.Date.format(date, 'D').toLowerCase();
let firstOfMonthCls = Ext.Date.format(date, 'd') === '01'
? 'first-of-month'
: '';
html += `<td class="cal-day ${dayOfWeekCls} ${firstOfMonthCls}">`;
const isBackupOnDay = function(backup, day) {
return backup && Ext.Date.format(backup.data.backuptime, 'd/m/Y') === day;
};
let backup = backups[bIndex];
html += '<table><tr>';
html += `<th class="cal-day-date">${Ext.Date.format(date, 'D, d M Y')}</th>`;
while (isBackupOnDay(backup, currentDay)) {
html += '<tr><td>';
let text = Ext.Date.format(backup.data.backuptime, 'H:i');
if (backup.data.mark === 'remove') {
html += `<span class="strikethrough">${text}</span>`;
} else {
if (backup.data.keepCount) {
text += ` (${backup.data.keepName} ${backup.data.keepCount})`;
} else {
text += ` (${backup.data.keepName})`;
}
if (me.useColors) {
let bgColor = COLORS[backup.data.keepName];
let textColor = TEXT_COLORS[backup.data.keepName];
html += `<span style="background-color: ${bgColor};
color: ${textColor};">${text}</span>`;
} else {
html += `<span class="black">${text}</span>`;
}
}
html += '</td></tr>';
backup = backups[++bIndex];
}
html += '</table>';
html += '</div>';
html += '</td>';
}
html += '</tr>';
}
me.setHtml(html);
},
initComponent: function() {
let me = this;
if (!me.store) {
throw "no store specified";
}
let reload = function() {
me.reload();
};
me.store.on("datachanged", reload);
me.callParent();
me.reload();
},
});
Ext.define('PBS.PruneSimulatorKeepInput', {
extend: 'Ext.form.field.Number',
alias: 'widget.prunesimulatorKeepInput',
allowBlank: true,
fieldGroup: 'keep',
minValue: 1,
listeners: {
afterrender: function(field) {
this.triggers.clear.setVisible(field.value !== null);
},
change: function(field, newValue, oldValue) {
this.triggers.clear.setVisible(newValue !== null);
},
},
triggers: {
clear: {
cls: 'clear-trigger',
weight: -1,
handler: function() {
this.triggers.clear.setVisible(false);
this.setValue(null);
},
},
},
});
Ext.define('PBS.PruneSimulatorPanel', {
extend: 'Ext.panel.Panel',
alias: 'widget.prunesimulatorPanel',
viewModel: {
},
getValues: function() {
let me = this;
let values = {};
Ext.Array.each(me.query('[isFormField]'), function(field) {
let data = field.getSubmitData();
Ext.Object.each(data, function(name, val) {
values[name] = val;
});
});
return values;
},
controller: {
xclass: 'Ext.app.ViewController',
init: function(view) {
this.reloadFull(); // initial load
this.switchColor(true);
},
control: {
'field[fieldGroup=keep]': { change: 'reloadPrune' },
},
reloadFull: function() {
let me = this;
let view = me.getView();
let params = view.getValues();
let [hourSpec, minuteSpec] = params['schedule-time'].split(':');
if (!hourSpec || !minuteSpec) {
Ext.Msg.alert('Error', 'Invalid schedule');
return;
}
let matchTimeSpec = function(timeSpec, rangeMin, rangeMax) {
let specValues = timeSpec.split(',');
let matches = {};
let assertValid = function(value) {
let num = Number(value);
if (isNaN(num)) {
throw value + " is not an integer";
} else if (value < rangeMin || value > rangeMax) {
throw "number '" + value + "' is not in the range '" + rangeMin + ".." + rangeMax + "'";
}
return num;
};
specValues.forEach(function(value) {
if (value.includes('..')) {
let [start, end] = value.split('..');
start = assertValid(start);
end = assertValid(end);
if (start > end) {
throw "interval start is bigger then interval end '" + start + " > " + end + "'";
}
for (let i = start; i <= end; i++) {
matches[i] = 1;
}
} else if (value.includes('/')) {
let [start, step] = value.split('/');
start = assertValid(start);
step = assertValid(step);
for (let i = start; i <= rangeMax; i += step) {
matches[i] = 1;
}
} else if (value === '*') {
for (let i = rangeMin; i <= rangeMax; i++) {
matches[i] = 1;
}
} else {
value = assertValid(value);
matches[value] = 1;
}
});
return Object.keys(matches);
};
let hours, minutes;
try {
hours = matchTimeSpec(hourSpec, 0, 23);
minutes = matchTimeSpec(minuteSpec, 0, 59);
} catch (err) {
Ext.Msg.alert('Error', err);
return;
}
let backups = me.populateFromSchedule(
params['schedule-weekdays'],
hours,
minutes,
params.numberOfWeeks,
);
me.pruneSelect(backups, params);
view.pruneStore.setData(backups);
},
reloadPrune: function() {
let me = this;
let view = me.getView();
let params = view.getValues();
let backups = [];
view.pruneStore.getData().items.forEach(function(item) {
backups.push({
backuptime: item.data.backuptime,
});
});
me.pruneSelect(backups, params);
view.pruneStore.setData(backups);
},
// backups are sorted descending by date
populateFromSchedule: function(weekdays, hours, minutes, weekCount) {
let weekdayFlags = [
weekdays.includes('sun'),
weekdays.includes('mon'),
weekdays.includes('tue'),
weekdays.includes('wed'),
weekdays.includes('thu'),
weekdays.includes('fri'),
weekdays.includes('sat'),
];
let todaysDate = new Date(NOW.getTime());
let timesOnSingleDay = [];
hours.forEach(function(hour) {
minutes.forEach(function(minute) {
todaysDate.setHours(hour);
todaysDate.setMinutes(minute);
timesOnSingleDay.push(todaysDate.getTime());
});
});
// ordering here and iterating backwards through days
// ensures that everything is ordered
timesOnSingleDay.sort(function(a, b) {
// numeric comparator, sorting descending; returning a boolean (a < b) is not a valid sort comparator
return b - a;
});
let backups = [];
for (let i = 0; i < 7 * weekCount; i++) {
let daysDate = Ext.Date.subtract(todaysDate, Ext.Date.DAY, i);
let weekday = parseInt(Ext.Date.format(daysDate, 'w'), 10);
if (weekdayFlags[weekday]) {
timesOnSingleDay.forEach(function(time) {
backups.push({
backuptime: Ext.Date.subtract(new Date(time), Ext.Date.DAY, i),
});
});
}
}
return backups;
},
pruneMark: function(backups, keepCount, keepName, idFunc) {
if (!keepCount) {
return;
}
let alreadyIncluded = {};
let newlyIncluded = {};
let newlyIncludedCount = 0;
let finished = false;
backups.forEach(function(backup) {
let mark = backup.mark;
let id = idFunc(backup);
if (finished || alreadyIncluded[id]) {
return;
}
if (mark) {
if (mark === 'keep') {
alreadyIncluded[id] = true;
}
return;
}
if (!newlyIncluded[id]) {
if (newlyIncludedCount >= keepCount) {
finished = true;
return;
}
newlyIncluded[id] = true;
newlyIncludedCount++;
backup.mark = 'keep';
backup.keepName = keepName;
backup.keepCount = newlyIncludedCount;
} else {
backup.mark = 'remove';
}
});
},
// backups need to be sorted descending by date
pruneSelect: function(backups, keepParams) {
let me = this;
if (Number(keepParams['keep-last']) +
Number(keepParams['keep-hourly']) +
Number(keepParams['keep-daily']) +
Number(keepParams['keep-weekly']) +
Number(keepParams['keep-monthly']) +
Number(keepParams['keep-yearly']) === 0) {
backups.forEach(function(backup) {
backup.mark = 'keep';
backup.keepName = 'keep-all';
});
return;
}
me.pruneMark(backups, keepParams['keep-last'], 'keep-last', function(backup) {
return backup.backuptime;
});
me.pruneMark(backups, keepParams['keep-hourly'], 'keep-hourly', function(backup) {
return Ext.Date.format(backup.backuptime, 'H/d/m/Y');
});
me.pruneMark(backups, keepParams['keep-daily'], 'keep-daily', function(backup) {
return Ext.Date.format(backup.backuptime, 'd/m/Y');
});
me.pruneMark(backups, keepParams['keep-weekly'], 'keep-weekly', function(backup) {
// ISO-8601 week and week-based year
return Ext.Date.format(backup.backuptime, 'W/o');
});
me.pruneMark(backups, keepParams['keep-monthly'], 'keep-monthly', function(backup) {
return Ext.Date.format(backup.backuptime, 'm/Y');
});
me.pruneMark(backups, keepParams['keep-yearly'], 'keep-yearly', function(backup) {
return Ext.Date.format(backup.backuptime, 'Y');
});
backups.forEach(function(backup) {
backup.mark = backup.mark || 'remove';
});
},
toggleColors: function(checkbox, checked) {
this.switchColor(checked);
},
switchColor: function(useColors) {
let me = this;
let view = me.getView();
const getStyle = name =>
`background-color: ${COLORS[name]}; color: ${TEXT_COLORS[name]};`;
for (const field of view.query('[isFormField]')) {
if (field.fieldGroup !== 'keep') {
continue;
}
if (useColors) {
field.setFieldStyle(getStyle(field.name));
} else {
field.setFieldStyle('background-color: white; color: #444;');
}
}
me.lookup('weekTable').useColors = useColors;
me.lookup('pruneList').useColors = useColors;
me.reloadPrune();
},
},
keepItems: [
{
xtype: 'prunesimulatorKeepInput',
name: 'keep-last',
fieldLabel: 'keep-last',
value: 4,
},
{
xtype: 'prunesimulatorKeepInput',
name: 'keep-hourly',
fieldLabel: 'keep-hourly',
},
{
xtype: 'prunesimulatorKeepInput',
name: 'keep-daily',
fieldLabel: 'keep-daily',
value: 5,
},
{
xtype: 'prunesimulatorKeepInput',
name: 'keep-weekly',
fieldLabel: 'keep-weekly',
value: 2,
},
{
xtype: 'prunesimulatorKeepInput',
name: 'keep-monthly',
fieldLabel: 'keep-monthly',
},
{
xtype: 'prunesimulatorKeepInput',
name: 'keep-yearly',
fieldLabel: 'keep-yearly',
},
],
initComponent: function() {
var me = this;
me.pruneStore = Ext.create('Ext.data.Store', {
model: 'pbs-prune-list',
sorters: { property: 'backuptime', direction: 'DESC' },
});
me.items = [
{
xtype: 'panel',
layout: {
type: 'hbox',
align: 'stretch',
},
border: false,
items: [
{
title: 'View',
layout: 'anchor',
flex: 1,
border: false,
bodyPadding: 10,
items: [
{
xtype: 'checkbox',
name: 'showCalendar',
reference: 'showCalendar',
fieldLabel: 'Show Calendar:',
checked: true,
},
{
xtype: 'checkbox',
name: 'showColors',
reference: 'showColors',
fieldLabel: 'Show Colors:',
checked: true,
handler: 'toggleColors',
},
],
},
{ xtype: "panel", width: 1, border: 1 },
{
xtype: 'form',
layout: 'anchor',
flex: 1,
border: false,
title: 'Simulated Backup Schedule',
defaults: {
labelWidth: 120,
},
bodyPadding: 10,
items: [
{
xtype: 'prunesimulatorDayOfWeekSelector',
name: 'schedule-weekdays',
fieldLabel: 'Day of week',
value: ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'],
allowBlank: false,
multiSelect: true,
padding: '0 0 0 10',
},
{
xtype: 'prunesimulatorCalendarEvent',
name: 'schedule-time',
allowBlank: false,
value: '0/6:00',
fieldLabel: 'Backup schedule',
padding: '0 0 0 10',
},
{
xtype: 'numberfield',
name: 'numberOfWeeks',
allowBlank: false,
fieldLabel: 'Number of weeks',
minValue: 1,
value: 15,
maxValue: 260, // five years
padding: '0 0 0 10',
},
{
xtype: 'button',
name: 'schedule-button',
text: 'Update Schedule',
formBind: true,
handler: 'reloadFull',
},
],
},
],
},
{
xtype: 'panel',
layout: {
type: 'hbox',
align: 'stretch',
},
flex: 1,
border: false,
items: [
{
layout: 'anchor',
title: 'Prune Options',
border: false,
bodyPadding: 10,
scrollable: true,
items: me.keepItems,
flex: 1,
},
{ xtype: "panel", width: 1, border: 1 },
{
layout: 'fit',
title: 'Backups',
border: false,
xtype: 'prunesimulatorPruneList',
store: me.pruneStore,
reference: 'pruneList',
flex: 1,
},
],
},
{
layout: 'anchor',
title: 'Calendar',
autoScroll: true,
flex: 2,
xtype: 'prunesimulatorWeekTable',
reference: 'weekTable',
store: me.pruneStore,
bind: {
hidden: '{!showCalendar.checked}',
},
},
];
me.callParent();
},
});
Ext.create('Ext.container.Viewport', {
layout: 'border',
renderTo: Ext.getBody(),
items: [
{
xtype: 'prunesimulatorPanel',
title: 'Proxmox Backup Server - Prune Simulator',
region: 'west',
layout: {
type: 'vbox',
align: 'stretch',
pack: 'start',
},
flex: 3,
maxWidth: 1090,
},
{
xtype: 'prunesimulatorDocumentation',
title: 'Usage',
border: false,
flex: 2,
region: 'center',
},
],
});
});

View File

@ -1,6 +1,8 @@
Storage
=======
.. _storage_disk_management:
Disk Management
---------------
@ -57,7 +59,7 @@ create a datastore at the location ``/mnt/datastore/store1``:
You can also create a ``zpool`` with various raid levels from **Administration
-> Disks -> Zpool** in the web interface, or by using ``zpool create``. The command
below creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and
mounts it on the root directory (default):
mounts it under ``/mnt/datastore/zpool1``:
.. code-block:: console
@ -85,7 +87,7 @@ display S.M.A.R.T. attributes from the web interface or by using the command:
.. _datastore_intro:
:term:`DataStore`
:term:`Datastore`
-----------------
A datastore refers to a location at which backups are stored. The current
@ -107,7 +109,7 @@ is stored in the file ``/etc/proxmox-backup/datastore.cfg``.
Datastore Configuration
~~~~~~~~~~~~~~~~~~~~~~~
.. image:: images/screenshots/pbs-gui-datastore.png
.. image:: images/screenshots/pbs-gui-datastore-content.png
:align: right
:alt: Datastore Overview
@ -121,14 +123,17 @@ number of backups to keep in that store. :ref:`backup-pruning` and
periodically based on a configured schedule (see :ref:`calendar-events`) per datastore.
.. _storage_datastore_create:
Creating a Datastore
^^^^^^^^^^^^^^^^^^^^
.. image:: images/screenshots/pbs-gui-datastore-create-general.png
:align: right
:alt: Create a datastore
You can create a new datastore from the web GUI, by navigating to **Datastore** in
the menu tree and clicking **Create**. Here:
You can create a new datastore from the web interface, by clicking **Add
Datastore** in the side menu, under the **Datastore** section. In the setup
window:
* *Name* refers to the name of the datastore
* *Backing Path* is the path to the directory upon which you want to create the
@ -136,7 +141,9 @@ the menu tree and clicking **Create**. Here:
* *GC Schedule* refers to the time and intervals at which garbage collection
runs
* *Prune Schedule* refers to the frequency at which pruning takes place
* *Prune Options* set the amount of backups which you would like to keep (see :ref:`backup-pruning`).
* *Prune Options* set the amount of backups which you would like to keep (see
:ref:`backup-pruning`).
* *Comment* can be used to add some contextual information to the datastore.
Alternatively you can create a new datastore from the command line. The
following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
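A sketch of such a command (assuming the ``datastore create`` subcommand of
``proxmox-backup-manager``):
.. code-block:: console
# proxmox-backup-manager datastore create store1 /backup/disk1/store1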

View File

@ -1,3 +1,5 @@
.. _sysadmin_host_administration:
Host System Administration
==========================

View File

@ -41,11 +41,12 @@ users:
:alt: Add a new user
The superuser has full administration rights on everything, so you
normally want to add other users with less privileges. You can create a new
user with the ``user create`` subcommand or through the web interface, under
**Configuration -> User Management**. The ``create`` subcommand lets you specify
many options like ``--email`` or ``--password``. You can update or change any
user properties using the ``update`` subcommand later (**Edit** in the GUI):
normally want to add other users with less privileges. You can add a new
user with the ``user create`` subcommand or through the web
interface, under the **User Management** tab of **Configuration -> Access
Control**. The ``create`` subcommand lets you specify many options like
``--email`` or ``--password``. You can update or change any user properties
using the ``update`` subcommand later (**Edit** in the GUI):
.. code-block:: console
@ -70,7 +71,7 @@ The resulting user list looks like this:
│ root@pam │ 1 │ │ │ │ │ Superuser │
└──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘
Newly created users do not have any permissions. Please read the next
Newly created users do not have any permissions. Please read the Access Control
section to learn how to set access permissions.
If you want to disable a user account, you can do that by setting ``--enable`` to ``0``
@ -85,15 +86,77 @@ Or completely remove the user with:
# proxmox-backup-manager user remove john@pbs
.. _user_tokens:
API Tokens
----------
.. image:: images/screenshots/pbs-gui-apitoken-overview.png
:align: right
:alt: API Token Overview
Any authenticated user can generate API tokens which can in turn be used to
configure various clients, instead of directly providing the username and
password.
API tokens serve two purposes:
#. Easy revocation in case a client gets compromised
#. Limit permissions for each client/token within the user's permissions
An API token consists of two parts: an identifier consisting of the user name,
the realm and a tokenname (``user@realm!tokenname``), and a secret value. Both
need to be provided to the client in place of the user ID (``user@realm``) and
the user password, respectively.
.. image:: images/screenshots/pbs-gui-apitoken-secret-value.png
:align: right
:alt: API secret value
The API token is passed from the client to the server by setting the
``Authorization`` HTTP header with method ``PBSAPIToken`` to the value
``TOKENID:TOKENSECRET``.
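For instance, a raw API call with ``curl`` could look like the following sketch
(the host is a placeholder, and the exact ``PBSAPIToken=`` header syntax is an
assumption here):
.. code-block:: console
# curl -H 'Authorization: PBSAPIToken=TOKENID:TOKENSECRET' https://<ip-or-dns-name>:8007/api2/json/version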
Generating new tokens can be done using ``proxmox-backup-manager`` or the GUI:
.. code-block:: console
# proxmox-backup-manager user generate-token john@pbs client1
Result: {
"tokenid": "john@pbs!client1",
"value": "d63e505a-e3ec-449a-9bc7-1da610d4ccde"
}
.. note:: The displayed secret value needs to be saved, since it cannot be
displayed again after generating the API token.
The ``user list-tokens`` sub-command can be used to display tokens and their
metadata:
.. code-block:: console
# proxmox-backup-manager user list-tokens john@pbs
┌──────────────────┬────────┬────────┬─────────┐
│ tokenid │ enable │ expire │ comment │
╞══════════════════╪════════╪════════╪═════════╡
│ john@pbs!client1 │ 1 │ │ │
└──────────────────┴────────┴────────┴─────────┘
Similarly, the ``user delete-token`` subcommand can be used to delete a token
again.
Newly generated API tokens don't have any permissions. Please read the next
section to learn how to set access permissions.
.. _user_acl:
Access Control
--------------
By default new users do not have any permission. Instead you need to
specify what is allowed and what is not. You can do this by assigning
roles to users on specific objects like datastores or remotes. The
By default new users and API tokens do not have any permission. Instead you
need to specify what is allowed and what is not. You can do this by assigning
roles to users/tokens on specific objects like datastores or remotes. The
following roles exist:
**NoAccess**
@ -130,7 +193,7 @@ following roles exist:
**RemoteSyncOperator**
Is allowed to read data from a remote.
.. image:: images/screenshots/pbs-gui-permissions-add.png
.. image:: images/screenshots/pbs-gui-user-management-add-user.png
:align: right
:alt: Add permissions for user
@ -148,31 +211,32 @@ The data represented in each field is as follows:
#. The object on which the permission is set. This can be a specific object
(single datastore, remote, etc.) or a top level object, which with
propagation enabled, represents all children of the object also.
#. The user for which the permission is set
#. The user(s)/token(s) for which the permission is set
#. The role being set
You can manage datastore permissions from **Configuration -> Permissions** in the
web interface. Likewise, you can use the ``acl`` subcommand to manage and
monitor user permissions from the command line. For example, the command below
will add the user ``john@pbs`` as a **DatastoreAdmin** for the datastore
``store1``, located at ``/backup/disk1/store1``:
You can manage permissions via **Configuration -> Access Control ->
Permissions** in the web interface. Likewise, you can use the ``acl``
subcommand to manage and monitor user permissions from the command line. For
example, the command below will add the user ``john@pbs`` as a
**DatastoreAdmin** for the datastore ``store1``, located at
``/backup/disk1/store1``:
.. code-block:: console
# proxmox-backup-manager acl update /datastore/store1 DatastoreAdmin --userid john@pbs
# proxmox-backup-manager acl update /datastore/store1 DatastoreAdmin --auth-id john@pbs
You can monitor the roles of each user using the following command:
You can list the ACLs of each user/token using the following command:
.. code-block:: console
# proxmox-backup-manager acl list
┌──────────┬──────────────────┬───────────┬────────────────┐
│ ugid │ path │ propagate │ roleid │
╞══════════╪══════════════════╪═══════════╪════════════════╡
│ john@pbs │ /datastore/disk1 │ 1 │ DatastoreAdmin │
└──────────┴──────────────────┴───────────┴────────────────┘
┌──────────┬──────────────────┬───────────┬────────────────┐
│ ugid │ path │ propagate │ roleid │
╞══════════╪══════════════════╪═══════════╪════════════════╡
│ john@pbs │ /datastore/store1 │ 1 │ DatastoreAdmin │
└──────────┴──────────────────┴───────────┴────────────────┘
A single user can be assigned multiple permission sets for different datastores.
A single user/token can be assigned multiple permission sets for different datastores.
.. Note::
Naming convention is important here. For datastores on the host,
@ -183,4 +247,40 @@ A single user can be assigned multiple permission sets for different datastores.
remote (see `Remote` below) and ``{storename}`` is the name of the datastore on
the remote.
API Token permissions
~~~~~~~~~~~~~~~~~~~~~
API token permissions are calculated from ACL entries containing the token's ID,
independently of the entries for the corresponding user. The resulting permission
set on a given path is then intersected with that of the corresponding user.
In practice this means:
#. API tokens require their own ACL entries
#. API tokens can never do more than their corresponding user
Effective permissions
~~~~~~~~~~~~~~~~~~~~~
To calculate and display the effective permission set of a user or API token
you can use the ``proxmox-backup-manager user permissions`` command:
.. code-block:: console
# proxmox-backup-manager user permissions john@pbs --path /datastore/store1
Privileges with (*) have the propagate flag set
Path: /datastore/store1
- Datastore.Audit (*)
- Datastore.Backup (*)
- Datastore.Modify (*)
- Datastore.Prune (*)
- Datastore.Read (*)
- Datastore.Verify (*)
# proxmox-backup-manager acl update /datastore/store1 DatastoreBackup --auth-id 'john@pbs!client1'
# proxmox-backup-manager user permissions 'john@pbs!client1' --path /datastore/store1
Privileges with (*) have the propagate flag set
Path: /datastore/store1
- Datastore.Backup (*)

View File

@ -1,13 +1,15 @@
include ../defines.mk
UNITS :=
UNITS := \
proxmox-backup-daily-update.timer \
DYNAMIC_UNITS := \
proxmox-backup-banner.service \
proxmox-backup-daily-update.service \
proxmox-backup.service \
proxmox-backup-proxy.service
all: $(UNITS) $(DYNAMIC_UNITS) pbstest-beta.list
all: $(UNITS) $(DYNAMIC_UNITS) pbs-enterprise.list
clean:
rm -f $(DYNAMIC_UNITS)

etc/pbs-enterprise.list Normal file
View File

@ -0,0 +1 @@
deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise

View File

@ -1 +0,0 @@
deb http://download.proxmox.com/debian/pbs buster pbstest

View File

@ -0,0 +1,8 @@
[Unit]
Description=Daily Proxmox Backup Server update and maintenance activities
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
ExecStart=%LIBEXECDIR%/proxmox-backup/proxmox-daily-update

View File

@ -0,0 +1,10 @@
[Unit]
Description=Daily Proxmox Backup Server update and maintenance activities
[Timer]
OnCalendar=*-*-* 1:00
RandomizedDelaySec=5h
Persistent=true
[Install]
WantedBy=timers.target

View File

@ -9,6 +9,7 @@ After=proxmox-backup.service
Type=notify
ExecStart=%LIBEXECDIR%/proxmox-backup/proxmox-backup-proxy
ExecReload=/bin/kill -HUP $MAINPID
PIDFile=/run/proxmox-backup/proxy.pid
Restart=on-failure
User=%PROXY_USER%
Group=%PROXY_USER%

View File

@ -7,6 +7,7 @@ After=network.target
Type=notify
ExecStart=%LIBEXECDIR%/proxmox-backup/proxmox-backup-api
ExecReload=/bin/kill -HUP $MAINPID
PIDFile=/run/proxmox-backup/api.pid
Restart=on-failure
[Install]

View File

@ -2,7 +2,7 @@ use std::io::Write;
use anyhow::{Error};
use proxmox_backup::api2::types::Userid;
use proxmox_backup::api2::types::Authid;
use proxmox_backup::client::{HttpClient, HttpClientOptions, BackupReader};
pub struct DummyWriter {
@ -26,13 +26,13 @@ async fn run() -> Result<(), Error> {
let host = "localhost";
let username = Userid::root_userid();
let auth_id = Authid::root_auth_id();
let options = HttpClientOptions::new()
.interactive(true)
.ticket_cache(true);
let client = HttpClient::new(host, 8007, username, options)?;
let client = HttpClient::new(host, 8007, auth_id, options)?;
let backup_time = proxmox::tools::time::parse_rfc3339("2019-06-28T10:49:48Z")?;

View File

@ -1,6 +1,6 @@
use anyhow::{Error};
use proxmox_backup::api2::types::Userid;
use proxmox_backup::api2::types::Authid;
use proxmox_backup::client::*;
async fn upload_speed() -> Result<f64, Error> {
@ -8,13 +8,13 @@ async fn upload_speed() -> Result<f64, Error> {
let host = "localhost";
let datastore = "store2";
let username = Userid::root_userid();
let auth_id = Authid::root_auth_id();
let options = HttpClientOptions::new()
.interactive(true)
.ticket_cache(true);
let client = HttpClient::new(host, 8007, username, options)?;
let client = HttpClient::new(host, 8007, auth_id, options)?;
let backup_time = proxmox::tools::time::epoch_i64();

View File

@ -1,6 +1,8 @@
use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};
use std::collections::HashMap;
use std::collections::HashSet;
use proxmox::api::{api, RpcEnvironment, Permission};
use proxmox::api::router::{Router, SubdirMap};
@ -10,10 +12,10 @@ use proxmox::{http_err, list_subdirs_api_method};
use crate::tools::ticket::{self, Empty, Ticket};
use crate::auth_helpers::*;
use crate::api2::types::*;
use crate::tools::{FileLogOptions, FileLogger};
use crate::config::acl as acl_config;
use crate::config::acl::{PRIVILEGES, PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::acl::{PRIVILEGES, PRIV_PERMISSIONS_MODIFY};
pub mod user;
pub mod domain;
@ -31,7 +33,8 @@ fn authenticate_user(
) -> Result<bool, Error> {
let user_info = CachedUserInfo::new()?;
if !user_info.is_active_user(&userid) {
let auth_id = Authid::from(userid.clone());
if !user_info.is_active_auth_id(&auth_id) {
bail!("user account disabled or expired.");
}
@ -69,8 +72,7 @@ fn authenticate_user(
path_vec.push(part);
}
}
user_info.check_privs(userid, &path_vec, *privilege, false)?;
user_info.check_privs(&auth_id, &path_vec, *privilege, false)?;
return Ok(false);
}
}
@ -141,20 +143,13 @@ fn create_ticket(
port: Option<u16>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let logger_options = FileLogOptions {
append: true,
prefix_time: true,
..Default::default()
};
let mut auth_log = FileLogger::new("/var/log/proxmox-backup/api/auth.log", logger_options)?;
match authenticate_user(&username, &password, path, privs, port) {
Ok(true) => {
let ticket = Ticket::new("PBS", &username)?.sign(private_auth_key(), None)?;
let token = assemble_csrf_prevention_token(csrf_secret(), &username);
auth_log.log(format!("successful auth for user '{}'", username));
crate::server::rest::auth_logger()?.log(format!("successful auth for user '{}'", username));
Ok(json!({
"username": username,
@ -177,7 +172,7 @@ fn create_ticket(
username,
err.to_string()
);
auth_log.log(&msg);
crate::server::rest::auth_logger()?.log(&msg);
log::error!("{}", msg);
Err(http_err!(UNAUTHORIZED, "permission check failed."))
@ -213,9 +208,10 @@ fn change_password(
) -> Result<Value, Error> {
let current_user: Userid = rpcenv
.get_user()
.get_auth_id()
.ok_or_else(|| format_err!("unknown user"))?
.parse()?;
let current_auth = Authid::from(current_user.clone());
let mut allowed = userid == current_user;
@ -223,7 +219,7 @@ fn change_password(
if !allowed {
let user_info = CachedUserInfo::new()?;
let privs = user_info.lookup_privs(&current_user, &[]);
let privs = user_info.lookup_privs(&current_auth, &[]);
if (privs & PRIV_PERMISSIONS_MODIFY) != 0 { allowed = true; }
}
@ -237,6 +233,128 @@ fn change_password(
Ok(Value::Null)
}
#[api(
input: {
properties: {
"auth-id": {
type: Authid,
optional: true,
},
path: {
schema: ACL_PATH_SCHEMA,
optional: true,
},
},
},
access: {
permission: &Permission::Anybody,
description: "Requires Sys.Audit on '/access', limited to own privileges otherwise.",
},
returns: {
description: "Map of ACL path to Map of privilege to propagate bit",
type: Object,
properties: {},
additional_properties: true,
},
)]
/// List permissions of given or currently authenticated user / API token.
///
/// Optionally limited to specific path.
pub fn list_permissions(
auth_id: Option<Authid>,
path: Option<String>,
rpcenv: &dyn RpcEnvironment,
) -> Result<HashMap<String, HashMap<String, bool>>, Error> {
let current_auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&current_auth_id, &["access"]);
let auth_id = if user_privs & PRIV_SYS_AUDIT == 0 {
match auth_id {
Some(auth_id) => {
if auth_id == current_auth_id {
auth_id
} else if auth_id.is_token()
&& !current_auth_id.is_token()
&& auth_id.user() == current_auth_id.user() {
auth_id
} else {
bail!("not allowed to list permissions of {}", auth_id);
}
},
None => current_auth_id,
}
} else {
match auth_id {
Some(auth_id) => auth_id,
None => current_auth_id,
}
};
fn populate_acl_paths(
mut paths: HashSet<String>,
node: acl_config::AclTreeNode,
path: &str
) -> HashSet<String> {
for (sub_path, child_node) in node.children {
let sub_path = format!("{}/{}", path, &sub_path);
paths = populate_acl_paths(paths, child_node, &sub_path);
paths.insert(sub_path);
}
paths
}
let paths = match path {
Some(path) => {
let mut paths = HashSet::new();
paths.insert(path);
paths
},
None => {
let mut paths = HashSet::new();
let (acl_tree, _) = acl_config::config()?;
paths = populate_acl_paths(paths, acl_tree.root, "");
// default paths, returned even if no ACL exists
paths.insert("/".to_string());
paths.insert("/access".to_string());
paths.insert("/datastore".to_string());
paths.insert("/remote".to_string());
paths.insert("/system".to_string());
paths
},
};
let map = paths
.into_iter()
.fold(HashMap::new(), |mut map: HashMap<String, HashMap<String, bool>>, path: String| {
let split_path = acl_config::split_acl_path(path.as_str());
let (privs, propagated_privs) = user_info.lookup_privs_details(&auth_id, &split_path);
match privs {
0 => map, // Don't leak ACL paths where we don't have any privileges
_ => {
let priv_map = PRIVILEGES
.iter()
.fold(HashMap::new(), |mut priv_map, (name, value)| {
if value & privs != 0 {
priv_map.insert(name.to_string(), value & propagated_privs != 0);
}
priv_map
});
map.insert(path, priv_map);
map
},
}});
Ok(map)
}
#[sortable]
const SUBDIRS: SubdirMap = &sorted!([
("acl", &acl::ROUTER),
@ -244,6 +362,10 @@ const SUBDIRS: SubdirMap = &sorted!([
"password", &Router::new()
.put(&API_METHOD_CHANGE_PASSWORD)
),
(
"permissions", &Router::new()
.get(&API_METHOD_LIST_PERMISSIONS)
),
(
"ticket", &Router::new()
.post(&API_METHOD_CREATE_TICKET)
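
The list_permissions handler added above folds the PRIVILEGES table into a per-path map of privilege name to propagate flag, dropping paths where the caller holds no privileges at all. A self-contained sketch of that fold, using placeholder privilege bits rather than the real constants from config::acl:

use std::collections::HashMap;

// Hypothetical privilege bits; the real PRIVILEGES table lives in config::acl.
const PRIVILEGES: &[(&str, u64)] = &[
    ("Sys.Audit",        1 << 0),
    ("Datastore.Audit",  1 << 1),
    ("Datastore.Backup", 1 << 2),
];

/// Keep only privileges present in `privs` and record whether each also has
/// its propagate bit set, mirroring the fold in list_permissions.
fn priv_map(privs: u64, propagated: u64) -> HashMap<String, bool> {
    PRIVILEGES.iter().copied().fold(HashMap::new(), |mut map, (name, bit)| {
        if bit & privs != 0 {
            map.insert(name.to_string(), bit & propagated != 0);
        }
        map
    })
}

fn main() {
    let map = priv_map(0b110, 0b010);
    assert_eq!(map.get("Datastore.Audit"), Some(&true));   // present, propagated
    assert_eq!(map.get("Datastore.Backup"), Some(&false)); // present, not propagated
    assert!(map.get("Sys.Audit").is_none());                // not held at all
}
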

View File

@ -7,6 +7,7 @@ use proxmox::tools::fs::open_file_locked;
use crate::api2::types::*;
use crate::config::acl;
use crate::config::acl::{Role, PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
use crate::config::cached_user_info::CachedUserInfo;
#[api(
properties: {
@ -43,8 +44,23 @@ fn extract_acl_node_data(
path: &str,
list: &mut Vec<AclListItem>,
exact: bool,
token_user: &Option<Authid>,
) {
// tokens can't have tokens, so we can early return
if let Some(token_user) = token_user {
if token_user.is_token() {
return;
}
}
for (user, roles) in &node.users {
if let Some(token_user) = token_user {
if !user.is_token()
|| user.user() != token_user.user() {
continue;
}
}
for (role, propagate) in roles {
list.push(AclListItem {
path: if path.is_empty() { String::from("/") } else { path.to_string() },
@ -56,6 +72,10 @@ fn extract_acl_node_data(
}
}
for (group, roles) in &node.groups {
if let Some(_) = token_user {
continue;
}
for (role, propagate) in roles {
list.push(AclListItem {
path: if path.is_empty() { String::from("/") } else { path.to_string() },
@ -71,7 +91,7 @@ fn extract_acl_node_data(
}
for (comp, child) in &node.children {
let new_path = format!("{}/{}", path, comp);
extract_acl_node_data(child, &new_path, list, exact);
extract_acl_node_data(child, &new_path, list, exact, token_user);
}
}
@ -98,7 +118,8 @@ fn extract_acl_node_data(
}
},
access: {
permission: &Permission::Privilege(&["access", "acl"], PRIV_SYS_AUDIT, false),
permission: &Permission::Anybody,
description: "Returns all ACLs if user has Sys.Audit on '/access/acl', or just the ACLs containing the user's API tokens.",
},
)]
/// Read Access Control List (ACLs).
@ -107,18 +128,26 @@ pub fn read_acl(
exact: bool,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<AclListItem>, Error> {
let auth_id = rpcenv.get_auth_id().unwrap().parse()?;
//let auth_user = rpcenv.get_user().unwrap();
let user_info = CachedUserInfo::new()?;
let top_level_privs = user_info.lookup_privs(&auth_id, &["access", "acl"]);
let auth_id_filter = if (top_level_privs & PRIV_SYS_AUDIT) == 0 {
Some(auth_id)
} else {
None
};
let (mut tree, digest) = acl::config()?;
let mut list: Vec<AclListItem> = Vec::new();
if let Some(path) = &path {
if let Some(node) = &tree.find_node(path) {
extract_acl_node_data(&node, path, &mut list, exact);
extract_acl_node_data(&node, path, &mut list, exact, &auth_id_filter);
}
} else {
extract_acl_node_data(&tree.root, "", &mut list, exact);
extract_acl_node_data(&tree.root, "", &mut list, exact, &auth_id_filter);
}
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
@ -140,9 +169,9 @@ pub fn read_acl(
optional: true,
schema: ACL_PROPAGATE_SCHEMA,
},
userid: {
"auth-id": {
optional: true,
type: Userid,
type: Authid,
},
group: {
optional: true,
@ -160,7 +189,8 @@ pub fn read_acl(
},
},
access: {
permission: &Permission::Privilege(&["access", "acl"], PRIV_PERMISSIONS_MODIFY, false),
permission: &Permission::Anybody,
description: "Requires Permissions.Modify on '/access/acl', limited to updating ACLs of the user's API tokens otherwise."
},
)]
/// Update Access Control List (ACLs).
@ -168,12 +198,35 @@ pub fn update_acl(
path: String,
role: String,
propagate: Option<bool>,
userid: Option<Userid>,
auth_id: Option<Authid>,
group: Option<String>,
delete: Option<bool>,
digest: Option<String>,
_rpcenv: &mut dyn RpcEnvironment,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let current_auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let top_level_privs = user_info.lookup_privs(&current_auth_id, &["access", "acl"]);
if top_level_privs & PRIV_PERMISSIONS_MODIFY == 0 {
if let Some(_) = group {
bail!("Unprivileged users are not allowed to create group ACL item.");
}
match &auth_id {
Some(auth_id) => {
if current_auth_id.is_token() {
bail!("Unprivileged API tokens can't set ACL items.");
} else if !auth_id.is_token() {
bail!("Unprivileged users can only set ACL items for API tokens.");
} else if auth_id.user() != current_auth_id.user() {
bail!("Unprivileged users can only set ACL items for their own API tokens.");
}
},
None => { bail!("Unprivileged user needs to provide auth_id to update ACL item."); },
};
}
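
In short, without Permissions.Modify on '/access/acl' a caller may only manage ACL entries for its own API tokens: tokens cannot edit ACLs at all, group entries are off limits, and the target auth-id must be a token belonging to the caller. A toy restatement of those rules, using a stand-in auth-id type rather than the crate's Authid:

struct AuthId<'a> { user: &'a str, token: Option<&'a str> }

impl<'a> AuthId<'a> {
    fn is_token(&self) -> bool { self.token.is_some() }
}

/// Stand-in for the unprivileged branch of update_acl above.
fn may_update(current: &AuthId, target: &AuthId) -> Result<(), &'static str> {
    if current.is_token() {
        Err("unprivileged API tokens can't set ACL items")
    } else if !target.is_token() {
        Err("unprivileged users can only set ACL items for API tokens")
    } else if target.user != current.user {
        Err("unprivileged users can only set ACL items for their own API tokens")
    } else {
        Ok(())
    }
}

fn main() {
    let john = AuthId { user: "john@pbs", token: None };
    let client1 = AuthId { user: "john@pbs", token: Some("client1") };
    assert!(may_update(&john, &client1).is_ok());
    assert!(may_update(&john, &john).is_err()); // user-to-user needs Permissions.Modify
}
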
let _lock = open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
@ -190,11 +243,12 @@ pub fn update_acl(
if let Some(ref _group) = group {
bail!("parameter 'group' - groups are currently not supported.");
} else if let Some(ref userid) = userid {
} else if let Some(ref auth_id) = auth_id {
if !delete { // Note: we allow to delete non-existent users
let user_cfg = crate::config::user::cached_config()?;
if user_cfg.sections.get(&userid.to_string()).is_none() {
bail!("no such user.");
if user_cfg.sections.get(&auth_id.to_string()).is_none() {
bail!(format!("no such {}.",
if auth_id.is_token() { "API token" } else { "user" }));
}
}
} else {
@ -205,11 +259,11 @@ pub fn update_acl(
acl::check_acl_path(&path)?;
}
if let Some(userid) = userid {
if let Some(auth_id) = auth_id {
if delete {
tree.delete_user_role(&path, &userid, &role);
tree.delete_user_role(&path, &auth_id, &role);
} else {
tree.insert_user_role(&path, &userid, &role, propagate);
tree.insert_user_role(&path, &auth_id, &role, propagate);
}
} else if let Some(group) = group {
if delete {

View File

@ -1,12 +1,16 @@
use anyhow::{bail, Error};
use serde_json::Value;
use serde::{Serialize, Deserialize};
use serde_json::{json, Value};
use std::collections::HashMap;
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::{Schema, StringSchema};
use proxmox::tools::fs::open_file_locked;
use crate::api2::types::*;
use crate::config::user;
use crate::config::token_shadow;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
use crate::config::cached_user_info::CachedUserInfo;
@ -16,14 +20,96 @@ pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
.max_length(64)
.schema();
#[api(
properties: {
userid: {
type: Userid,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
enable: {
optional: true,
schema: user::ENABLE_USER_SCHEMA,
},
expire: {
optional: true,
schema: user::EXPIRE_USER_SCHEMA,
},
firstname: {
optional: true,
schema: user::FIRST_NAME_SCHEMA,
},
lastname: {
schema: user::LAST_NAME_SCHEMA,
optional: true,
},
email: {
schema: user::EMAIL_SCHEMA,
optional: true,
},
tokens: {
type: Array,
optional: true,
description: "List of user's API tokens.",
items: {
type: user::ApiToken
},
},
}
)]
#[derive(Serialize,Deserialize)]
/// User properties with added list of ApiTokens
pub struct UserWithTokens {
pub userid: Userid,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub enable: Option<bool>,
#[serde(skip_serializing_if="Option::is_none")]
pub expire: Option<i64>,
#[serde(skip_serializing_if="Option::is_none")]
pub firstname: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub lastname: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub email: Option<String>,
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub tokens: Vec<user::ApiToken>,
}
impl UserWithTokens {
fn new(user: user::User) -> Self {
Self {
userid: user.userid,
comment: user.comment,
enable: user.enable,
expire: user.expire,
firstname: user.firstname,
lastname: user.lastname,
email: user.email,
tokens: Vec::new(),
}
}
}
#[api(
input: {
properties: {},
properties: {
include_tokens: {
type: bool,
description: "Include user's API tokens in returned list.",
optional: true,
default: false,
},
},
},
returns: {
description: "List users (with config digest).",
type: Array,
items: { type: user::User },
items: { type: UserWithTokens },
},
access: {
permission: &Permission::Anybody,
@ -32,28 +118,60 @@ pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
)]
/// List users
pub fn list_users(
_param: Value,
include_tokens: bool,
_info: &ApiMethod,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<user::User>, Error> {
) -> Result<Vec<UserWithTokens>, Error> {
let (config, digest) = user::config()?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
// intentionally user only for now
let userid: Userid = rpcenv.get_auth_id().unwrap().parse()?;
let auth_id = Authid::from(userid.clone());
let user_info = CachedUserInfo::new()?;
let top_level_privs = user_info.lookup_privs(&userid, &["access", "users"]);
let top_level_privs = user_info.lookup_privs(&auth_id, &["access", "users"]);
let top_level_allowed = (top_level_privs & PRIV_SYS_AUDIT) != 0;
let filter_by_privs = |user: &user::User| {
top_level_allowed || user.userid == userid
};
let list:Vec<user::User> = config.convert_to_typed_array("user")?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(list.into_iter().filter(filter_by_privs).collect())
let iter = list.into_iter().filter(filter_by_privs);
let list = if include_tokens {
let tokens: Vec<user::ApiToken> = config.convert_to_typed_array("token")?;
let mut user_to_tokens = tokens
.into_iter()
.fold(
HashMap::new(),
|mut map: HashMap<Userid, Vec<user::ApiToken>>, token: user::ApiToken| {
if token.tokenid.is_token() {
map
.entry(token.tokenid.user().clone())
.or_default()
.push(token);
}
map
});
iter
.map(|user: user::User| {
let mut user = UserWithTokens::new(user);
user.tokens = user_to_tokens.remove(&user.userid).unwrap_or_default();
user
})
.collect()
} else {
iter.map(|user: user::User| UserWithTokens::new(user))
.collect()
};
Ok(list)
}
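
With include_tokens set, list_users above first groups all API tokens by their owning user and then attaches each bucket to the matching UserWithTokens entry. A minimal sketch of the grouping step, with plain strings standing in for Userid and ApiToken:

use std::collections::HashMap;

/// Group (owner, token) pairs by owner, as the include_tokens fold above does.
fn group_tokens(tokens: Vec<(String, String)>) -> HashMap<String, Vec<String>> {
    tokens.into_iter().fold(HashMap::new(), |mut map, (user, token)| {
        map.entry(user).or_default().push(token);
        map
    })
}

fn main() {
    let tokens = vec![
        ("john@pbs".to_string(), "client1".to_string()),
        ("john@pbs".to_string(), "client2".to_string()),
    ];
    let by_user = group_tokens(tokens);
    // Each UserWithTokens entry then removes its own bucket (or gets an empty Vec).
    assert_eq!(by_user["john@pbs"].len(), 2);
}
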
#[api(
@ -150,6 +268,21 @@ pub fn read_user(userid: Userid, mut rpcenv: &mut dyn RpcEnvironment) -> Result<
Ok(user)
}
#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
#[allow(non_camel_case_types)]
pub enum DeletableProperty {
/// Delete the comment property.
comment,
/// Delete the firstname property.
firstname,
/// Delete the lastname property.
lastname,
/// Delete the email property.
email,
}
#[api(
protected: true,
input: {
@ -185,6 +318,14 @@ pub fn read_user(userid: Userid, mut rpcenv: &mut dyn RpcEnvironment) -> Result<
schema: user::EMAIL_SCHEMA,
optional: true,
},
delete: {
description: "List of properties to delete.",
type: Array,
optional: true,
items: {
type: DeletableProperty,
}
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
@ -208,6 +349,7 @@ pub fn update_user(
firstname: Option<String>,
lastname: Option<String>,
email: Option<String>,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
) -> Result<(), Error> {
@ -222,6 +364,17 @@ pub fn update_user(
let mut data: user::User = config.lookup("user", userid.as_str())?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::comment => data.comment = None,
DeletableProperty::firstname => data.firstname = None,
DeletableProperty::lastname => data.lastname = None,
DeletableProperty::email => data.email = None,
}
}
}
if let Some(comment) = comment {
let comment = comment.trim().to_string();
if comment.is_empty() {
@ -304,12 +457,340 @@ pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error>
Ok(())
}
const ITEM_ROUTER: Router = Router::new()
#[api(
input: {
properties: {
userid: {
type: Userid,
},
tokenname: {
type: Tokenname,
},
},
},
returns: {
description: "Get API token metadata (with config digest).",
type: user::ApiToken,
},
access: {
permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
&Permission::UserParam("userid"),
]),
},
)]
/// Read user's API token metadata
pub fn read_token(
userid: Userid,
tokenname: Tokenname,
_info: &ApiMethod,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<user::ApiToken, Error> {
let (config, digest) = user::config()?;
let tokenid = Authid::from((userid, Some(tokenname)));
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
config.lookup("token", &tokenid.to_string())
}
#[api(
protected: true,
input: {
properties: {
userid: {
type: Userid,
},
tokenname: {
type: Tokenname,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
enable: {
schema: user::ENABLE_USER_SCHEMA,
optional: true,
},
expire: {
schema: user::EXPIRE_USER_SCHEMA,
optional: true,
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
access: {
permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
&Permission::UserParam("userid"),
]),
},
returns: {
description: "API token identifier + generated secret.",
properties: {
value: {
type: String,
description: "The API token secret",
},
tokenid: {
type: String,
description: "The API token identifier",
},
},
},
)]
/// Generate a new API token with given metadata
pub fn generate_token(
userid: Userid,
tokenname: Tokenname,
comment: Option<String>,
enable: Option<bool>,
expire: Option<i64>,
digest: Option<String>,
) -> Result<Value, Error> {
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
let (mut config, expected_digest) = user::config()?;
if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let tokenid = Authid::from((userid.clone(), Some(tokenname.clone())));
let tokenid_string = tokenid.to_string();
if let Some(_) = config.sections.get(&tokenid_string) {
bail!("token '{}' for user '{}' already exists.", tokenname.as_str(), userid);
}
let secret = format!("{:x}", proxmox::tools::uuid::Uuid::generate());
token_shadow::set_secret(&tokenid, &secret)?;
let token = user::ApiToken {
tokenid: tokenid.clone(),
comment,
enable,
expire,
};
config.set_data(&tokenid_string, "token", &token)?;
user::save_config(&config)?;
Ok(json!({
"tokenid": tokenid_string,
"value": secret
}))
}
#[api(
protected: true,
input: {
properties: {
userid: {
type: Userid,
},
tokenname: {
type: Tokenname,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
enable: {
schema: user::ENABLE_USER_SCHEMA,
optional: true,
},
expire: {
schema: user::EXPIRE_USER_SCHEMA,
optional: true,
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
access: {
permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
&Permission::UserParam("userid"),
]),
},
)]
/// Update user's API token metadata
pub fn update_token(
userid: Userid,
tokenname: Tokenname,
comment: Option<String>,
enable: Option<bool>,
expire: Option<i64>,
digest: Option<String>,
) -> Result<(), Error> {
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
let (mut config, expected_digest) = user::config()?;
if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let tokenid = Authid::from((userid, Some(tokenname)));
let tokenid_string = tokenid.to_string();
let mut data: user::ApiToken = config.lookup("token", &tokenid_string)?;
if let Some(comment) = comment {
let comment = comment.trim().to_string();
if comment.is_empty() {
data.comment = None;
} else {
data.comment = Some(comment);
}
}
if let Some(enable) = enable {
data.enable = if enable { None } else { Some(false) };
}
if let Some(expire) = expire {
data.expire = if expire > 0 { Some(expire) } else { None };
}
config.set_data(&tokenid_string, "token", &data)?;
user::save_config(&config)?;
Ok(())
}
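
Note how update_token above normalizes its optional fields: enable defaults to true and is therefore only stored when a token is disabled, and an expire value of 0 clears the expiry. A tiny sketch of that normalization with a stand-in struct:

struct TokenData { enable: Option<bool>, expire: Option<i64> }

/// Apply the same enable/expire normalization as update_token above.
fn apply_update(data: &mut TokenData, enable: Option<bool>, expire: Option<i64>) {
    if let Some(enable) = enable {
        data.enable = if enable { None } else { Some(false) };
    }
    if let Some(expire) = expire {
        data.expire = if expire > 0 { Some(expire) } else { None };
    }
}

fn main() {
    let mut data = TokenData { enable: Some(false), expire: Some(1) };
    apply_update(&mut data, Some(true), Some(0));
    assert_eq!(data.enable, None); // enabled tokens store no flag
    assert_eq!(data.expire, None); // expire 0 removes the expiry
}
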
#[api(
protected: true,
input: {
properties: {
userid: {
type: Userid,
},
tokenname: {
type: Tokenname,
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
access: {
permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
&Permission::UserParam("userid"),
]),
},
)]
/// Delete a user's API token
pub fn delete_token(
userid: Userid,
tokenname: Tokenname,
digest: Option<String>,
) -> Result<(), Error> {
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
let (mut config, expected_digest) = user::config()?;
if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let tokenid = Authid::from((userid.clone(), Some(tokenname.clone())));
let tokenid_string = tokenid.to_string();
match config.sections.get(&tokenid_string) {
Some(_) => { config.sections.remove(&tokenid_string); },
None => bail!("token '{}' of user '{}' does not exist.", tokenname.as_str(), userid),
}
token_shadow::delete_secret(&tokenid)?;
user::save_config(&config)?;
Ok(())
}
#[api(
input: {
properties: {
userid: {
type: Userid,
},
},
},
returns: {
description: "List user's API tokens (with config digest).",
type: Array,
items: { type: user::ApiToken },
},
access: {
permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
&Permission::UserParam("userid"),
]),
},
)]
/// List user's API tokens
pub fn list_tokens(
userid: Userid,
_info: &ApiMethod,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<user::ApiToken>, Error> {
let (config, digest) = user::config()?;
let list:Vec<user::ApiToken> = config.convert_to_typed_array("token")?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
let filter_by_owner = |token: &user::ApiToken| {
if token.tokenid.is_token() {
token.tokenid.user() == &userid
} else {
false
}
};
Ok(list.into_iter().filter(filter_by_owner).collect())
}
const TOKEN_ITEM_ROUTER: Router = Router::new()
.get(&API_METHOD_READ_TOKEN)
.put(&API_METHOD_UPDATE_TOKEN)
.post(&API_METHOD_GENERATE_TOKEN)
.delete(&API_METHOD_DELETE_TOKEN);
const TOKEN_ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_TOKENS)
.match_all("tokenname", &TOKEN_ITEM_ROUTER);
const USER_SUBDIRS: SubdirMap = &[
("token", &TOKEN_ROUTER),
];
const USER_ROUTER: Router = Router::new()
.get(&API_METHOD_READ_USER)
.put(&API_METHOD_UPDATE_USER)
.delete(&API_METHOD_DELETE_USER);
.delete(&API_METHOD_DELETE_USER)
.subdirs(USER_SUBDIRS);
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_USERS)
.post(&API_METHOD_CREATE_USER)
.match_all("userid", &ITEM_ROUTER);
.match_all("userid", &USER_ROUTER);

View File

@ -1,4 +1,4 @@
use std::collections::{HashSet, HashMap};
use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::sync::{Arc, Mutex};
@ -29,7 +29,7 @@ use crate::backup::*;
use crate::config::datastore;
use crate::config::cached_user_info::CachedUserInfo;
use crate::server::WorkerTask;
use crate::server::{jobstate::Job, WorkerTask};
use crate::tools::{
self,
zip::{ZipEncoder, ZipEntry},
@ -42,16 +42,33 @@ use crate::config::acl::{
PRIV_DATASTORE_READ,
PRIV_DATASTORE_PRUNE,
PRIV_DATASTORE_BACKUP,
PRIV_DATASTORE_VERIFY,
};
fn check_backup_owner(
fn check_priv_or_backup_owner(
store: &DataStore,
group: &BackupGroup,
userid: &Userid,
auth_id: &Authid,
required_privs: u64,
) -> Result<(), Error> {
let owner = store.get_owner(group)?;
if &owner != userid {
bail!("backup owner check failed ({} != {})", userid, owner);
let user_info = CachedUserInfo::new()?;
let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]);
if privs & required_privs == 0 {
let owner = store.get_owner(group)?;
check_backup_owner(&owner, auth_id)?;
}
Ok(())
}
fn check_backup_owner(
owner: &Authid,
auth_id: &Authid,
) -> Result<(), Error> {
let correct_owner = owner == auth_id
|| (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id);
if !correct_owner {
bail!("backup owner check failed ({} != {})", auth_id, owner);
}
Ok(())
}
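
The relaxed owner check above makes a backup owned by an API token accessible to the token's owning user as well, but not the other way around; callers with a sufficient datastore privilege skip the check entirely. A toy illustration using a stand-in type instead of the crate's Authid:

#[derive(PartialEq)]
struct AuthId { user: String, token: Option<String> }

/// Mirror of the owner rule above: exact match, or the caller is the plain
/// user that owns the backup's API token.
fn owner_matches(owner: &AuthId, caller: &AuthId) -> bool {
    owner == caller
        || (owner.token.is_some() && caller.token.is_none() && owner.user == caller.user)
}

fn main() {
    let user = AuthId { user: "john@pbs".into(), token: None };
    let token = AuthId { user: "john@pbs".into(), token: Some("client1".into()) };
    assert!(owner_matches(&token, &user));  // the user may act on its token's backups
    assert!(!owner_matches(&user, &token)); // a token may not act on the user's backups
}
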
@ -108,19 +125,6 @@ fn get_all_snapshot_files(
Ok((manifest, files))
}
fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
let mut group_hash = HashMap::new();
for info in backup_list {
let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
let time_list = group_hash.entry(group_id).or_insert(vec![]);
time_list.push(info);
}
group_hash
}
#[api(
input: {
properties: {
@ -149,44 +153,69 @@ fn list_groups(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(&store)?;
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
let backup_groups = BackupInfo::list_backup_groups(&datastore.base_path())?;
let group_hash = group_backups(backup_list);
let group_info = backup_groups
.into_iter()
.fold(Vec::new(), |mut group_info, group| {
let owner = match datastore.get_owner(&group) {
Ok(auth_id) => auth_id,
Err(err) => {
eprintln!("Failed to get owner of group '{}/{}' - {}",
&store,
group,
err);
return group_info;
},
};
if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
return group_info;
}
let mut groups = Vec::new();
let snapshots = match group.list_backups(&datastore.base_path()) {
Ok(snapshots) => snapshots,
Err(_) => {
return group_info;
},
};
for (_group_id, mut list) in group_hash {
let backup_count: u64 = snapshots.len() as u64;
if backup_count == 0 {
return group_info;
}
BackupInfo::sort_list(&mut list, false);
let last_backup = snapshots
.iter()
.fold(&snapshots[0], |last, curr| {
if curr.is_finished()
&& curr.backup_dir.backup_time() > last.backup_dir.backup_time() {
curr
} else {
last
}
})
.to_owned();
let info = &list[0];
group_info.push(GroupListItem {
backup_type: group.backup_type().to_string(),
backup_id: group.backup_id().to_string(),
last_backup: last_backup.backup_dir.backup_time(),
owner: Some(owner),
backup_count,
files: last_backup.files,
});
let group = info.backup_dir.group();
group_info
});
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
let owner = datastore.get_owner(group)?;
if !list_all && owner != userid {
continue;
}
let result_item = GroupListItem {
backup_type: group.backup_type().to_string(),
backup_id: group.backup_id().to_string(),
last_backup: info.backup_dir.backup_time(),
backup_count: list.len() as u64,
files: info.files.clone(),
owner: Some(owner),
};
groups.push(result_item);
}
Ok(groups)
Ok(group_info)
}
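
list_groups above now derives each group's last_backup by scanning its snapshots for the newest finished one (empty groups were already skipped a few lines earlier). A minimal stand-alone version of that selection, with a toy snapshot type instead of BackupInfo:

struct Snapshot { backup_time: i64, finished: bool }

/// Pick the newest finished snapshot; like the fold above, it falls back to
/// the first entry if none of the snapshots has finished.
fn last_finished(snapshots: &[Snapshot]) -> &Snapshot {
    snapshots.iter().fold(&snapshots[0], |last, curr| {
        if curr.finished && curr.backup_time > last.backup_time { curr } else { last }
    })
}

fn main() {
    let snaps = [
        Snapshot { backup_time: 100, finished: true },
        Snapshot { backup_time: 200, finished: false }, // still running, skipped
        Snapshot { backup_time: 150, finished: true },
    ];
    assert_eq!(last_finished(&snaps).backup_time, 150);
}
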
#[api(
@ -230,16 +259,12 @@ pub fn list_snapshot_files(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let datastore = DataStore::lookup_datastore(&store)?;
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?;
let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
@ -282,16 +307,12 @@ fn delete_snapshot(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let datastore = DataStore::lookup_datastore(&store)?;
let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
datastore.remove_backup_dir(&snapshot, false)?;
@ -338,47 +359,60 @@ pub fn list_snapshots (
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
let datastore = DataStore::lookup_datastore(&store)?;
let base_path = datastore.base_path();
let backup_list = BackupInfo::list_backups(&base_path)?;
let groups = match (backup_type, backup_id) {
(Some(backup_type), Some(backup_id)) => {
let mut groups = Vec::with_capacity(1);
groups.push(BackupGroup::new(backup_type, backup_id));
groups
},
(Some(backup_type), None) => {
BackupInfo::list_backup_groups(&base_path)?
.into_iter()
.filter(|group| group.backup_type() == backup_type)
.collect()
},
(None, Some(backup_id)) => {
BackupInfo::list_backup_groups(&base_path)?
.into_iter()
.filter(|group| group.backup_id() == backup_id)
.collect()
},
_ => BackupInfo::list_backup_groups(&base_path)?,
};
let mut snapshots = vec![];
let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
let backup_type = group.backup_type().to_string();
let backup_id = group.backup_id().to_string();
let backup_time = info.backup_dir.backup_time();
for info in backup_list {
let group = info.backup_dir.group();
if let Some(ref backup_type) = backup_type {
if backup_type != group.backup_type() { continue; }
}
if let Some(ref backup_id) = backup_id {
if backup_id != group.backup_id() { continue; }
}
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
let owner = datastore.get_owner(group)?;
if !list_all && owner != userid {
continue;
}
let mut size = None;
let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
match get_all_snapshot_files(&datastore, &info) {
Ok((manifest, files)) => {
size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
// extract the first line from notes
let comment: Option<String> = manifest.unprotected["notes"]
.as_str()
.and_then(|notes| notes.lines().next())
.map(String::from);
let verify = manifest.unprotected["verify_state"].clone();
let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
let fingerprint = match manifest.fingerprint() {
Ok(fp) => fp,
Err(err) => {
eprintln!("error parsing fingerprint: '{}'", err);
None
},
};
let verification = manifest.unprotected["verify_state"].clone();
let verification: Option<SnapshotVerifyState> = match serde_json::from_value(verification) {
Ok(verify) => verify,
Err(err) => {
eprintln!("error parsing verification state : '{}'", err);
@ -386,72 +420,114 @@ pub fn list_snapshots (
}
};
(comment, verify, files)
let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
SnapshotListItem {
backup_type,
backup_id,
backup_time,
comment,
verification,
fingerprint,
files,
size,
owner,
}
},
Err(err) => {
eprintln!("error during snapshot file listing: '{}'", err);
(
None,
None,
info
let files = info
.files
.iter()
.into_iter()
.map(|x| BackupContent {
filename: x.to_string(),
size: None,
crypt_mode: None,
})
.collect()
)
.collect();
SnapshotListItem {
backup_type,
backup_id,
backup_time,
comment: None,
verification: None,
fingerprint: None,
files,
size: None,
owner,
}
},
};
}
};
let result_item = SnapshotListItem {
backup_type: group.backup_type().to_string(),
backup_id: group.backup_id().to_string(),
backup_time: info.backup_dir.backup_time(),
comment,
verification,
files,
size,
owner: Some(owner),
};
groups
.iter()
.try_fold(Vec::new(), |mut snapshots, group| {
let owner = match datastore.get_owner(group) {
Ok(auth_id) => auth_id,
Err(err) => {
eprintln!("Failed to get owner of group '{}/{}' - {}",
&store,
group,
err);
return Ok(snapshots);
},
};
snapshots.push(result_item);
}
if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
return Ok(snapshots);
}
Ok(snapshots)
let group_backups = group.list_backups(&datastore.base_path())?;
snapshots.extend(
group_backups
.into_iter()
.map(|info| info_to_snapshot_list_item(&group, Some(owner.clone()), info))
);
Ok(snapshots)
})
}
// returns a map from type to (group_count, snapshot_count)
fn get_snaphots_count(store: &DataStore) -> Result<HashMap<String, (usize, usize)>, Error> {
fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Result<Counts, Error> {
let base_path = store.base_path();
let backup_list = BackupInfo::list_backups(&base_path)?;
let mut groups = HashSet::new();
let mut result: HashMap<String, (usize, usize)> = HashMap::new();
for info in backup_list {
let group = info.backup_dir.group();
let groups = BackupInfo::list_backup_groups(&base_path)?;
let id = group.backup_id();
let backup_type = group.backup_type();
groups.iter()
.filter(|group| {
let owner = match store.get_owner(&group) {
Ok(owner) => owner,
Err(err) => {
eprintln!("Failed to get owner of group '{}/{}' - {}",
store.name(),
group,
err);
return false;
},
};
let mut new_id = false;
if groups.insert(format!("{}-{}", &backup_type, &id)) {
new_id = true;
}
if let Some(mut counts) = result.get_mut(backup_type) {
counts.1 += 1;
if new_id {
counts.0 +=1;
match filter_owner {
Some(filter) => check_backup_owner(&owner, filter).is_ok(),
None => true,
}
} else {
result.insert(backup_type.to_string(), (1, 1));
}
}
})
.try_fold(Counts::default(), |mut counts, group| {
let snapshot_count = group.list_backups(&base_path)?.len() as u64;
Ok(result)
let type_count = match group.backup_type() {
"ct" => counts.ct.get_or_insert(Default::default()),
"vm" => counts.vm.get_or_insert(Default::default()),
"host" => counts.host.get_or_insert(Default::default()),
_ => counts.other.get_or_insert(Default::default()),
};
type_count.groups += 1;
type_count.snapshots += snapshot_count;
Ok(counts)
})
}
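
The rewritten get_snapshots_count above walks the (optionally owner-filtered) groups once and accumulates group and snapshot counts per backup type. Roughly, with a plain map in place of the typed Counts struct:

use std::collections::HashMap;

/// Accumulate (group count, snapshot count) per backup type, as above.
fn count_per_type(groups: &[(&str, u64)]) -> HashMap<&'static str, (u64, u64)> {
    groups.iter().copied().fold(HashMap::new(), |mut counts, (backup_type, snapshots)| {
        let bucket = match backup_type {
            "ct" => "ct",
            "vm" => "vm",
            "host" => "host",
            _ => "other",
        };
        let entry = counts.entry(bucket).or_insert((0, 0));
        entry.0 += 1;         // one more group of this type
        entry.1 += snapshots; // plus its snapshots
        counts
    })
}

fn main() {
    let counts = count_per_type(&[("vm", 3), ("vm", 1), ("ct", 2)]);
    assert_eq!(counts["vm"], (2, 4));
    assert_eq!(counts["ct"], (1, 2));
}
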
#[api(
@ -460,24 +536,17 @@ fn get_snaphots_count(store: &DataStore) -> Result<HashMap<String, (usize, usize
store: {
schema: DATASTORE_SCHEMA,
},
verbose: {
type: bool,
default: false,
optional: true,
description: "Include additional information like snapshot counts and GC status.",
},
},
},
returns: {
description: "The overall Datastore status and information.",
type: Object,
properties: {
storage: {
type: StorageStatus,
},
counts: {
description: "Group and Snapshot counts per Type",
type: Object,
properties: { },
},
"gc-status": {
type: GarbageCollectionStatus,
},
},
type: DataStoreStatus,
},
access: {
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
@ -486,21 +555,38 @@ fn get_snaphots_count(store: &DataStore) -> Result<HashMap<String, (usize, usize
/// Get datastore status.
pub fn status(
store: String,
verbose: bool,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
let storage_status = crate::tools::disks::disk_usage(&datastore.base_path())?;
let counts = get_snaphots_count(&datastore)?;
let gc_status = datastore.last_gc_status();
let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
let (counts, gc_status) = if verbose {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let res = json!({
"storage": storage_status,
"counts": counts,
"gc-status": gc_status,
});
let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
None
} else {
Some(&auth_id)
};
Ok(res)
let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
let gc_status = Some(datastore.last_gc_status());
(counts, gc_status)
} else {
(None, None)
};
Ok(DataStoreStatus {
total: storage.total,
used: storage.used,
avail: storage.avail,
gc_status,
counts,
})
}
#[api(
@ -527,7 +613,7 @@ pub fn status(
schema: UPID_SCHEMA,
},
access: {
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
},
)]
/// Verify backups.
@ -543,6 +629,7 @@ pub fn verify(
) -> Result<Value, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let worker_id;
let mut backup_dir = None;
@ -553,12 +640,18 @@ pub fn verify(
(Some(backup_type), Some(backup_id), Some(backup_time)) => {
worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;
backup_dir = Some(dir);
worker_type = "verify_snapshot";
}
(Some(backup_type), Some(backup_id), None) => {
worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
let group = BackupGroup::new(backup_type, backup_id);
check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;
backup_group = Some(group);
worker_type = "verify_group";
}
@ -568,18 +661,16 @@ pub fn verify(
_ => bail!("parameters do not specify a backup group or snapshot"),
}
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let upid_str = WorkerTask::new_thread(
worker_type,
Some(worker_id.clone()),
userid,
auth_id.clone(),
to_stdout,
move |worker| {
let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
let filter = |_backup_info: &BackupInfo| { true };
let failed_dirs = if let Some(backup_dir) = backup_dir {
let mut res = Vec::new();
@ -590,6 +681,7 @@ pub fn verify(
corrupt_chunks,
worker.clone(),
worker.upid().clone(),
None,
)? {
res.push(backup_dir.to_string());
}
@ -603,14 +695,23 @@ pub fn verify(
None,
worker.clone(),
worker.upid(),
&filter,
None,
)?;
failed_dirs
} else {
verify_all_backups(datastore, worker.clone(), worker.upid(), &filter)?
let privs = CachedUserInfo::new()?
.lookup_privs(&auth_id, &["datastore", &store]);
let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
Some(auth_id)
} else {
None
};
verify_all_backups(datastore, worker.clone(), worker.upid(), owner, None)?
};
if failed_dirs.len() > 0 {
worker.log("Failed to verify following snapshots:");
worker.log("Failed to verify the following snapshots/groups:");
for dir in failed_dirs {
worker.log(format!("\t{}", dir));
}
@ -703,9 +804,7 @@ fn prune(
let backup_type = tools::required_string_param(&param, "backup-type")?;
let backup_id = tools::required_string_param(&param, "backup-id")?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let dry_run = param["dry-run"].as_bool().unwrap_or(false);
@ -713,8 +812,7 @@ fn prune(
let datastore = DataStore::lookup_datastore(&store)?;
let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
if !allowed { check_backup_owner(&datastore, &group, &userid)?; }
check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
let prune_options = PruneOptions {
keep_last: param["keep-last"].as_u64(),
@ -756,7 +854,7 @@ fn prune(
// We use a WorkerTask just to have a task log, but run synchronously
let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;
let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?;
if keep_all {
worker.log("No prune selection - keeping all files.");
@ -831,21 +929,15 @@ fn start_garbage_collection(
) -> Result<Value, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
println!("Starting garbage collection on store {}", store);
let job = Job::new("garbage_collection", &store)
.map_err(|_| format_err!("garbage collection already running"))?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let upid_str = WorkerTask::new_thread(
"garbage_collection",
Some(store.clone()),
Userid::root_userid().clone(),
to_stdout,
move |worker| {
worker.log(format!("starting garbage collection on store {}", store));
datastore.garbage_collection(&*worker, worker.upid())
},
)?;
let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
.map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?;
Ok(json!(upid_str))
}
@ -885,15 +977,7 @@ pub fn garbage_collection_status(
type: Array,
items: {
description: "Datastore name and description.",
properties: {
store: {
schema: DATASTORE_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
},
type: DataStoreListItem,
},
},
access: {
@ -905,24 +989,25 @@ fn get_datastore_list(
_param: Value,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
) -> Result<Vec<DataStoreListItem>, Error> {
let (config, _digest) = datastore::config()?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let mut list = Vec::new();
for (store, (_, data)) in &config.sections {
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
if allowed {
let mut entry = json!({ "store": store });
if let Some(comment) = data["comment"].as_str() {
entry["comment"] = comment.into();
}
list.push(entry);
list.push(
DataStoreListItem {
store: store.clone(),
comment: data["comment"].as_str().map(String::from),
}
);
}
}
@ -960,9 +1045,7 @@ fn download_file(
let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(store)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
@ -972,8 +1055,7 @@ fn download_file(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
@ -1033,9 +1115,7 @@ fn download_file_decoded(
let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(store)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
@ -1045,8 +1125,7 @@ fn download_file_decoded(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
for file in files {
@ -1158,8 +1237,9 @@ fn upload_backup_log(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
check_backup_owner(&datastore, backup_dir.group(), &userid)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let owner = datastore.get_owner(backup_dir.group())?;
check_backup_owner(&owner, &auth_id)?;
let mut path = datastore.base_path();
path.push(backup_dir.relative_path());
@ -1228,14 +1308,11 @@ fn catalog(
) -> Result<Value, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
let file_name = CATALOG_NAME;
@ -1399,9 +1476,7 @@ fn pxar_file_download(
let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(&store)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
@ -1411,8 +1486,7 @@ fn pxar_file_download(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
let mut components = base64::decode(&filepath)?;
if components.len() > 0 && components[0] == '/' as u8 {
@ -1565,7 +1639,7 @@ fn get_rrd_stats(
},
},
access: {
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
},
)]
/// Get "notes" for a specific backup
@ -1578,14 +1652,10 @@ fn get_notes(
) -> Result<String, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;
let (manifest, _) = datastore.load_manifest(&backup_dir)?;
@ -1617,7 +1687,9 @@ fn get_notes(
},
},
access: {
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
permission: &Permission::Privilege(&["datastore", "{store}"],
PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
true),
},
)]
/// Set "notes" for a specific backup
@ -1631,14 +1703,10 @@ fn set_notes(
) -> Result<(), Error> {
let datastore = DataStore::lookup_datastore(&store)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
datastore.update_manifest(&backup_dir,|manifest| {
manifest.unprotected["notes"] = notes.into();
@ -1660,12 +1728,13 @@ fn set_notes(
schema: BACKUP_ID_SCHEMA,
},
"new-owner": {
type: Userid,
type: Authid,
},
},
},
access: {
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
permission: &Permission::Anybody,
description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup"
},
)]
/// Change owner of a backup group
@ -1673,18 +1742,69 @@ fn set_backup_owner(
store: String,
backup_type: String,
backup_id: String,
new_owner: Userid,
_rpcenv: &mut dyn RpcEnvironment,
new_owner: Authid,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let datastore = DataStore::lookup_datastore(&store)?;
let backup_group = BackupGroup::new(backup_type, backup_id);
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
if !user_info.is_active_user(&new_owner) {
bail!("user '{}' is inactive or non-existent", new_owner);
let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
// High-privilege user/token
true
} else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
let owner = datastore.get_owner(&backup_group)?;
match (owner.is_token(), new_owner.is_token()) {
(true, true) => {
// API token to API token, owned by same user
let owner = owner.user();
let new_owner = new_owner.user();
owner == new_owner && Authid::from(owner.clone()) == auth_id
},
(true, false) => {
// API token to API token owner
Authid::from(owner.user().clone()) == auth_id
&& new_owner == auth_id
},
(false, true) => {
// API token owner to API token
owner == auth_id
&& Authid::from(new_owner.user().clone()) == auth_id
},
(false, false) => {
// User to User, not allowed for unprivileged users
false
},
}
} else {
false
};
if !allowed {
return Err(http_err!(UNAUTHORIZED,
"{} does not have permission to change owner of backup group '{}' to {}",
auth_id,
backup_group,
new_owner,
));
}
if !user_info.is_active_auth_id(&new_owner) {
bail!("{} '{}' is inactive or non-existent",
if new_owner.is_token() {
"API token".to_string()
} else {
"user".to_string()
},
new_owner);
}
datastore.set_owner(&backup_group, &new_owner, true)?;
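
For callers that only hold Datastore.Backup, the match above restricts ownership changes to moves between a user and that same user's API tokens; Datastore.Modify on the datastore bypasses the whole check. A toy restatement of the four cases, again with a stand-in auth-id type:

#[derive(PartialEq)]
struct AuthId { user: String, token: Option<String> }

impl AuthId {
    fn is_token(&self) -> bool { self.token.is_some() }
    fn as_user(&self) -> AuthId { AuthId { user: self.user.clone(), token: None } }
}

fn transfer_allowed(caller: &AuthId, owner: &AuthId, new_owner: &AuthId) -> bool {
    match (owner.is_token(), new_owner.is_token()) {
        // token -> token: both tokens must belong to the calling user
        (true, true) => owner.user == new_owner.user && owner.as_user() == *caller,
        // token -> user: only back to the calling user itself
        (true, false) => owner.as_user() == *caller && *new_owner == *caller,
        // user -> token: only from the calling user to one of its own tokens
        (false, true) => *owner == *caller && new_owner.as_user() == *caller,
        // user -> user: never allowed without Datastore.Modify
        (false, false) => false,
    }
}

fn main() {
    let john = AuthId { user: "john@pbs".into(), token: None };
    let client1 = AuthId { user: "john@pbs".into(), token: Some("client1".into()) };
    let other = AuthId { user: "max@pbs".into(), token: Some("backup".into()) };
    assert!(transfer_allowed(&john, &john, &client1)); // own user -> own token
    assert!(transfer_allowed(&john, &client1, &john)); // own token -> own user
    assert!(!transfer_allowed(&john, &john, &other));  // foreign token: denied
}
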

View File

@ -1,12 +1,15 @@
use anyhow::{format_err, Error};
use anyhow::{bail, format_err, Error};
use serde_json::Value;
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{api, ApiMethod, Permission, Router, RpcEnvironment};
use proxmox::api::router::SubdirMap;
use proxmox::{list_subdirs_api_method, sortable};
use crate::api2::types::*;
use crate::api2::pull::do_sync_job;
use crate::api2::config::sync::{check_sync_job_modify_access, check_sync_job_read_access};
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
use crate::server::UPID;
use crate::server::jobstate::{Job, JobState};
@ -27,6 +30,10 @@ use crate::tools::systemd::time::{
type: Array,
items: { type: sync::SyncJobStatus },
},
access: {
description: "Limited to sync jobs where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
permission: &Permission::Anybody,
},
)]
/// List all sync jobs
pub fn list_sync_jobs(
@ -35,6 +42,9 @@ pub fn list_sync_jobs(
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SyncJobStatus>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let (config, digest) = sync::config()?;
let mut list: Vec<SyncJobStatus> = config
@ -46,6 +56,10 @@ pub fn list_sync_jobs(
} else {
true
}
})
.filter(|job: &SyncJobStatus| {
let as_config: SyncJobConfig = job.clone().into();
check_sync_job_read_access(&user_info, &auth_id, &as_config)
}).collect();
for job in &mut list {
@ -89,7 +103,11 @@ pub fn list_sync_jobs(
schema: JOB_ID_SCHEMA,
}
}
}
},
access: {
description: "User needs Datastore.Backup on target datastore, and Remote.Read on source remote. Additionally, remove_vanished requires Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify",
permission: &Permission::Anybody,
},
)]
/// Runs the sync jobs manually.
fn run_sync_job(
@ -97,15 +115,19 @@ fn run_sync_job(
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let (config, _digest) = sync::config()?;
let sync_job: SyncJobConfig = config.lookup("sync", &id)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job) {
bail!("permission check failed");
}
let job = Job::new("syncjob", &id)?;
let upid_str = do_sync_job(job, sync_job, &userid, None)?;
let upid_str = do_sync_job(job, sync_job, &auth_id, None)?;
Ok(upid_str)
}
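
The manual-run permission for sync jobs is described above, but the helper itself (check_sync_job_modify_access in api2::config::sync) is not part of this hunk; the following is only a hedged sketch of the rule as stated, with placeholder privilege bits: Datastore.Backup on the target plus Remote.Read on the source, Datastore.Prune when remove_vanished is set, and Datastore.Modify when the job's owner is not the caller.

const PRIV_DATASTORE_BACKUP: u64 = 1 << 0; // placeholder bit values
const PRIV_DATASTORE_PRUNE: u64 = 1 << 1;
const PRIV_DATASTORE_MODIFY: u64 = 1 << 2;
const PRIV_REMOTE_READ: u64 = 1 << 3;

struct SyncJob { remove_vanished: bool, owner_is_caller: bool }

/// Sketch of the documented rule only, not the actual helper's code.
fn may_run(store_privs: u64, remote_privs: u64, job: &SyncJob) -> bool {
    store_privs & PRIV_DATASTORE_BACKUP != 0
        && remote_privs & PRIV_REMOTE_READ != 0
        && (!job.remove_vanished || store_privs & PRIV_DATASTORE_PRUNE != 0)
        && (job.owner_is_caller || store_privs & PRIV_DATASTORE_MODIFY != 0)
}

fn main() {
    let job = SyncJob { remove_vanished: true, owner_is_caller: true };
    assert!(may_run(PRIV_DATASTORE_BACKUP | PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ, &job));
    assert!(!may_run(PRIV_DATASTORE_BACKUP, PRIV_REMOTE_READ, &job)); // Datastore.Prune missing
}
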

View File

@ -2,11 +2,16 @@ use anyhow::{format_err, Error};
use proxmox::api::router::SubdirMap;
use proxmox::{list_subdirs_api_method, sortable};
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{api, ApiMethod, Permission, Router, RpcEnvironment};
use crate::api2::types::*;
use crate::server::do_verification_job;
use crate::server::jobstate::{Job, JobState};
use crate::config::acl::{
PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_VERIFY,
};
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::verify;
use crate::config::verify::{VerificationJobConfig, VerificationJobStatus};
use serde_json::Value;
@ -23,10 +28,14 @@ use crate::server::UPID;
},
},
returns: {
description: "List configured jobs and their status.",
description: "List configured jobs and their status (filtered by access)",
type: Array,
items: { type: verify::VerificationJobStatus },
},
access: {
permission: &Permission::Anybody,
description: "Requires Datastore.Audit or Datastore.Verify on datastore.",
},
)]
/// List all verification jobs
pub fn list_verification_jobs(
@ -34,6 +43,10 @@ pub fn list_verification_jobs(
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<VerificationJobStatus>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY;
let (config, digest) = verify::config()?;
@ -41,6 +54,11 @@ pub fn list_verification_jobs(
.convert_to_typed_array("verification")?
.into_iter()
.filter(|job: &VerificationJobStatus| {
let privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
if privs & required_privs == 0 {
return false;
}
if let Some(store) = &store {
&job.store == store
} else {
@ -90,7 +108,11 @@ pub fn list_verification_jobs(
schema: JOB_ID_SCHEMA,
}
}
}
},
access: {
permission: &Permission::Anybody,
description: "Requires Datastore.Verify on job's datastore.",
},
)]
/// Runs a verification job manually.
fn run_verification_job(
@ -98,14 +120,17 @@ fn run_verification_job(
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let (config, _digest) = verify::config()?;
let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
user_info.check_privs(&auth_id, &["datastore", &verification_job.store], PRIV_DATASTORE_VERIFY, true)?;
let job = Job::new("verificationjob", &id)?;
let upid_str = do_verification_job(job, verification_job, &userid, None)?;
let upid_str = do_verification_job(job, verification_job, &auth_id, None)?;
Ok(upid_str)
}
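For reference, a standalone sketch of the bitmask test that check_privs performs here, assuming (as the call sites in this series suggest) that the final boolean selects "partial" matching, i.e. any one of the required bits is enough, while partial = false demands all of them. Bit values are illustrative, not the real constants from src/config/acl.rs:

const PRIV_DATASTORE_AUDIT: u64 = 1 << 0;   // illustrative bit values
const PRIV_DATASTORE_VERIFY: u64 = 1 << 1;

fn check_privs(have: u64, required: u64, partial: bool) -> Result<(), String> {
    let ok = if partial {
        have & required != 0          // any required bit present
    } else {
        have & required == required   // all required bits present
    };
    if ok { Ok(()) } else { Err("permission check failed".to_string()) }
}

fn main() {
    let have = PRIV_DATASTORE_AUDIT;
    // partial match: Audit alone satisfies "Audit or Verify"
    assert!(check_privs(have, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY, true).is_ok());
    // full match: Audit alone does not satisfy "Audit and Verify"
    assert!(check_privs(have, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY, false).is_err());
}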

View File

@ -59,12 +59,12 @@ async move {
let debug = param["debug"].as_bool().unwrap_or(false);
let benchmark = param["benchmark"].as_bool().unwrap_or(false);
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = tools::required_string_param(&param, "store")?.to_owned();
let user_info = CachedUserInfo::new()?;
user_info.check_privs(&userid, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
user_info.check_privs(&auth_id, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
let datastore = DataStore::lookup_datastore(&store)?;
@ -105,12 +105,15 @@ async move {
};
// lock backup group to only allow one backup per group at a time
let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &userid)?;
let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &auth_id)?;
// permission check
if owner != userid && worker_type != "benchmark" {
let correct_owner = owner == auth_id
|| (owner.is_token()
&& Authid::from(owner.user().clone()) == auth_id);
if !correct_owner && worker_type != "benchmark" {
// only the owner is allowed to create additional snapshots
bail!("backup owner check failed ({} != {})", userid, owner);
bail!("backup owner check failed ({} != {})", auth_id, owner);
}
let last_backup = {
@ -153,9 +156,9 @@ async move {
if !is_new { bail!("backup directory already exists."); }
WorkerTask::spawn(worker_type, Some(worker_id), userid.clone(), true, move |worker| {
WorkerTask::spawn(worker_type, Some(worker_id), auth_id.clone(), true, move |worker| {
let mut env = BackupEnvironment::new(
env_type, userid, worker.clone(), datastore, backup_dir);
env_type, auth_id, worker.clone(), datastore, backup_dir);
env.debug = debug;
env.last_backup = last_backup;
@ -308,6 +311,10 @@ pub const BACKUP_API_SUBDIRS: SubdirMap = &[
"previous", &Router::new()
.download(&API_METHOD_DOWNLOAD_PREVIOUS)
),
(
"previous_backup_time", &Router::new()
.get(&API_METHOD_GET_PREVIOUS_BACKUP_TIME)
),
(
"speedtest", &Router::new()
.upload(&API_METHOD_UPLOAD_SPEEDTEST)
@ -691,6 +698,28 @@ fn finish_backup (
Ok(Value::Null)
}
#[sortable]
pub const API_METHOD_GET_PREVIOUS_BACKUP_TIME: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&get_previous_backup_time),
&ObjectSchema::new(
"Get previous backup time.",
&[],
)
);
fn get_previous_backup_time(
_param: Value,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let env: &BackupEnvironment = rpcenv.as_ref();
let backup_time = env.last_backup.as_ref().map(|info| info.backup_dir.backup_time());
Ok(json!(backup_time))
}
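The handler hands the optional timestamp straight to json!, so clients get a number when a previous snapshot exists and JSON null otherwise. A small standalone example of that serde_json behaviour (the epoch value is made up):

use serde_json::json;

fn main() {
    // previous backup exists: serializes as a plain number
    let previous: Option<i64> = Some(1606224000);
    assert_eq!(json!(previous).to_string(), "1606224000");

    // no previous backup: serializes as null
    let none: Option<i64> = None;
    assert_eq!(json!(none).to_string(), "null");
}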
#[sortable]
pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
&ApiHandler::AsyncHttp(&download_previous),

View File

@ -10,7 +10,7 @@ use proxmox::tools::digest_to_hex;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
use crate::api2::types::Userid;
use crate::api2::types::Authid;
use crate::backup::*;
use crate::server::WorkerTask;
use crate::server::formatter::*;
@ -104,7 +104,7 @@ impl SharedBackupState {
pub struct BackupEnvironment {
env_type: RpcEnvironmentType,
result_attributes: Value,
user: Userid,
auth_id: Authid,
pub debug: bool,
pub formatter: &'static OutputFormatter,
pub worker: Arc<WorkerTask>,
@ -117,7 +117,7 @@ pub struct BackupEnvironment {
impl BackupEnvironment {
pub fn new(
env_type: RpcEnvironmentType,
user: Userid,
auth_id: Authid,
worker: Arc<WorkerTask>,
datastore: Arc<DataStore>,
backup_dir: BackupDir,
@ -137,7 +137,7 @@ impl BackupEnvironment {
Self {
result_attributes: json!({}),
env_type,
user,
auth_id,
worker,
datastore,
debug: false,
@ -518,7 +518,7 @@ impl BackupEnvironment {
WorkerTask::new_thread(
"verify",
Some(worker_id),
self.user.clone(),
self.auth_id.clone(),
false,
move |worker| {
worker.log("Automatically verifying newly added snapshot");
@ -533,6 +533,7 @@ impl BackupEnvironment {
corrupt_chunks,
worker.clone(),
worker.upid().clone(),
None,
snap_lock,
)? {
bail!("verification failed - please check the log for details");
@ -598,12 +599,12 @@ impl RpcEnvironment for BackupEnvironment {
self.env_type
}
fn set_user(&mut self, _user: Option<String>) {
panic!("unable to change user");
fn set_auth_id(&mut self, _auth_id: Option<String>) {
panic!("unable to change auth_id");
}
fn get_user(&self) -> Option<String> {
Some(self.user.to_string())
fn get_auth_id(&self) -> Option<String> {
Some(self.auth_id.to_string())
}
}
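A reduced sketch of the trait change this hunk implements, i.e. the set_user/get_user pair becoming set_auth_id/get_auth_id; the real RpcEnvironment trait in the proxmox crate has further methods (result attributes, env_type) that are omitted here:

// Minimal model: the backup environment is bound to one auth id for its lifetime.
trait SketchRpcEnvironment {
    fn set_auth_id(&mut self, auth_id: Option<String>);
    fn get_auth_id(&self) -> Option<String>;
}

struct SketchBackupEnv {
    auth_id: String,
}

impl SketchRpcEnvironment for SketchBackupEnv {
    fn set_auth_id(&mut self, _auth_id: Option<String>) {
        panic!("unable to change auth_id");
    }
    fn get_auth_id(&self) -> Option<String> {
        Some(self.auth_id.clone())
    }
}

fn main() {
    let env = SketchBackupEnv { auth_id: "backup@pbs!client1".to_string() };
    assert_eq!(env.get_auth_id().as_deref(), Some("backup@pbs!client1"));
}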

View File

@ -5,6 +5,7 @@ use serde_json::Value;
use ::serde::{Deserialize, Serialize};
use proxmox::api::{api, Router, RpcEnvironment, Permission};
use proxmox::api::schema::parse_property_string;
use proxmox::tools::fs::open_file_locked;
use crate::api2::types::*;
@ -35,14 +36,14 @@ pub fn list_datastores(
let (config, digest) = datastore::config()?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
let list:Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
let filter_by_privs = |store: &DataStoreConfig| {
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store.name]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store.name]);
(user_privs & PRIV_DATASTORE_AUDIT) != 0
};
@ -68,6 +69,14 @@ pub fn list_datastores(
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
"notify-user": {
optional: true,
type: Userid,
},
"notify": {
optional: true,
schema: DATASTORE_NOTIFY_STRING_SCHEMA,
},
"gc-schedule": {
optional: true,
schema: GC_SCHEDULE_SCHEMA,
@ -187,6 +196,12 @@ pub enum DeletableProperty {
keep_monthly,
/// Delete the keep-yearly property
keep_yearly,
/// Delete the verify-new property
verify_new,
/// Delete the notify-user property
notify_user,
/// Delete the notify property
notify,
}
#[api(
@ -200,6 +215,14 @@ pub enum DeletableProperty {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
"notify-user": {
optional: true,
type: Userid,
},
"notify": {
optional: true,
schema: DATASTORE_NOTIFY_STRING_SCHEMA,
},
"gc-schedule": {
optional: true,
schema: GC_SCHEDULE_SCHEMA,
@ -232,6 +255,12 @@ pub enum DeletableProperty {
optional: true,
schema: PRUNE_SCHEMA_KEEP_YEARLY,
},
"verify-new": {
description: "If enabled, all new backups will be verified right after completion.",
type: bool,
optional: true,
default: false,
},
delete: {
description: "List of properties to delete.",
type: Array,
@ -262,6 +291,9 @@ pub fn update_datastore(
keep_weekly: Option<u64>,
keep_monthly: Option<u64>,
keep_yearly: Option<u64>,
verify_new: Option<bool>,
notify: Option<String>,
notify_user: Option<Userid>,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
) -> Result<(), Error> {
@ -290,6 +322,9 @@ pub fn update_datastore(
DeletableProperty::keep_weekly => { data.keep_weekly = None; },
DeletableProperty::keep_monthly => { data.keep_monthly = None; },
DeletableProperty::keep_yearly => { data.keep_yearly = None; },
DeletableProperty::verify_new => { data.verify_new = None; },
DeletableProperty::notify => { data.notify = None; },
DeletableProperty::notify_user => { data.notify_user = None; },
}
}
}
@ -322,6 +357,19 @@ pub fn update_datastore(
if keep_monthly.is_some() { data.keep_monthly = keep_monthly; }
if keep_yearly.is_some() { data.keep_yearly = keep_yearly; }
if let Some(notify_str) = notify {
let value = parse_property_string(&notify_str, &DatastoreNotify::API_SCHEMA)?;
let notify: DatastoreNotify = serde_json::from_value(value)?;
if let DatastoreNotify { gc: None, verify: None, sync: None } = notify {
data.notify = None;
} else {
data.notify = Some(notify_str);
}
}
if verify_new.is_some() { data.verify_new = verify_new; }
if notify_user.is_some() { data.notify_user = notify_user; }
config.set_data(&name, "datastore", &data)?;
datastore::save_config(&config)?;
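The notify option is stored as a property string and normalized away when every field ends up unset, as the DatastoreNotify { gc: None, verify: None, sync: None } match above shows. A self-contained sketch of that parse-then-normalize step; the real schema is DATASTORE_NOTIFY_STRING_SCHEMA, and the key/value names used here (gc=never, verify=always, ...) are assumptions for illustration:

#[derive(Default)]
struct NotifySketch {
    gc: Option<String>,
    verify: Option<String>,
    sync: Option<String>,
}

fn parse_notify(s: &str) -> NotifySketch {
    let mut out = NotifySketch::default();
    for part in s.split(',').filter(|p| !p.is_empty()) {
        if let Some((key, value)) = part.split_once('=') {
            match key {
                "gc" => out.gc = Some(value.to_string()),
                "verify" => out.verify = Some(value.to_string()),
                "sync" => out.sync = Some(value.to_string()),
                _ => {} // the real code rejects unknown keys via the API schema
            }
        }
    }
    out
}

fn main() {
    let parsed = parse_notify("gc=never,verify=always");
    assert_eq!(parsed.gc.as_deref(), Some("never"));
    // an empty property string normalizes to "no notify config stored"
    let empty = parse_notify("");
    assert!(empty.gc.is_none() && empty.verify.is_none() && empty.sync.is_none());
}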

View File

@ -1,11 +1,14 @@
use anyhow::{bail, Error};
use anyhow::{bail, format_err, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::http_err;
use proxmox::tools::fs::open_file_locked;
use crate::api2::types::*;
use crate::client::{HttpClient, HttpClientOptions};
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::remote;
use crate::config::acl::{PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY};
@ -22,7 +25,8 @@ use crate::config::acl::{PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY};
},
},
access: {
permission: &Permission::Privilege(&["remote"], PRIV_REMOTE_AUDIT, false),
description: "List configured remotes filtered by Remote.Audit privileges",
permission: &Permission::Anybody,
},
)]
/// List all remotes
@ -31,16 +35,25 @@ pub fn list_remotes(
_info: &ApiMethod,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<remote::Remote>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let (config, digest) = remote::config()?;
let mut list: Vec<remote::Remote> = config.convert_to_typed_array("remote")?;
// don't return password in api
for remote in &mut list {
remote.password = "".to_string();
}
let list = list
.into_iter()
.filter(|remote| {
let privs = user_info.lookup_privs(&auth_id, &["remote", &remote.name]);
privs & PRIV_REMOTE_AUDIT != 0
})
.collect();
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(list)
}
@ -65,8 +78,8 @@ pub fn list_remotes(
optional: true,
default: 8007,
},
userid: {
type: Userid,
"auth-id": {
type: Authid,
},
password: {
schema: remote::REMOTE_PASSWORD_SCHEMA,
@ -165,9 +178,9 @@ pub enum DeletableProperty {
type: u16,
optional: true,
},
userid: {
"auth-id": {
optional: true,
type: Userid,
type: Authid,
},
password: {
optional: true,
@ -201,7 +214,7 @@ pub fn update_remote(
comment: Option<String>,
host: Option<String>,
port: Option<u16>,
userid: Option<Userid>,
auth_id: Option<Authid>,
password: Option<String>,
fingerprint: Option<String>,
delete: Option<Vec<DeletableProperty>>,
@ -239,7 +252,7 @@ pub fn update_remote(
}
if let Some(host) = host { data.host = host; }
if port.is_some() { data.port = port; }
if let Some(userid) = userid { data.userid = userid; }
if let Some(auth_id) = auth_id { data.auth_id = auth_id; }
if let Some(password) = password { data.password = password; }
if let Some(fingerprint) = fingerprint { data.fingerprint = Some(fingerprint); }
@ -271,6 +284,17 @@ pub fn update_remote(
/// Remove a remote from the configuration file.
pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error> {
use crate::config::sync::{self, SyncJobConfig};
let (sync_jobs, _) = sync::config()?;
let job_list: Vec<SyncJobConfig> = sync_jobs.convert_to_typed_array("sync")?;
for job in job_list {
if job.remote == name {
bail!("remote '{}' is used by sync job '{}' (datastore '{}')", name, job.id, job.store);
}
}
let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
let (mut config, expected_digest) = remote::config()?;
@ -290,10 +314,83 @@ pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error>
Ok(())
}
/// Helper to get client for remote.cfg entry
pub async fn remote_client(remote: remote::Remote) -> Result<HttpClient, Error> {
let options = HttpClientOptions::new()
.password(Some(remote.password.clone()))
.fingerprint(remote.fingerprint.clone());
let client = HttpClient::new(
&remote.host,
remote.port.unwrap_or(8007),
&remote.auth_id,
options)?;
let _auth_info = client.login() // make sure we can auth
.await
.map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;
Ok(client)
}
#[api(
input: {
properties: {
name: {
schema: REMOTE_ID_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["remote", "{name}"], PRIV_REMOTE_AUDIT, false),
},
returns: {
description: "List the accessible datastores.",
type: Array,
items: {
description: "Datastore name and description.",
type: DataStoreListItem,
},
},
)]
/// List datastores of a remote.cfg entry
pub async fn scan_remote_datastores(name: String) -> Result<Vec<DataStoreListItem>, Error> {
let (remote_config, _digest) = remote::config()?;
let remote: remote::Remote = remote_config.lookup("remote", &name)?;
let map_remote_err = |api_err| {
http_err!(INTERNAL_SERVER_ERROR,
"failed to scan remote '{}' - {}",
&name,
api_err)
};
let client = remote_client(remote)
.await
.map_err(map_remote_err)?;
let api_res = client
.get("api2/json/admin/datastore", None)
.await
.map_err(map_remote_err)?;
let parse_res = match api_res.get("data") {
Some(data) => serde_json::from_value::<Vec<DataStoreListItem>>(data.to_owned()),
None => bail!("remote {} did not return any datastore list data", &name),
};
match parse_res {
Ok(parsed) => Ok(parsed),
Err(_) => bail!("Failed to parse remote scan api result."),
}
}
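The response handling above follows the usual Proxmox envelope, where the payload sits under a top-level "data" key; the new route itself is registered as a "scan" subdirectory of each remote entry (see SCAN_ROUTER below). A standalone example of the same serde_json unwrapping pattern, with a made-up item type standing in for DataStoreListItem:

use serde::Deserialize;
use serde_json::json;

#[derive(Deserialize)]
struct StoreItem {
    store: String,
    comment: Option<String>,
}

fn main() {
    // a fabricated API response envelope for illustration
    let api_res = json!({
        "data": [
            { "store": "backup1", "comment": "nightly" },
            { "store": "backup2", "comment": null }
        ]
    });

    let items: Vec<StoreItem> = match api_res.get("data") {
        Some(data) => serde_json::from_value(data.clone()).expect("parse failed"),
        None => panic!("no datastore list data"),
    };
    assert_eq!(items.len(), 2);
    assert_eq!(items[0].store, "backup1");
    assert!(items[1].comment.is_none());
}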
const SCAN_ROUTER: Router = Router::new()
.get(&API_METHOD_SCAN_REMOTE_DATASTORES);
const ITEM_ROUTER: Router = Router::new()
.get(&API_METHOD_READ_REMOTE)
.put(&API_METHOD_UPDATE_REMOTE)
.delete(&API_METHOD_DELETE_REMOTE);
.delete(&API_METHOD_DELETE_REMOTE)
.subdirs(&[("scan", &SCAN_ROUTER)]);
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_REMOTES)

View File

@ -2,13 +2,73 @@ use anyhow::{bail, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
use proxmox::api::{api, Router, RpcEnvironment};
use proxmox::api::{api, Permission, Router, RpcEnvironment};
use proxmox::tools::fs::open_file_locked;
use crate::api2::types::*;
use crate::config::acl::{
PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_BACKUP,
PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_PRUNE,
PRIV_REMOTE_AUDIT,
PRIV_REMOTE_READ,
};
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::sync::{self, SyncJobConfig};
// fixme: add access permissions
pub fn check_sync_job_read_access(
user_info: &CachedUserInfo,
auth_id: &Authid,
job: &SyncJobConfig,
) -> bool {
let datastore_privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
if datastore_privs & PRIV_DATASTORE_AUDIT == 0 {
return false;
}
let remote_privs = user_info.lookup_privs(&auth_id, &["remote", &job.remote]);
remote_privs & PRIV_REMOTE_AUDIT != 0
}
// user can run the corresponding pull job
pub fn check_sync_job_modify_access(
user_info: &CachedUserInfo,
auth_id: &Authid,
job: &SyncJobConfig,
) -> bool {
let datastore_privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
if datastore_privs & PRIV_DATASTORE_BACKUP == 0 {
return false;
}
if let Some(true) = job.remove_vanished {
if datastore_privs & PRIV_DATASTORE_PRUNE == 0 {
return false;
}
}
let correct_owner = match job.owner {
Some(ref owner) => {
owner == auth_id
|| (owner.is_token()
&& !auth_id.is_token()
&& owner.user() == auth_id.user())
},
// default sync owner
None => auth_id == Authid::root_auth_id(),
};
// same permission as changing ownership after syncing
if !correct_owner && datastore_privs & PRIV_DATASTORE_MODIFY == 0 {
return false;
}
let remote_privs = user_info.lookup_privs(&auth_id, &["remote", &job.remote, &job.remote_store]);
remote_privs & PRIV_REMOTE_READ != 0
}
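A compact model of the owner rule encoded above: a user may manage a sync job owned by one of their own API tokens, while a token may only manage jobs it owns itself. The types are illustrative, not the real Authid:

#[derive(PartialEq)]
struct Auth {
    user: String,
    token: Option<String>,
}

impl Auth {
    fn is_token(&self) -> bool {
        self.token.is_some()
    }
}

// mirrors the `correct_owner` expression from check_sync_job_modify_access
fn correct_owner(owner: &Auth, auth_id: &Auth) -> bool {
    owner == auth_id
        || (owner.is_token() && !auth_id.is_token() && owner.user == auth_id.user)
}

fn main() {
    let user = Auth { user: "write@pbs".into(), token: None };
    let own_token = Auth { user: "write@pbs".into(), token: Some("sync".into()) };
    let other_user = Auth { user: "read@pbs".into(), token: None };

    assert!(correct_owner(&own_token, &user));        // user manages their own token's job
    assert!(!correct_owner(&user, &own_token));       // a token may not manage the user's job
    assert!(!correct_owner(&own_token, &other_user)); // unrelated user is rejected
}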
#[api(
input: {
@ -19,12 +79,18 @@ use crate::config::sync::{self, SyncJobConfig};
type: Array,
items: { type: sync::SyncJobConfig },
},
access: {
description: "Limited to sync job entries where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
permission: &Permission::Anybody,
},
)]
/// List all sync jobs
pub fn list_sync_jobs(
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SyncJobConfig>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let (config, digest) = sync::config()?;
@ -32,7 +98,11 @@ pub fn list_sync_jobs(
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(list)
let list = list
.into_iter()
.filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, &sync_job))
.collect();
Ok(list)
}
#[api(
@ -45,6 +115,10 @@ pub fn list_sync_jobs(
store: {
schema: DATASTORE_SCHEMA,
},
owner: {
type: Authid,
optional: true,
},
remote: {
schema: REMOTE_ID_SCHEMA,
},
@ -65,13 +139,25 @@ pub fn list_sync_jobs(
},
},
},
access: {
description: "User needs Datastore.Backup on target datastore, and Remote.Read on source remote. Additionally, remove_vanished requires Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify",
permission: &Permission::Anybody,
},
)]
/// Create a new sync job.
pub fn create_sync_job(param: Value) -> Result<(), Error> {
pub fn create_sync_job(
param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
let sync_job: sync::SyncJobConfig = serde_json::from_value(param.clone())?;
if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job) {
bail!("permission check failed");
}
let (mut config, _digest) = sync::config()?;
@ -100,15 +186,26 @@ pub fn create_sync_job(param: Value) -> Result<(), Error> {
description: "The sync job configuration.",
type: sync::SyncJobConfig,
},
access: {
description: "Limited to sync job entries where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
permission: &Permission::Anybody,
},
)]
/// Read a sync job configuration.
pub fn read_sync_job(
id: String,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<SyncJobConfig, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let (config, digest) = sync::config()?;
let sync_job = config.lookup("sync", &id)?;
if !check_sync_job_read_access(&user_info, &auth_id, &sync_job) {
bail!("permission check failed");
}
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(sync_job)
@ -120,6 +217,8 @@ pub fn read_sync_job(
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
/// Delete the owner property.
owner,
/// Delete the comment property.
comment,
/// Delete the job schedule.
@ -139,6 +238,10 @@ pub enum DeletableProperty {
schema: DATASTORE_SCHEMA,
optional: true,
},
owner: {
type: Authid,
optional: true,
},
remote: {
schema: REMOTE_ID_SCHEMA,
optional: true,
@ -173,11 +276,16 @@ pub enum DeletableProperty {
},
},
},
access: {
permission: &Permission::Anybody,
description: "User needs Datastore.Backup on target datastore, and Remote.Read on source remote. Additionally, remove_vanished requires Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify",
},
)]
/// Update sync job config.
pub fn update_sync_job(
id: String,
store: Option<String>,
owner: Option<Authid>,
remote: Option<String>,
remote_store: Option<String>,
remove_vanished: Option<bool>,
@ -185,7 +293,10 @@ pub fn update_sync_job(
schedule: Option<String>,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
@ -202,6 +313,7 @@ pub fn update_sync_job(
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::owner => { data.owner = None; },
DeletableProperty::comment => { data.comment = None; },
DeletableProperty::schedule => { data.schedule = None; },
DeletableProperty::remove_vanished => { data.remove_vanished = None; },
@ -221,11 +333,15 @@ pub fn update_sync_job(
if let Some(store) = store { data.store = store; }
if let Some(remote) = remote { data.remote = remote; }
if let Some(remote_store) = remote_store { data.remote_store = remote_store; }
if let Some(owner) = owner { data.owner = Some(owner); }
if schedule.is_some() { data.schedule = schedule; }
if remove_vanished.is_some() { data.remove_vanished = remove_vanished; }
if !check_sync_job_modify_access(&user_info, &auth_id, &data) {
bail!("permission check failed");
}
config.set_data(&id, "sync", &data)?;
sync::save_config(&config)?;
@ -246,9 +362,19 @@ pub fn update_sync_job(
},
},
},
access: {
permission: &Permission::Anybody,
description: "User needs Datastore.Backup on target datastore, and Remote.Read on source remote. Additionally, remove_vanished requires Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify",
},
)]
/// Remove a sync job configuration
pub fn delete_sync_job(id: String, digest: Option<String>) -> Result<(), Error> {
pub fn delete_sync_job(
id: String,
digest: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
@ -259,10 +385,15 @@ pub fn delete_sync_job(id: String, digest: Option<String>) -> Result<(), Error>
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
match config.sections.get(&id) {
Some(_) => { config.sections.remove(&id); },
None => bail!("job '{}' does not exist.", id),
}
match config.lookup("sync", &id) {
Ok(job) => {
if !check_sync_job_modify_access(&user_info, &auth_id, &job) {
bail!("permission check failed");
}
config.sections.remove(&id);
},
Err(_) => { bail!("job '{}' does not exist.", id) },
};
sync::save_config(&config)?;
@ -280,3 +411,116 @@ pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_SYNC_JOBS)
.post(&API_METHOD_CREATE_SYNC_JOB)
.match_all("id", &ITEM_ROUTER);
#[test]
fn sync_job_access_test() -> Result<(), Error> {
let (user_cfg, _) = crate::config::user::test_cfg_from_str(r###"
user: noperm@pbs
user: read@pbs
user: write@pbs
"###).expect("test user.cfg is not parsable");
let acl_tree = crate::config::acl::AclTree::from_raw(r###"
acl:1:/datastore/localstore1:read@pbs,write@pbs:DatastoreAudit
acl:1:/datastore/localstore1:write@pbs:DatastoreBackup
acl:1:/datastore/localstore2:write@pbs:DatastorePowerUser
acl:1:/datastore/localstore3:write@pbs:DatastoreAdmin
acl:1:/remote/remote1:read@pbs,write@pbs:RemoteAudit
acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
"###).expect("test acl.cfg is not parsable");
let user_info = CachedUserInfo::test_new(user_cfg, acl_tree);
let root_auth_id = Authid::root_auth_id();
let no_perm_auth_id: Authid = "noperm@pbs".parse()?;
let read_auth_id: Authid = "read@pbs".parse()?;
let write_auth_id: Authid = "write@pbs".parse()?;
let mut job = SyncJobConfig {
id: "regular".to_string(),
remote: "remote0".to_string(),
remote_store: "remotestore1".to_string(),
store: "localstore0".to_string(),
owner: Some(write_auth_id.clone()),
comment: None,
remove_vanished: None,
schedule: None,
};
// should work without ACLs
assert_eq!(check_sync_job_read_access(&user_info, &root_auth_id, &job), true);
assert_eq!(check_sync_job_modify_access(&user_info, &root_auth_id, &job), true);
// user without permissions must fail
assert_eq!(check_sync_job_read_access(&user_info, &no_perm_auth_id, &job), false);
assert_eq!(check_sync_job_modify_access(&user_info, &no_perm_auth_id, &job), false);
// reading without proper read permissions on either remote or local must fail
assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), false);
// reading without proper read permissions on local end must fail
job.remote = "remote1".to_string();
assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), false);
// reading without proper read permissions on remote end must fail
job.remote = "remote0".to_string();
job.store = "localstore1".to_string();
assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), false);
// writing without proper write permissions on either end must fail
job.store = "localstore0".to_string();
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
// writing without proper write permissions on local end must fail
job.remote = "remote1".to_string();
// writing without proper write permissions on remote end must fail
job.remote = "remote0".to_string();
job.store = "localstore1".to_string();
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
// reset remote to one where users have access
job.remote = "remote1".to_string();
// user with read permission can only read, but not modify/run
assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), true);
job.owner = Some(read_auth_id.clone());
assert_eq!(check_sync_job_modify_access(&user_info, &read_auth_id, &job), false);
job.owner = None;
assert_eq!(check_sync_job_modify_access(&user_info, &read_auth_id, &job), false);
job.owner = Some(write_auth_id.clone());
assert_eq!(check_sync_job_modify_access(&user_info, &read_auth_id, &job), false);
// user with simple write permission can modify/run
assert_eq!(check_sync_job_read_access(&user_info, &write_auth_id, &job), true);
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
// but can't modify/run with deletion
job.remove_vanished = Some(true);
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
// unless they have Datastore.Prune as well
job.store = "localstore2".to_string();
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
// changing owner is not possible
job.owner = Some(read_auth_id.clone());
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
// also not to the default 'root@pam'
job.owner = None;
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
// unless they have Datastore.Modify as well
job.store = "localstore3".to_string();
job.owner = Some(read_auth_id.clone());
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
job.owner = None;
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
Ok(())
}

View File

@ -2,10 +2,18 @@ use anyhow::{bail, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
use proxmox::api::{api, Router, RpcEnvironment};
use proxmox::api::{api, Permission, Router, RpcEnvironment};
use proxmox::tools::fs::open_file_locked;
use crate::api2::types::*;
use crate::config::acl::{
PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_VERIFY,
};
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::verify::{self, VerificationJobConfig};
#[api(
@ -17,17 +25,32 @@ use crate::config::verify::{self, VerificationJobConfig};
type: Array,
items: { type: verify::VerificationJobConfig },
},
access: {
permission: &Permission::Anybody,
description: "Requires Datastore.Audit or Datastore.Verify on datastore.",
},
)]
/// List all verification jobs
pub fn list_verification_jobs(
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<VerificationJobConfig>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY;
let (config, digest) = verify::config()?;
let list = config.convert_to_typed_array("verification")?;
let list = list.into_iter()
.filter(|job: &VerificationJobConfig| {
let privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
privs & required_privs != 0
}).collect();
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(list)
@ -61,15 +84,26 @@ pub fn list_verification_jobs(
schema: VERIFICATION_SCHEDULE_SCHEMA,
},
}
}
},
access: {
permission: &Permission::Anybody,
description: "Requires Datastore.Verify on job's datastore.",
},
)]
/// Create a new verification job.
pub fn create_verification_job(param: Value) -> Result<(), Error> {
let _lock = open_file_locked(verify::VERIFICATION_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
pub fn create_verification_job(
param: Value,
rpcenv: &mut dyn RpcEnvironment
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let verification_job: verify::VerificationJobConfig = serde_json::from_value(param.clone())?;
user_info.check_privs(&auth_id, &["datastore", &verification_job.store], PRIV_DATASTORE_VERIFY, false)?;
let _lock = open_file_locked(verify::VERIFICATION_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
let (mut config, _digest) = verify::config()?;
if let Some(_) = config.sections.get(&verification_job.id) {
@ -97,15 +131,26 @@ pub fn create_verification_job(param: Value) -> Result<(), Error> {
description: "The verification job configuration.",
type: verify::VerificationJobConfig,
},
access: {
permission: &Permission::Anybody,
description: "Requires Datastore.Audit or Datastore.Verify on job's datastore.",
},
)]
/// Read a verification job configuration.
pub fn read_verification_job(
id: String,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<VerificationJobConfig, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let (config, digest) = verify::config()?;
let verification_job = config.lookup("verification", &id)?;
let verification_job: verify::VerificationJobConfig = config.lookup("verification", &id)?;
let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY;
user_info.check_privs(&auth_id, &["datastore", &verification_job.store], required_privs, true)?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(verification_job)
@ -167,6 +212,10 @@ pub enum DeletableProperty {
},
},
},
access: {
permission: &Permission::Anybody,
description: "Requires Datastore.Verify on job's datastore.",
},
)]
/// Update verification job config.
pub fn update_verification_job(
@ -178,7 +227,10 @@ pub fn update_verification_job(
schedule: Option<String>,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let _lock = open_file_locked(verify::VERIFICATION_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
@ -192,7 +244,10 @@ pub fn update_verification_job(
let mut data: verify::VerificationJobConfig = config.lookup("verification", &id)?;
if let Some(delete) = delete {
// check existing store
user_info.check_privs(&auth_id, &["datastore", &data.store], PRIV_DATASTORE_VERIFY, true)?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::IgnoreVerified => { data.ignore_verified = None; },
@ -212,7 +267,12 @@ pub fn update_verification_job(
}
}
if let Some(store) = store { data.store = store; }
if let Some(store) = store {
// check new store
user_info.check_privs(&auth_id, &["datastore", &store], PRIV_DATASTORE_VERIFY, true)?;
data.store = store;
}
if ignore_verified.is_some() { data.ignore_verified = ignore_verified; }
if outdated_after.is_some() { data.outdated_after = outdated_after; }
@ -238,14 +298,27 @@ pub fn update_verification_job(
},
},
},
access: {
permission: &Permission::Anybody,
description: "Requires Datastore.Verify on job's datastore.",
},
)]
/// Remove a verification job configuration
pub fn delete_verification_job(id: String, digest: Option<String>) -> Result<(), Error> {
pub fn delete_verification_job(
id: String,
digest: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let _lock = open_file_locked(verify::VERIFICATION_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
let (mut config, expected_digest) = verify::config()?;
let job: verify::VerificationJobConfig = config.lookup("verification", &id)?;
user_info.check_privs(&auth_id, &["datastore", &job.store], PRIV_DATASTORE_VERIFY, true)?;
if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
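All of these config writers follow the same optimistic-locking scheme: the client passes back the digest it originally read, and the update bails if the on-disk config changed in the meantime. A minimal standalone sketch of that comparison (the real helpers are proxmox::tools::hex_to_digest and tools::detect_modified_configuration_file, and real digests are SHA-256):

// Sketch of the digest check used by the config update/delete handlers.
fn detect_modified_configuration_file(
    user_digest: &[u8; 4],
    expected_digest: &[u8; 4],
) -> Result<(), String> {
    if user_digest != expected_digest {
        return Err("detected modified configuration - file changed by other user? Try again.".into());
    }
    Ok(())
}

fn main() {
    let on_disk = [0xde, 0xad, 0xbe, 0xef]; // digest of the config as currently stored
    let client = [0xde, 0xad, 0xbe, 0xef];  // digest the client read before editing
    assert!(detect_modified_configuration_file(&client, &on_disk).is_ok());

    let stale = [0x00, 0x11, 0x22, 0x33];   // someone else changed the file in between
    assert!(detect_modified_configuration_file(&stale, &on_disk).is_err());
}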

View File

@ -24,20 +24,21 @@ use crate::server::WorkerTask;
use crate::tools;
use crate::tools::ticket::{self, Empty, Ticket};
pub mod apt;
pub mod disks;
pub mod dns;
pub mod network;
pub mod tasks;
pub mod subscription;
pub(crate) mod rrd;
mod apt;
mod journal;
mod services;
pub(crate) mod services;
mod status;
mod subscription;
mod syslog;
mod time;
mod report;
pub const SHELL_CMD_SCHEMA: Schema = StringSchema::new("The command to run.")
.format(&ApiStringFormat::Enum(&[
@ -91,10 +92,12 @@ async fn termproxy(
cmd: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
// intentionally user only for now
let userid: Userid = rpcenv
.get_user()
.get_auth_id()
.ok_or_else(|| format_err!("unknown user"))?
.parse()?;
let auth_id = Authid::from(userid.clone());
if userid.realm() != "pam" {
bail!("only pam users can use the console");
@ -137,7 +140,7 @@ async fn termproxy(
let upid = WorkerTask::spawn(
"termproxy",
None,
userid,
auth_id,
false,
move |worker| async move {
// move inside the worker so that it survives and does not close the port
@ -272,7 +275,8 @@ fn upgrade_to_websocket(
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
async move {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
// intentionally user only for now
let userid: Userid = rpcenv.get_auth_id().unwrap().parse()?;
let ticket = tools::required_string_param(&param, "vncticket")?;
let port: u16 = tools::required_integer_param(&param, "port")? as u16;
@ -307,6 +311,7 @@ pub const SUBDIRS: SubdirMap = &[
("dns", &dns::ROUTER),
("journal", &journal::ROUTER),
("network", &network::ROUTER),
("report", &report::ROUTER),
("rrd", &rrd::ROUTER),
("services", &services::ROUTER),
("status", &status::ROUTER),

View File

@ -1,301 +1,16 @@
use std::collections::HashSet;
use apt_pkg_native::Cache;
use anyhow::{Error, bail, format_err};
use serde_json::{json, Value};
use std::collections::HashMap;
use proxmox::{list_subdirs_api_method, const_regex};
use proxmox::list_subdirs_api_method;
use proxmox::api::{api, RpcEnvironment, RpcEnvironmentType, Permission};
use proxmox::api::router::{Router, SubdirMap};
use crate::server::WorkerTask;
use crate::tools::http;
use crate::tools::{apt, http, subscription};
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::api2::types::{APTUpdateInfo, NODE_SCHEMA, Userid, UPID_SCHEMA};
const_regex! {
VERSION_EPOCH_REGEX = r"^\d+:";
FILENAME_EXTRACT_REGEX = r"^.*/.*?_(.*)_Packages$";
}
// FIXME: once the 'changelog' API call switches over to 'apt-get changelog' only,
// consider removing this function entirely, as it's value is never used anywhere
// then (widget-toolkit doesn't use the value either)
fn get_changelog_url(
package: &str,
filename: &str,
version: &str,
origin: &str,
component: &str,
) -> Result<String, Error> {
if origin == "" {
bail!("no origin available for package {}", package);
}
if origin == "Debian" {
let mut command = std::process::Command::new("apt-get");
command.arg("changelog");
command.arg("--print-uris");
command.arg(package);
let output = crate::tools::run_command(command, None)?; // format: 'http://foo/bar' package.changelog
let output = match output.splitn(2, ' ').next() {
Some(output) => {
if output.len() < 2 {
bail!("invalid output (URI part too short) from 'apt-get changelog --print-uris': {}", output)
}
output[1..output.len()-1].to_owned()
},
None => bail!("invalid output from 'apt-get changelog --print-uris': {}", output)
};
return Ok(output);
} else if origin == "Proxmox" {
// FIXME: Use above call to 'apt changelog <pkg> --print-uris' as well.
// Currently not possible as our packages do not have a URI set in their Release file.
let version = (VERSION_EPOCH_REGEX.regex_obj)().replace_all(version, "");
let base = match (FILENAME_EXTRACT_REGEX.regex_obj)().captures(filename) {
Some(captures) => {
let base_capture = captures.get(1);
match base_capture {
Some(base_underscore) => base_underscore.as_str().replace("_", "/"),
None => bail!("incompatible filename, cannot find regex group")
}
},
None => bail!("incompatible filename, doesn't match regex")
};
return Ok(format!("http://download.proxmox.com/{}/{}_{}.changelog",
base, package, version));
}
bail!("unknown origin ({}) or component ({})", origin, component)
}
struct FilterData<'a> {
// this is version info returned by APT
installed_version: Option<&'a str>,
candidate_version: &'a str,
// this is the version info the filter is supposed to check
active_version: &'a str,
}
enum PackagePreSelect {
OnlyInstalled,
OnlyNew,
All,
}
fn list_installed_apt_packages<F: Fn(FilterData) -> bool>(
filter: F,
only_versions_for: Option<&str>,
) -> Vec<APTUpdateInfo> {
let mut ret = Vec::new();
let mut depends = HashSet::new();
// note: this is not an 'apt update', it just re-reads the cache from disk
let mut cache = Cache::get_singleton();
cache.reload();
let mut cache_iter = match only_versions_for {
Some(name) => cache.find_by_name(name),
None => cache.iter()
};
loop {
match cache_iter.next() {
Some(view) => {
let di = if only_versions_for.is_some() {
query_detailed_info(
PackagePreSelect::All,
&filter,
view,
None
)
} else {
query_detailed_info(
PackagePreSelect::OnlyInstalled,
&filter,
view,
Some(&mut depends)
)
};
if let Some(info) = di {
ret.push(info);
}
if only_versions_for.is_some() {
break;
}
},
None => {
drop(cache_iter);
// also loop through missing dependencies, as they would be installed
for pkg in depends.iter() {
let mut iter = cache.find_by_name(&pkg);
let view = match iter.next() {
Some(view) => view,
None => continue // package not found, ignore
};
let di = query_detailed_info(
PackagePreSelect::OnlyNew,
&filter,
view,
None
);
if let Some(info) = di {
ret.push(info);
}
}
break;
}
}
}
return ret;
}
fn query_detailed_info<'a, F, V>(
pre_select: PackagePreSelect,
filter: F,
view: V,
depends: Option<&mut HashSet<String>>,
) -> Option<APTUpdateInfo>
where
F: Fn(FilterData) -> bool,
V: std::ops::Deref<Target = apt_pkg_native::sane::PkgView<'a>>
{
let current_version = view.current_version();
let candidate_version = view.candidate_version();
let (current_version, candidate_version) = match pre_select {
PackagePreSelect::OnlyInstalled => match (current_version, candidate_version) {
(Some(cur), Some(can)) => (Some(cur), can), // package installed and there is an update
(Some(cur), None) => (Some(cur.clone()), cur), // package installed and up-to-date
(None, Some(_)) => return None, // package could be installed
(None, None) => return None, // broken
},
PackagePreSelect::OnlyNew => match (current_version, candidate_version) {
(Some(_), Some(_)) => return None,
(Some(_), None) => return None,
(None, Some(can)) => (None, can),
(None, None) => return None,
},
PackagePreSelect::All => match (current_version, candidate_version) {
(Some(cur), Some(can)) => (Some(cur), can),
(Some(cur), None) => (Some(cur.clone()), cur),
(None, Some(can)) => (None, can),
(None, None) => return None,
},
};
// get additional information via nested APT 'iterators'
let mut view_iter = view.versions();
while let Some(ver) = view_iter.next() {
let package = view.name();
let version = ver.version();
let mut origin_res = "unknown".to_owned();
let mut section_res = "unknown".to_owned();
let mut priority_res = "unknown".to_owned();
let mut change_log_url = "".to_owned();
let mut short_desc = package.clone();
let mut long_desc = "".to_owned();
let fd = FilterData {
installed_version: current_version.as_deref(),
candidate_version: &candidate_version,
active_version: &version,
};
if filter(fd) {
if let Some(section) = ver.section() {
section_res = section;
}
if let Some(prio) = ver.priority_type() {
priority_res = prio;
}
// assume every package has only one origin file (not
// origin, but origin *file*, for some reason those seem to
// be different concepts in APT)
let mut origin_iter = ver.origin_iter();
let origin = origin_iter.next();
if let Some(origin) = origin {
if let Some(sd) = origin.short_desc() {
short_desc = sd;
}
if let Some(ld) = origin.long_desc() {
long_desc = ld;
}
// the package files appear in priority order, meaning
// the one for the candidate version is first - this is fine
// however, as the source package should be the same for all
// versions anyway
let mut pkg_iter = origin.file();
let pkg_file = pkg_iter.next();
if let Some(pkg_file) = pkg_file {
if let Some(origin_name) = pkg_file.origin() {
origin_res = origin_name;
}
let filename = pkg_file.file_name();
let component = pkg_file.component();
// build changelog URL from gathered information
// ignore errors, use empty changelog instead
let url = get_changelog_url(&package, &filename,
&version, &origin_res, &component);
if let Ok(url) = url {
change_log_url = url;
}
}
}
if let Some(depends) = depends {
let mut dep_iter = ver.dep_iter();
loop {
let dep = match dep_iter.next() {
Some(dep) if dep.dep_type() != "Depends" => continue,
Some(dep) => dep,
None => break
};
let dep_pkg = dep.target_pkg();
let name = dep_pkg.name();
depends.insert(name);
}
}
return Some(APTUpdateInfo {
package,
title: short_desc,
arch: view.arch(),
description: long_desc,
change_log_url,
origin: origin_res,
version: candidate_version.clone(),
old_version: match current_version {
Some(vers) => vers,
None => "".to_owned()
},
priority: priority_res,
section: section_res,
});
}
}
return None;
}
use crate::api2::types::{Authid, APTUpdateInfo, NODE_SCHEMA, UPID_SCHEMA};
#[api(
input: {
@ -308,19 +23,60 @@ where
returns: {
description: "A list of packages with available updates.",
type: Array,
items: { type: APTUpdateInfo },
items: {
type: APTUpdateInfo
},
},
protected: true,
access: {
permission: &Permission::Privilege(&[], PRIV_SYS_AUDIT, false),
},
)]
/// List available APT updates
fn apt_update_available(_param: Value) -> Result<Value, Error> {
let all_upgradeable = list_installed_apt_packages(|data| {
data.candidate_version == data.active_version &&
data.installed_version != Some(data.candidate_version)
}, None);
Ok(json!(all_upgradeable))
match apt::pkg_cache_expired() {
Ok(false) => {
if let Ok(Some(cache)) = apt::read_pkg_state() {
return Ok(json!(cache.package_status));
}
},
_ => (),
}
let cache = apt::update_cache()?;
return Ok(json!(cache.package_status));
}
fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
if !quiet { worker.log("starting apt-get update") }
// TODO: set proxy /etc/apt/apt.conf.d/76pbsproxy like PVE
let mut command = std::process::Command::new("apt-get");
command.arg("update");
// apt "errors" quite easily, and run_command is a bit rigid, so handle this inline for now.
let output = command.output()
.map_err(|err| format_err!("failed to execute {:?} - {}", command, err))?;
if !quiet {
worker.log(String::from_utf8(output.stdout)?);
}
// TODO: improve run_command to allow outputting both, stderr and stdout
if !output.status.success() {
if output.status.code().is_some() {
let msg = String::from_utf8(output.stderr)
.map(|m| if m.is_empty() { String::from("no error message") } else { m })
.unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"));
worker.warn(msg);
} else {
bail!("terminated by signal");
}
}
Ok(())
}
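do_apt_update captures stdout and stderr separately instead of going through run_command; a standalone Unix sketch of the same pattern with std::process::Command (the shell command is purely illustrative, standing in for an apt-get run that emits repository warnings):

use std::process::Command;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // prints to stdout and stderr and exits non-zero, to exercise both paths
    let output = Command::new("sh")
        .arg("-c")
        .arg("echo 'reading package lists'; echo 'W: some repo warning' >&2; exit 1")
        .output()
        .map_err(|err| format!("failed to execute command - {}", err))?;

    // log stdout unconditionally (the worker does this unless `quiet` is set)
    println!("{}", String::from_utf8(output.stdout)?);

    if !output.status.success() {
        if output.status.code().is_some() {
            // non-zero exit: surface stderr as a warning instead of failing hard
            let msg = String::from_utf8(output.stderr)
                .unwrap_or_else(|_| "non utf8 error message (suppressed)".to_string());
            eprintln!("warning: {}", msg.trim());
        } else {
            // killed by a signal: treat as a hard error
            return Err("terminated by signal".into());
        }
    }
    Ok(())
}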
#[api(
@ -330,6 +86,13 @@ fn apt_update_available(_param: Value) -> Result<Value, Error> {
node: {
schema: NODE_SCHEMA,
},
notify: {
type: bool,
description: r#"Send notification mail about new package updates availanle to the
email address configured for 'root@pam')."#,
optional: true,
default: false,
},
quiet: {
description: "Only produces output suitable for logging, omitting progress indicators.",
type: bool,
@ -347,26 +110,46 @@ fn apt_update_available(_param: Value) -> Result<Value, Error> {
)]
/// Update the APT database
pub fn apt_update_database(
notify: Option<bool>,
quiet: Option<bool>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
// FIXME: change to non-option in signature and drop below once we have proxmox-api-macro 0.2.3
let quiet = quiet.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_QUIET);
let notify = notify.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_NOTIFY);
let upid_str = WorkerTask::new_thread("aptupdate", None, userid, to_stdout, move |worker| {
if !quiet { worker.log("starting apt-get update") }
let upid_str = WorkerTask::new_thread("aptupdate", None, auth_id, to_stdout, move |worker| {
do_apt_update(&worker, quiet)?;
// TODO: set proxy /etc/apt/apt.conf.d/76pbsproxy like PVE
let mut cache = apt::update_cache()?;
let mut command = std::process::Command::new("apt-get");
command.arg("update");
if notify {
let mut notified = match cache.notified {
Some(notified) => notified,
None => std::collections::HashMap::new(),
};
let mut to_notify: Vec<&APTUpdateInfo> = Vec::new();
let output = crate::tools::run_command(command, None)?;
if !quiet { worker.log(output) }
// TODO: add mail notify for new updates like PVE
for pkg in &cache.package_status {
match notified.insert(pkg.package.to_owned(), pkg.version.to_owned()) {
Some(notified_version) => {
if notified_version != pkg.version {
to_notify.push(pkg);
}
},
None => to_notify.push(pkg),
}
}
if !to_notify.is_empty() {
to_notify.sort_unstable_by_key(|k| &k.package);
crate::server::send_updates_available(&to_notify)?;
}
cache.notified = Some(notified);
apt::write_pkg_cache(&cache)?;
}
Ok(())
})?;
@ -406,7 +189,7 @@ fn apt_get_changelog(
let name = crate::tools::required_string_param(&param, "name")?.to_owned();
let version = param["version"].as_str();
let pkg_info = list_installed_apt_packages(|data| {
let pkg_info = apt::list_installed_apt_packages(|data| {
match version {
Some(version) => version == data.active_version,
None => data.active_version == data.candidate_version
@ -420,9 +203,34 @@ fn apt_get_changelog(
let changelog_url = &pkg_info[0].change_log_url;
// FIXME: use 'apt-get changelog' for proxmox packages as well, once repo supports it
if changelog_url.starts_with("http://download.proxmox.com/") {
let changelog = crate::tools::runtime::block_on(http::get_string(changelog_url))
let changelog = crate::tools::runtime::block_on(http::get_string(changelog_url, None))
.map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
return Ok(json!(changelog));
} else if changelog_url.starts_with("https://enterprise.proxmox.com/") {
let sub = match subscription::read_subscription()? {
Some(sub) => sub,
None => bail!("cannot retrieve changelog from enterprise repo: no subscription info found")
};
let (key, id) = match sub.key {
Some(key) => {
match sub.serverid {
Some(id) => (key, id),
None =>
bail!("cannot retrieve changelog from enterprise repo: no server id found")
}
},
None => bail!("cannot retrieve changelog from enterprise repo: no subscription key found")
};
let mut auth_header = HashMap::new();
auth_header.insert("Authorization".to_owned(),
format!("Basic {}", base64::encode(format!("{}:{}", key, id))));
let changelog = crate::tools::runtime::block_on(http::get_string(changelog_url, Some(&auth_header)))
.map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
return Ok(json!(changelog));
} else {
let mut command = std::process::Command::new("apt-get");
command.arg("changelog");
@ -433,12 +241,128 @@ fn apt_get_changelog(
}
}
#[api(
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
},
},
returns: {
description: "List of more relevant packages.",
type: Array,
items: {
type: APTUpdateInfo,
},
},
access: {
permission: &Permission::Privilege(&[], PRIV_SYS_AUDIT, false),
},
)]
/// Get package information for important Proxmox Backup Server packages.
pub fn get_versions() -> Result<Vec<APTUpdateInfo>, Error> {
const PACKAGES: &[&str] = &[
"ifupdown2",
"libjs-extjs",
"proxmox-backup",
"proxmox-backup-docs",
"proxmox-backup-client",
"proxmox-backup-server",
"proxmox-mini-journalreader",
"proxmox-widget-toolkit",
"pve-xtermjs",
"smartmontools",
"zfsutils-linux",
];
fn unknown_package(package: String, extra_info: Option<String>) -> APTUpdateInfo {
APTUpdateInfo {
package,
title: "unknown".into(),
arch: "unknown".into(),
description: "unknown".into(),
version: "unknown".into(),
old_version: "unknown".into(),
origin: "unknown".into(),
priority: "unknown".into(),
section: "unknown".into(),
change_log_url: "unknown".into(),
extra_info,
}
}
let is_kernel = |name: &str| name.starts_with("pve-kernel-");
let mut packages: Vec<APTUpdateInfo> = Vec::new();
let pbs_packages = apt::list_installed_apt_packages(
|filter| {
filter.installed_version == Some(filter.active_version)
&& (is_kernel(filter.package) || PACKAGES.contains(&filter.package))
},
None,
);
let running_kernel = format!(
"running kernel: {}",
nix::sys::utsname::uname().release().to_owned()
);
if let Some(proxmox_backup) = pbs_packages.iter().find(|pkg| pkg.package == "proxmox-backup") {
let mut proxmox_backup = proxmox_backup.clone();
proxmox_backup.extra_info = Some(running_kernel);
packages.push(proxmox_backup);
} else {
packages.push(unknown_package("proxmox-backup".into(), Some(running_kernel)));
}
let version = crate::api2::version::PROXMOX_PKG_VERSION;
let release = crate::api2::version::PROXMOX_PKG_RELEASE;
let daemon_version_info = Some(format!("running version: {}.{}", version, release));
if let Some(pkg) = pbs_packages.iter().find(|pkg| pkg.package == "proxmox-backup-server") {
let mut pkg = pkg.clone();
pkg.extra_info = daemon_version_info;
packages.push(pkg);
} else {
packages.push(unknown_package("proxmox-backup".into(), daemon_version_info));
}
let mut kernel_pkgs: Vec<APTUpdateInfo> = pbs_packages
.iter()
.filter(|pkg| is_kernel(&pkg.package))
.cloned()
.collect();
// make sure the cache mutex gets dropped before the next call to list_installed_apt_packages
{
let cache = apt_pkg_native::Cache::get_singleton();
kernel_pkgs.sort_by(|left, right| {
cache
.compare_versions(&left.old_version, &right.old_version)
.reverse()
});
}
packages.append(&mut kernel_pkgs);
// add entry for all packages we're interested in, even if not installed
for pkg in PACKAGES.iter() {
if pkg == &"proxmox-backup" || pkg == &"proxmox-backup-server" {
continue;
}
match pbs_packages.iter().find(|item| &item.package == pkg) {
Some(apt_pkg) => packages.push(apt_pkg.to_owned()),
None => packages.push(unknown_package(pkg.to_string(), None)),
}
}
Ok(packages)
}
const SUBDIRS: SubdirMap = &[
("changelog", &Router::new().get(&API_METHOD_APT_GET_CHANGELOG)),
("update", &Router::new()
.get(&API_METHOD_APT_UPDATE_AVAILABLE)
.post(&API_METHOD_APT_UPDATE_DATABASE)
),
("versions", &Router::new().get(&API_METHOD_GET_VERSIONS)),
];
pub const ROUTER: Router = Router::new()

View File

@ -13,7 +13,7 @@ use crate::tools::disks::{
};
use crate::server::WorkerTask;
use crate::api2::types::{Userid, UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};
use crate::api2::types::{Authid, UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};
pub mod directory;
pub mod zfs;
@ -140,7 +140,7 @@ pub fn initialize_disk(
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let info = get_disk_usage_info(&disk, true)?;
@ -149,7 +149,7 @@ pub fn initialize_disk(
}
let upid_str = WorkerTask::new_thread(
"diskinit", Some(disk.clone()), userid, to_stdout, move |worker|
"diskinit", Some(disk.clone()), auth_id, to_stdout, move |worker|
{
worker.log(format!("initialize disk {}", disk));

View File

@ -134,7 +134,7 @@ pub fn create_datastore_disk(
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let info = get_disk_usage_info(&disk, true)?;
@ -142,8 +142,20 @@ pub fn create_datastore_disk(
bail!("disk '{}' is already in use.", disk);
}
let mount_point = format!("/mnt/datastore/{}", &name);
// check if the default path does exist already and bail if it does
let default_path = std::path::PathBuf::from(&mount_point);
match std::fs::metadata(&default_path) {
Err(_) => {}, // path does not exist
Ok(_) => {
bail!("path {:?} already exists", default_path);
}
}
let upid_str = WorkerTask::new_thread(
"dircreate", Some(name.clone()), userid, to_stdout, move |worker|
"dircreate", Some(name.clone()), auth_id, to_stdout, move |worker|
{
worker.log(format!("create datastore '{}' on disk {}", name, disk));
@ -160,7 +172,7 @@ pub fn create_datastore_disk(
let uuid = get_fs_uuid(&partition)?;
let uuid_path = format!("/dev/disk/by-uuid/{}", uuid);
let (mount_unit_name, mount_point) = create_datastore_mount_unit(&name, filesystem, &uuid_path)?;
let mount_unit_name = create_datastore_mount_unit(&name, &mount_point, filesystem, &uuid_path)?;
systemd::reload_daemon()?;
systemd::enable_unit(&mount_unit_name)?;
@ -243,11 +255,11 @@ pub const ROUTER: Router = Router::new()
fn create_datastore_mount_unit(
datastore_name: &str,
mount_point: &str,
fs_type: FileSystemType,
what: &str,
) -> Result<(String, String), Error> {
) -> Result<String, Error> {
let mount_point = format!("/mnt/datastore/{}", datastore_name);
let mut mount_unit_name = systemd::escape_unit(&mount_point, true);
mount_unit_name.push_str(".mount");
@ -265,7 +277,7 @@ fn create_datastore_mount_unit(
let mount = SystemdMountSection {
What: what.to_string(),
Where: mount_point.clone(),
Where: mount_point.to_string(),
Type: Some(fs_type.to_string()),
Options: Some(String::from("defaults")),
..Default::default()
@ -278,5 +290,5 @@ fn create_datastore_mount_unit(
systemd::config::save_systemd_mount(&mount_unit_path, &config)?;
Ok((mount_unit_name, mount_point))
Ok(mount_unit_name)
}
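The mount unit name is derived from the mount point via systemd path escaping plus a ".mount" suffix. A simplified stand-in for systemd::escape_unit that only handles plain ASCII paths like the "/mnt/datastore/<name>" ones generated here (the real helper, equivalent to systemd-escape --path, also encodes special characters):

fn escape_unit_path(path: &str) -> String {
    path.trim_matches('/').replace('/', "-")
}

fn main() {
    let mount_point = "/mnt/datastore/store1";
    let mut mount_unit_name = escape_unit_path(mount_point);
    mount_unit_name.push_str(".mount");
    assert_eq!(mount_unit_name, "mnt-datastore-store1.mount");
}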

View File

@ -243,7 +243,7 @@ pub fn zpool_details(
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
},
)]
/// Create a new ZFS pool.
/// Create a new ZFS pool. Will be mounted under '/mnt/datastore/<name>'.
pub fn create_zpool(
name: String,
devices: String,
@ -256,7 +256,7 @@ pub fn create_zpool(
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let add_datastore = add_datastore.unwrap_or(false);
@ -303,10 +303,11 @@ pub fn create_zpool(
bail!("{:?} needs at least {} disks.", raidlevel, min_disks);
}
let mount_point = format!("/mnt/datastore/{}", &name);
// check if the default path does exist already and bail if it does
// otherwise we get an error on mounting
let mut default_path = std::path::PathBuf::from("/");
default_path.push(&name);
// otherwise 'zpool create' aborts after partitioning, but before creating the pool
let default_path = std::path::PathBuf::from(&mount_point);
match std::fs::metadata(&default_path) {
Err(_) => {}, // path does not exist
@ -316,13 +317,13 @@ pub fn create_zpool(
}
let upid_str = WorkerTask::new_thread(
"zfscreate", Some(name.clone()), userid, to_stdout, move |worker|
"zfscreate", Some(name.clone()), auth_id, to_stdout, move |worker|
{
worker.log(format!("create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text));
let mut command = std::process::Command::new("zpool");
command.args(&["create", "-o", &format!("ashift={}", ashift), &name]);
command.args(&["create", "-o", &format!("ashift={}", ashift), "-m", &mount_point, &name]);
match raidlevel {
ZfsRaidLevel::Single => {
@ -371,7 +372,6 @@ pub fn create_zpool(
}
if add_datastore {
let mount_point = format!("/{}", name);
crate::api2::config::datastore::create_datastore(json!({ "name": name, "path": mount_point }))?
}

View File

@ -684,9 +684,9 @@ pub async fn reload_network_config(
network::assert_ifupdown2_installed()?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), userid, true, |_worker| async {
let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), auth_id, true, |_worker| async {
let _ = std::fs::rename(network::NETWORK_INTERFACES_NEW_FILENAME, network::NETWORK_INTERFACES_FILENAME);

src/api2/node/report.rs Normal file
View File

@ -0,0 +1,35 @@
use anyhow::Error;
use proxmox::api::{api, ApiMethod, Permission, Router, RpcEnvironment};
use serde_json::{json, Value};
use crate::api2::types::*;
use crate::config::acl::PRIV_SYS_AUDIT;
use crate::server::generate_report;
#[api(
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
},
},
returns: {
type: String,
description: "Returns report of the node"
},
access: {
permission: &Permission::Privilege(&["system", "status"], PRIV_SYS_AUDIT, false),
},
)]
/// Generate a report
fn get_report(
_param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
Ok(json!(generate_report()))
}
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_GET_REPORT);

View File

@ -22,7 +22,7 @@ static SERVICE_NAME_LIST: [&str; 7] = [
"systemd-timesyncd",
];
fn real_service_name(service: &str) -> &str {
pub fn real_service_name(service: &str) -> &str {
// since postfix package 3.1.0-3.1 the postfix unit is only here
// to manage subinstances, of which the default is called "-".
@ -182,7 +182,7 @@ fn get_service_state(
Ok(json_service_state(&service, status))
}
fn run_service_command(service: &str, cmd: &str, userid: Userid) -> Result<Value, Error> {
fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Value, Error> {
let workerid = format!("srv{}", &cmd);
@ -196,7 +196,7 @@ fn run_service_command(service: &str, cmd: &str, userid: Userid) -> Result<Value
let upid = WorkerTask::new_thread(
&workerid,
Some(service.clone()),
userid,
auth_id,
false,
move |_worker| {
@ -244,11 +244,11 @@ fn start_service(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("starting service {}", service);
run_service_command(&service, "start", userid)
run_service_command(&service, "start", auth_id)
}
#[api(
@ -274,11 +274,11 @@ fn stop_service(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("stopping service {}", service);
run_service_command(&service, "stop", userid)
run_service_command(&service, "stop", auth_id)
}
#[api(
@ -304,15 +304,15 @@ fn restart_service(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("re-starting service {}", service);
if &service == "proxmox-backup-proxy" {
// special case, avoid aborting running tasks
run_service_command(&service, "reload", userid)
run_service_command(&service, "reload", auth_id)
} else {
run_service_command(&service, "restart", userid)
run_service_command(&service, "restart", auth_id)
}
}
@ -339,11 +339,11 @@ fn reload_service(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("reloading service {}", service);
run_service_command(&service, "reload", userid)
run_service_command(&service, "reload", auth_id)
}

View File

@ -7,7 +7,7 @@ use crate::tools;
use crate::tools::subscription::{self, SubscriptionStatus, SubscriptionInfo};
use crate::config::acl::{PRIV_SYS_AUDIT,PRIV_SYS_MODIFY};
use crate::config::cached_user_info::CachedUserInfo;
use crate::api2::types::{NODE_SCHEMA, Userid};
use crate::api2::types::{NODE_SCHEMA, SUBSCRIPTION_KEY_SCHEMA, Authid};
#[api(
input: {
@ -29,7 +29,7 @@ use crate::api2::types::{NODE_SCHEMA, Userid};
},
)]
/// Check and update subscription status.
fn check_subscription(
pub fn check_subscription(
force: bool,
) -> Result<(), Error> {
// FIXME: drop once proxmox-api-macro is bumped to >> 5.0.0-1
@ -82,7 +82,7 @@ fn check_subscription(
},
)]
/// Read subscription info.
fn get_subscription(
pub fn get_subscription(
_param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<SubscriptionInfo, Error> {
@ -100,9 +100,9 @@ fn get_subscription(
},
};
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &[]);
let user_privs = user_info.lookup_privs(&auth_id, &[]);
if (user_privs & PRIV_SYS_AUDIT) == 0 {
// not enough privileges for full state
@ -124,9 +124,7 @@ fn get_subscription(
schema: NODE_SCHEMA,
},
key: {
description: "Proxmox Backup Server subscription key",
type: String,
max_length: 32,
schema: SUBSCRIPTION_KEY_SCHEMA,
},
},
},
@ -136,7 +134,7 @@ fn get_subscription(
},
)]
/// Set a subscription key and check it.
fn set_subscription(
pub fn set_subscription(
key: String,
) -> Result<(), Error> {
@ -164,7 +162,7 @@ fn set_subscription(
},
)]
/// Delete subscription info.
fn delete_subscription() -> Result<(), Error> {
pub fn delete_subscription() -> Result<(), Error> {
subscription::delete_subscription()
.map_err(|err| format_err!("Deleting subscription failed: {}", err))?;

View File

@ -134,12 +134,18 @@ fn get_syslog(
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let service = if let Some(service) = param["service"].as_str() {
Some(crate::api2::node::services::real_service_name(service))
} else {
None
};
let (count, lines) = dump_journal(
param["start"].as_u64(),
param["limit"].as_u64(),
param["since"].as_str(),
param["until"].as_str(),
param["service"].as_str())?;
service)?;
rpcenv["total"] = Value::from(count);

View File

@ -1,7 +1,7 @@
use std::fs::File;
use std::io::{BufRead, BufReader};
use anyhow::{Error};
use anyhow::{bail, Error};
use serde_json::{json, Value};
use proxmox::api::{api, Router, RpcEnvironment, Permission};
@ -9,11 +9,119 @@ use proxmox::api::router::SubdirMap;
use proxmox::{identity, list_subdirs_api_method, sortable};
use crate::tools;
use crate::api2::types::*;
use crate::api2::pull::check_pull_privs;
use crate::server::{self, UPID, TaskState, TaskListInfoIterator};
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::config::acl::{
PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_VERIFY,
PRIV_SYS_AUDIT,
PRIV_SYS_MODIFY,
};
use crate::config::cached_user_info::CachedUserInfo;
// matches respective job execution privileges
fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) -> Result<(), Error> {
match (upid.worker_type.as_str(), &upid.worker_id) {
("verificationjob", Some(workerid)) => {
if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(&workerid) {
if let Some(store) = captures.get(1) {
return user_info.check_privs(&auth_id,
&["datastore", store.as_str()],
PRIV_DATASTORE_VERIFY,
true);
}
}
},
("syncjob", Some(workerid)) => {
if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(&workerid) {
let remote = captures.get(1);
let remote_store = captures.get(2);
let local_store = captures.get(3);
if let (Some(remote), Some(remote_store), Some(local_store)) =
(remote, remote_store, local_store) {
return check_pull_privs(&auth_id,
local_store.as_str(),
remote.as_str(),
remote_store.as_str(),
false);
}
}
},
("garbage_collection", Some(workerid)) => {
return user_info.check_privs(&auth_id,
&["datastore", &workerid],
PRIV_DATASTORE_MODIFY,
true)
},
("prune", Some(workerid)) => {
return user_info.check_privs(&auth_id,
&["datastore",
&workerid],
PRIV_DATASTORE_MODIFY,
true);
},
_ => bail!("not a scheduled job task"),
};
bail!("not a scheduled job task");
}
// get the store out of the worker_id
fn check_job_store(upid: &UPID, store: &str) -> bool {
match (upid.worker_type.as_str(), &upid.worker_id) {
(workertype, Some(workerid)) if workertype.starts_with("verif") => {
if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(&workerid) {
if let Some(jobstore) = captures.get(1) {
return store == jobstore.as_str();
}
} else {
return workerid == store;
}
}
("syncjob", Some(workerid)) => {
if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(&workerid) {
if let Some(local_store) = captures.get(3) {
return store == local_store.as_str();
}
}
}
("prune", Some(workerid))
| ("backup", Some(workerid))
| ("garbage_collection", Some(workerid)) => {
return workerid == store || workerid.starts_with(&format!("{}:", store));
}
_ => {}
};
false
}
fn check_task_access(auth_id: &Authid, upid: &UPID) -> Result<(), Error> {
let task_auth_id = &upid.auth_id;
if auth_id == task_auth_id
|| (task_auth_id.is_token() && &Authid::from(task_auth_id.user().clone()) == auth_id) {
// task owner can always read
Ok(())
} else {
let user_info = CachedUserInfo::new()?;
let task_privs = user_info.lookup_privs(auth_id, &["system", "tasks"]);
if task_privs & PRIV_SYS_AUDIT != 0 {
// allowed to read all tasks in general
Ok(())
} else if check_job_privs(&auth_id, &user_info, upid).is_ok() {
// job which the user/token could have configured/manually executed
Ok(())
} else {
bail!("task access not allowed");
}
}
}
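For orientation, a minimal self-contained sketch of how a sync-job worker id of the form 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:ACTUAL_JOB_ID' splits into the captures that check_job_privs feeds into check_pull_privs. It uses the regex crate directly instead of the project's SYNC_JOB_WORKER_ID_REGEX, and the worker id below is made up:

use regex::Regex;

fn main() {
    // same shape as SYNC_JOB_WORKER_ID_REGEX: three ':'-separated safe-id captures
    // followed by the actual job id
    let re = Regex::new(
        r"^([A-Za-z0-9_][A-Za-z0-9._\-]*):([A-Za-z0-9_][A-Za-z0-9._\-]*):([A-Za-z0-9_][A-Za-z0-9._\-]*):",
    ).unwrap();

    let worker_id = "pbs-remote:remote-store:local-store:daily-sync"; // hypothetical
    if let Some(caps) = re.captures(worker_id) {
        // remote, remote datastore and local datastore drive the pull privilege check
        println!("remote={} remote_store={} local_store={}", &caps[1], &caps[2], &caps[3]);
    }
}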
#[api(
input: {
@ -57,9 +165,13 @@ use crate::config::cached_user_info::CachedUserInfo;
description: "Worker ID (arbitrary ASCII string)",
},
user: {
type: String,
type: Userid,
description: "The user who started the task.",
},
tokenid: {
type: Tokenname,
optional: true,
},
status: {
type: String,
description: "'running' or 'stopped'",
@ -84,12 +196,8 @@ async fn get_task_status(
let upid = extract_upid(&param)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
if userid != upid.userid {
let user_info = CachedUserInfo::new()?;
user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
}
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
check_task_access(&auth_id, &upid)?;
let mut result = json!({
"upid": param["upid"],
@ -99,9 +207,13 @@ async fn get_task_status(
"starttime": upid.starttime,
"type": upid.worker_type,
"id": upid.worker_id,
"user": upid.userid,
"user": upid.auth_id.user(),
});
if upid.auth_id.is_token() {
result["tokenid"] = Value::from(upid.auth_id.tokenname().unwrap().as_str());
}
if crate::server::worker_is_active(&upid).await? {
result["status"] = Value::from("running");
} else {
@ -161,12 +273,9 @@ async fn read_task_log(
let upid = extract_upid(&param)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
if userid != upid.userid {
let user_info = CachedUserInfo::new()?;
user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
}
check_task_access(&auth_id, &upid)?;
let test_status = param["test-status"].as_bool().unwrap_or(false);
@ -234,11 +343,11 @@ fn stop_task(
let upid = extract_upid(&param)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
if userid != upid.userid {
if auth_id != upid.auth_id {
let user_info = CachedUserInfo::new()?;
user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
user_info.check_privs(&auth_id, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
}
server::abort_worker_async(upid);
@ -260,7 +369,7 @@ fn stop_task(
},
limit: {
type: u64,
description: "Only list this amount of tasks.",
description: "Only list this amount of tasks. (0 means no limit)",
default: 50,
optional: true,
},
@ -285,6 +394,29 @@ fn stop_task(
type: String,
description: "Only list tasks from this user.",
},
since: {
type: i64,
description: "Only list tasks since this UNIX epoch.",
optional: true,
},
until: {
type: i64,
description: "Only list tasks until this UNIX epoch.",
optional: true,
},
typefilter: {
optional: true,
type: String,
description: "Only list tasks whose type contains this.",
},
statusfilter: {
optional: true,
type: Array,
description: "Only list tasks which have any one of the listed status.",
items: {
type: TaskStateType,
},
},
},
},
returns: {
@ -304,66 +436,85 @@ pub fn list_tasks(
errors: bool,
running: bool,
userfilter: Option<String>,
since: Option<i64>,
until: Option<i64>,
typefilter: Option<String>,
statusfilter: Option<Vec<TaskStateType>>,
param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);
let user_privs = user_info.lookup_privs(&auth_id, &["system", "tasks"]);
let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;
let store = param["store"].as_str();
let list = TaskListInfoIterator::new(running)?;
let limit = if limit > 0 { limit as usize } else { usize::MAX };
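// the until/since filtering below assumes the iterator yields tasks ordered newest-first:
// entries newer than 'until' are skipped, iteration stops once entries are no newer than 'since'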
let result: Vec<TaskListItem> = list
.take_while(|info| !info.is_err())
.skip_while(|info| {
match (info, until) {
(Ok(info), Some(until)) => info.upid.starttime > until,
(Ok(_), None) => false,
(Err(_), _) => false,
}
})
.take_while(|info| {
match (info, since) {
(Ok(info), Some(since)) => info.upid.starttime > since,
(Ok(_), None) => true,
(Err(_), _) => false,
}
})
.filter_map(|info| {
let info = match info {
Ok(info) => info,
Err(_) => return None,
};
if !list_all && info.upid.userid != userid { return None; }
if !list_all && check_task_access(&auth_id, &info.upid).is_err() {
return None;
}
if let Some(userid) = &userfilter {
if !info.upid.userid.as_str().contains(userid) { return None; }
if let Some(needle) = &userfilter {
if !info.upid.auth_id.to_string().contains(needle) { return None; }
}
if let Some(store) = store {
// Note: useful to select all tasks spawned by proxmox-backup-client
let worker_id = match &info.upid.worker_id {
Some(w) => w,
None => return None, // skip
};
if info.upid.worker_type == "backup" || info.upid.worker_type == "restore" ||
info.upid.worker_type == "prune"
{
let prefix = format!("{}:", store);
if !worker_id.starts_with(&prefix) { return None; }
} else if info.upid.worker_type == "garbage_collection" {
if worker_id != store { return None; }
} else {
return None; // skip
if !check_job_store(&info.upid, store) {
return None;
}
}
match info.state {
Some(_) if running => return None,
Some(crate::server::TaskState::OK { .. }) if errors => return None,
if let Some(typefilter) = &typefilter {
if !info.upid.worker_type.contains(typefilter) {
return None;
}
}
match (&info.state, &statusfilter) {
(Some(_), _) if running => return None,
(Some(crate::server::TaskState::OK { .. }), _) if errors => return None,
(Some(state), Some(filters)) => {
if !filters.contains(&state.tasktype()) {
return None;
}
},
(None, Some(_)) => return None,
_ => {},
}
Some(info.into())
}).skip(start as usize)
.take(limit as usize)
.take(limit)
.collect();
let mut count = result.len() + start as usize;
if result.len() > 0 && result.len() >= limit as usize { // we have a 'virtual' entry as long as we have any new
if result.len() > 0 && result.len() >= limit { // we have a 'virtual' entry as long as we have any new
count += 1;
}

View File

@ -9,7 +9,7 @@ use proxmox::api::{ApiMethod, Router, RpcEnvironment, Permission};
use crate::server::{WorkerTask, jobstate::Job};
use crate::backup::DataStore;
use crate::client::{HttpClient, HttpClientOptions, BackupRepository, pull::pull_store};
use crate::client::{HttpClient, BackupRepository, pull::pull_store};
use crate::api2::types::*;
use crate::config::{
remote,
@ -20,7 +20,7 @@ use crate::config::{
pub fn check_pull_privs(
userid: &Userid,
auth_id: &Authid,
store: &str,
remote: &str,
remote_store: &str,
@ -29,11 +29,11 @@ pub fn check_pull_privs(
let user_info = CachedUserInfo::new()?;
user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
user_info.check_privs(userid, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;
user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
user_info.check_privs(auth_id, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;
if delete {
user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
}
Ok(())
@ -50,17 +50,9 @@ pub async fn get_pull_parameters(
let (remote_config, _digest) = remote::config()?;
let remote: remote::Remote = remote_config.lookup("remote", remote)?;
let options = HttpClientOptions::new()
.password(Some(remote.password.clone()))
.fingerprint(remote.fingerprint.clone());
let src_repo = BackupRepository::new(Some(remote.userid.clone()), Some(remote.host.clone()), remote.port, remote_store.to_string());
let client = HttpClient::new(&src_repo.host(), src_repo.port(), &src_repo.user(), options)?;
let _auth_info = client.login() // make sure we can auth
.await
.map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;
let src_repo = BackupRepository::new(Some(remote.auth_id.clone()), Some(remote.host.clone()), remote.port, remote_store.to_string());
let client = crate::api2::config::remote::remote_client(remote).await?;
Ok((client, src_repo, tgt_store))
}
@ -68,27 +60,35 @@ pub async fn get_pull_parameters(
pub fn do_sync_job(
mut job: Job,
sync_job: SyncJobConfig,
userid: &Userid,
auth_id: &Authid,
schedule: Option<String>,
) -> Result<String, Error> {
let job_id = job.jobname().to_string();
let job_id = format!("{}:{}:{}:{}",
sync_job.remote,
sync_job.remote_store,
sync_job.store,
job.jobname());
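// note: this id has the same 'REMOTE:REMOTE_STORE:LOCAL_STORE:JOB_ID' shape as
// SYNC_JOB_WORKER_ID_REGEX, so task access checks can map the task back to its pull privileges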
let worker_type = job.jobtype().to_string();
let (email, notify) = crate::server::lookup_datastore_notify_settings(&sync_job.store);
let upid_str = WorkerTask::spawn(
&worker_type,
Some(job.jobname().to_string()),
userid.clone(),
Some(job_id.clone()),
auth_id.clone(),
false,
move |worker| async move {
job.start(&worker.upid().to_string())?;
let worker2 = worker.clone();
let sync_job2 = sync_job.clone();
let worker_future = async move {
let delete = sync_job.remove_vanished.unwrap_or(true);
let sync_owner = sync_job.owner.unwrap_or(Authid::root_auth_id().clone());
let (client, src_repo, tgt_store) = get_pull_parameters(&sync_job.store, &sync_job.remote, &sync_job.remote_store).await?;
worker.log(format!("Starting datastore sync job '{}'", job_id));
@ -98,7 +98,7 @@ pub fn do_sync_job(
worker.log(format!("Sync datastore '{}' from '{}/{}'",
sync_job.store, sync_job.remote, sync_job.remote_store));
crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, Userid::backup_userid().clone()).await?;
crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, sync_owner).await?;
worker.log(format!("sync job '{}' end", &job_id));
@ -107,12 +107,12 @@ pub fn do_sync_job(
let mut abort_future = worker2.abort_future().map(|_| Err(format_err!("sync aborted")));
let res = select!{
let result = select!{
worker = worker_future.fuse() => worker,
abort = abort_future => abort,
};
let status = worker2.create_state(&res);
let status = worker2.create_state(&result);
match job.finish(status) {
Ok(_) => {},
@ -121,7 +121,13 @@ pub fn do_sync_job(
}
}
res
if let Some(email) = email {
if let Err(err) = crate::server::send_sync_status(&email, notify, &sync_job2, &result) {
eprintln!("send sync notification failed: {}", err);
}
}
result
})?;
Ok(upid_str)
@ -164,19 +170,19 @@ async fn pull (
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let delete = remove_vanished.unwrap_or(true);
check_pull_privs(&userid, &store, &remote, &remote_store, delete)?;
check_pull_privs(&auth_id, &store, &remote, &remote_store, delete)?;
let (client, src_repo, tgt_store) = get_pull_parameters(&store, &remote, &remote_store).await?;
// fixme: set to_stdout to false?
let upid_str = WorkerTask::spawn("sync", Some(store.clone()), userid.clone(), true, move |worker| async move {
let upid_str = WorkerTask::spawn("sync", Some(store.clone()), auth_id.clone(), true, move |worker| async move {
worker.log(format!("sync datastore '{}' start", store));
let pull_future = pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, userid);
let pull_future = pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, auth_id);
let future = select!{
success = pull_future.fuse() => success,
abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort,

View File

@ -55,11 +55,11 @@ fn upgrade_to_backup_reader_protocol(
async move {
let debug = param["debug"].as_bool().unwrap_or(false);
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = tools::required_string_param(&param, "store")?.to_owned();
let user_info = CachedUserInfo::new()?;
let privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let priv_read = privs & PRIV_DATASTORE_READ != 0;
let priv_backup = privs & PRIV_DATASTORE_BACKUP != 0;
@ -94,7 +94,10 @@ fn upgrade_to_backup_reader_protocol(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
if !priv_read {
let owner = datastore.get_owner(backup_dir.group())?;
if owner != userid {
let correct_owner = owner == auth_id
|| (owner.is_token()
&& Authid::from(owner.user().clone()) == auth_id);
if !correct_owner {
bail!("backup owner check failed!");
}
}
@ -110,10 +113,10 @@ fn upgrade_to_backup_reader_protocol(
let worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_dir.backup_time());
WorkerTask::spawn("reader", Some(worker_id), userid.clone(), true, move |worker| {
WorkerTask::spawn("reader", Some(worker_id), auth_id.clone(), true, move |worker| {
let mut env = ReaderEnvironment::new(
env_type,
userid,
auth_id,
worker.clone(),
datastore,
backup_dir,

View File

@ -5,7 +5,7 @@ use serde_json::{json, Value};
use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
use crate::api2::types::Userid;
use crate::api2::types::Authid;
use crate::backup::*;
use crate::server::formatter::*;
use crate::server::WorkerTask;
@ -17,7 +17,7 @@ use crate::server::WorkerTask;
pub struct ReaderEnvironment {
env_type: RpcEnvironmentType,
result_attributes: Value,
user: Userid,
auth_id: Authid,
pub debug: bool,
pub formatter: &'static OutputFormatter,
pub worker: Arc<WorkerTask>,
@ -29,7 +29,7 @@ pub struct ReaderEnvironment {
impl ReaderEnvironment {
pub fn new(
env_type: RpcEnvironmentType,
user: Userid,
auth_id: Authid,
worker: Arc<WorkerTask>,
datastore: Arc<DataStore>,
backup_dir: BackupDir,
@ -39,7 +39,7 @@ impl ReaderEnvironment {
Self {
result_attributes: json!({}),
env_type,
user,
auth_id,
worker,
datastore,
debug: false,
@ -82,12 +82,12 @@ impl RpcEnvironment for ReaderEnvironment {
self.env_type
}
fn set_user(&mut self, _user: Option<String>) {
panic!("unable to change user");
fn set_auth_id(&mut self, _auth_id: Option<String>) {
panic!("unable to change auth_id");
}
fn get_user(&self) -> Option<String> {
Some(self.user.to_string())
fn get_auth_id(&self) -> Option<String> {
Some(self.auth_id.to_string())
}
}

View File

@ -16,18 +16,14 @@ use crate::api2::types::{
DATASTORE_SCHEMA,
RRDMode,
RRDTimeFrameResolution,
TaskListItem,
TaskStateType,
Userid,
Authid,
};
use crate::server;
use crate::backup::{DataStore};
use crate::config::datastore;
use crate::tools::statistics::{linear_regression};
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::acl::{
PRIV_SYS_AUDIT,
PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_BACKUP,
};
@ -87,13 +83,13 @@ fn datastore_status(
let (config, _digest) = datastore::config()?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let mut list = Vec::new();
for (store, (_, _)) in &config.sections {
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
if !allowed {
continue;
@ -107,6 +103,7 @@ fn datastore_status(
"total": status.total,
"used": status.used,
"avail": status.avail,
"gc-status": datastore.last_gc_status(),
});
let rrd_dir = format!("datastore/{}", store);
@ -156,6 +153,8 @@ fn datastore_status(
}
}
entry["history-start"] = start.into();
entry["history-delta"] = reso.into();
entry["history"] = history.into();
// we skip the calculation for datastores with not enough data
@ -179,103 +178,8 @@ fn datastore_status(
Ok(list.into())
}
#[api(
input: {
properties: {
since: {
type: i64,
description: "Only list tasks since this UNIX epoch.",
optional: true,
},
typefilter: {
optional: true,
type: String,
description: "Only list tasks, whose type contains this string.",
},
statusfilter: {
optional: true,
type: Array,
description: "Only list tasks which have any one of the listed status.",
items: {
type: TaskStateType,
},
},
},
},
returns: {
description: "A list of tasks.",
type: Array,
items: { type: TaskListItem },
},
access: {
description: "Users can only see there own tasks, unless the have Sys.Audit on /system/tasks.",
permission: &Permission::Anybody,
},
)]
/// List tasks.
pub fn list_tasks(
since: Option<i64>,
typefilter: Option<String>,
statusfilter: Option<Vec<TaskStateType>>,
_param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);
let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;
let since = since.unwrap_or_else(|| 0);
let list: Vec<TaskListItem> = server::TaskListInfoIterator::new(false)?
.take_while(|info| {
match info {
Ok(info) => info.upid.starttime > since,
Err(_) => false
}
})
.filter_map(|info| {
match info {
Ok(info) => {
if list_all || info.upid.userid == userid {
if let Some(filter) = &typefilter {
if !info.upid.worker_type.contains(filter) {
return None;
}
}
if let Some(filters) = &statusfilter {
if let Some(state) = &info.state {
let statetype = match state {
server::TaskState::OK { .. } => TaskStateType::OK,
server::TaskState::Unknown { .. } => TaskStateType::Unknown,
server::TaskState::Error { .. } => TaskStateType::Error,
server::TaskState::Warning { .. } => TaskStateType::Warning,
};
if !filters.contains(&statetype) {
return None;
}
}
}
Some(Ok(TaskListItem::from(info)))
} else {
None
}
}
Err(err) => Some(Err(err))
}
})
.collect::<Result<Vec<TaskListItem>, Error>>()?;
Ok(list.into())
}
const SUBDIRS: SubdirMap = &[
("datastore-usage", &Router::new().get(&API_METHOD_DATASTORE_STATUS)),
("tasks", &Router::new().get(&API_METHOD_LIST_TASKS)),
];
pub const ROUTER: Router = Router::new()

View File

@ -5,7 +5,7 @@ use proxmox::api::{api, schema::*};
use proxmox::const_regex;
use proxmox::{IPRE, IPRE_BRACKET, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};
use crate::backup::CryptMode;
use crate::backup::{CryptMode, Fingerprint, BACKUP_ID_REGEX};
use crate::server::UPID;
#[macro_use]
@ -14,9 +14,11 @@ mod macros;
#[macro_use]
mod userid;
pub use userid::{Realm, RealmRef};
pub use userid::{Tokenname, TokennameRef};
pub use userid::{Username, UsernameRef};
pub use userid::Userid;
pub use userid::PROXMOX_GROUP_ID_SCHEMA;
pub use userid::Authid;
pub use userid::{PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA, PROXMOX_GROUP_ID_SCHEMA};
// File names: may not contain slashes, may not start with "."
pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
@ -57,6 +59,11 @@ const_regex!{
/// any identifier command line tools work with.
pub PROXMOX_SAFE_ID_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r"$");
/// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
/// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:ACTUAL_JOB_ID'
pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
pub SINGLE_LINE_COMMENT_REGEX = r"^[[:^cntrl:]]*$";
pub HOSTNAME_REGEX = r"^(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)$";
@ -65,12 +72,14 @@ const_regex!{
pub DNS_NAME_OR_IP_REGEX = concat!(r"^(?:", DNS_NAME!(), "|", IPRE!(), r")$");
pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE_BRACKET!() ,"):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");
pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), "|", APITOKEN_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE_BRACKET!() ,"):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");
pub CERT_FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";
pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$");
pub BLOCKDEVICE_NAME_REGEX = r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$";
pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
@ -97,6 +106,9 @@ pub const CERT_FINGERPRINT_SHA256_FORMAT: ApiStringFormat =
pub const PROXMOX_SAFE_ID_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
pub const BACKUP_ID_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&BACKUP_ID_REGEX);
pub const SINGLE_LINE_COMMENT_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&SINGLE_LINE_COMMENT_REGEX);
@ -127,6 +139,9 @@ pub const CIDR_V6_FORMAT: ApiStringFormat =
pub const CIDR_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&CIDR_REGEX);
pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX);
pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);
@ -269,7 +284,7 @@ pub const BACKUP_TYPE_SCHEMA: Schema =
pub const BACKUP_ID_SCHEMA: Schema =
StringSchema::new("Backup ID.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.format(&BACKUP_ID_FORMAT)
.schema();
pub const BACKUP_TIME_SCHEMA: Schema =
@ -346,6 +361,12 @@ pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP addr
.format(&DNS_NAME_OR_IP_FORMAT)
.schema();
pub const SUBSCRIPTION_KEY_SCHEMA: Schema = StringSchema::new("Proxmox Backup Server subscription key.")
.format(&SUBSCRIPTION_KEY_FORMAT)
.min_length(15)
.max_length(16)
.schema();
pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name (/sys/block/<name>).")
.format(&BLOCKDEVICE_NAME_FORMAT)
.min_length(3)
@ -354,6 +375,25 @@ pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name
// Complex type definitions
#[api(
properties: {
store: {
schema: DATASTORE_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
/// Basic information about a datastore.
pub struct DataStoreListItem {
pub store: String,
pub comment: Option<String>,
}
#[api(
properties: {
"backup-type": {
@ -374,7 +414,7 @@ pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name
},
},
owner: {
type: Userid,
type: Authid,
optional: true,
},
},
@ -392,7 +432,7 @@ pub struct GroupListItem {
pub files: Vec<String>,
/// The owner of group
#[serde(skip_serializing_if="Option::is_none")]
pub owner: Option<Userid>,
pub owner: Option<Authid>,
}
#[api()]
@ -444,13 +484,17 @@ pub struct SnapshotVerifyState {
type: SnapshotVerifyState,
optional: true,
},
fingerprint: {
type: String,
optional: true,
},
files: {
items: {
schema: BACKUP_ARCHIVE_NAME_SCHEMA
},
},
owner: {
type: Userid,
type: Authid,
optional: true,
},
},
@ -468,6 +512,9 @@ pub struct SnapshotListItem {
/// The result of the last run verify task
#[serde(skip_serializing_if="Option::is_none")]
pub verification: Option<SnapshotVerifyState>,
/// Fingerprint of encryption key
#[serde(skip_serializing_if="Option::is_none")]
pub fingerprint: Option<Fingerprint>,
/// List of contained archive files.
pub files: Vec<BackupContent>,
/// Overall snapshot size (sum of all archive sizes).
@ -475,7 +522,7 @@ pub struct SnapshotListItem {
pub size: Option<u64>,
/// The owner of the snapshots group
#[serde(skip_serializing_if="Option::is_none")]
pub owner: Option<Userid>,
pub owner: Option<Authid>,
}
#[api(
@ -622,10 +669,83 @@ pub struct StorageStatus {
pub avail: u64,
}
#[api()]
#[derive(Serialize, Deserialize, Default)]
/// Backup Type group/snapshot counts.
pub struct TypeCounts {
/// The number of groups of the type.
pub groups: u64,
/// The number of snapshots of the type.
pub snapshots: u64,
}
#[api(
properties: {
ct: {
type: TypeCounts,
optional: true,
},
host: {
type: TypeCounts,
optional: true,
},
vm: {
type: TypeCounts,
optional: true,
},
other: {
type: TypeCounts,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize, Default)]
/// Counts of groups/snapshots per BackupType.
pub struct Counts {
/// The counts for CT backups
pub ct: Option<TypeCounts>,
/// The counts for Host backups
pub host: Option<TypeCounts>,
/// The counts for VM backups
pub vm: Option<TypeCounts>,
/// The counts for other backup types
pub other: Option<TypeCounts>,
}
#[api(
properties: {
"gc-status": {
type: GarbageCollectionStatus,
optional: true,
},
counts: {
type: Counts,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
/// Overall Datastore status and useful information.
pub struct DataStoreStatus {
/// Total space (bytes).
pub total: u64,
/// Used space (bytes).
pub used: u64,
/// Available space (bytes).
pub avail: u64,
/// Status of last GC
#[serde(skip_serializing_if="Option::is_none")]
pub gc_status: Option<GarbageCollectionStatus>,
/// Group/Snapshot counts
#[serde(skip_serializing_if="Option::is_none")]
pub counts: Option<Counts>,
}
#[api(
properties: {
upid: { schema: UPID_SCHEMA },
user: { type: Userid },
user: { type: Authid },
},
)]
#[derive(Serialize, Deserialize)]
@ -644,8 +764,8 @@ pub struct TaskListItem {
pub worker_type: String,
/// Worker ID (arbitrary ASCII string)
pub worker_id: Option<String>,
/// The user who started the task
pub user: Userid,
/// The authenticated entity who started the task
pub user: Authid,
/// The task end time (Epoch)
#[serde(skip_serializing_if="Option::is_none")]
pub endtime: Option<i64>,
@ -668,7 +788,7 @@ impl From<crate::server::TaskListInfo> for TaskListItem {
starttime: info.upid.starttime,
worker_type: info.upid.worker_type,
worker_id: info.upid.worker_id,
user: info.upid.userid,
user: info.upid.auth_id,
endtime,
status,
}
@ -1048,7 +1168,7 @@ pub enum RRDTimeFrameResolution {
}
#[api()]
#[derive(Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "PascalCase")]
/// Describes a package for which an update is available.
pub struct APTUpdateInfo {
@ -1072,4 +1192,52 @@ pub struct APTUpdateInfo {
pub section: String,
/// URL under which the package's changelog can be retrieved
pub change_log_url: String,
/// Custom extra field for additional package information
#[serde(skip_serializing_if="Option::is_none")]
pub extra_info: Option<String>,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// When do we send notifications
pub enum Notify {
/// Never send notification
Never,
/// Send notifications for failed and successful jobs
Always,
/// Send notifications for failed jobs only
Error,
}
#[api(
properties: {
gc: {
type: Notify,
optional: true,
},
verify: {
type: Notify,
optional: true,
},
sync: {
type: Notify,
optional: true,
},
},
)]
#[derive(Debug, Serialize, Deserialize)]
/// Datastore notify settings
pub struct DatastoreNotify {
/// Garbage collection settings
pub gc: Option<Notify>,
/// Verify job setting
pub verify: Option<Notify>,
/// Sync job setting
pub sync: Option<Notify>,
}
pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
"Datastore notification setting")
.format(&ApiStringFormat::PropertyString(&DatastoreNotify::API_SCHEMA))
.schema();
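A property string accepted by this schema would look like 'gc=always,verify=error,sync=never' (combination picked for illustration; key names are the DatastoreNotify fields, values the lowercase Notify variants).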

View File

@ -1,6 +1,7 @@
//! Types for user handling.
//!
//! We have [`Username`]s and [`Realm`]s. To uniquely identify a user, they must be combined into a [`Userid`].
//! We have [`Username`]s, [`Realm`]s and [`Tokenname`]s. To uniquely identify a user/API token, they
//! must be combined into a [`Userid`] or [`Authid`].
//!
//! Since they're all string types, they're organized as follows:
//!
@ -9,13 +10,16 @@
//! with `String`, meaning you can only make references to it.
//! * [`Realm`]: an owned realm (`String` equivalent).
//! * [`RealmRef`]: a borrowed realm (`str` equivalent).
//! * [`Userid`]: an owned user id (`"user@realm"`). Note that this does not have a separate
//! borrowed type.
//! * [`Tokenname`]: an owned API token name (`String` equivalent)
//! * [`TokennameRef`]: a borrowed `Tokenname` (`str` equivalent).
//! * [`Userid`]: an owned user id (`"user@realm"`).
//! * [`Authid`]: an owned Authentication ID (a `Userid` with an optional `Tokenname`).
//! Note that `Userid` and `Authid` do not have a separate borrowed type.
//!
//! Note that `Username`s are not unique, therefore they do not implement `Eq` and cannot be
//! Note that `Username`s and `Tokenname`s are not unique, therefore they do not implement `Eq` and cannot be
//! compared directly. If a direct comparison is really required, they can be compared as strings
//! via the `as_str()` method. [`Realm`]s and [`Userid`]s on the other hand can be compared with
//! each other, as in those two cases the comparison has meaning.
//! via the `as_str()` method. [`Realm`]s, [`Userid`]s and [`Authid`]s on the other
//! hand can be compared with each other, as in those cases the comparison has meaning.
use std::borrow::Borrow;
use std::convert::TryFrom;
@ -36,19 +40,42 @@ use proxmox::const_regex;
// also see "man useradd"
macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }
macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }
macro_rules! TOKEN_NAME_REGEX_STR { () => (PROXMOX_SAFE_ID_REGEX_STR!()) }
macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) }
macro_rules! APITOKEN_ID_REGEX_STR { () => (concat!(USER_ID_REGEX_STR!() , r"!", TOKEN_NAME_REGEX_STR!())) }
const_regex! {
pub PROXMOX_USER_NAME_REGEX = concat!(r"^", USER_NAME_REGEX_STR!(), r"$");
pub PROXMOX_TOKEN_NAME_REGEX = concat!(r"^", TOKEN_NAME_REGEX_STR!(), r"$");
pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_ID_REGEX_STR!(), r"$");
pub PROXMOX_APITOKEN_ID_REGEX = concat!(r"^", APITOKEN_ID_REGEX_STR!(), r"$");
pub PROXMOX_AUTH_ID_REGEX = concat!(r"^", r"(?:", USER_ID_REGEX_STR!(), r"|", APITOKEN_ID_REGEX_STR!(), r")$");
pub PROXMOX_GROUP_ID_REGEX = concat!(r"^", GROUP_NAME_REGEX_STR!(), r"$");
}
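Both a plain user id like 'admin@pbs' and a token id like 'admin@pbs!automation' (names made up) match PROXMOX_AUTH_ID_REGEX, while only the latter matches PROXMOX_APITOKEN_ID_REGEX.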
pub const PROXMOX_USER_NAME_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_USER_NAME_REGEX);
pub const PROXMOX_TOKEN_NAME_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_TOKEN_NAME_REGEX);
pub const PROXMOX_USER_ID_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_USER_ID_REGEX);
pub const PROXMOX_TOKEN_ID_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_APITOKEN_ID_REGEX);
pub const PROXMOX_AUTH_ID_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_AUTH_ID_REGEX);
pub const PROXMOX_TOKEN_ID_SCHEMA: Schema = StringSchema::new("API Token ID")
.format(&PROXMOX_TOKEN_ID_FORMAT)
.min_length(3)
.max_length(64)
.schema();
pub const PROXMOX_TOKEN_NAME_SCHEMA: Schema = StringSchema::new("API Token name")
.format(&PROXMOX_TOKEN_NAME_FORMAT)
.min_length(3)
.max_length(64)
.schema();
pub const PROXMOX_GROUP_ID_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_GROUP_ID_REGEX);
@ -91,26 +118,6 @@ pub struct Username(String);
#[derive(Debug, Hash)]
pub struct UsernameRef(str);
#[doc(hidden)]
/// ```compile_fail
/// let a: Username = unsafe { std::mem::zeroed() };
/// let b: Username = unsafe { std::mem::zeroed() };
/// let _ = <Username as PartialEq>::eq(&a, &b);
/// ```
///
/// ```compile_fail
/// let a: &UsernameRef = unsafe { std::mem::zeroed() };
/// let b: &UsernameRef = unsafe { std::mem::zeroed() };
/// let _ = <&UsernameRef as PartialEq>::eq(a, b);
/// ```
///
/// ```compile_fail
/// let a: &UsernameRef = unsafe { std::mem::zeroed() };
/// let b: &UsernameRef = unsafe { std::mem::zeroed() };
/// let _ = <&UsernameRef as PartialEq>::eq(&a, &b);
/// ```
struct _AssertNoEqImpl;
impl UsernameRef {
fn new(s: &str) -> &Self {
unsafe { &*(s as *const str as *const UsernameRef) }
@ -286,7 +293,132 @@ impl PartialEq<Realm> for &RealmRef {
}
}
/// A complete user id consting of a user name and a realm.
#[api(
type: String,
format: &PROXMOX_TOKEN_NAME_FORMAT,
)]
/// The token ID part of an API token authentication id.
///
/// This alone does NOT uniquely identify the API token and therefore does not implement `Eq`. In
/// order to compare token IDs directly, they need to be explicitly compared as strings by calling
/// `.as_str()`.
///
/// ```compile_fail
/// fn test(a: Tokenname, b: Tokenname) -> bool {
/// a == b // illegal and does not compile
/// }
/// ```
#[derive(Clone, Debug, Hash, Deserialize, Serialize)]
pub struct Tokenname(String);
/// A reference to a token name part of an authentication id. This alone does NOT uniquely identify
/// the user.
///
/// This is like a `str` to the `String` of a [`Tokenname`].
#[derive(Debug, Hash)]
pub struct TokennameRef(str);
#[doc(hidden)]
/// ```compile_fail
/// let a: Username = unsafe { std::mem::zeroed() };
/// let b: Username = unsafe { std::mem::zeroed() };
/// let _ = <Username as PartialEq>::eq(&a, &b);
/// ```
///
/// ```compile_fail
/// let a: &UsernameRef = unsafe { std::mem::zeroed() };
/// let b: &UsernameRef = unsafe { std::mem::zeroed() };
/// let _ = <&UsernameRef as PartialEq>::eq(a, b);
/// ```
///
/// ```compile_fail
/// let a: &UsernameRef = unsafe { std::mem::zeroed() };
/// let b: &UsernameRef = unsafe { std::mem::zeroed() };
/// let _ = <&UsernameRef as PartialEq>::eq(&a, &b);
/// ```
///
/// ```compile_fail
/// let a: Tokenname = unsafe { std::mem::zeroed() };
/// let b: Tokenname = unsafe { std::mem::zeroed() };
/// let _ = <Tokenname as PartialEq>::eq(&a, &b);
/// ```
///
/// ```compile_fail
/// let a: &TokennameRef = unsafe { std::mem::zeroed() };
/// let b: &TokennameRef = unsafe { std::mem::zeroed() };
/// let _ = <&TokennameRef as PartialEq>::eq(a, b);
/// ```
///
/// ```compile_fail
/// let a: &TokennameRef = unsafe { std::mem::zeroed() };
/// let b: &TokennameRef = unsafe { std::mem::zeroed() };
/// let _ = <&TokennameRef as PartialEq>::eq(&a, &b);
/// ```
struct _AssertNoEqImpl;
impl TokennameRef {
fn new(s: &str) -> &Self {
unsafe { &*(s as *const str as *const TokennameRef) }
}
pub fn as_str(&self) -> &str {
&self.0
}
}
impl std::ops::Deref for Tokenname {
type Target = TokennameRef;
fn deref(&self) -> &TokennameRef {
self.borrow()
}
}
impl Borrow<TokennameRef> for Tokenname {
fn borrow(&self) -> &TokennameRef {
TokennameRef::new(self.0.as_str())
}
}
impl AsRef<TokennameRef> for Tokenname {
fn as_ref(&self) -> &TokennameRef {
self.borrow()
}
}
impl ToOwned for TokennameRef {
type Owned = Tokenname;
fn to_owned(&self) -> Self::Owned {
Tokenname(self.0.to_owned())
}
}
impl TryFrom<String> for Tokenname {
type Error = Error;
fn try_from(s: String) -> Result<Self, Error> {
if !PROXMOX_TOKEN_NAME_REGEX.is_match(&s) {
bail!("invalid token name");
}
Ok(Self(s))
}
}
impl<'a> TryFrom<&'a str> for &'a TokennameRef {
type Error = Error;
fn try_from(s: &'a str) -> Result<&'a TokennameRef, Error> {
if !PROXMOX_TOKEN_NAME_REGEX.is_match(s) {
bail!("invalid token name in user id");
}
Ok(TokennameRef::new(s))
}
}
/// A complete user id consisting of a user name and a realm
#[derive(Clone, Debug, Hash)]
pub struct Userid {
data: String,
@ -318,11 +450,6 @@ impl Userid {
&self.data
}
/// Get the "backup@pam" user id.
pub fn backup_userid() -> &'static Self {
&*BACKUP_USERID
}
/// Get the "root@pam" user id.
pub fn root_userid() -> &'static Self {
&*ROOT_USERID
@ -330,7 +457,6 @@ impl Userid {
}
lazy_static! {
pub static ref BACKUP_USERID: Userid = Userid::new("backup@pam".to_string(), 6);
pub static ref ROOT_USERID: Userid = Userid::new("root@pam".to_string(), 4);
}
@ -342,6 +468,12 @@ impl PartialEq for Userid {
}
}
impl From<Authid> for Userid {
fn from(authid: Authid) -> Self {
authid.user
}
}
impl From<(Username, Realm)> for Userid {
fn from(parts: (Username, Realm)) -> Self {
Self::from((parts.0.as_ref(), parts.1.as_ref()))
@ -366,10 +498,18 @@ impl std::str::FromStr for Userid {
type Err = Error;
fn from_str(id: &str) -> Result<Self, Error> {
let (name, realm) = match id.as_bytes().iter().rposition(|&b| b == b'@') {
Some(pos) => (&id[..pos], &id[(pos + 1)..]),
None => bail!("not a valid user id"),
};
let name_len = id
.as_bytes()
.iter()
.rposition(|&b| b == b'@')
.ok_or_else(|| format_err!("not a valid user id"))?;
let name = &id[..name_len];
let realm = &id[(name_len + 1)..];
if !PROXMOX_USER_NAME_REGEX.is_match(name) {
bail!("invalid user name in user id");
}
PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(realm)
.map_err(|_| format_err!("invalid realm in user id"))?;
@ -388,6 +528,10 @@ impl TryFrom<String> for Userid {
.rposition(|&b| b == b'@')
.ok_or_else(|| format_err!("not a valid user id"))?;
if !PROXMOX_USER_NAME_REGEX.is_match(&data[..name_len]) {
bail!("invalid user name in user id");
}
PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(&data[(name_len + 1)..])
.map_err(|_| format_err!("invalid realm in user id"))?;
@ -413,5 +557,176 @@ impl PartialEq<String> for Userid {
}
}
/// A complete authentication id consisting of a user id and an optional token name.
#[derive(Clone, Debug, Hash)]
pub struct Authid {
user: Userid,
tokenname: Option<Tokenname>
}
impl Authid {
pub const API_SCHEMA: Schema = StringSchema::new("Authentication ID")
.format(&PROXMOX_AUTH_ID_FORMAT)
.min_length(3)
.max_length(64)
.schema();
const fn new(user: Userid, tokenname: Option<Tokenname>) -> Self {
Self { user, tokenname }
}
pub fn user(&self) -> &Userid {
&self.user
}
pub fn is_token(&self) -> bool {
self.tokenname.is_some()
}
pub fn tokenname(&self) -> Option<&TokennameRef> {
match &self.tokenname {
Some(name) => Some(&name),
None => None,
}
}
/// Get the "root@pam" auth id.
pub fn root_auth_id() -> &'static Self {
&*ROOT_AUTHID
}
}
lazy_static! {
pub static ref ROOT_AUTHID: Authid = Authid::from(Userid::new("root@pam".to_string(), 4));
}
impl Eq for Authid {}
impl PartialEq for Authid {
fn eq(&self, rhs: &Self) -> bool {
self.user == rhs.user && match (&self.tokenname, &rhs.tokenname) {
(Some(ours), Some(theirs)) => ours.as_str() == theirs.as_str(),
(None, None) => true,
_ => false,
}
}
}
impl From<Userid> for Authid {
fn from(parts: Userid) -> Self {
Self::new(parts, None)
}
}
impl From<(Userid, Option<Tokenname>)> for Authid {
fn from(parts: (Userid, Option<Tokenname>)) -> Self {
Self::new(parts.0, parts.1)
}
}
impl fmt::Display for Authid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self.tokenname {
Some(token) => write!(f, "{}!{}", self.user, token.as_str()),
None => self.user.fmt(f),
}
}
}
impl std::str::FromStr for Authid {
type Err = Error;
fn from_str(id: &str) -> Result<Self, Error> {
let name_len = id
.as_bytes()
.iter()
.rposition(|&b| b == b'@')
.ok_or_else(|| format_err!("not a valid user id"))?;
let realm_end = id
.as_bytes()
.iter()
.rposition(|&b| b == b'!')
.map(|pos| if pos < name_len { id.len() } else { pos })
.unwrap_or(id.len());
if realm_end == id.len() - 1 {
bail!("empty token name in userid");
}
let user = Userid::from_str(&id[..realm_end])?;
if id.len() > realm_end {
let token = Tokenname::try_from(id[(realm_end + 1)..].to_string())?;
Ok(Self::new(user, Some(token)))
} else {
Ok(Self::new(user, None))
}
}
}
impl TryFrom<String> for Authid {
type Error = Error;
fn try_from(mut data: String) -> Result<Self, Error> {
let name_len = data
.as_bytes()
.iter()
.rposition(|&b| b == b'@')
.ok_or_else(|| format_err!("not a valid user id"))?;
let realm_end = data
.as_bytes()
.iter()
.rposition(|&b| b == b'!')
.map(|pos| if pos < name_len { data.len() } else { pos })
.unwrap_or(data.len());
if realm_end == data.len() - 1 {
bail!("empty token name in userid");
}
let tokenname = if data.len() > realm_end {
Some(Tokenname::try_from(data[(realm_end + 1)..].to_string())?)
} else {
None
};
data.truncate(realm_end);
let user:Userid = data.parse()?;
Ok(Self { user, tokenname })
}
}
#[test]
fn test_token_id() {
let userid: Userid = "test@pam".parse().expect("parsing Userid failed");
assert_eq!(userid.name().as_str(), "test");
assert_eq!(userid.realm(), "pam");
assert_eq!(userid, "test@pam");
let auth_id: Authid = "test@pam".parse().expect("parsing user Authid failed");
assert_eq!(auth_id.to_string(), "test@pam".to_string());
assert!(!auth_id.is_token());
assert_eq!(auth_id.user(), &userid);
let user_auth_id = Authid::from(userid.clone());
assert_eq!(user_auth_id, auth_id);
assert!(!user_auth_id.is_token());
let auth_id: Authid = "test@pam!bar".parse().expect("parsing token Authid failed");
let token_userid = auth_id.user();
assert_eq!(&userid, token_userid);
assert!(auth_id.is_token());
assert_eq!(auth_id.tokenname().expect("Token has tokenname").as_str(), TokennameRef::new("bar").as_str());
assert_eq!(auth_id.to_string(), "test@pam!bar".to_string());
}
proxmox::forward_deserialize_to_from_str!(Userid);
proxmox::forward_serialize_to_display!(Userid);
proxmox::forward_deserialize_to_from_str!(Authid);
proxmox::forward_serialize_to_display!(Authid);

View File

@ -1,37 +1,31 @@
use crate::tools;
use anyhow::{bail, format_err, Error};
use regex::Regex;
use std::os::unix::io::RawFd;
use std::path::{PathBuf, Path};
use lazy_static::lazy_static;
use proxmox::const_regex;
use super::manifest::MANIFEST_BLOB_NAME;
macro_rules! BACKUP_ID_RE { () => (r"[A-Za-z0-9][A-Za-z0-9_-]+") }
macro_rules! BACKUP_ID_RE { () => (r"[A-Za-z0-9_][A-Za-z0-9._\-]*") }
macro_rules! BACKUP_TYPE_RE { () => (r"(?:host|vm|ct)") }
macro_rules! BACKUP_TIME_RE { () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z") }
lazy_static!{
static ref BACKUP_FILE_REGEX: Regex = Regex::new(
r"^.*\.([fd]idx|blob)$").unwrap();
const_regex!{
BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";
static ref BACKUP_TYPE_REGEX: Regex = Regex::new(
concat!(r"^(", BACKUP_TYPE_RE!(), r")$")).unwrap();
BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");
static ref BACKUP_ID_REGEX: Regex = Regex::new(
concat!(r"^", BACKUP_ID_RE!(), r"$")).unwrap();
pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$");
static ref BACKUP_DATE_REGEX: Regex = Regex::new(
concat!(r"^", BACKUP_TIME_RE!() ,r"$")).unwrap();
BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$");
static ref GROUP_PATH_REGEX: Regex = Regex::new(
concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$")).unwrap();
static ref SNAPSHOT_PATH_REGEX: Regex = Regex::new(
concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")$")).unwrap();
GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$");
SNAPSHOT_PATH_REGEX = concat!(
r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")$");
}
/// BackupGroup is a directory containing a list of BackupDir
@ -333,26 +327,20 @@ impl BackupInfo {
Ok(files)
}
pub fn list_backups(base_path: &Path) -> Result<Vec<BackupInfo>, Error> {
pub fn list_backup_groups(base_path: &Path) -> Result<Vec<BackupGroup>, Error> {
let mut list = Vec::new();
tools::scandir(libc::AT_FDCWD, base_path, &BACKUP_TYPE_REGEX, |l0_fd, backup_type, file_type| {
if file_type != nix::dir::Type::Directory { return Ok(()); }
tools::scandir(l0_fd, backup_type, &BACKUP_ID_REGEX, |l1_fd, backup_id, file_type| {
tools::scandir(l0_fd, backup_type, &BACKUP_ID_REGEX, |_, backup_id, file_type| {
if file_type != nix::dir::Type::Directory { return Ok(()); }
tools::scandir(l1_fd, backup_id, &BACKUP_DATE_REGEX, |l2_fd, backup_time_string, file_type| {
if file_type != nix::dir::Type::Directory { return Ok(()); }
let backup_dir = BackupDir::with_rfc3339(backup_type, backup_id, backup_time_string)?;
list.push(BackupGroup::new(backup_type, backup_id));
let files = list_backup_files(l2_fd, backup_time_string)?;
list.push(BackupInfo { backup_dir, files });
Ok(())
})
Ok(())
})
})?;
Ok(list)
}

View File

@ -78,7 +78,7 @@ pub struct DirEntry {
#[derive(Clone, Debug, PartialEq)]
pub enum DirEntryAttribute {
Directory { start: u64 },
File { size: u64, mtime: u64 },
File { size: u64, mtime: i64 },
Symlink,
Hardlink,
BlockDevice,
@ -89,7 +89,7 @@ pub enum DirEntryAttribute {
impl DirEntry {
fn new(etype: CatalogEntryType, name: Vec<u8>, start: u64, size: u64, mtime:u64) -> Self {
fn new(etype: CatalogEntryType, name: Vec<u8>, start: u64, size: u64, mtime: i64) -> Self {
match etype {
CatalogEntryType::Directory => {
DirEntry { name, attr: DirEntryAttribute::Directory { start } }
@ -184,7 +184,7 @@ impl DirInfo {
catalog_encode_u64(writer, name.len() as u64)?;
writer.write_all(name)?;
catalog_encode_u64(writer, *size)?;
catalog_encode_u64(writer, *mtime)?;
catalog_encode_i64(writer, *mtime)?;
}
DirEntry { name, attr: DirEntryAttribute::Symlink } => {
writer.write_all(&[CatalogEntryType::Symlink as u8])?;
@ -234,7 +234,7 @@ impl DirInfo {
Ok((self.name, data))
}
fn parse<C: FnMut(CatalogEntryType, &[u8], u64, u64, u64) -> Result<bool, Error>>(
fn parse<C: FnMut(CatalogEntryType, &[u8], u64, u64, i64) -> Result<bool, Error>>(
data: &[u8],
mut callback: C,
) -> Result<(), Error> {
@ -265,7 +265,7 @@ impl DirInfo {
}
CatalogEntryType::File => {
let size = catalog_decode_u64(&mut cursor)?;
let mtime = catalog_decode_u64(&mut cursor)?;
let mtime = catalog_decode_i64(&mut cursor)?;
callback(etype, name, 0, size, mtime)?
}
_ => {
@ -362,7 +362,7 @@ impl <W: Write> BackupCatalogWriter for CatalogWriter<W> {
Ok(())
}
fn add_file(&mut self, name: &CStr, size: u64, mtime: u64) -> Result<(), Error> {
fn add_file(&mut self, name: &CStr, size: u64, mtime: i64) -> Result<(), Error> {
let dir = self.dirstack.last_mut().ok_or_else(|| format_err!("outside root"))?;
let name = name.to_bytes().to_vec();
dir.entries.push(DirEntry { name, attr: DirEntryAttribute::File { size, mtime } });
@ -587,14 +587,77 @@ impl <R: Read + Seek> CatalogReader<R> {
}
}
/// Serialize i64 as short, variable length byte sequence
///
/// Stores 7 bits per byte, Bit 8 indicates the end of the sequence (when not set).
/// If the value is negative, we end with a zero byte (0x00).
pub fn catalog_encode_i64<W: Write>(writer: &mut W, v: i64) -> Result<(), Error> {
let mut enc = Vec::new();
let mut d = if v < 0 {
(-1 * (v + 1)) as u64 + 1 // also handles i64::MIN
} else {
v as u64
};
loop {
if d < 128 {
if v < 0 {
enc.push(128 | d as u8);
enc.push(0u8);
} else {
enc.push(d as u8);
}
break;
}
enc.push((128 | (d & 127)) as u8);
d = d >> 7;
}
writer.write_all(&enc)?;
Ok(())
}
/// Deserialize i64 from variable length byte sequence
///
/// We currently read at most 11 bytes, which gives a maximum of 70 bits + sign.
/// This method is compatible with catalog_encode_u64 iff the
/// encoded value is < 2^63 (larger values cannot be represented in an i64).
pub fn catalog_decode_i64<R: Read>(reader: &mut R) -> Result<i64, Error> {
let mut v: u64 = 0;
let mut buf = [0u8];
for i in 0..11 { // only allow 11 bytes (70 bits + sign marker)
if buf.is_empty() {
bail!("decode_i64 failed - unexpected EOB");
}
reader.read_exact(&mut buf)?;
let t = buf[0];
if t == 0 {
if v == 0 {
return Ok(0);
}
return Ok(((v - 1) as i64 * -1) - 1); // also handles i64::MIN
} else if t < 128 {
v |= (t as u64) << (i*7);
return Ok(v as i64);
} else {
v |= ((t & 127) as u64) << (i*7);
}
}
bail!("decode_i64 failed - missing end marker");
}
/// Serialize u64 as short, variable length byte sequence
///
/// Stores 7 bits per byte, Bit 8 indicates the end of the sequence (when not set).
/// We limit values to a maximum of 2^63 - 1.
pub fn catalog_encode_u64<W: Write>(writer: &mut W, v: u64) -> Result<(), Error> {
let mut enc = Vec::new();
if (v & (1<<63)) != 0 { bail!("catalog_encode_u64 failed - value >= 2^63"); }
let mut d = v;
loop {
if d < 128 {
@ -611,13 +674,14 @@ pub fn catalog_encode_u64<W: Write>(writer: &mut W, v: u64) -> Result<(), Error>
/// Deserialize u64 from variable length byte sequence
///
/// We currently read maximal 9 bytes, which give a maximum of 63 bits.
/// We currently read at most 10 bytes, which gives a maximum of 70 bits,
/// but we currently only encode up to 64 bits
pub fn catalog_decode_u64<R: Read>(reader: &mut R) -> Result<u64, Error> {
let mut v: u64 = 0;
let mut buf = [0u8];
for i in 0..9 { // only allow 9 bytes (63 bits)
for i in 0..10 { // only allow 10 bytes (70 bits)
if buf.is_empty() {
bail!("decode_u64 failed - unexpected EOB");
}
@ -652,9 +716,58 @@ fn test_catalog_u64_encoder() {
assert!(decoded == value);
}
test_encode_decode(u64::MIN);
test_encode_decode(126);
test_encode_decode((1<<12)-1);
test_encode_decode((1<<20)-1);
test_encode_decode((1<<50)-1);
test_encode_decode((1<<63)-1);
test_encode_decode(u64::MAX);
}
#[test]
fn test_catalog_i64_encoder() {
fn test_encode_decode(value: i64) {
let mut data = Vec::new();
catalog_encode_i64(&mut data, value).unwrap();
let slice = &mut &data[..];
let decoded = catalog_decode_i64(slice).unwrap();
assert!(decoded == value);
}
test_encode_decode(0);
test_encode_decode(-0);
test_encode_decode(126);
test_encode_decode(-126);
test_encode_decode((1<<12)-1);
test_encode_decode(-(1<<12)-1);
test_encode_decode((1<<20)-1);
test_encode_decode(-(1<<20)-1);
test_encode_decode(i64::MIN);
test_encode_decode(i64::MAX);
}
#[test]
fn test_catalog_i64_compatibility() {
fn test_encode_decode(value: u64) {
let mut data = Vec::new();
catalog_encode_u64(&mut data, value).unwrap();
let slice = &mut &data[..];
let decoded = catalog_decode_i64(slice).unwrap() as u64;
assert!(decoded == value);
}
test_encode_decode(u64::MIN);
test_encode_decode(126);
test_encode_decode((1<<12)-1);
test_encode_decode((1<<20)-1);
test_encode_decode((1<<50)-1);
test_encode_decode(u64::MAX);
}

View File

@ -154,9 +154,11 @@ impl ChunkStore {
}
pub fn cond_touch_chunk(&self, digest: &[u8; 32], fail_if_not_exist: bool) -> Result<bool, Error> {
let (chunk_path, _digest_str) = self.chunk_path(digest);
self.cond_touch_path(&chunk_path, fail_if_not_exist)
}
pub fn cond_touch_path(&self, path: &Path, fail_if_not_exist: bool) -> Result<bool, Error> {
const UTIME_NOW: i64 = (1 << 30) - 1;
const UTIME_OMIT: i64 = (1 << 30) - 2;
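// The two constants above are the kernel's UTIME_NOW / UTIME_OMIT from utimensat(2):
// special tv_nsec markers meaning 'set to the current time' and 'leave unchanged', respectively.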
@ -167,7 +169,7 @@ impl ChunkStore {
use nix::NixPath;
let res = chunk_path.with_nix_path(|cstr| unsafe {
let res = path.with_nix_path(|cstr| unsafe {
let tmp = libc::utimensat(-1, cstr.as_ptr(), &times[0], libc::AT_SYMLINK_NOFOLLOW);
nix::errno::Errno::result(tmp)
})?;
@ -177,7 +179,7 @@ impl ChunkStore {
return Ok(false);
}
bail!("update atime failed for chunk {:?} - {}", chunk_path, err);
bail!("update atime failed for chunk/file {:?} - {}", path, err);
}
Ok(true)
@ -328,49 +330,13 @@ impl ChunkStore {
let lock = self.mutex.lock();
if let Ok(stat) = fstatat(dirfd, filename, nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW) {
if bad {
// filename validity checked in iterator
let orig_filename = std::ffi::CString::new(&filename.to_bytes()[..64])?;
match fstatat(
dirfd,
orig_filename.as_c_str(),
nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW)
{
Ok(_) => {
match unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir) {
Err(err) =>
crate::task_warn!(
worker,
"unlinking corrupt chunk {:?} failed on store '{}' - {}",
filename,
self.name,
err,
),
Ok(_) => {
status.removed_bad += 1;
status.removed_bytes += stat.st_size as u64;
}
}
},
Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => {
// chunk hasn't been rewritten yet, keep .bad file
status.still_bad += 1;
},
Err(err) => {
// some other error, warn user and keep .bad file around too
status.still_bad += 1;
crate::task_warn!(
worker,
"error during stat on '{:?}' - {}",
orig_filename,
err,
);
}
}
} else if stat.st_atime < min_atime {
if stat.st_atime < min_atime {
//let age = now - stat.st_atime;
//println!("UNLINK {} {:?}", age/(3600*24), filename);
if let Err(err) = unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir) {
if bad {
status.still_bad += 1;
}
bail!(
"unlinking chunk {:?} failed on store '{}' - {}",
filename,
@ -378,13 +344,23 @@ impl ChunkStore {
err,
);
}
status.removed_chunks += 1;
if bad {
status.removed_bad += 1;
} else {
status.removed_chunks += 1;
}
status.removed_bytes += stat.st_size as u64;
} else if stat.st_atime < oldest_writer {
status.pending_chunks += 1;
if bad {
status.still_bad += 1;
} else {
status.pending_chunks += 1;
}
status.pending_bytes += stat.st_size as u64;
} else {
status.disk_chunks += 1;
if !bad {
status.disk_chunks += 1;
}
status.disk_bytes += stat.st_size as u64;
}
}

View File

@ -7,6 +7,8 @@
//! encryption](https://en.wikipedia.org/wiki/Authenticated_encryption)
//! for a short introduction.
use std::fmt;
use std::fmt::Display;
use std::io::Write;
use anyhow::{bail, Error};
@ -15,8 +17,15 @@ use openssl::pkcs5::pbkdf2_hmac;
use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};
use serde::{Deserialize, Serialize};
use crate::tools::format::{as_fingerprint, bytes_as_fingerprint};
use proxmox::api::api;
// openssl::sha::sha256(b"Proxmox Backup Encryption Key Fingerprint")
const FINGERPRINT_INPUT: [u8; 32] = [ 110, 208, 239, 119, 71, 31, 255, 77,
85, 199, 168, 254, 74, 157, 182, 33,
97, 64, 127, 19, 76, 114, 93, 223,
48, 153, 45, 37, 236, 69, 237, 38, ];
#[api(default: "encrypt")]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
@ -30,6 +39,30 @@ pub enum CryptMode {
SignOnly,
}
#[derive(Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(transparent)]
/// 32-byte fingerprint, usually calculated with SHA256.
pub struct Fingerprint {
#[serde(with = "bytes_as_fingerprint")]
bytes: [u8; 32],
}
impl Fingerprint {
pub fn new(bytes: [u8; 32]) -> Self {
Self { bytes }
}
pub fn bytes(&self) -> &[u8; 32] {
&self.bytes
}
}
/// Display as short key ID
impl Display for Fingerprint {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", as_fingerprint(&self.bytes[0..8]))
}
}
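A small usage sketch for the new type (assumption: as_fingerprint renders the bytes as colon-separated hex):
let fp = Fingerprint::new([0xab; 32]);
// Display shows only the first 8 bytes as a short key ID, e.g. "ab:ab:ab:ab:ab:ab:ab:ab"
println!("{}", fp);
// bytes() still exposes the full 32-byte digest, as needed by libproxmox-backup-qemu0
assert_eq!(fp.bytes().len(), 32);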
/// Encryption Configuration with secret key
///
/// This structure stores the secret key and provides helpers for
@ -101,6 +134,10 @@ impl CryptConfig {
tag
}
pub fn fingerprint(&self) -> Fingerprint {
Fingerprint::new(self.compute_digest(&FINGERPRINT_INPUT))
}
pub fn data_crypter(&self, iv: &[u8; 16], mode: Mode) -> Result<Crypter, Error> {
let mut crypter = openssl::symm::Crypter::new(self.cipher, mode, &self.enc_key, Some(iv))?;
crypter.aad_update(b"")?; //??
@ -219,7 +256,13 @@ impl CryptConfig {
) -> Result<Vec<u8>, Error> {
let modified = proxmox::tools::time::epoch_i64();
let key_config = super::KeyConfig { kdf: None, created, modified, data: self.enc_key.to_vec() };
let key_config = super::KeyConfig {
kdf: None,
created,
modified,
data: self.enc_key.to_vec(),
fingerprint: Some(self.fingerprint()),
};
let data = serde_json::to_string(&key_config)?.as_bytes().to_vec();
let mut buffer = vec![0u8; rsa.size() as usize];

View File

@ -23,7 +23,7 @@ use crate::task::TaskState;
use crate::tools;
use crate::tools::format::HumanByte;
use crate::tools::fs::{lock_dir_noblock, DirLockGuard};
use crate::api2::types::{GarbageCollectionStatus, Userid};
use crate::api2::types::{Authid, GarbageCollectionStatus};
use crate::server::UPID;
lazy_static! {
@ -276,8 +276,8 @@ impl DataStore {
/// Returns the backup owner.
///
/// The backup owner is the user who first created the backup group.
pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Userid, Error> {
/// The backup owner is the entity who first created the backup group.
pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Authid, Error> {
let mut full_path = self.base_path();
full_path.push(backup_group.group_path());
full_path.push("owner");
@ -289,7 +289,7 @@ impl DataStore {
pub fn set_owner(
&self,
backup_group: &BackupGroup,
userid: &Userid,
auth_id: &Authid,
force: bool,
) -> Result<(), Error> {
let mut path = self.base_path();
@ -309,7 +309,7 @@ impl DataStore {
let mut file = open_options.open(&path)
.map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?;
writeln!(file, "{}", userid)
writeln!(file, "{}", auth_id)
.map_err(|err| format_err!("unable to write owner file {:?} - {}", path, err))?;
Ok(())
@ -324,8 +324,8 @@ impl DataStore {
pub fn create_locked_backup_group(
&self,
backup_group: &BackupGroup,
userid: &Userid,
) -> Result<(Userid, DirLockGuard), Error> {
auth_id: &Authid,
) -> Result<(Authid, DirLockGuard), Error> {
// create intermediate path first:
let base_path = self.base_path();
@ -339,7 +339,7 @@ impl DataStore {
match std::fs::create_dir(&full_path) {
Ok(_) => {
let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
self.set_owner(backup_group, userid, false)?;
self.set_owner(backup_group, auth_id, false)?;
let owner = self.get_owner(backup_group)?; // just to be sure
Ok((owner, guard))
}
@ -446,6 +446,17 @@ impl DataStore {
file_name,
err,
);
// touch any corresponding .bad files to keep them around: if the chunk is later
// rewritten correctly they are removed automatically, and likewise once no index
// file references the chunk anymore (in that case this loop is never reached)
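// Illustrative layout (assumption about the chunk path scheme): for digest 0123..ef this
// touches <store>/.chunks/0123/0123..ef.0.bad through 0123..ef.9.bad, if they exist.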
for i in 0..=9 {
let bad_ext = format!("{}.bad", i);
let mut bad_path = PathBuf::new();
bad_path.push(self.chunk_path(digest).0);
bad_path.set_extension(bad_ext);
self.chunk_store.cond_touch_path(&bad_path, false)?;
}
}
}
Ok(())
@ -458,38 +469,35 @@ impl DataStore {
) -> Result<(), Error> {
let image_list = self.list_images()?;
let image_count = image_list.len();
let mut done = 0;
let mut last_percentage: usize = 0;
for path in image_list {
for img in image_list {
worker.check_abort()?;
tools::fail_on_shutdown()?;
let full_path = self.chunk_store.relative_path(&path);
match std::fs::File::open(&full_path) {
let path = self.chunk_store.relative_path(&img);
match std::fs::File::open(&path) {
Ok(file) => {
if let Ok(archive_type) = archive_type(&path) {
if let Ok(archive_type) = archive_type(&img) {
if archive_type == ArchiveType::FixedIndex {
let index = FixedIndexReader::new(file)?;
self.index_mark_used_chunks(index, &path, status, worker)?;
let index = FixedIndexReader::new(file).map_err(|e| {
format_err!("can't read index '{}' - {}", path.to_string_lossy(), e)
})?;
self.index_mark_used_chunks(index, &img, status, worker)?;
} else if archive_type == ArchiveType::DynamicIndex {
let index = DynamicIndexReader::new(file)?;
self.index_mark_used_chunks(index, &path, status, worker)?;
let index = DynamicIndexReader::new(file).map_err(|e| {
format_err!("can't read index '{}' - {}", path.to_string_lossy(), e)
})?;
self.index_mark_used_chunks(index, &img, status, worker)?;
}
}
}
Err(err) => {
if err.kind() == std::io::ErrorKind::NotFound {
// simply ignore vanished files
} else {
return Err(err.into());
}
}
Err(err) if err.kind() == io::ErrorKind::NotFound => (), // ignore vanished files
Err(err) => bail!("can't open index {} - {}", path.to_string_lossy(), err),
}
done += 1;
@ -559,7 +567,11 @@ impl DataStore {
);
}
if gc_status.removed_bad > 0 {
crate::task_log!(worker, "Removed bad files: {}", gc_status.removed_bad);
crate::task_log!(worker, "Removed bad chunks: {}", gc_status.removed_bad);
}
if gc_status.still_bad > 0 {
crate::task_log!(worker, "Leftover bad chunks: {}", gc_status.still_bad);
}
crate::task_log!(
@ -580,6 +592,14 @@ impl DataStore {
crate::task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);
let deduplication_factor = if gc_status.disk_bytes > 0 {
(gc_status.index_data_bytes as f64)/(gc_status.disk_bytes as f64)
} else {
1.0
};
crate::task_log!(worker, "Deduplication factor: {:.2}", deduplication_factor);
if gc_status.disk_chunks > 0 {
let avg_chunk = gc_status.disk_bytes/(gc_status.disk_chunks as u64);
crate::task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));

View File

@ -95,6 +95,18 @@ impl DynamicIndexReader {
let header_size = std::mem::size_of::<DynamicIndexHeader>();
let rawfd = file.as_raw_fd();
let stat = match nix::sys::stat::fstat(rawfd) {
Ok(stat) => stat,
Err(err) => bail!("fstat failed - {}", err),
};
let size = stat.st_size as usize;
if size < header_size {
bail!("index too small ({})", stat.st_size);
}
let header: Box<DynamicIndexHeader> = unsafe { file.read_host_value_boxed()? };
if header.magic != super::DYNAMIC_SIZED_CHUNK_INDEX_1_0 {
@ -103,13 +115,7 @@ impl DynamicIndexReader {
let ctime = proxmox::tools::time::epoch_i64();
let rawfd = file.as_raw_fd();
let stat = nix::sys::stat::fstat(rawfd)?;
let size = stat.st_size as usize;
let index_size = size - header_size;
let index_size = stat.st_size as usize - header_size;
let index_count = index_size / 40;
if index_count * 40 != index_size {
bail!("got unexpected file size");
@ -213,7 +219,6 @@ impl IndexFile for DynamicIndexReader {
(csum, chunk_end)
}
#[allow(clippy::cast_ptr_alignment)]
fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo> {
if pos >= self.index.len() {
return None;

View File

@ -68,6 +68,19 @@ impl FixedIndexReader {
file.seek(SeekFrom::Start(0))?;
let header_size = std::mem::size_of::<FixedIndexHeader>();
let rawfd = file.as_raw_fd();
let stat = match nix::sys::stat::fstat(rawfd) {
Ok(stat) => stat,
Err(err) => bail!("fstat failed - {}", err),
};
let size = stat.st_size as usize;
if size < header_size {
bail!("index too small ({})", stat.st_size);
}
let header: Box<FixedIndexHeader> = unsafe { file.read_host_value_boxed()? };
if header.magic != super::FIXED_SIZED_CHUNK_INDEX_1_0 {
@ -81,12 +94,6 @@ impl FixedIndexReader {
let index_length = ((size + chunk_size - 1) / chunk_size) as usize;
let index_size = index_length * 32;
let rawfd = file.as_raw_fd();
let stat = match nix::sys::stat::fstat(rawfd) {
Ok(stat) => stat,
Err(err) => bail!("fstat failed - {}", err),
};
let expected_index_size = (stat.st_size as usize) - header_size;
if index_size != expected_index_size {

View File

@ -2,9 +2,34 @@ use anyhow::{bail, format_err, Context, Error};
use serde::{Deserialize, Serialize};
use crate::backup::{CryptConfig, Fingerprint};
use proxmox::api::api;
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
use proxmox::try_block;
#[api(default: "scrypt")]
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
/// Key derivation function for password protected encryption keys.
pub enum Kdf {
/// Do not encrypt the key.
None,
/// Encrypt the key with a password using SCrypt.
Scrypt,
/// Encrypt the key with a password using PBKDF2.
PBKDF2,
}
impl Default for Kdf {
#[inline]
fn default() -> Self {
Kdf::Scrypt
}
}
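A minimal usage sketch for the new kdf parameter (hypothetical key and passphrase; encrypt_key_with_passphrase is shown further down):
let raw_key = [0u8; 32];
// Kdf::Scrypt reproduces the previously hard-coded behaviour (n=65536, r=8, p=1)
let key_config = encrypt_key_with_passphrase(&raw_key, b"hunter2", Kdf::Scrypt)?;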
#[derive(Deserialize, Serialize, Debug)]
pub enum KeyDerivationConfig {
Scrypt {
@ -66,6 +91,9 @@ pub struct KeyConfig {
pub modified: i64,
#[serde(with = "proxmox::tools::serde::bytes_as_base64")]
pub data: Vec<u8>,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(default)]
pub fingerprint: Option<Fingerprint>,
}
pub fn store_key_config(
@ -103,15 +131,25 @@ pub fn store_key_config(
pub fn encrypt_key_with_passphrase(
raw_key: &[u8],
passphrase: &[u8],
kdf: Kdf,
) -> Result<KeyConfig, Error> {
let salt = proxmox::sys::linux::random_data(32)?;
let kdf = KeyDerivationConfig::Scrypt {
n: 65536,
r: 8,
p: 1,
salt,
let kdf = match kdf {
Kdf::Scrypt => KeyDerivationConfig::Scrypt {
n: 65536,
r: 8,
p: 1,
salt,
},
Kdf::PBKDF2 => KeyDerivationConfig::PBKDF2 {
iter: 65535,
salt,
},
Kdf::None => {
bail!("No key derivation function specified");
}
};
let derived_key = kdf.derive_key(passphrase)?;
@ -142,28 +180,22 @@ pub fn encrypt_key_with_passphrase(
created,
modified: created,
data: enc_data,
fingerprint: None,
})
}
pub fn load_and_decrypt_key(
path: &std::path::Path,
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
) -> Result<([u8;32], i64), Error> {
do_load_and_decrypt_key(path, passphrase)
.with_context(|| format!("failed to load decryption key from {:?}", path))
}
fn do_load_and_decrypt_key(
path: &std::path::Path,
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
) -> Result<([u8;32], i64), Error> {
) -> Result<([u8;32], i64, Fingerprint), Error> {
decrypt_key(&file_get_contents(&path)?, passphrase)
.with_context(|| format!("failed to load decryption key from {:?}", path))
}
pub fn decrypt_key(
mut keydata: &[u8],
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
) -> Result<([u8;32], i64), Error> {
) -> Result<([u8;32], i64, Fingerprint), Error> {
let key_config: KeyConfig = serde_json::from_reader(&mut keydata)?;
let raw_data = key_config.data;
@ -203,5 +235,13 @@ pub fn decrypt_key(
let mut result = [0u8; 32];
result.copy_from_slice(&key);
Ok((result, created))
let fingerprint = match key_config.fingerprint {
Some(fingerprint) => fingerprint,
None => {
let crypt_config = CryptConfig::new(result.clone())?;
crypt_config.fingerprint()
},
};
Ok((result, created, fingerprint))
}

View File

@ -5,7 +5,7 @@ use std::path::Path;
use serde_json::{json, Value};
use ::serde::{Deserialize, Serialize};
use crate::backup::{BackupDir, CryptMode, CryptConfig};
use crate::backup::{BackupDir, CryptMode, CryptConfig, Fingerprint};
pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
pub const MANIFEST_LOCK_NAME: &str = ".index.json.lck";
@ -223,12 +223,48 @@ impl BackupManifest {
if let Some(crypt_config) = crypt_config {
let sig = self.signature(crypt_config)?;
manifest["signature"] = proxmox::tools::digest_to_hex(&sig).into();
let fingerprint = &crypt_config.fingerprint();
manifest["unprotected"]["key-fingerprint"] = serde_json::to_value(fingerprint)?;
}
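// Illustrative result (assumed rendering): a signed index.json now contains a top-level
// "signature" plus "unprotected": { "key-fingerprint": "aa:bb:..." } for clients to check.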
let manifest = serde_json::to_string_pretty(&manifest).unwrap().into();
Ok(manifest)
}
pub fn fingerprint(&self) -> Result<Option<Fingerprint>, Error> {
match &self.unprotected["key-fingerprint"] {
Value::Null => Ok(None),
value => Ok(Some(serde_json::from_value(value.clone())?))
}
}
/// Checks if a BackupManifest and a CryptConfig share a valid fingerprint combination.
///
/// An unsigned manifest is valid with any or no CryptConfig.
/// A signed manifest is only valid with a matching CryptConfig.
pub fn check_fingerprint(&self, crypt_config: Option<&CryptConfig>) -> Result<(), Error> {
if let Some(fingerprint) = self.fingerprint()? {
match crypt_config {
None => bail!(
"missing key - manifest was created with key {}",
fingerprint,
),
Some(crypt_config) => {
let config_fp = crypt_config.fingerprint();
if config_fp != fingerprint {
bail!(
"wrong key - manifest's key {} does not match provided key {}",
fingerprint,
config_fp
);
}
}
}
};
Ok(())
}
/// Try to read the manifest. This verifies the signature if there is a crypt_config.
pub fn from_data(data: &[u8], crypt_config: Option<&CryptConfig>) -> Result<BackupManifest, Error> {
let json: Value = serde_json::from_slice(data)?;
@ -237,6 +273,19 @@ impl BackupManifest {
if let Some(ref crypt_config) = crypt_config {
if let Some(signature) = signature {
let expected_signature = proxmox::tools::digest_to_hex(&Self::json_signature(&json, crypt_config)?);
let fingerprint = &json["unprotected"]["key-fingerprint"];
if fingerprint != &Value::Null {
let fingerprint = serde_json::from_value(fingerprint.clone())?;
let config_fp = crypt_config.fingerprint();
if config_fp != fingerprint {
bail!(
"wrong key - unable to verify signature since manifest's key {} does not match provided key {}",
fingerprint,
config_fp
);
}
}
if signature != expected_signature {
bail!("wrong signature in manifest");
}

View File

@ -14,6 +14,7 @@ use crate::{
BackupGroup,
BackupDir,
BackupInfo,
BackupManifest,
IndexFile,
CryptMode,
FileInfo,
@ -284,6 +285,7 @@ pub fn verify_backup_dir(
corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
worker: Arc<dyn TaskState + Send + Sync>,
upid: UPID,
filter: Option<&dyn Fn(&BackupManifest) -> bool>,
) -> Result<bool, Error> {
let snap_lock = lock_dir_noblock_shared(
&datastore.snapshot_path(&backup_dir),
@ -297,6 +299,7 @@ pub fn verify_backup_dir(
corrupt_chunks,
worker,
upid,
filter,
snap_lock
),
Err(err) => {
@ -320,6 +323,7 @@ pub fn verify_backup_dir_with_lock(
corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
worker: Arc<dyn TaskState + Send + Sync>,
upid: UPID,
filter: Option<&dyn Fn(&BackupManifest) -> bool>,
_snap_lock: Dir,
) -> Result<bool, Error> {
let manifest = match datastore.load_manifest(&backup_dir) {
@ -336,6 +340,18 @@ pub fn verify_backup_dir_with_lock(
}
};
if let Some(filter) = filter {
if filter(&manifest) == false {
task_log!(
worker,
"SKIPPED: verify {}:{} (recently verified)",
datastore.name(),
backup_dir,
);
return Ok(true);
}
}
task_log!(worker, "verify {}:{}", datastore.name(), backup_dir);
let mut error_count = 0;
@ -412,7 +428,7 @@ pub fn verify_backup_group(
progress: Option<(usize, usize)>, // (done, snapshot_count)
worker: Arc<dyn TaskState + Send + Sync>,
upid: &UPID,
filter: &dyn Fn(&BackupInfo) -> bool,
filter: Option<&dyn Fn(&BackupManifest) -> bool>,
) -> Result<(usize, Vec<String>), Error> {
let mut errors = Vec::new();
@ -439,16 +455,6 @@ pub fn verify_backup_group(
for info in list {
count += 1;
if filter(&info) == false {
task_log!(
worker,
"SKIPPED: verify {}:{} (recently verified)",
datastore.name(),
info.backup_dir,
);
continue;
}
if !verify_backup_dir(
datastore.clone(),
&info.backup_dir,
@ -456,6 +462,7 @@ pub fn verify_backup_group(
corrupt_chunks.clone(),
worker.clone(),
upid.clone(),
filter,
)? {
errors.push(info.backup_dir.to_string());
}
@ -475,7 +482,7 @@ pub fn verify_backup_group(
Ok((count, errors))
}
/// Verify all backups inside a datastore
/// Verify all (owned) backups inside a datastore
///
/// Errors are logged to the worker log.
///
@ -486,20 +493,56 @@ pub fn verify_all_backups(
datastore: Arc<DataStore>,
worker: Arc<dyn TaskState + Send + Sync>,
upid: &UPID,
filter: &dyn Fn(&BackupInfo) -> bool,
owner: Option<Authid>,
filter: Option<&dyn Fn(&BackupManifest) -> bool>,
) -> Result<Vec<String>, Error> {
let mut errors = Vec::new();
task_log!(worker, "verify datastore {}", datastore.name());
if let Some(owner) = &owner {
task_log!(worker, "limiting to backups owned by {}", owner);
}
let filter_by_owner = |group: &BackupGroup| {
match (datastore.get_owner(group), &owner) {
(Ok(ref group_owner), Some(owner)) => {
group_owner == owner
|| (group_owner.is_token()
&& !owner.is_token()
&& group_owner.user() == owner.user())
},
(Ok(_), None) => true,
(Err(err), Some(_)) => {
// intentionally not in task log
// the task user might not be allowed to see this group!
println!("Failed to get owner of group '{}' - {}", group, err);
false
},
(Err(err), None) => {
// we don't filter by owner, but we want to log the error
task_log!(
worker,
"Failed to get owner of group '{} - {}",
group,
err,
);
errors.push(group.to_string());
true
},
}
};
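// Illustration (not part of the patch): with owner = Some(alice@pbs), groups owned by
// alice@pbs or by one of alice's API tokens (e.g. alice@pbs!mytoken) pass the filter,
// while groups owned by other users or their tokens are skipped.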
let mut list = match BackupGroup::list_groups(&datastore.base_path()) {
Ok(list) => list
.into_iter()
.filter(|group| !(group.backup_type() == "host" && group.backup_id() == "benchmark"))
.filter(filter_by_owner)
.collect::<Vec<BackupGroup>>(),
Err(err) => {
task_log!(
worker,
"verify datastore {} - unable to list backups: {}",
datastore.name(),
"unable to list backups: {}",
err,
);
return Ok(errors);
@ -519,7 +562,7 @@ pub fn verify_all_backups(
// start with 64 chunks since we assume there are few corrupt ones
let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
task_log!(worker, "verify datastore {} ({} snapshots)", datastore.name(), snapshot_count);
task_log!(worker, "found {} snapshots", snapshot_count);
let mut done = 0;
for group in list {

View File

@ -52,7 +52,9 @@ async fn run() -> Result<(), Error> {
let mut config = server::ApiConfig::new(
buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PRIVILEGED)?;
config.enable_file_log(buildcfg::API_ACCESS_LOG_FN)?;
let mut commando_sock = server::CommandoSocket::new(server::our_ctrl_sock());
config.enable_file_log(buildcfg::API_ACCESS_LOG_FN, &mut commando_sock)?;
let rest_server = RestServer::new(config);
@ -74,12 +76,15 @@ async fn run() -> Result<(), Error> {
})
)
},
"proxmox-backup.service",
);
server::write_pid(buildcfg::PROXMOX_BACKUP_API_PID_FN)?;
daemon::systemd_notify(daemon::SystemdNotify::Ready)?;
let init_result: Result<(), Error> = try_block!({
server::create_task_control_socket()?;
server::register_task_control_commands(&mut commando_sock)?;
commando_sock.spawn()?;
server::server_state_init()?;
Ok(())
});

View File

@ -32,11 +32,11 @@ use proxmox::{
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
use proxmox_backup::tools;
use proxmox_backup::api2::access::user::UserWithTokens;
use proxmox_backup::api2::types::*;
use proxmox_backup::api2::version;
use proxmox_backup::client::*;
use proxmox_backup::pxar::catalog::*;
use proxmox_backup::config::user::complete_user_name;
use proxmox_backup::backup::{
archive_type,
decrypt_key,
@ -53,7 +53,6 @@ use proxmox_backup::backup::{
ChunkStream,
CryptConfig,
CryptMode,
DataBlob,
DynamicIndexReader,
FixedChunkStream,
FixedIndexReader,
@ -193,8 +192,12 @@ pub fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<
result
}
fn connect(server: &str, port: u16, userid: &Userid) -> Result<HttpClient, Error> {
fn connect(repo: &BackupRepository) -> Result<HttpClient, Error> {
connect_do(repo.host(), repo.port(), repo.auth_id())
.map_err(|err| format_err!("error building client for repository {} - {}", repo, err))
}
fn connect_do(server: &str, port: u16, auth_id: &Authid) -> Result<HttpClient, Error> {
let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
use std::env::VarError::*;
@ -212,7 +215,7 @@ fn connect(server: &str, port: u16, userid: &Userid) -> Result<HttpClient, Error
.fingerprint_cache(true)
.ticket_cache(true);
HttpClient::new(server, port, userid, options)
HttpClient::new(server, port, auth_id, options)
}
async fn view_task_result(
@ -366,7 +369,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?;
let client = connect(repo.host(), repo.port(), repo.user())?;
let client = connect(&repo)?;
let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
@ -425,7 +428,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
description: "Backup group.",
},
"new-owner": {
type: Userid,
type: Authid,
},
}
}
@ -435,7 +438,7 @@ async fn change_backup_owner(group: String, mut param: Value) -> Result<(), Erro
let repo = extract_repository_from_value(&param)?;
let mut client = connect(repo.host(), repo.port(), repo.user())?;
let mut client = connect(&repo)?;
param.as_object_mut().unwrap().remove("repository");
@ -452,112 +455,6 @@ async fn change_backup_owner(group: String, mut param: Value) -> Result<(), Erro
Ok(())
}
#[api(
input: {
properties: {
repository: {
schema: REPO_URL_SCHEMA,
optional: true,
},
group: {
type: String,
description: "Backup group.",
optional: true,
},
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// List backup snapshots.
async fn list_snapshots(param: Value) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?;
let output_format = get_output_format(&param);
let client = connect(repo.host(), repo.port(), repo.user())?;
let group: Option<BackupGroup> = if let Some(path) = param["group"].as_str() {
Some(path.parse()?)
} else {
None
};
let mut data = api_datastore_list_snapshots(&client, repo.store(), group).await?;
record_repository(&repo);
let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
};
let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
let mut filenames = Vec::new();
for file in &item.files {
filenames.push(file.filename.to_string());
}
Ok(tools::format::render_backup_file_list(&filenames[..]))
};
let options = default_table_format_options()
.sortby("backup-type", false)
.sortby("backup-id", false)
.sortby("backup-time", false)
.column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
.column(ColumnConfig::new("size").renderer(tools::format::render_bytes_human_readable))
.column(ColumnConfig::new("files").renderer(render_files))
;
let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOTS;
format_and_print_result_full(&mut data, info, &output_format, &options);
Ok(Value::Null)
}
#[api(
input: {
properties: {
repository: {
schema: REPO_URL_SCHEMA,
optional: true,
},
snapshot: {
type: String,
description: "Snapshot path.",
},
}
}
)]
/// Forget (remove) backup snapshots.
async fn forget_snapshots(param: Value) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?;
let path = tools::required_string_param(&param, "snapshot")?;
let snapshot: BackupDir = path.parse()?;
let mut client = connect(repo.host(), repo.port(), repo.user())?;
let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
let result = client.delete(&path, Some(json!({
"backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(),
"backup-time": snapshot.backup_time(),
}))).await?;
record_repository(&repo);
Ok(result)
}
#[api(
input: {
properties: {
@ -573,7 +470,7 @@ async fn api_login(param: Value) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?;
let client = connect(repo.host(), repo.port(), repo.user())?;
let client = connect(&repo)?;
client.login().await?;
record_repository(&repo);
@ -630,7 +527,7 @@ async fn api_version(param: Value) -> Result<(), Error> {
let repo = extract_repository_from_value(&param);
if let Ok(repo) = repo {
let client = connect(repo.host(), repo.port(), repo.user())?;
let client = connect(&repo)?;
match client.get("api2/json/version", None).await {
Ok(mut result) => version_info["server"] = result["data"].take(),
@ -651,58 +548,6 @@ async fn api_version(param: Value) -> Result<(), Error> {
Ok(())
}
#[api(
input: {
properties: {
repository: {
schema: REPO_URL_SCHEMA,
optional: true,
},
snapshot: {
type: String,
description: "Snapshot path.",
},
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// List snapshot files.
async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?;
let path = tools::required_string_param(&param, "snapshot")?;
let snapshot: BackupDir = path.parse()?;
let output_format = get_output_format(&param);
let client = connect(repo.host(), repo.port(), repo.user())?;
let path = format!("api2/json/admin/datastore/{}/files", repo.store());
let mut result = client.get(&path, Some(json!({
"backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(),
"backup-time": snapshot.backup_time(),
}))).await?;
record_repository(&repo);
let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOT_FILES;
let mut data: Value = result["data"].take();
let options = default_table_format_options();
format_and_print_result_full(&mut data, info, &output_format, &options);
Ok(Value::Null)
}
#[api(
input: {
properties: {
@ -724,7 +569,7 @@ async fn start_garbage_collection(param: Value) -> Result<Value, Error> {
let output_format = get_output_format(&param);
let mut client = connect(repo.host(), repo.port(), repo.user())?;
let mut client = connect(&repo)?;
let path = format!("api2/json/admin/datastore/{}/gc", repo.store());
@ -798,7 +643,10 @@ fn keyfile_parameters(param: &Value) -> Result<(Option<Vec<u8>>, CryptMode), Err
let keydata = match (keyfile, key_fd) {
(None, None) => None,
(Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
(Some(keyfile), None) => Some(file_get_contents(keyfile)?),
(Some(keyfile), None) => {
eprintln!("Using encryption key file: {}", keyfile);
Some(file_get_contents(keyfile)?)
},
(None, Some(fd)) => {
let input = unsafe { std::fs::File::from_raw_fd(fd) };
let mut data = Vec::new();
@ -806,6 +654,7 @@ fn keyfile_parameters(param: &Value) -> Result<(Option<Vec<u8>>, CryptMode), Err
.map_err(|err| {
format_err!("error reading encryption key from fd {}: {}", fd, err)
})?;
eprintln!("Using encryption key from file descriptor");
Some(data)
}
};
@ -813,7 +662,10 @@ fn keyfile_parameters(param: &Value) -> Result<(Option<Vec<u8>>, CryptMode), Err
Ok(match (keydata, crypt_mode) {
// no parameters:
(None, None) => match key::read_optional_default_encryption_key()? {
Some(key) => (Some(key), CryptMode::Encrypt),
Some(key) => {
eprintln!("Encrypting with default encryption key!");
(Some(key), CryptMode::Encrypt)
},
None => (None, CryptMode::None),
},
@ -823,7 +675,10 @@ fn keyfile_parameters(param: &Value) -> Result<(Option<Vec<u8>>, CryptMode), Err
// just --crypt-mode other than none
(None, Some(crypt_mode)) => match key::read_optional_default_encryption_key()? {
None => bail!("--crypt-mode without --keyfile and no default key file available"),
Some(key) => (Some(key), crypt_mode),
Some(key) => {
eprintln!("Encrypting with default encryption key!");
(Some(key), crypt_mode)
},
}
// just --keyfile
@ -861,6 +716,11 @@ fn keyfile_parameters(param: &Value) -> Result<(Option<Vec<u8>>, CryptMode), Err
description: "Path to file.",
}
},
"all-file-systems": {
type: Boolean,
description: "Include all mounted subdirectories.",
optional: true,
},
keyfile: {
schema: KEYFILE_SCHEMA,
optional: true,
@ -1036,7 +896,7 @@ async fn create_backup(
let backup_time = backup_time_opt.unwrap_or_else(|| epoch_i64());
let client = connect(repo.host(), repo.port(), repo.user())?;
let client = connect(&repo)?;
record_repository(&repo);
println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time)?);
@ -1050,7 +910,8 @@ async fn create_backup(
let (crypt_config, rsa_encrypted_key) = match keydata {
None => (None, None),
Some(key) => {
let (key, created) = decrypt_key(&key, &key::get_encryption_key_password)?;
let (key, created, fingerprint) = decrypt_key(&key, &key::get_encryption_key_password)?;
println!("Encryption key fingerprint: {}", fingerprint);
let crypt_config = CryptConfig::new(key)?;
@ -1059,6 +920,8 @@ async fn create_backup(
let pem_data = file_get_contents(path)?;
let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
println!("Master key '{:?}'", path);
(Some(Arc::new(crypt_config)), Some(enc_key))
}
_ => (Some(Arc::new(crypt_config)), None),
@ -1077,8 +940,40 @@ async fn create_backup(
false
).await?;
let previous_manifest = if let Ok(previous_manifest) = client.download_previous_manifest().await {
Some(Arc::new(previous_manifest))
let download_previous_manifest = match client.previous_backup_time().await {
Ok(Some(backup_time)) => {
println!(
"Downloading previous manifest ({})",
strftime_local("%c", backup_time)?
);
true
}
Ok(None) => {
println!("No previous manifest available.");
false
}
Err(_) => {
// Fallback for outdated server, TODO remove/bubble up with 2.0
true
}
};
let previous_manifest = if download_previous_manifest {
match client.download_previous_manifest().await {
Ok(previous_manifest) => {
match previous_manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref)) {
Ok(()) => Some(Arc::new(previous_manifest)),
Err(err) => {
println!("Couldn't re-use previous manifest - {}", err);
None
}
}
}
Err(err) => {
println!("Couldn't download previous manifest - {}", err);
None
}
}
} else {
None
};
@ -1339,7 +1234,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
let archive_name = tools::required_string_param(&param, "archive-name")?;
let client = connect(repo.host(), repo.port(), repo.user())?;
let client = connect(&repo)?;
record_repository(&repo);
@ -1361,7 +1256,8 @@ async fn restore(param: Value) -> Result<Value, Error> {
let crypt_config = match keydata {
None => None,
Some(key) => {
let (key, _) = decrypt_key(&key, &key::get_encryption_key_password)?;
let (key, _, fingerprint) = decrypt_key(&key, &key::get_encryption_key_password)?;
eprintln!("Encryption key fingerprint: '{}'", fingerprint);
Some(Arc::new(CryptConfig::new(key)?))
}
};
@ -1377,6 +1273,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
).await?;
let (manifest, backup_index_data) = client.download_manifest().await?;
manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
let (archive_name, archive_type) = parse_archive_type(archive_name);
@ -1473,81 +1370,6 @@ async fn restore(param: Value) -> Result<Value, Error> {
Ok(Value::Null)
}
#[api(
input: {
properties: {
repository: {
schema: REPO_URL_SCHEMA,
optional: true,
},
snapshot: {
type: String,
description: "Group/Snapshot path.",
},
logfile: {
type: String,
description: "The path to the log file you want to upload.",
},
keyfile: {
schema: KEYFILE_SCHEMA,
optional: true,
},
"keyfd": {
schema: KEYFD_SCHEMA,
optional: true,
},
"crypt-mode": {
type: CryptMode,
optional: true,
},
}
}
)]
/// Upload backup log file.
async fn upload_log(param: Value) -> Result<Value, Error> {
let logfile = tools::required_string_param(&param, "logfile")?;
let repo = extract_repository_from_value(&param)?;
let snapshot = tools::required_string_param(&param, "snapshot")?;
let snapshot: BackupDir = snapshot.parse()?;
let mut client = connect(repo.host(), repo.port(), repo.user())?;
let (keydata, crypt_mode) = keyfile_parameters(&param)?;
let crypt_config = match keydata {
None => None,
Some(key) => {
let (key, _created) = decrypt_key(&key, &key::get_encryption_key_password)?;
let crypt_config = CryptConfig::new(key)?;
Some(Arc::new(crypt_config))
}
};
let data = file_get_contents(logfile)?;
// fixme: howto sign log?
let blob = match crypt_mode {
CryptMode::None | CryptMode::SignOnly => DataBlob::encode(&data, None, true)?,
CryptMode::Encrypt => DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?,
};
let raw_data = blob.into_inner();
let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());
let args = json!({
"backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(),
"backup-time": snapshot.backup_time(),
});
let body = hyper::Body::from(raw_data);
client.upload("application/octet-stream", body, &path, Some(args)).await
}
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
&ApiHandler::Async(&prune),
&ObjectSchema::new(
@ -1583,7 +1405,7 @@ fn prune<'a>(
async fn prune_async(mut param: Value) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?;
let mut client = connect(repo.host(), repo.port(), repo.user())?;
let mut client = connect(&repo)?;
let path = format!("api2/json/admin/datastore/{}/prune", repo.store());
@ -1657,7 +1479,10 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {
optional: true,
},
}
}
},
returns: {
type: StorageStatus,
},
)]
/// Get repository status.
async fn status(param: Value) -> Result<Value, Error> {
@ -1666,7 +1491,7 @@ async fn status(param: Value) -> Result<Value, Error> {
let output_format = get_output_format(&param);
let client = connect(repo.host(), repo.port(), repo.user())?;
let client = connect(&repo)?;
let path = format!("api2/json/admin/datastore/{}/status", repo.store());
@ -1690,7 +1515,7 @@ async fn status(param: Value) -> Result<Value, Error> {
.column(ColumnConfig::new("used").renderer(render_total_percentage))
.column(ColumnConfig::new("avail").renderer(render_total_percentage));
let schema = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_STATUS;
let schema = &API_RETURN_SCHEMA_STATUS;
format_and_print_result_full(&mut data, schema, &output_format, &options);
@ -1711,7 +1536,7 @@ async fn try_get(repo: &BackupRepository, url: &str) -> Value {
.fingerprint_cache(true)
.ticket_cache(true);
let client = match HttpClient::new(repo.host(), repo.port(), repo.user(), options) {
let client = match HttpClient::new(repo.host(), repo.port(), repo.auth_id(), options) {
Ok(v) => v,
_ => return Value::Null,
};
@ -1901,6 +1726,33 @@ fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<Stri
result
}
fn complete_auth_id(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
proxmox_backup::tools::runtime::main(async { complete_auth_id_do(param).await })
}
async fn complete_auth_id_do(param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
let repo = match extract_repository_from_map(param) {
Some(v) => v,
_ => return result,
};
let data = try_get(&repo, "api2/json/access/users?include_tokens=true").await;
if let Ok(parsed) = serde_json::from_value::<Vec<UserWithTokens>>(data) {
for user in parsed {
result.push(user.userid.to_string());
for token in user.tokens {
result.push(token.tokenid.to_string());
}
}
};
result
}
use proxmox_backup::client::RemoteChunkReader;
/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
/// async use!
@ -1955,26 +1807,9 @@ fn main() {
.completion_cb("repository", complete_repository)
.completion_cb("keyfile", tools::complete_file_name);
let upload_log_cmd_def = CliCommand::new(&API_METHOD_UPLOAD_LOG)
.arg_param(&["snapshot", "logfile"])
.completion_cb("snapshot", complete_backup_snapshot)
.completion_cb("logfile", tools::complete_file_name)
.completion_cb("keyfile", tools::complete_file_name)
.completion_cb("repository", complete_repository);
let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
.completion_cb("repository", complete_repository);
let snapshots_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOTS)
.arg_param(&["group"])
.completion_cb("group", complete_backup_group)
.completion_cb("repository", complete_repository);
let forget_cmd_def = CliCommand::new(&API_METHOD_FORGET_SNAPSHOTS)
.arg_param(&["snapshot"])
.completion_cb("repository", complete_repository)
.completion_cb("snapshot", complete_backup_snapshot);
let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
.completion_cb("repository", complete_repository);
@ -1985,11 +1820,6 @@ fn main() {
.completion_cb("archive-name", complete_archive_name)
.completion_cb("target", tools::complete_file_name);
let files_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOT_FILES)
.arg_param(&["snapshot"])
.completion_cb("repository", complete_repository)
.completion_cb("snapshot", complete_backup_snapshot);
let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
.arg_param(&["group"])
.completion_cb("group", complete_backup_group)
@ -2010,21 +1840,18 @@ fn main() {
let change_owner_cmd_def = CliCommand::new(&API_METHOD_CHANGE_BACKUP_OWNER)
.arg_param(&["group", "new-owner"])
.completion_cb("group", complete_backup_group)
.completion_cb("new-owner", complete_user_name)
.completion_cb("new-owner", complete_auth_id)
.completion_cb("repository", complete_repository);
let cmd_def = CliCommandMap::new()
.insert("backup", backup_cmd_def)
.insert("upload-log", upload_log_cmd_def)
.insert("forget", forget_cmd_def)
.insert("garbage-collect", garbage_collect_cmd_def)
.insert("list", list_cmd_def)
.insert("login", login_cmd_def)
.insert("logout", logout_cmd_def)
.insert("prune", prune_cmd_def)
.insert("restore", restore_cmd_def)
.insert("snapshots", snapshots_cmd_def)
.insert("files", files_cmd_def)
.insert("snapshot", snapshot_mgtm_cli())
.insert("status", status_cmd_def)
.insert("key", key::cli())
.insert("mount", mount_cmd_def())
@ -2034,7 +1861,13 @@ fn main() {
.insert("task", task_mgmt_cli())
.insert("version", version_cmd_def)
.insert("benchmark", benchmark_cmd_def)
.insert("change-owner", change_owner_cmd_def);
.insert("change-owner", change_owner_cmd_def)
.alias(&["files"], &["snapshot", "files"])
.alias(&["forget"], &["snapshot", "forget"])
.alias(&["upload-log"], &["snapshot", "upload-log"])
.alias(&["snapshots"], &["snapshot", "list"])
;
let rpcenv = CliEnvironment::new();
run_cli_command(cmd_def, rpcenv, Some(|future| {

View File

@ -1,4 +1,5 @@
use std::collections::HashMap;
use std::io::{self, Write};
use anyhow::{format_err, Error};
use serde_json::{json, Value};
@ -62,10 +63,10 @@ fn connect() -> Result<HttpClient, Error> {
let ticket = Ticket::new("PBS", Userid::root_userid())?
.sign(private_auth_key(), None)?;
options = options.password(Some(ticket));
HttpClient::new("localhost", 8007, Userid::root_userid(), options)?
HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)?
} else {
options = options.ticket_cache(true).interactive(true);
HttpClient::new("localhost", 8007, Userid::root_userid(), options)?
HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)?
};
Ok(client)
@ -244,7 +245,7 @@ async fn task_stop(param: Value) -> Result<Value, Error> {
let mut client = connect()?;
let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
let path = format!("api2/json/nodes/localhost/tasks/{}", tools::percent_encode_component(upid_str));
let _ = client.delete(&path, None).await?;
Ok(Value::Null)
@ -354,6 +355,51 @@ async fn verify(
Ok(Value::Null)
}
#[api()]
/// System report
async fn report() -> Result<Value, Error> {
let report = proxmox_backup::server::generate_report();
io::stdout().write_all(report.as_bytes())?;
Ok(Value::Null)
}
#[api(
input: {
properties: {
verbose: {
type: Boolean,
optional: true,
default: false,
description: "Output verbose package information. It is ignored if output-format is specified.",
},
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
}
}
}
)]
/// List package versions for important Proxmox Backup Server packages.
async fn get_versions(verbose: bool, param: Value) -> Result<Value, Error> {
let output_format = get_output_format(&param);
let packages = crate::api2::node::apt::get_versions()?;
let mut packages = json!(if verbose { &packages[..] } else { &packages[1..2] });
let options = default_table_format_options()
.disable_sort()
.noborder(true) // just not helpful for version info, which gets copy-pasted often
.column(ColumnConfig::new("Package"))
.column(ColumnConfig::new("Version"))
.column(ColumnConfig::new("ExtraInfo").header("Extra Info"))
;
let schema = &crate::api2::node::apt::API_RETURN_SCHEMA_GET_VERSIONS;
format_and_print_result_full(&mut packages, schema, &output_format, &options);
Ok(Value::Null)
}
fn main() {
proxmox_backup::tools::setup_safe_path_env();
@ -368,6 +414,7 @@ fn main() {
.insert("remote", remote_commands())
.insert("garbage-collection", garbage_collection_commands())
.insert("cert", cert_mgmt_cli())
.insert("subscription", subscription_commands())
.insert("sync-job", sync_job_commands())
.insert("task", task_mgmt_cli())
.insert(
@ -383,12 +430,18 @@ fn main() {
CliCommand::new(&API_METHOD_VERIFY)
.arg_param(&["store"])
.completion_cb("store", config::datastore::complete_datastore_name)
)
.insert("report",
CliCommand::new(&API_METHOD_REPORT)
)
.insert("versions",
CliCommand::new(&API_METHOD_GET_VERSIONS)
);
let mut rpcenv = CliEnvironment::new();
rpcenv.set_user(Some(String::from("root@pam")));
rpcenv.set_auth_id(Some(String::from("root@pam")));
proxmox_backup::tools::runtime::main(run_async_cli_command(cmd_def, rpcenv));
}
@ -400,29 +453,13 @@ pub fn complete_remote_datastore_name(_arg: &str, param: &HashMap<String, String
let _ = proxmox::try_block!({
let remote = param.get("remote").ok_or_else(|| format_err!("no remote"))?;
let (remote_config, _digest) = config::remote::config()?;
let remote: config::remote::Remote = remote_config.lookup("remote", &remote)?;
let data = crate::tools::runtime::block_on(async move {
crate::api2::config::remote::scan_remote_datastores(remote.clone()).await
})?;
let options = HttpClientOptions::new()
.password(Some(remote.password.clone()))
.fingerprint(remote.fingerprint.clone());
let client = HttpClient::new(
&remote.host,
remote.port.unwrap_or(8007),
&remote.userid,
options,
)?;
let result = crate::tools::runtime::block_on(client.get("api2/json/admin/datastore", None))?;
if let Some(data) = result["data"].as_array() {
for item in data {
if let Some(store) = item["store"].as_str() {
list.push(store.to_owned());
}
}
for item in data {
list.push(item.store);
}
Ok(())

View File

@ -1,4 +1,4 @@
use std::sync::{Arc};
use std::sync::Arc;
use std::path::{Path, PathBuf};
use std::os::unix::io::AsRawFd;
@ -13,7 +13,6 @@ use proxmox::api::RpcEnvironmentType;
use proxmox_backup::{
backup::DataStore,
server::{
UPID,
WorkerTask,
ApiConfig,
rest::*,
@ -30,7 +29,7 @@ use proxmox_backup::{
};
use proxmox_backup::api2::types::Userid;
use proxmox_backup::api2::types::Authid;
use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
@ -41,6 +40,7 @@ use proxmox_backup::tools::{
DiskManage,
zfs_pool_stats,
},
logrotate::LogRotate,
socket::{
set_tcp_keepalive,
PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
@ -49,6 +49,7 @@ use proxmox_backup::tools::{
use proxmox_backup::api2::pull::do_sync_job;
use proxmox_backup::server::do_verification_job;
use proxmox_backup::server::do_prune_job;
fn main() -> Result<(), Error> {
proxmox_backup::tools::setup_safe_path_env();
@ -73,6 +74,10 @@ async fn run() -> Result<(), Error> {
bail!("unable to inititialize syslog - {}", err);
}
// Note: To debug early connection errors use
// PROXMOX_DEBUG=1 ./target/release/proxmox-backup-proxy
let debug = std::env::var("PROXMOX_DEBUG").is_ok();
let _ = public_auth_key(); // load with lazy_static
let _ = csrf_secret(); // load with lazy_static
@ -85,7 +90,6 @@ async fn run() -> Result<(), Error> {
config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
config.add_alias("locale", "/usr/share/pbs-i18n");
config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
config.add_alias("css", "/usr/share/javascript/proxmox-backup/css");
config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");
let mut indexpath = PathBuf::from(buildcfg::JS_DIR);
@ -93,7 +97,9 @@ async fn run() -> Result<(), Error> {
config.register_template("index", &indexpath)?;
config.register_template("console", "/usr/share/pve-xtermjs/index.html.hbs")?;
config.enable_file_log(buildcfg::API_ACCESS_LOG_FN)?;
let mut commando_sock = server::CommandoSocket::new(server::our_ctrl_sock());
config.enable_file_log(buildcfg::API_ACCESS_LOG_FN, &mut commando_sock)?;
let rest_server = RestServer::new(config);
@ -113,25 +119,12 @@ async fn run() -> Result<(), Error> {
let server = daemon::create_daemon(
([0,0,0,0,0,0,0,0], 8007).into(),
|listener, ready| {
let connections = proxmox_backup::tools::async_io::StaticIncoming::from(listener)
.map_err(Error::from)
.try_filter_map(move |(sock, _addr)| {
let acceptor = Arc::clone(&acceptor);
async move {
sock.set_nodelay(true).unwrap();
let _ = set_tcp_keepalive(sock.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
Ok(tokio_openssl::accept(&acceptor, sock)
.await
.ok() // handshake errors aren't fatal, so return None to filter them out
)
}
});
let connections = proxmox_backup::tools::async_io::HyperAccept(connections);
let connections = accept_connections(listener, acceptor, debug);
let connections = hyper::server::accept::from_stream(connections);
Ok(ready
.and_then(|_| hyper::Server::builder(connections)
.and_then(|_| hyper::Server::builder(connections)
.serve(rest_server)
.with_graceful_shutdown(server::shutdown_future())
.map_err(Error::from)
@ -140,12 +133,15 @@ async fn run() -> Result<(), Error> {
.map(|_| ())
)
},
"proxmox-backup-proxy.service",
);
server::write_pid(buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?;
daemon::systemd_notify(daemon::SystemdNotify::Ready)?;
let init_result: Result<(), Error> = try_block!({
server::create_task_control_socket()?;
server::register_task_control_commands(&mut commando_sock)?;
commando_sock.spawn()?;
server::server_state_init()?;
Ok(())
});
@ -165,6 +161,72 @@ async fn run() -> Result<(), Error> {
Ok(())
}
fn accept_connections(
mut listener: tokio::net::TcpListener,
acceptor: Arc<openssl::ssl::SslAcceptor>,
debug: bool,
) -> tokio::sync::mpsc::Receiver<Result<tokio_openssl::SslStream<tokio::net::TcpStream>, Error>> {
const MAX_PENDING_ACCEPTS: usize = 1024;
let (sender, receiver) = tokio::sync::mpsc::channel(MAX_PENDING_ACCEPTS);
let accept_counter = Arc::new(());
tokio::spawn(async move {
loop {
match listener.accept().await {
Err(err) => {
eprintln!("error accepting tcp connection: {}", err);
}
Ok((sock, _addr)) => {
sock.set_nodelay(true).unwrap();
let _ = set_tcp_keepalive(sock.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
let acceptor = Arc::clone(&acceptor);
let mut sender = sender.clone();
if Arc::strong_count(&accept_counter) > MAX_PENDING_ACCEPTS {
eprintln!("connection rejected - to many open connections");
continue;
}
let accept_counter = accept_counter.clone();
tokio::spawn(async move {
let accept_future = tokio::time::timeout(
Duration::new(10, 0), tokio_openssl::accept(&acceptor, sock));
let result = accept_future.await;
match result {
Ok(Ok(connection)) => {
if let Err(_) = sender.send(Ok(connection)).await {
if debug {
eprintln!("detect closed connection channel");
}
}
}
Ok(Err(err)) => {
if debug {
eprintln!("https handshake failed - {}", err);
}
}
Err(_) => {
if debug {
eprintln!("https handshake timeout");
}
}
}
drop(accept_counter); // decrease reference count
});
}
}
}
});
receiver
}
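The Arc above doubles as a cheap pending-connection counter: every in-flight handshake holds a clone, and Arc::strong_count is compared against MAX_PENDING_ACCEPTS before further connections are accepted. A standalone sketch of the same trick outside the tokio/TLS context (hypothetical limit and sleep time, not part of the patch):
use std::sync::Arc;
use std::time::Duration;

fn main() {
    const MAX_PENDING: usize = 3;
    let counter = Arc::new(());
    let mut handles = Vec::new();
    for i in 0..10 {
        // strong_count is 1 (our handle) plus one per task still running
        if Arc::strong_count(&counter) > MAX_PENDING {
            eprintln!("task {} rejected - too many in flight", i);
            continue;
        }
        let guard = Arc::clone(&counter); // held by the task for its lifetime
        handles.push(std::thread::spawn(move || {
            std::thread::sleep(Duration::from_millis(50));
            drop(guard); // releasing the clone lowers the count again
        }));
    }
    for handle in handles {
        let _ = handle.join();
    }
}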
fn start_stat_generator() {
let abort_future = server::shutdown_future();
let future = Box::pin(run_stat_generator());
@ -247,8 +309,6 @@ async fn schedule_datastore_garbage_collection() {
},
};
let email = server::lookup_user_email(Userid::root_userid());
let config = match datastore::config() {
Err(err) => {
eprintln!("unable to read datastore config - {}", err);
@ -291,22 +351,11 @@ async fn schedule_datastore_garbage_collection() {
let worker_type = "garbage_collection";
let stat = datastore.last_gc_status();
let last = if let Some(upid_str) = stat.upid {
match upid_str.parse::<UPID>() {
Ok(upid) => upid.starttime,
Err(err) => {
eprintln!("unable to parse upid '{}' - {}", upid_str, err);
continue;
}
}
} else {
match jobstate::last_run_time(worker_type, &store) {
Ok(time) => time,
Err(err) => {
eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
continue;
}
let last = match jobstate::last_run_time(worker_type, &store) {
Ok(time) => time,
Err(err) => {
eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
continue;
}
};
@ -323,44 +372,15 @@ async fn schedule_datastore_garbage_collection() {
if next > now { continue; }
let mut job = match Job::new(worker_type, &store) {
let job = match Job::new(worker_type, &store) {
Ok(job) => job,
Err(_) => continue, // could not get lock
};
let store2 = store.clone();
let email2 = email.clone();
let auth_id = Authid::root_auth_id();
if let Err(err) = WorkerTask::new_thread(
worker_type,
Some(store.clone()),
Userid::backup_userid().clone(),
false,
move |worker| {
job.start(&worker.upid().to_string())?;
worker.log(format!("starting garbage collection on store {}", store));
worker.log(format!("task triggered by schedule '{}'", event_str));
let result = datastore.garbage_collection(&*worker, worker.upid());
let status = worker.create_state(&result);
if let Err(err) = job.finish(status) {
eprintln!("could not finish job state for {}: {}", worker_type, err);
}
if let Some(email2) = email2 {
let gc_status = datastore.last_gc_status();
if let Err(err) = crate::server::send_gc_status(&email2, datastore.name(), &gc_status, &result) {
eprintln!("send gc notification failed: {}", err);
}
}
result
}
) {
eprintln!("unable to start garbage collection on store {} - {}", store2, err);
if let Err(err) = crate::server::do_garbage_collection_job(job, datastore, auth_id, Some(event_str), false) {
eprintln!("unable to start garbage collection job on datastore {} - {}", store, err);
}
}
}
@ -370,8 +390,6 @@ async fn schedule_datastore_prune() {
use proxmox_backup::{
backup::{
PruneOptions,
BackupGroup,
compute_prune_info,
},
config::datastore::{
self,
@ -388,13 +406,6 @@ async fn schedule_datastore_prune() {
};
for (store, (_, store_config)) in config.sections {
let datastore = match DataStore::lookup_datastore(&store) {
Ok(datastore) => datastore,
Err(err) => {
eprintln!("lookup_datastore '{}' failed - {}", store, err);
continue;
}
};
let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
Ok(c) => c,
@ -422,95 +433,18 @@ async fn schedule_datastore_prune() {
continue;
}
let event = match parse_calendar_event(&event_str) {
Ok(event) => event,
Err(err) => {
eprintln!("unable to parse schedule '{}' - {}", event_str, err);
continue;
}
};
let worker_type = "prune";
if check_schedule(worker_type, &event_str, &store) {
let job = match Job::new(worker_type, &store) {
Ok(job) => job,
Err(_) => continue, // could not get lock
};
let last = match jobstate::last_run_time(worker_type, &store) {
Ok(time) => time,
Err(err) => {
eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
continue;
let auth_id = Authid::root_auth_id().clone();
if let Err(err) = do_prune_job(job, prune_options, store.clone(), &auth_id, Some(event_str)) {
eprintln!("unable to start datastore prune job {} - {}", &store, err);
}
};
let next = match compute_next_event(&event, last, false) {
Ok(Some(next)) => next,
Ok(None) => continue,
Err(err) => {
eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
continue;
}
};
let now = proxmox::tools::time::epoch_i64();
if next > now { continue; }
let mut job = match Job::new(worker_type, &store) {
Ok(job) => job,
Err(_) => continue, // could not get lock
};
let store2 = store.clone();
if let Err(err) = WorkerTask::new_thread(
worker_type,
Some(store.clone()),
Userid::backup_userid().clone(),
false,
move |worker| {
job.start(&worker.upid().to_string())?;
let result = try_block!({
worker.log(format!("Starting datastore prune on store \"{}\"", store));
worker.log(format!("task triggered by schedule '{}'", event_str));
worker.log(format!("retention options: {}", prune_options.cli_options_string()));
let base_path = datastore.base_path();
let groups = BackupGroup::list_groups(&base_path)?;
for group in groups {
let list = group.list_backups(&base_path)?;
let mut prune_info = compute_prune_info(list, &prune_options)?;
prune_info.reverse(); // delete older snapshots first
worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
store, group.backup_type(), group.backup_id()));
for (info, keep) in prune_info {
worker.log(format!(
"{} {}/{}/{}",
if keep { "keep" } else { "remove" },
group.backup_type(), group.backup_id(),
info.backup_dir.backup_time_string()));
if !keep {
datastore.remove_backup_dir(&info.backup_dir, true)?;
}
}
}
Ok(())
});
let status = worker.create_state(&result);
if let Err(err) = job.finish(status) {
eprintln!("could not finish job state for {}: {}", worker_type, err);
}
result
}
) {
eprintln!("unable to start datastore prune on store {} - {}", store2, err);
}
}
}
@@ -543,47 +477,18 @@ async fn schedule_datastore_sync_jobs() {
None => continue,
};
let event = match parse_calendar_event(&event_str) {
Ok(event) => event,
Err(err) => {
eprintln!("unable to parse schedule '{}' - {}", event_str, err);
continue;
}
};
let worker_type = "syncjob";
if check_schedule(worker_type, &event_str, &job_id) {
let job = match Job::new(worker_type, &job_id) {
Ok(job) => job,
Err(_) => continue, // could not get lock
};
let last = match jobstate::last_run_time(worker_type, &job_id) {
Ok(time) => time,
Err(err) => {
eprintln!("could not get last run time of {} {}: {}", worker_type, job_id, err);
continue;
let auth_id = Authid::root_auth_id().clone();
if let Err(err) = do_sync_job(job, job_config, &auth_id, Some(event_str)) {
eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
}
};
let next = match compute_next_event(&event, last, false) {
Ok(Some(next)) => next,
Ok(None) => continue,
Err(err) => {
eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
continue;
}
};
let now = proxmox::tools::time::epoch_i64();
if next > now { continue; }
let job = match Job::new(worker_type, &job_id) {
Ok(job) => job,
Err(_) => continue, // could not get lock
};
let userid = Userid::backup_userid().clone();
if let Err(err) = do_sync_job(job, job_config, &userid, Some(event_str)) {
eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
}
}
}
@@ -613,79 +518,30 @@ async fn schedule_datastore_verify_jobs() {
Some(ref event_str) => event_str.clone(),
None => continue,
};
let event = match parse_calendar_event(&event_str) {
Ok(event) => event,
Err(err) => {
eprintln!("unable to parse schedule '{}' - {}", event_str, err);
continue;
}
};
let worker_type = "verificationjob";
let last = match jobstate::last_run_time(worker_type, &job_id) {
Ok(time) => time,
Err(err) => {
eprintln!("could not get last run time of {} {}: {}", worker_type, job_id, err);
continue;
let auth_id = Authid::root_auth_id().clone();
if check_schedule(worker_type, &event_str, &job_id) {
let job = match Job::new(&worker_type, &job_id) {
Ok(job) => job,
Err(_) => continue, // could not get lock
};
if let Err(err) = do_verification_job(job, job_config, &auth_id, Some(event_str)) {
eprintln!("unable to start datastore verification job {} - {}", &job_id, err);
}
};
let next = match compute_next_event(&event, last, false) {
Ok(Some(next)) => next,
Ok(None) => continue,
Err(err) => {
eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
continue;
}
};
let now = proxmox::tools::time::epoch_i64();
if next > now { continue; }
let job = match Job::new(worker_type, &job_id) {
Ok(job) => job,
Err(_) => continue, // could not get lock
};
let userid = Userid::backup_userid().clone();
if let Err(err) = do_verification_job(job, job_config, &userid, Some(event_str)) {
eprintln!("unable to start datastore verification job {} - {}", &job_id, err);
}
}
}
async fn schedule_task_log_rotate() {
let worker_type = "logrotate";
let job_id = "task_archive";
let last = match jobstate::last_run_time(worker_type, job_id) {
Ok(time) => time,
Err(err) => {
eprintln!("could not get last run time of task log archive rotation: {}", err);
return;
}
};
let job_id = "access-log_and_task-archive";
// schedule daily at 00:00 like normal logrotate
let schedule = "00:00";
let event = match parse_calendar_event(schedule) {
Ok(event) => event,
Err(err) => {
// should not happen?
eprintln!("unable to parse schedule '{}' - {}", schedule, err);
return;
}
};
let next = match compute_next_event(&event, last, false) {
Ok(Some(next)) => next,
Ok(None) => return,
Err(err) => {
eprintln!("compute_next_event for '{}' failed - {}", schedule, err);
return;
}
};
let now = proxmox::tools::time::epoch_i64();
if next > now {
if !check_schedule(worker_type, schedule, job_id) {
// if we never ran the rotation, schedule instantly
match jobstate::JobState::load(worker_type, job_id) {
Ok(state) => match state {
@@ -703,17 +559,16 @@ async fn schedule_task_log_rotate() {
if let Err(err) = WorkerTask::new_thread(
worker_type,
Some(job_id.to_string()),
Userid::backup_userid().clone(),
None,
Authid::root_auth_id().clone(),
false,
move |worker| {
job.start(&worker.upid().to_string())?;
worker.log(format!("starting task log rotation"));
let result = try_block!({
// rotate task log archive
let max_size = 500000; // a normal entry has about 100b, so ~ 5000 entries/file
let max_files = 20; // times twenty files gives at least 100000 task entries
let max_size = 512 * 1024 - 1; // an entry has ~ 100b, so > 5000 entries/file
let max_files = 20; // times twenty files gives > 100000 task entries
let has_rotated = rotate_task_log_archive(max_size, true, Some(max_files))?;
if has_rotated {
worker.log(format!("task log archive was rotated"));
@@ -721,6 +576,28 @@ async fn schedule_task_log_rotate() {
worker.log(format!("task log archive was not rotated"));
}
let max_size = 32 * 1024 * 1024 - 1;
let max_files = 14;
let mut logrotate = LogRotate::new(buildcfg::API_ACCESS_LOG_FN, true)
.ok_or_else(|| format_err!("could not get API access log file names"))?;
if logrotate.rotate(max_size, None, Some(max_files))? {
println!("rotated access log, telling daemons to re-open log file");
proxmox_backup::tools::runtime::block_on(command_reopen_logfiles())?;
worker.log(format!("API access log was rotated"));
} else {
worker.log(format!("API access log was not rotated"));
}
let mut logrotate = LogRotate::new(buildcfg::API_AUTH_LOG_FN, true)
.ok_or_else(|| format_err!("could not get API auth log file names"))?;
if logrotate.rotate(max_size, None, Some(max_files))? {
worker.log(format!("API authentication log was rotated"));
} else {
worker.log(format!("API authentication log was not rotated"));
}
Ok(())
});
@@ -738,6 +615,28 @@ async fn schedule_task_log_rotate() {
}
async fn command_reopen_logfiles() -> Result<(), Error> {
// only care about the most recent daemon instance for each, proxy & api, as other older ones
// should not respond to new requests anyway, but only finish their current one and then exit.
let sock = server::our_ctrl_sock();
let f1 = server::send_command(sock, serde_json::json!({
"command": "api-access-log-reopen",
}));
let pid = server::read_pid(buildcfg::PROXMOX_BACKUP_API_PID_FN)?;
let sock = server::ctrl_sock_from_pid(pid);
let f2 = server::send_command(sock, serde_json::json!({
"command": "api-access-log-reopen",
}));
match futures::join!(f1, f2) {
(Err(e1), Err(e2)) => Err(format_err!("reopen commands failed, proxy: {}; api: {}", e1, e2)),
(Err(e1), Ok(_)) => Err(format_err!("reopen commands failed, proxy: {}", e1)),
(Ok(_), Err(e2)) => Err(format_err!("reopen commands failed, api: {}", e2)),
_ => Ok(()),
}
}
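
A minimal, self-contained sketch of the join-then-combine error handling used in command_reopen_logfiles() above. It is illustrative only: the two async blocks merely stand in for the server::send_command() futures, and it assumes nothing beyond the anyhow and futures crates.

use anyhow::{format_err, Error};

async fn reopen_both() -> Result<(), Error> {
    // Stand-ins for the proxy and api send_command() futures.
    let proxy = async { Ok::<(), Error>(()) };
    let api = async { Err::<(), Error>(format_err!("api socket unavailable")) };

    // futures::join! drives both futures to completion even if one fails, so a
    // broken api daemon does not prevent the proxy access log from being reopened.
    match futures::join!(proxy, api) {
        (Err(e1), Err(e2)) => Err(format_err!("reopen commands failed, proxy: {}; api: {}", e1, e2)),
        (Err(e1), Ok(_)) => Err(format_err!("reopen commands failed, proxy: {}", e1)),
        (Ok(_), Err(e2)) => Err(format_err!("reopen commands failed, api: {}", e2)),
        _ => Ok(()),
    }
}
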
async fn run_stat_generator() {
let mut count = 0;
@@ -850,6 +749,36 @@ async fn generate_host_stats(save: bool) {
});
}
fn check_schedule(worker_type: &str, event_str: &str, id: &str) -> bool {
let event = match parse_calendar_event(event_str) {
Ok(event) => event,
Err(err) => {
eprintln!("unable to parse schedule '{}' - {}", event_str, err);
return false;
}
};
let last = match jobstate::last_run_time(worker_type, &id) {
Ok(time) => time,
Err(err) => {
eprintln!("could not get last run time of {} {}: {}", worker_type, id, err);
return false;
}
};
let next = match compute_next_event(&event, last, false) {
Ok(Some(next)) => next,
Ok(None) => return false,
Err(err) => {
eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
return false;
}
};
let now = proxmox::tools::time::epoch_i64();
next <= now
}
fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str, save: bool) {
match proxmox_backup::tools::disks::disk_usage(path) {


@@ -0,0 +1,73 @@
use anyhow::Error;
use serde_json::{json, Value};
use proxmox::api::{cli::*, RpcEnvironment, ApiHandler};
use proxmox_backup::api2;
use proxmox_backup::tools::subscription;
async fn wait_for_local_worker(upid_str: &str) -> Result<(), Error> {
let upid: proxmox_backup::server::UPID = upid_str.parse()?;
let sleep_duration = core::time::Duration::new(0, 100_000_000);
loop {
if !proxmox_backup::server::worker_is_active_local(&upid) {
break;
}
tokio::time::delay_for(sleep_duration).await;
}
Ok(())
}
/// Daily update
async fn do_update(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let param = json!({});
let method = &api2::node::subscription::API_METHOD_CHECK_SUBSCRIPTION;
let _res = match method.handler {
ApiHandler::Sync(handler) => (handler)(param, method, rpcenv)?,
_ => unreachable!(),
};
let notify = match subscription::read_subscription() {
Ok(Some(subscription)) => subscription.status == subscription::SubscriptionStatus::ACTIVE,
Ok(None) => false,
Err(err) => {
eprintln!("Error reading subscription - {}", err);
false
},
};
let param = json!({
"notify": notify,
});
let method = &api2::node::apt::API_METHOD_APT_UPDATE_DATABASE;
let upid = match method.handler {
ApiHandler::Sync(handler) => (handler)(param, method, rpcenv)?,
_ => unreachable!(),
};
wait_for_local_worker(upid.as_str().unwrap()).await?;
// TODO: certificate checks/renewal/... ?
// TODO: cleanup tasks like in PVE?
Ok(Value::Null)
}
fn main() {
proxmox_backup::tools::setup_safe_path_env();
let mut rpcenv = CliEnvironment::new();
rpcenv.set_auth_id(Some(String::from("root@pam")));
match proxmox_backup::tools::runtime::main(do_update(&mut rpcenv)) {
Err(err) => {
eprintln!("error during update: {}", err);
std::process::exit(1);
},
_ => (),
}
}


@@ -151,7 +151,7 @@ pub async fn benchmark(
let crypt_config = match keyfile {
None => None,
Some(path) => {
let (key, _) = load_and_decrypt_key(&path, &crate::key::get_encryption_key_password)?;
let (key, _, _) = load_and_decrypt_key(&path, &crate::key::get_encryption_key_password)?;
let crypt_config = CryptConfig::new(key)?;
Some(Arc::new(crypt_config))
}
@@ -225,7 +225,7 @@ async fn test_upload_speed(
let backup_time = proxmox::tools::time::epoch_i64();
let client = connect(repo.host(), repo.port(), repo.user())?;
let client = connect(&repo)?;
record_repository(&repo);
if verbose { eprintln!("Connecting to backup server"); }


@@ -73,13 +73,13 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
let crypt_config = match keydata {
None => None,
Some(key) => {
let (key, _created) = decrypt_key(&key, &get_encryption_key_password)?;
let (key, _created, _fingerprint) = decrypt_key(&key, &get_encryption_key_password)?;
let crypt_config = CryptConfig::new(key)?;
Some(Arc::new(crypt_config))
}
};
let client = connect(repo.host(), repo.port(), repo.user())?;
let client = connect(&repo)?;
let client = BackupReader::start(
client,
@@ -92,6 +92,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
).await?;
let (manifest, _) = client.download_manifest().await?;
manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;
@@ -153,7 +154,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
/// Shell to interactively inspect and restore snapshots.
async fn catalog_shell(param: Value) -> Result<(), Error> {
let repo = extract_repository_from_value(&param)?;
let client = connect(repo.host(), repo.port(), repo.user())?;
let client = connect(&repo)?;
let path = tools::required_string_param(&param, "snapshot")?;
let archive_name = tools::required_string_param(&param, "archive-name")?;
@@ -170,7 +171,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
let crypt_config = match keydata {
None => None,
Some(key) => {
let (key, _created) = decrypt_key(&key, &get_encryption_key_password)?;
let (key, _created, _fingerprint) = decrypt_key(&key, &get_encryption_key_password)?;
let crypt_config = CryptConfig::new(key)?;
Some(Arc::new(crypt_config))
}
@@ -199,6 +200,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
.open("/tmp")?;
let (manifest, _) = client.download_manifest().await?;
manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
let most_used = index.find_most_used_chunks(8);


@@ -4,14 +4,28 @@ use std::process::{Stdio, Command};
use anyhow::{bail, format_err, Error};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use proxmox::api::api;
use proxmox::api::cli::{CliCommand, CliCommandMap};
use proxmox::api::cli::{
ColumnConfig,
CliCommand,
CliCommandMap,
format_and_print_result_full,
get_output_format,
OUTPUT_FORMAT,
};
use proxmox::sys::linux::tty;
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
use proxmox_backup::backup::{
encrypt_key_with_passphrase, load_and_decrypt_key, store_key_config, KeyConfig,
encrypt_key_with_passphrase,
load_and_decrypt_key,
store_key_config,
CryptConfig,
Kdf,
KeyConfig,
KeyDerivationConfig,
};
use proxmox_backup::tools;
@@ -71,27 +85,6 @@ pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
bail!("no password input mechanism available");
}
#[api(
default: "scrypt",
)]
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
/// Key derivation function for password protected encryption keys.
pub enum Kdf {
/// Do not encrypt the key.
None,
/// Encrypt the key with a password using SCrypt.
Scrypt,
}
impl Default for Kdf {
#[inline]
fn default() -> Self {
Kdf::Scrypt
}
}
#[api(
input: {
properties: {
@@ -120,7 +113,10 @@ fn create(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
let kdf = kdf.unwrap_or_default();
let key = proxmox::sys::linux::random_data(32)?;
let mut key_array = [0u8; 32];
proxmox::sys::linux::fill_with_random_data(&mut key_array)?;
let crypt_config = CryptConfig::new(key_array.clone())?;
let key = key_array.to_vec();
match kdf {
Kdf::None => {
@@ -134,10 +130,11 @@ fn create(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
created,
modified: created,
data: key,
fingerprint: Some(crypt_config.fingerprint()),
},
)?;
}
Kdf::Scrypt => {
Kdf::Scrypt | Kdf::PBKDF2 => {
// always read passphrase from tty
if !tty::stdin_isatty() {
bail!("unable to read passphrase - no tty");
@@ -145,7 +142,8 @@ fn create(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
let password = tty::read_and_verify_password("Encryption Key Password: ")?;
let key_config = encrypt_key_with_passphrase(&key, &password)?;
let mut key_config = encrypt_key_with_passphrase(&key, &password, kdf)?;
key_config.fingerprint = Some(crypt_config.fingerprint());
store_key_config(&path, false, key_config)?;
}
@@ -188,7 +186,7 @@ fn change_passphrase(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error
bail!("unable to change passphrase - no tty");
}
let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
let (key, created, fingerprint) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
match kdf {
Kdf::None => {
@@ -202,14 +200,16 @@ fn change_passphrase(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error
created, // keep original value
modified,
data: key.to_vec(),
fingerprint: Some(fingerprint),
},
)?;
}
Kdf::Scrypt => {
Kdf::Scrypt | Kdf::PBKDF2 => {
let password = tty::read_and_verify_password("New Password: ")?;
let mut new_key_config = encrypt_key_with_passphrase(&key, &password)?;
let mut new_key_config = encrypt_key_with_passphrase(&key, &password, kdf)?;
new_key_config.created = created; // keep original value
new_key_config.fingerprint = Some(fingerprint);
store_key_config(&path, true, new_key_config)?;
}
@@ -218,6 +218,91 @@ fn change_passphrase(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error
Ok(())
}
#[api(
properties: {
kdf: {
type: Kdf,
},
},
)]
#[derive(Deserialize, Serialize)]
/// Encryption Key Information
struct KeyInfo {
/// Path to key
path: String,
kdf: Kdf,
/// Key creation time
pub created: i64,
/// Key modification time
pub modified: i64,
/// Key fingerprint
pub fingerprint: Option<String>,
}
#[api(
input: {
properties: {
path: {
description: "Key file. Without this the default key's metadata will be shown.",
optional: true,
},
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
},
},
)]
/// Print the encryption key's metadata.
fn show_key(
path: Option<String>,
param: Value,
) -> Result<(), Error> {
let path = match path {
Some(path) => PathBuf::from(path),
None => {
let path = find_default_encryption_key()?
.ok_or_else(|| {
format_err!("no encryption file provided and no default file found")
})?;
path
}
};
let config: KeyConfig = serde_json::from_slice(&file_get_contents(path.clone())?)?;
let output_format = get_output_format(&param);
let info = KeyInfo {
path: format!("{:?}", path),
kdf: match config.kdf {
Some(KeyDerivationConfig::PBKDF2 { .. }) => Kdf::PBKDF2,
Some(KeyDerivationConfig::Scrypt { .. }) => Kdf::Scrypt,
None => Kdf::None,
},
created: config.created,
modified: config.modified,
fingerprint: match config.fingerprint {
Some(ref fp) => Some(format!("{}", fp)),
None => None,
},
};
let options = proxmox::api::cli::default_table_format_options()
.column(ColumnConfig::new("path"))
.column(ColumnConfig::new("kdf"))
.column(ColumnConfig::new("created").renderer(tools::format::render_epoch))
.column(ColumnConfig::new("modified").renderer(tools::format::render_epoch))
.column(ColumnConfig::new("fingerprint"));
let schema = &KeyInfo::API_SCHEMA;
format_and_print_result_full(&mut serde_json::to_value(info)?, schema, &output_format, &options);
Ok(())
}
#[api(
input: {
properties: {
@@ -313,13 +398,47 @@ fn paper_key(
};
let data = file_get_contents(&path)?;
let data = std::str::from_utf8(&data)?;
let data = String::from_utf8(data)?;
let (data, is_private_key) = if data.starts_with("-----BEGIN ENCRYPTED PRIVATE KEY-----\n") {
let lines: Vec<String> = data
.lines()
.map(|s| s.trim_end())
.filter(|s| !s.is_empty())
.map(String::from)
.collect();
if !lines[lines.len()-1].starts_with("-----END ENCRYPTED PRIVATE KEY-----") {
bail!("unexpected key format");
}
if lines.len() < 20 {
bail!("unexpected key format");
}
(lines, true)
} else {
match serde_json::from_str::<KeyConfig>(&data) {
Ok(key_config) => {
let lines = serde_json::to_string_pretty(&key_config)?
.lines()
.map(String::from)
.collect();
(lines, false)
},
Err(err) => {
eprintln!("Couldn't parse '{:?}' as KeyConfig - {}", path, err);
bail!("Neither a PEM-formatted private key, nor a PBS key file.");
},
}
};
let format = output_format.unwrap_or(PaperkeyFormat::Html);
match format {
PaperkeyFormat::Html => paperkey_html(data, subject),
PaperkeyFormat::Text => paperkey_text(data, subject),
PaperkeyFormat::Html => paperkey_html(&data, subject, is_private_key),
PaperkeyFormat::Text => paperkey_text(&data, subject, is_private_key),
}
}
@@ -337,6 +456,10 @@ pub fn cli() -> CliCommandMap {
.arg_param(&["path"])
.completion_cb("path", tools::complete_file_name);
let key_show_cmd_def = CliCommand::new(&API_METHOD_SHOW_KEY)
.arg_param(&["path"])
.completion_cb("path", tools::complete_file_name);
let paper_key_cmd_def = CliCommand::new(&API_METHOD_PAPER_KEY)
.arg_param(&["path"])
.completion_cb("path", tools::complete_file_name);
@@ -346,10 +469,11 @@ pub fn cli() -> CliCommandMap {
.insert("create-master-key", key_create_master_key_cmd_def)
.insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
.insert("change-passphrase", key_change_passphrase_cmd_def)
.insert("show", key_show_cmd_def)
.insert("paperkey", paper_key_cmd_def)
}
fn paperkey_html(data: &str, subject: Option<String>) -> Result<(), Error> {
fn paperkey_html(lines: &[String], subject: Option<String>, is_private: bool) -> Result<(), Error> {
let img_size_pt = 500;
@@ -378,21 +502,7 @@ fn paperkey_html(data: &str, subject: Option<String>) -> Result<(), Error> {
println!("<p>Subject: {}</p>", subject);
}
if data.starts_with("-----BEGIN ENCRYPTED PRIVATE KEY-----\n") {
let lines: Vec<String> = data.lines()
.map(|s| s.trim_end())
.filter(|s| !s.is_empty())
.map(String::from)
.collect();
if !lines[lines.len()-1].starts_with("-----END ENCRYPTED PRIVATE KEY-----") {
bail!("unexpected key format");
}
if lines.len() < 20 {
bail!("unexpected key format");
}
if is_private {
const BLOCK_SIZE: usize = 20;
let blocks = (lines.len() + BLOCK_SIZE -1)/BLOCK_SIZE;
@@ -413,8 +523,7 @@ fn paperkey_html(data: &str, subject: Option<String>) -> Result<(), Error> {
println!("</p>");
let data = data.join("\n");
let qr_code = generate_qr_code("svg", data.as_bytes())?;
let qr_code = generate_qr_code("svg", data)?;
let qr_code = base64::encode_config(&qr_code, base64::STANDARD_NO_PAD);
println!("<center>");
@@ -430,16 +539,13 @@ fn paperkey_html(data: &str, subject: Option<String>) -> Result<(), Error> {
return Ok(());
}
let key_config: KeyConfig = serde_json::from_str(&data)?;
let key_text = serde_json::to_string_pretty(&key_config)?;
println!("<div style=\"page-break-inside: avoid\">");
println!("<p>");
println!("-----BEGIN PROXMOX BACKUP KEY-----");
for line in key_text.lines() {
for line in lines {
println!("{}", line);
}
@@ -447,7 +553,7 @@ fn paperkey_html(data: &str, subject: Option<String>) -> Result<(), Error> {
println!("</p>");
let qr_code = generate_qr_code("svg", key_text.as_bytes())?;
let qr_code = generate_qr_code("svg", lines)?;
let qr_code = base64::encode_config(&qr_code, base64::STANDARD_NO_PAD);
println!("<center>");
@@ -464,27 +570,13 @@ fn paperkey_html(data: &str, subject: Option<String>) -> Result<(), Error> {
Ok(())
}
fn paperkey_text(data: &str, subject: Option<String>) -> Result<(), Error> {
fn paperkey_text(lines: &[String], subject: Option<String>, is_private: bool) -> Result<(), Error> {
if let Some(subject) = subject {
println!("Subject: {}\n", subject);
}
if data.starts_with("-----BEGIN ENCRYPTED PRIVATE KEY-----\n") {
let lines: Vec<String> = data.lines()
.map(|s| s.trim_end())
.filter(|s| !s.is_empty())
.map(String::from)
.collect();
if !lines[lines.len()-1].starts_with("-----END ENCRYPTED PRIVATE KEY-----") {
bail!("unexpected key format");
}
if lines.len() < 20 {
bail!("unexpected key format");
}
if is_private {
const BLOCK_SIZE: usize = 5;
let blocks = (lines.len() + BLOCK_SIZE -1)/BLOCK_SIZE;
@@ -499,8 +591,7 @@ fn paperkey_text(data: &str, subject: Option<String>) -> Result<(), Error> {
for l in start..end {
println!("{:-2}: {}", l, lines[l]);
}
let data = data.join("\n");
let qr_code = generate_qr_code("utf8i", data.as_bytes())?;
let qr_code = generate_qr_code("utf8i", data)?;
let qr_code = String::from_utf8(qr_code)
.map_err(|_| format_err!("Failed to read qr code (got non-utf8 data)"))?;
println!("{}", qr_code);
@@ -510,14 +601,13 @@ fn paperkey_text(data: &str, subject: Option<String>) -> Result<(), Error> {
return Ok(());
}
let key_config: KeyConfig = serde_json::from_str(&data)?;
let key_text = serde_json::to_string_pretty(&key_config)?;
println!("-----BEGIN PROXMOX BACKUP KEY-----");
println!("{}", key_text);
for line in lines {
println!("{}", line);
}
println!("-----END PROXMOX BACKUP KEY-----");
let qr_code = generate_qr_code("utf8i", key_text.as_bytes())?;
let qr_code = generate_qr_code("utf8i", &lines)?;
let qr_code = String::from_utf8(qr_code)
.map_err(|_| format_err!("Failed to read qr code (got non-utf8 data)"))?;
@@ -526,8 +616,7 @@ fn paperkey_text(data: &str, subject: Option<String>) -> Result<(), Error> {
Ok(())
}
fn generate_qr_code(output_type: &str, data: &[u8]) -> Result<Vec<u8>, Error> {
fn generate_qr_code(output_type: &str, lines: &[String]) -> Result<Vec<u8>, Error> {
let mut child = Command::new("qrencode")
.args(&["-t", output_type, "-m0", "-s1", "-lm", "--output", "-"])
.stdin(Stdio::piped())
@@ -537,7 +626,8 @@ fn generate_qr_code(output_type: &str, data: &[u8]) -> Result<Vec<u8>, Error> {
{
let stdin = child.stdin.as_mut()
.ok_or_else(|| format_err!("Failed to open stdin"))?;
stdin.write_all(data)
let data = lines.join("\n");
stdin.write_all(data.as_bytes())
.map_err(|_| format_err!("Failed to write to stdin"))?;
}
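
For reference, here is the write-to-child-stdin pattern from generate_qr_code() in isolation: a minimal sketch using only the standard library, with cat standing in for qrencode and a hypothetical helper name pipe_through.

use std::io::Write;
use std::process::{Command, Stdio};

fn pipe_through(lines: &[String]) -> std::io::Result<Vec<u8>> {
    let mut child = Command::new("cat") // stand-in for qrencode
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .spawn()?;
    {
        // The scope only ends the mutable borrow of `child`; wait_with_output()
        // below closes the stdin handle before waiting, so the child sees EOF.
        let stdin = child.stdin.as_mut().expect("stdin was requested above");
        stdin.write_all(lines.join("\n").as_bytes())?;
    }
    let output = child.wait_with_output()?;
    Ok(output.stdout)
}
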


@@ -8,6 +8,8 @@ mod task;
pub use task::*;
mod catalog;
pub use catalog::*;
mod snapshot;
pub use snapshot::*;
pub mod key;

Some files were not shown because too many files have changed in this diff.