Compare commits

963 Commits

64394b0de8 bump version to 1.0.7-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-02-03 10:36:18 +01:00
2f617a4548 docs: tfa: add screenshots
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-02-03 10:36:18 +01:00
2ba64bed18 ui: tfa: fix emptyText for password
One needs to enter their own password, not that of the user whose
TFA one adds/deletes.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-02-03 10:36:18 +01:00
cafccb5991 d/control: update
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-02-03 10:36:18 +01:00
b22e8c3632 tape: add media pool regression tests 2021-02-03 10:23:04 +01:00
7929292618 tape: add regression test for media state 2021-02-03 09:34:31 +01:00
0d4e4cae7f tape: improve pmt command line completion 2021-02-03 08:54:12 +01:00
f4ba2e3155 depend on proxmox 0.10.1 2021-02-03 08:53:34 +01:00
7101ed6e27 ui: tape: add TapeInventory panel
since we do not show the tapes anymore in the BackupOverview, add
another panel where we can list the available tapes in the inventory

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-02-02 14:47:35 +01:00
85ac35aa9a ui: tape: add Restore Window
in the BackupOverview, when a media-set is selected

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-02-02 14:47:21 +01:00
40590561fe ui: tape: TapeBackupWindow: add missing DriveSelector
and make it a bit wider

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-02-02 14:47:05 +01:00
631e550920 ui: tape: rework BackupOverview
instead of grouping by tape (which is rarely interesting),
group by pool -> group -> id -> mediaset

this way a user looking for a backup of a specific VM can do just that

we may want to have an additional view here where we list all snapshots
included in the selected media-set?

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-02-02 14:46:43 +01:00
f806c0effa ui: refactor get_type_icon_cls
we need this later again

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-02-02 14:46:15 +01:00
50a4797fb1 api2/types/tape/media: add media_set_ctime to MediaContentEntry
to be able to better sort in the ui

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-02-02 14:45:54 +01:00
cc2a0b12f8 test: define tape tests as submodule 2021-02-02 14:38:15 +01:00
988e8de122 tape: set correct ownership on lock file 2021-02-02 14:18:57 +01:00
2f8809c6bc test: src/tape/inventory.rs - avoid chown when running tests 2021-02-02 13:43:16 +01:00
92b7775fa1 fix debian/control 2021-02-02 12:33:00 +01:00
f4d231e70a test: add regression tests for tape inventory 2021-02-02 12:19:28 +01:00
b419050aa7 bump pxar to 0.8
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-02-02 11:02:08 +01:00
8937c65951 tape: add pmt stoptions/stsethoptions/stclearoptions 2021-02-02 08:58:02 +01:00
6c6ad82d90 tape: add pmt setblk 2021-02-02 07:19:54 +01:00
d0f11b66f7 tape: add read_tapedev_options, display driver options with status command 2021-02-02 06:40:40 +01:00
f9fcac51a5 docs: add initial TFA documentation
better than nothing..

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-02-01 19:46:24 +01:00
ca953d831f cleanup: remove MT_ST_ prefix from SetDrvBufferOptions 2021-02-01 17:54:53 +01:00
01c023d50f paperkey: rustfmt
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-02-01 17:05:40 +01:00
c2113a405e paperkey: simplify block generation
the chunk-iterator already does exactly what we want here..

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-02-01 17:05:32 +01:00
5dae81d199 paperkey: allow RSA keys without passphrase
some users might want to store the plain version of their master key for
long-term storage and rely on physical security instead of a passphrase
to protect the paper key.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-02-01 17:05:22 +01:00
bd768c3320 ui: tfa: adapt low recovery key hint, drop unused other hint
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-02-01 15:39:56 +01:00
572fc035a2 ui: webauthn: add notes/warnings for better UX
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-02-01 15:37:47 +01:00
99b2f045af ui: tfa: add auto-fill button for webAuthn setup
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-02-01 15:37:47 +01:00
6248e51797 change half-ticket time range from -120..240 to -60..600
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-02-01 15:13:11 +01:00
19e4a36c70 tape: do not use drive.open() within pmt
Do not fail if no media is loaded. Important for the load command.
2021-02-01 12:39:50 +01:00
90769e5694 tape: add pmt lock/unlock 2021-02-01 12:18:55 +01:00
b8cbe5d65b tape: fix tape alert flag decoding 2021-02-01 12:18:55 +01:00
35c95ca653 bump apt-pkg-native dependency
our patches got applied upstream, and a release was cut, so we no longer
need to depend on a manually patched version here.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-02-01 11:53:25 +01:00
2dbc1a9a55 ui: tfa: improve button text for webAuthn
So users know what to press to start off a webauthn challenge.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-02-01 11:48:43 +01:00
dceecb0bbf debcargo: fix maintainer directive
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-02-01 11:21:21 +01:00
d690d14568 tape: add pmt bsr/fsr 2021-02-01 10:39:04 +01:00
85ef624440 tape: add pmt asf 2021-02-01 10:32:21 +01:00
e995996290 tape: pmt - fix count parameter schema 2021-02-01 10:21:25 +01:00
8e6ad4301d tape: add pmt fsfm/bsfm, pass count as arg_param 2021-02-01 10:18:18 +01:00
86740dfc89 tape: ui - remove drive from pool config 2021-02-01 10:01:06 +01:00
1399c592d1 garbage_collection: only ignore 'missing chunk' errors
with the fix for #2909 (improving handling of missing chunks), we
changed from bailing to warning during a garbage collection when
updating the atime of a chunk.

but, updating the atime can not only fail when the chunk is missing,
but also on other occasions, e.g. no permissions or more importantly,
no space left on the device. in that case, the atime of a valid and used
chunk cannot be updated, and the second sweep of the gc will remove that chunk.
[0] is a real world example of that happening.

instead, only warn on really missing chunks, and bail on all other
errors.

0: https://forum.proxmox.com/threads/pbs-server-full-two-days-later-almost-empty.83274/

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-02-01 09:18:59 +01:00
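
The rule can be sketched as follows (an illustration, not the actual proxmox-backup code): only a missing chunk is downgraded to a warning, every other error aborts the garbage collection.

    use std::io;
    use std::path::Path;

    // Hypothetical helper: the result of the atime update is inspected here.
    // ENOENT (missing chunk) becomes a counted warning; anything else, such as
    // EACCES or ENOSPC, is returned as an error so the GC bails out and valid
    // chunks are never swept by mistake.
    fn check_atime_update(
        path: &Path,
        result: io::Result<()>,
        warnings: &mut usize,
    ) -> io::Result<()> {
        match result {
            Ok(()) => Ok(()),
            Err(err) if err.kind() == io::ErrorKind::NotFound => {
                eprintln!("warning: chunk {} is missing", path.display());
                *warnings += 1;
                Ok(())
            }
            Err(err) => Err(err), // e.g. permission denied, no space left on device
        }
    }
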
9883b54cba tape: remove drive from pool config 2021-02-01 09:14:28 +01:00
83b8949a98 tape: add pmt weof 2021-01-31 17:33:07 +01:00
28f60e5291 cleanup: avoid compiler warnings 2021-01-31 17:02:55 +01:00
1f31d06f48 tape: add pmt bsf 2021-01-31 17:00:15 +01:00
2f2e83c890 tape: add pmt fsf 2021-01-31 16:54:16 +01:00
b22c618734 tape: add pmt erase 2021-01-31 16:34:10 +01:00
1e041082bb tape: add pmt command line tool
Experimental, not installed for now.
2021-01-31 16:19:53 +01:00
a57ce270ac postinst: add user backup to group tape
So that it is possible to access tape and changer devices.
2021-01-30 11:48:49 +01:00
b5b99a52cd tape: API type cleanup, use serde flatten to derive types 2021-01-30 09:36:54 +01:00
9586ce2f46 tape: move scan_drives API code to correct file 2021-01-30 08:03:17 +01:00
b8d526f18d ui: tape/ChangerStatus - use POST for barcode-label-media 2021-01-29 17:06:53 +01:00
d2edc68ead ui: tape/ChangerStatus: add missing tooltips
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-29 16:54:37 +01:00
4d651378e2 ui: tape: change wrong window title
this is the 'status' msgbox not the label information

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-29 16:54:19 +01:00
58791864d7 ui: tape/ChangerStatus: add import action for import/export slots
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-29 16:54:03 +01:00
1a41e9af4f ui: tape: add Changer config grid
analogous to the drive grid

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-29 16:53:33 +01:00
c297835b01 tape: proxmox-tape - use API instead of direct functions calls 2021-01-29 11:49:11 +01:00
e68269fcaf tape: proxmox-tape inventory: call API 2021-01-29 11:21:57 +01:00
5243df4712 tape: proxmox-tape - use API instead of direct functions calls 2021-01-29 10:50:11 +01:00
4470eba551 cleanup: factor out common client code to view task log/result 2021-01-29 10:10:04 +01:00
1f2c4713ef tape: improve backup task abort behaviour 2021-01-29 09:23:39 +01:00
a6c16894ff worker_task: log something when we receive an abort request 2021-01-29 09:22:37 +01:00
271764deb9 tape: make it possible to abort tape backup tasks (check_abort)
Also use the task_log macro instead of worker.log.
2021-01-29 09:07:55 +01:00
52f7a73009 display_task_log: make it possible to abort tasks with CTRL-C 2021-01-29 09:06:15 +01:00
bdb6e6b83f api2/reader: asyncify the reader worker task
this way, the code is much more readable

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-29 06:59:25 +01:00
41dacd5d3d tape: use worker task for eject-media api 2021-01-28 16:49:08 +01:00
eb1dfb02b5 tape: proxmox-tape - use api for erase-media and rewind 2021-01-28 16:36:10 +01:00
1a0eb86344 tape: gui: s/encryption/encrypt/ in media pool config panel 2021-01-28 15:50:01 +01:00
bdb62b20a3 tape: media_pool config api - set protected flags where required 2021-01-28 15:42:32 +01:00
f2ca03d7d0 cleanup: avoid compiler warning 2021-01-28 15:32:21 +01:00
00ac86c31b tape/drive/linux_tape: fix and refactor usage of sg-tape-cmd
when executing this code as non-root, we use sg-tape-cmd (a setuid binary)
to execute various ioctls on the tape device

we give the command the open tape device fd as stdin, but did not
dup it, so the std::process::Stdio handle closed it on drop,
which let subsequent operations on that file fail (since it was closed)

fix it by dup'ing it before giving it to the command, and also refactor
the calling code, so that we do not forget to do this

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-28 15:24:32 +01:00
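
A minimal sketch of the dup-before-spawn fix described above (the helper name and arguments are placeholders; it assumes the libc crate is available):

    use std::fs::File;
    use std::io;
    use std::os::unix::io::{AsRawFd, FromRawFd};
    use std::process::{Command, Output, Stdio};

    // Duplicate the tape device fd and hand the duplicate to the child as stdin.
    // Dropping the child's Stdio then closes only the duplicate, so the original
    // handle stays valid for subsequent operations.
    fn run_sg_tape_cmd(tape_dev: &File, args: &[&str]) -> io::Result<Output> {
        let dup_fd = unsafe { libc::dup(tape_dev.as_raw_fd()) };
        if dup_fd < 0 {
            return Err(io::Error::last_os_error());
        }
        let stdin = unsafe { Stdio::from_raw_fd(dup_fd) }; // child owns the duplicate
        Command::new("sg-tape-cmd").args(args).stdin(stdin).output()
    }
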
627d000098 tape: change changer-drive-id to changer-drivenum
because it changed in the config

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-28 15:11:22 +01:00
4be4736603 tape/changer: refactor marking of import/export slots from config
we did this for 'mtx', but missed it for the sg_pt_changer code.
refactor it into the MtxStatus struct, and call it from both
code paths

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-28 15:10:55 +01:00
2da7aca8e8 tape/changer: add vendor/model to DriveStatus
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-28 15:10:31 +01:00
8306b8b1a5 ui: tape: use panels in tape interface
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-28 15:08:56 +01:00
605cfd4ab1 ui: tape: move TapeManagement.js to tape dir
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-28 15:08:31 +01:00
dec3147501 ui: tape: add PoolConfig
CRUD interface to manage media pools

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-28 15:08:21 +01:00
c642aec128 ui: tape: add DriveConfig panel
mostly typical CRUD interface for managing drives, with an
additional actioncolumn containing some useful actions, e.g.
* reading the label
* show volume-statistics
* show the status
* label the inserted tape

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-28 15:08:08 +01:00
fd9aa8dfa2 ui: tape: add ChangerStatus panel
this lets the users manage changers and lets them view the status of one
by having an overview of:
* slots for tapes
* import/export slots
* drives

lets the user:
* barcode-label all the tapes in the library
* move tapes between slots, into/out of drives
* show some basic info when a tape is loaded into a drive
* show the status of a drive
* clean a drive

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-28 15:07:57 +01:00
07d6c0967d ui: tape: add BackupOverview Panel
shows all tapes with the relevant info
* which pool it belongs to
* what backups are on it
* which media-set
* location
* etc.

This is very rough, and maybe not the best way to display this information.
It may make sense to reverse the tree, i.e. having pools at top-level,
then media-sets, then tapes, then snapshots..

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-28 15:07:44 +01:00
80a3749088 ui: tape: add Edit Windows
includes edit windows for
* Drives
* Changers
* Media Pools
* Labeling Media
* Making new Tape Backups

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-28 15:07:29 +01:00
c72fdb53ae ui: tape: add form fields
this includes selectors for
* Allocation Policy
* Retention Policy
* Drives
* Changers
* Tape Device Paths
* Pools

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-28 15:07:09 +01:00
b03ec281bf api2/config/{drive, changer}: prevent adding same device multiple times
this check is not perfect since there are often multiple device
nodes per drive/changer, but from the scan api we should always return
the same, so for an api user this should be enough

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-28 15:03:56 +01:00
cef4654ff4 api2/tape/drive: change methods of some api calls from put to get
makes more sense to have retrieval api calls as GET instead of PUT

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-28 15:02:52 +01:00
f45dceeb73 api2/tape/drive: add load_media as api call
code was already there, just add it as api call

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-28 15:02:13 +01:00
18262a88c9 api2/tape/changer: add changer filter to list_drives api call
so that an api user can get the drives belonging to a changer
without having to parse the config listing themselves

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-28 15:01:41 +01:00
87f4be7998 tape: use api to run proxmox-tape backup 2021-01-28 14:56:42 +01:00
d737adc6be tape: rename changer_drive_id to changer_drivenum 2021-01-28 11:29:59 +01:00
5fdaecf6f4 api2/tape/drive: reorganize drive api
similar to the changers, create a listing at /tape/drive and put
the specific api calls below that

move the scan api call up one level

remove the status info from the config listing

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-28 11:15:17 +01:00
d8792b88ef api2/types/tape/drive: add changer_drivenum
so that an api user can see which drive belongs to which drivenum of a changer
for ones with multiple drives

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-28 11:14:28 +01:00
8b1174f50a ui: tfa: drop useless extjs state save handling
was replaced with our own, not much more code and actually works.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-27 20:20:35 +01:00
8c8f7b5a09 ui: tfa: disable confirm during handling of challenge
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-27 20:20:35 +01:00
44915932d5 ui: tfa: webauthn: move spinning icon down to waiting message
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-27 20:20:35 +01:00
e90fdf5bed ui: tfa: make webAuthn abortable and restartable
Fix two things:
* do not reject the login promise when we get the abort DOMException
  error
* safely save the original challenge string, as we work on a reference
  here, and avoid converting to a Uint8Array twice to avoid an
  exception.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-27 20:20:35 +01:00
a11c8ab485 ui: tfa: only immediately trigger webAuthn when it's the initial tab
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-27 19:38:40 +01:00
74a50158ca ui: tfa: drop bogus console.error
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-27 19:38:08 +01:00
6ee85d57be ui: tfa: save last used TFA method and prefer it next time
simple heuristic for those people who always prefer a specific TFA
method and have the others only as backup.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-27 18:45:36 +01:00
b2fc6f9228 fix build: commit missing file 2021-01-27 18:13:58 +01:00
f91481eded ui: rework TFA prompt on login
Improve UX by avoiding the need to click some buttons twice, by no
longer calling both TOTP and recovery codes "OTP" codes, and by not
showing multiple buttons that all have the same goal ("submit a TFA
token") at the same time.

Instead use a tab panel with a single submit button.

WebAuthn can and should still be improved, but that can be done as a
followup.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-27 13:21:25 +01:00
651a61f559 pmtx: implement scan command 2021-01-27 12:40:51 +01:00
b06edeca02 remove generated file synopsis.rst (no need to track in git) 2021-01-27 12:38:02 +01:00
89ccb125d1 tape: use 36 byte Inquiry (recommended size) 2021-01-27 12:35:28 +01:00
c972704477 install pmtx binary 2021-01-27 11:36:15 +01:00
887f1cb90c cleanup: move scan changers API implementation 2021-01-27 09:58:16 +01:00
16b4d78400 tape: rename retry_command to execute_scsi_command, make retry a flag 2021-01-27 09:34:24 +01:00
ec8d9c6b80 tape: repeat changer scsi command until successful 2021-01-27 08:59:10 +01:00
49c2d1dcad sgutils2: use sg_get_asc_ascq_str to produce error messages 2021-01-27 06:56:11 +01:00
d0f51651f9 sgutils2: add ASC codes from tandeberg docs 2021-01-26 18:54:08 +01:00
481ccf16a5 sgutils2: further improve error messages 2021-01-26 15:19:43 +01:00
a223458753 sgutils2: support RequestSense Descriptor format 2021-01-26 13:38:16 +01:00
e1740f3f01 tape/changer/mtx: add mtx parser test
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-26 12:51:26 +01:00
740dc9d1d4 api2/tape/changer: reorganize api
add a changer listing here (copied from api2/config/changer)
and put the status and transfer api calls below that

puts the changer scan into the top level tape api
and removes the (now redundant) info from the config api path

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-26 12:47:34 +01:00
bbf01b644c tape: fix typos
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-26 12:39:54 +01:00
66d066964c docs/tape: fix some typos and improve wording
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-26 12:39:06 +01:00
c81c46c336 sgutils2: improve error messages 2021-01-26 12:24:58 +01:00
c3747b93c8 tape: add new command line tool "pmtx"
Also improve sgutil2 error reporting
2021-01-26 11:57:15 +01:00
d43265b7f1 ui: add missing uri encoding in user edit and view
the userid parameter needs to be properly encoded when shown in the browser

Signed-off-by: Oguz Bektas <o.bektas@proxmox.com>
Reviewed-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-26 10:53:30 +01:00
6864fd0149 server/worker_task: improve newline handling in upid_read_status
improves upid_read_status with:
* ignore multiple newlines at the end
* remove all code that could panic (array index access)
  the one place where we access with '[pos+1..]' is ok since
  we explicitly test the len of the vector; this is done to
  let rust optimize away the range checks, so it cannot panic

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-26 10:48:15 +01:00
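
The pattern referred to above looks roughly like this (illustrative, not the actual upid_read_status code): trim trailing newlines, then slice only at an index that is valid by construction.

    // Return the last line of a task status file, ignoring any number of
    // trailing newlines; no slice index here can go out of bounds.
    fn last_status_line(data: &str) -> &str {
        let trimmed = data.trim_end_matches('\n');
        match trimmed.rfind('\n') {
            // pos is the index of a '\n' inside `trimmed`, so pos + 1 <= trimmed.len()
            // holds by construction and the bounds check can be optimized away.
            Some(pos) => &trimmed[pos + 1..],
            None => trimmed,
        }
    }
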
340c0bf9e3 pxar: don't clone patterns unnecessarily
The options struct has no Drop handler and is passed by-move
so we can partially move out of it.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-26 10:24:18 +01:00
4d104cd4d8 clippy: more misc fixes
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-26 09:54:55 +01:00
367c0ff7c6 clippy: allow api functions with many arguments
some of those can be reduced/cleaned up when we have updater support in
the api macro.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-26 09:54:52 +01:00
9c26a3d61a verify: factor out common parameters
all the verify methods pass along the following:
- task worker
- datastore
- corrupt and verified chunks

might as well pull that out into a common type, with the added bonus of
now having a single point for construction instead of copying the
default capacities in three different modules..

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-26 09:54:49 +01:00
93e3581ce7 derive/impl and use Default for some structs
and revamp HttpClientOptions with two constructors for the common use
cases

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-26 09:54:45 +01:00
f4e52bb27d authid: make Tokenname(Ref) derive Eq
it's needed to derive Hash, and we always compare Authids or their
Userid components, never just the Tokenname part anyway..

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-26 09:54:40 +01:00
72064fd0df pxar: extract PxarExtractOptions
same as PxarCreateOptions, but for extraction/restore rather than
create.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-26 09:54:36 +01:00
77486a608e pxar: factor out PxarCreateOptions
containing the CLI parameters that are mostly passed-through from the
client to our pxar archive creation wrapper in pxar::create

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-26 09:54:32 +01:00
e97025ab02 pxar: typedef on_error as ErrorHandler
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-26 09:54:26 +01:00
e43b9175c0 client: factor out UploadOptions
to reduce function signature complexity.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-26 09:54:18 +01:00
9cc1415ef5 systemd/time: extract Time/DateSpec structs
could be pulled up into CalendarEvent if desired..

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-26 09:54:13 +01:00
bd215dc0e4 async index reader: typedef ReadFuture
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-26 09:53:58 +01:00
12e874cef0 allow complex Futures in tower_service impl
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-26 09:53:55 +01:00
6d233161b0 client: refactor catalog upload spawning
by pulling out Result type into separate struct

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-26 09:53:51 +01:00
905a570489 broadcast_future: refactor broadcast/future binding
into its own, private struct.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-26 09:53:48 +01:00
432fe44187 report: type-alias function call tuple
to make clippy happy.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-26 09:53:43 +01:00
51b938496d tools::sgutils2: name fixup
it's not a box anymore

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-25 15:05:52 +01:00
b7f9b25e4d tools::sgutils2: use NonNull
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-25 14:56:10 +01:00
fe61280b6b tools::sgutils2: extern 'C' and import ordering
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-25 14:54:25 +01:00
68c087d578 tools::sgutils2: don't transmute to a Box
Otherwise we run the drop handler for the scsi pt object AND
the box itself, which shouldn't even work as it should be
doing a double-free (unless the library does some kind of
reference counting in which case this should simply crash
later on?)

anyway, let's make a wrapper simply called `SgPt` containing
the pointer from `construct_scsi_pt_obj()`

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-25 14:48:27 +01:00
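
A sketch of that wrapper against the usual libsgutils2 C API (construct_scsi_pt_obj/destruct_scsi_pt_obj); the `SgPt` name is taken from the commit message, the rest is illustrative:

    use std::ptr::NonNull;

    #[repr(C)]
    struct SgPtBase {
        _private: [u8; 0], // opaque C type from libsgutils2
    }

    #[link(name = "sgutils2")]
    extern "C" {
        fn construct_scsi_pt_obj() -> *mut SgPtBase;
        fn destruct_scsi_pt_obj(objp: *mut SgPtBase);
    }

    /// Owns the pass-through object; dropping it runs only the C destructor,
    /// never a Rust Box deallocation, so there is no double free.
    struct SgPt(NonNull<SgPtBase>);

    impl SgPt {
        fn new() -> Option<Self> {
            NonNull::new(unsafe { construct_scsi_pt_obj() }).map(SgPt)
        }
    }

    impl Drop for SgPt {
        fn drop(&mut self) {
            unsafe { destruct_scsi_pt_obj(self.0.as_ptr()) }
        }
    }
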
d6bf87cab7 tools::sgutils2: const correctness
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-25 14:33:45 +01:00
2b96a43879 tape: cleanup - use ScsiMediaChange trait instead of mtx_status() 2021-01-25 13:25:22 +01:00
697c41c584 tape: add/use rust scsi changer implementation using libsgutil2 2021-01-25 13:14:07 +01:00
a2379996e6 sgutils2: add scsi_inquiry command 2021-01-25 13:14:07 +01:00
29077d95db http-client: further clippy cleanups
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-25 11:41:54 +01:00
dbd00a57b0 http-client: fix typoed ticket cache condition
which was even copy-pasted once without noticing.

found with clippy.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-25 11:41:51 +01:00
d08cff51a4 rework GC traversal error handling
the error messages don't make sense with an empty default

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-25 11:41:48 +01:00
3e461dec1c apt: let api handle optional bool with default
one less FIXME :)

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-25 11:41:46 +01:00
4d08e25913 clippy: rewrite ifs with identical return values
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-25 11:41:43 +01:00
43313c2ee7 clippy: rewrite comparison chains
chunk_stream one can be collapsed, since split == split_to with `at` set
to buffer.len() anyway.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-25 11:41:39 +01:00
81b2a87232 clippy: fix Mutex with unused value
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-25 11:41:36 +01:00
3d8cd0ced7 clippy: add is_empty() when len() is implemented
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-25 11:41:32 +01:00
7c78d54231 sgutils: allow command which does not transfer any data 2021-01-24 15:19:43 +01:00
f9d71e8b17 sgutils2: allow to set custom timeouts 2021-01-24 14:54:30 +01:00
0107fd323c cleanup: avoid compiler warnings 2021-01-23 17:34:26 +01:00
8ba47929a0 tape: add docu about paperkey 2021-01-23 15:34:28 +01:00
794b0fe9ce tape: document hardware encryption 2021-01-23 15:19:28 +01:00
979dccc7ec tape: avoid error when clearing encryption key
Simply ignore clear request when sg_spin_data_encryption_caps fails.
Assume those are tapes without hardware encryption support.
2021-01-23 10:20:43 +01:00
44a5f38bc4 docs: clarify that client-server communication is secure
This clarifies the fact that all communication between client and server
uses TLS for secure communication.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2021-01-22 16:07:44 +01:00
bf78f70885 improve code docs in api2
Note: API methods should be declared pub, so that they show up in the generated docu.
2021-01-22 15:57:42 +01:00
545706cbee d/control: bump B-D on pve-eslint
the old one does not understand www/config/TfaView.js and fails the
build..

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-22 14:47:39 +01:00
0d916ac531 tape: add media pool config code docs 2021-01-22 12:01:46 +01:00
d4ab407045 tape: add drive config code docs 2021-01-22 11:51:36 +01:00
45212a8c78 fix mtx parser bug: s/strip_suffix/strip_prefix/ 2021-01-22 11:00:56 +01:00
64b83c3d70 tape: implement paperkey command for tape encryption keys 2021-01-22 09:56:14 +01:00
639a6782bd paperkey: move code to src/tools/paperkey.rs 2021-01-22 09:42:59 +01:00
5f34d69bcc tape: add volume-statistics api/command 2021-01-22 08:45:35 +01:00
337ff5a3cc tape: add estimated medium wearout to status 2021-01-22 08:06:25 +01:00
8e6459a818 tape: set encryption key on restore 2021-01-22 07:26:42 +01:00
aff3e16194 tape: add code docs to src/config/tape_encryption_keys.rs 2021-01-21 18:23:07 +01:00
9372c0787d renamed src/tape/sgutils2.rs -> src/tools/sgutils2.rs 2021-01-21 17:57:17 +01:00
83fb2da53e tape: move MediaCatalog magic number into struct (doc cleanup) 2021-01-21 17:48:07 +01:00
645a044bf6 tape: further hierarchy improvements 2021-01-21 17:25:32 +01:00
37796ff73f tape: change code hierarchy to improve docs 2021-01-21 17:12:01 +01:00
e1fdcb1678 tape: do not export/doc low level libsgutils2 bindings 2021-01-21 16:38:24 +01:00
aab9a26409 ui: cleanup order of declaring properties
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-21 15:09:22 +01:00
958055a789 ui: fix on-parse use of global Proxmox.UserName
This is wrong most of the time, when not loading the web interface
with valid credentials, and thus some checks or defaults did not
evaluate correctly when the underlying value was only set later.

It needs to be set on component creation only; this can be done
through initComponent, event listeners, view controllers or cbind closures.

Use the latter, as all affected components already use cbind.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-21 15:08:46 +01:00
edda5039d4 tape: improve code docs 2021-01-21 13:19:07 +01:00
1c86893d95 cleanup: always compute fingerprint in KeyConfig constructors 2021-01-21 11:56:54 +01:00
d543587d34 Merge branch 'master' of ssh://proxdev.maurer-it.com/rust/proxmox-backup 2021-01-21 10:56:52 +01:00
780bc4cad2 tape: try to set encryption key with read-label command 2021-01-21 10:31:49 +01:00
18bd6ba13d tape: restore_key - always update key, even if there is already an entry 2021-01-21 10:31:49 +01:00
4dafc513cc tape: fix file permissions for tape encryption keys 2021-01-21 10:31:49 +01:00
7acd5c5659 cleanup: remove misleading wording from code docs 2021-01-21 10:31:49 +01:00
8428063d9e cleanup: KeyConfig::decrypt - show password hint on error 2021-01-21 10:31:49 +01:00
f490dda05a tape: use type Uuid instead of String 2021-01-21 10:31:49 +01:00
2b191385ea tape: use specialized encryption key per media-set 2021-01-21 10:31:49 +01:00
bc228e5eaf api: add types for UUIDs 2021-01-20 17:16:46 +01:00
8be65e34de clippy: replace transmute with &*
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:41:02 +01:00
d967d8f1a7 clippy: remove drop(&..)
it does nothing.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:41:02 +01:00
50deb0d3f8 clippy: use is_null to check for null pointers
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:41:02 +01:00
1d928b25fe clippy: remove some unnecessary reference taking
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
f2f81791d1 clippy: fix for_kv_map
and allow it in the one case where the entry loop is intended, but the
code is not yet implemented fully.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
382f10a0cc clippy: fix/allow needless_range_loop
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
0d2133db98 clippy: use while let loops
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
09faa9ee95 clippy: pass &str/&[..] instead of &String/&Vec
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
ccec086e25 clippy: remove unnecessary &mut
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
05725ac9a4 clippy: remove unnecessary let binding
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
96b7483138 clippy: remove/replace needless explicit lifetimes
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
81281d04a4 clippy: fix/allow identity_op
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
e062ebbc29 clippy: use *_or_else with function calls
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
b92cad0938 clippy: convert single match to if let
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
ea368a06cd clippy: misc. fixes
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
3f48cdb380 clippy: don't pass along unit value
make it explicit. this whole section should probably be re-written with
select!

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
17c7b46a69 clippy: use unwrap_or_default
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
a375df6f4c clippy: use copied/cloned instead of map
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
a3775bb4e8 clippy: shorten assignments
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
1e0c6194b5 clippy: fix option_as_ref_deref
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
a6bd669854 clippy: use matches!
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
6334bdc1c5 clippy: collapse nested ifs
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
3b82f3eea5 clippy: avoid useless format!
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
38556bf60d clippy: remove explicit returns
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
d8d8af9826 clippy: use chars / byte string literals
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
3984a5fd77 clippy: is_some/none/ok/err/empty
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:54 +01:00
397356096a clippy: remove needless bool literals
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:23:52 +01:00
365915da9a clippy: use strip_prefix instead of manual stripping
it's less error-prone (off-by-one!)

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:22:59 +01:00
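
For illustration (the "UPID:" prefix is just an example), the kind of rewrite this refers to:

    // before: manual stripping, easy to get the offset wrong
    fn task_id_manual(line: &str) -> Option<&str> {
        if line.starts_with("UPID:") {
            Some(&line[5..]) // off-by-one risk if the prefix ever changes
        } else {
            None
        }
    }

    // after: strip_prefix keeps prefix and offset in one place
    fn task_id(line: &str) -> Option<&str> {
        line.strip_prefix("UPID:")
    }
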
87152fbac6 clippy: drop redundant 'static lifetime
those declarations are already const/static..

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:22:59 +01:00
22a9189ee0 clippy: remove unnecessary closures
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:22:59 +01:00
4428818412 clippy: remove unnecessary clones
and from::<T>(T)

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:22:59 +01:00
47ea98e0e3 clippy: collapse/rework nested ifs
no semantic changes (intended).

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-20 16:22:59 +01:00
6dd0513546 tape: allocate new media set when pool encryption key changes 2021-01-20 15:43:39 +01:00
8abe51b71d improve code docs 2021-01-20 15:43:19 +01:00
69b8bc3bfa tape: implement show key
Moved API types Kdf and KeyInfo to src/api2/types/mod.rs.
2021-01-20 15:43:19 +01:00
301b8aa0a5 tape: implement change-passphrase for tape encryption keys 2021-01-20 15:43:19 +01:00
e5b6c93323 tape: add --kdf parameter to create key api 2021-01-20 15:43:19 +01:00
9a045790ed cleanup KeyConfig 2021-01-20 15:43:19 +01:00
82a103c8f9 add "password hint" to KeyConfig 2021-01-20 15:43:19 +01:00
0123039271 ui: tfa: rework removal confirmation dialog
present all relevant information about the TFA token to be removed,
so that a user can make a better decision.

Rework layout to match our commonly used style.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-19 19:46:10 +01:00
9a0e115a37 ui: tfa view: add userid to TFA data model
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-19 19:46:10 +01:00
867bfc4378 ui: login view: fix missing trailing comma
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-19 19:46:10 +01:00
feb1645f37 tape: generate random encryptions keys and store key_config on media 2021-01-19 11:20:07 +01:00
8ca37d6a65 cleanup: factor out decrypt_key_config 2021-01-19 11:20:07 +01:00
ac163a7c18 ui: tfa/totp: fix setting issuer in secret URL
it's recommended to set the issuer for both the GET parameter and
the initial issuer label prefix [0].

[0]: https://github.com/google/google-authenticator/wiki/Key-Uri-Format#label

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-18 16:27:02 +01:00
9b6bddb24c tfa: remove/empty description for recovery keys
While the user chosen description is not allowed to be
empty, we do leave it empty for recovery keys, as a "dummy
description" makes little sense...

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-18 15:20:39 +01:00
f57ae48286 ui: tfa: fix ctime column width
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-18 14:31:15 +01:00
4cbd7eb7f9 gui: tfa: make description fill the remaining space
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-18 14:06:12 +01:00
310686726a gui: tfa: show when entries were created
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-18 14:06:12 +01:00
ad5cee1d22 tfa: add 'created' timestamp to entries
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-18 14:06:12 +01:00
bad6e32075 docs: fix typo in client manpage
Signed-off-by: Oguz Bektas <o.bektas@proxmox.com>
2021-01-18 13:52:11 +01:00
8ae6d28cd4 gui: enumerate recovery keys and list in 2nd factor window
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-18 13:51:23 +01:00
ca1060862e tfa: remember recovery indices
and tell the client which keys are still available rather
than just yes/no/low

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-18 13:51:23 +01:00
8a0046f519 tape: implement encrypted backup - simple version
This is just a proof of concept, only storing the encryption key fingerprint
inside the media-set label.
2021-01-18 13:38:22 +01:00
84cbdb35c4 implement FromStr for Fingerprint 2021-01-18 13:38:22 +01:00
1e93fbb5c1 tape: add encrypt property to media pool configuration 2021-01-18 13:38:22 +01:00
619554af2b tape: clear encryption key before writing labels
We always write labels unencrypted.
2021-01-18 13:38:22 +01:00
d5a48b5ce4 tape: add hardware encryption key management api 2021-01-18 13:38:22 +01:00
4e9cc3e97c ui: tfa: fix title for removal confirmation
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-18 13:28:02 +01:00
492bc2ba63 ui: tfa/recovery: add print button to key info window
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-18 10:45:47 +01:00
995492100a ui: tfa/recovery: fix copy button text, add icon
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-18 10:45:28 +01:00
854319d88c ui: tfa/recovery: disallow closing key info window with ESC
to avoid accidentally closing it

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-18 10:44:40 +01:00
3189d05134 ui: tfa: specify which confirmation password is required
Clarify that the password of the user one wants to add TFA to is
required, which is not necessarily the one of the currently logged-in
user. Use an empty text for that.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-18 10:12:23 +01:00
b2a43b987c ui: tfa totp: whitespace and padding fix
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-18 10:10:16 +01:00
6676409f7f ui: access: streamline add/edit/.. button order and separators
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-18 09:33:29 +01:00
44de5bcc00 pull: add error context for initial group list call
otherwise the user is confronted with a generic error like "permission
check failed" with no indication that it refers to a request made to the
remote PBS instance..

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-18 06:51:05 +01:00
e2956c605d pull: rustfmt
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-18 06:50:23 +01:00
b22b6c2299 tape: encryption scsi command cleanup 2021-01-16 18:24:04 +01:00
90950c9c20 tape: add scsi commands to control drive hardware encryption 2021-01-16 15:59:05 +01:00
0c5b9e7820 tape: sgutils2.rs - add do_out_command()
Make it possible to run commands that write data.
2021-01-16 15:59:05 +01:00
a9ffa010c8 ui: webauthn config: set default values for unconfigured case
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-15 16:25:47 +01:00
a6a903293b ui: webauthn config: use ID instead of Id/id
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-15 16:25:26 +01:00
3fffcb5d77 gui: tfa configuration
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-15 15:19:52 +01:00
a670b99db1 tfa: add webauthn configuration API entry points
Currently there's not yet a node config and the WA config is
somewhat "tightly coupled" to the user entries in that
changing it can lock them all out, so for now I opted for
fewer reorganization and just use a digest of the
canonicalized config here, and keep it all in the tfa.json
file.

Experimentally using the flatten feature on the methods with
an `Updater` struct similar to what the api macro is supposed
to be able to derive on its own in the future.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-15 15:19:52 +01:00
aefd74197a backup::manifest: use tools::json for canonical representation
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-15 15:19:52 +01:00
9ff747ef50 add tools::json for canonical json generation
moving this from backup::manifest, no functional changes

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-15 15:19:52 +01:00
a08a198577 tape: do not abort backup if tape drive does not support tape-alert-flags 2021-01-15 11:43:17 +01:00
4cfb123448 tape: update restore docu 2021-01-15 09:44:46 +01:00
198ebc6c86 d/rules: patch out wrongly linked libraries from ELFs
this is a HACK!

It seems that due to lots of binaries getting compiled from a single
crate, the compiler is confused when linking in dependencies to each
binary's ELF.

It picks up the combined set (union) of all dependencies and sets
those on every ELF. This results in the client, for example, linking
to libapt-pkg or libsystemd even if none of those symbols are used..

This could possibly be fixed by restructuring the source tree into
sub crates/workspaces or what not, not really tested and *lots* of
work.

So as a stopgap measure, use `ldd -u` to find unused linkage and
remove it using `patchelf`.

While this works well and does not seem to interfere with debug
symbols or other usage in general, it is still a hack and should be
dropped once the restructuring of the source tree has been shown to
bring similar effects.

This allows for much easier re-use of the generated client .deb
package on other Debian derivatives (e.g., Ubuntu), which was blocked
until now due to a wrong libapt-pkg version or the like.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-15 08:52:53 +01:00
a8abcd9b30 debian/control: set VCS urls
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-15 08:52:53 +01:00
b7469f5a9a d/control: sort and fix whitespace errors
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-15 08:52:53 +01:00
6bbe49aa14 access: restrict password changes on @pam realm to superuser
for behavior consistency with `update_user`

Signed-off-by: Oguz Bektas <o.bektas@proxmox.com>
2021-01-15 08:49:22 +01:00
5aa1019010 access: limit editing pam credentials to superuser
modifying @pam users' credentials should only be possible for root@pam,
otherwise it can have unintended consequences.

also enforce the same limit on user creation (except self_service check,
since it makes no sense during user creation)

Signed-off-by: Oguz Bektas <o.bektas@proxmox.com>
2021-01-15 08:49:22 +01:00
29a59b380c proxmox 0.10: adapt to moved ParameterSchema
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-14 16:01:33 +01:00
0bfcea6a11 cleanup: remove unnecessary 'mut' and '.clone()'
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-14 16:01:33 +01:00
19f5aa252f examples: unify h2 examples
update them to the new tokio-openssl API and remove socket buffer size
setting - it was removed from the TcpStream API, and is now only
available via TcpSocket (which can in turn be converted to a
TcpListener), but this is not needed for this example.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-14 16:01:33 +01:00
89e9134a3f hyper: use new hyper::upgrade
the old Body::on_upgrade method is no more

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-14 16:01:33 +01:00
b5a202acb6 tokio 1.0: update to new Signal interface
Signal does not yet re-implement Stream (and is not yet wrapped in
tokio-stream either).

see https://github.com/tokio-rs/tokio/pull/3383

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-14 16:01:33 +01:00
0f860f712f tokio 1.0: update to new tokio-openssl interface
connect/accept are now happening on pinned SslStreams

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-14 16:01:33 +01:00
7c66701366 tokio 1.0: use ReceiverStream from tokio-stream
to wrap a Receiver in a Stream. this will likely move back into tokio
proper once we have a std Stream..

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-14 16:01:33 +01:00
585e90c0de tokio: adapt to 1.0 process:Child changes
Child itself is no longer a Future, but it has a new wait() async fn
that does the same thing

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-14 16:01:33 +01:00
5c852d5b82 tokio: adapt to 1.0 runtime changes
enter() now returns a guard, and the builder got revamped to make the
choice between MT and current thread explicit.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-14 16:01:33 +01:00
484172b5f8 tokio 1.0: AsyncRead/Seek with ReadBuf
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-14 16:01:33 +01:00
d148958b67 proxmox 0.10: use tokio::time::timeout directly
TimeoutFutureExt is no more

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-14 16:01:33 +01:00
0a8d773ad0 tokio 1.0: delay -> sleep
almost the same thing, new name(s), no longer Unpin

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-14 16:01:33 +01:00
427d90e6c1 update to tokio 1.0
and various related crates

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2021-01-14 16:01:33 +01:00
9b2e4079d0 d/control: sort and fix whitespace errors
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-14 15:11:06 +01:00
1a0b410554 manager: user/token list: fix rendering 0 (never) expire date
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-14 13:59:08 +01:00
2d50a6192f tape: sg-tape-cmd - add more ways to specify devices 2021-01-14 13:05:26 +01:00
781da7f6f0 tape: add --inventorize flag to read-label API/CLI 2021-01-14 11:51:23 +01:00
646221cc29 ui: window/{AddWebauthn, TfaEdit}: fix spacing/border of the windows
the password field should not be indented differently than the rest of
the fields, and we never have a border on the panels

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-13 16:46:47 +01:00
b168a27f73 ui: window/AddTotp: fix spacing styling of form fields
by moving the lower fields into the form itself and dropping the padding

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-13 16:46:47 +01:00
a442bd9792 ui: window/AddTfaRecovery: fix style of TfaRecoveryShow window
to have a more similar layout/spacing to our other windows

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-13 16:46:47 +01:00
884fec7735 ui: window/AddTfaRecovery: rewrite to a Proxmox.window.Edit
we can reuse the edit window from the widget toolkit for the most part.
this solves some spacing and layout issues and is less code

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-13 16:46:47 +01:00
1cb89f302f ui: config/TfaView: disable Remove button by default
gets enabled when an item is clicked

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-13 16:46:47 +01:00
da36bbe756 ui: LoginView: remove not used viewModel
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-13 16:46:47 +01:00
25e464c5ce tape: MediaPool - allow to allocate free tapes 2021-01-13 14:25:51 +01:00
8446fbca85 tape: rename changer_id to label_text 2021-01-13 13:26:59 +01:00
9738dd545f tape: docu - explain manual backups and tape cleaning 2021-01-12 17:26:15 +01:00
0bce2118e7 tape: improve docu 2021-01-12 16:37:23 +01:00
6543214dde tape: MediaListEntry - add ctime 2021-01-12 12:01:21 +01:00
d91c6fd4e1 ui: tfa: drop bogus gettext of empty string
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-01-12 11:44:05 +01:00
711d1f6fc3 ui: notify options: Remove gettext for root@pam
Translating root@pam is not useful, especially as the empty text symbolises the
default value.

Signed-off-by: Dominic Jäger <d.jaeger@proxmox.com>
2021-01-12 11:41:24 +01:00
e422beec74 fix #3245: only use default schedule for new jobs
an empty schedule means 'none', so do not fill it with the default
in case we edit an existing job (like we do already for sync jobs)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-01-12 10:26:59 +01:00
a484c9cf96 tape: automatically reload tapes inside autoloader
We always automatically unload tapes to free library slots,
so it should not happen that an ejected tape resides inside the drive.

This is just a safeguard to handle the situation in case it happens ...

You can manually produce the situation by ejecting a tape without unloading:

 mt -f /dev/nst0 eject

Note: Our "proxmox-tape eject" does automatic unload
2021-01-12 09:49:05 +01:00
5654d8ceba tape: make eject/export more reliable, improve logging 2021-01-12 09:16:16 +01:00
31cf625af5 tape: improve backup logs 2021-01-11 13:23:12 +01:00
93be18ffd2 tape: fix tape alert flag values 2021-01-11 13:23:12 +01:00
e96464c795 d/control bump
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 12:09:19 +01:00
ad0ed40a59 api: return "invalid" as CSRF token for partial tickets
So that old clients don't `unwrap` a `None` value.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:23:13 +01:00
63fd8e58b2 gui: masks for: adding recovery and removals
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:23:13 +01:00
758a827c2d gui: add load mask during webauthn api calls
so that if we run into the 3s delay due to the wrong
password the window is properly masked

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:23:13 +01:00
7ad33e8052 tfa: use UNAUTHORIZED http status in password check
to trigger our 3s delay in the rest handler

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:23:13 +01:00
abfe0c0e70 tfa: fixup for challenge file split
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:23:13 +01:00
f22dfb5ece tfa: remove tfa user when a user is deleted
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:23:10 +01:00
4bda51688b tfa: improve user existence check
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:23:03 +01:00
eab25e2f33 tfa: allow deletion of entries of non-existent users
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:23:03 +01:00
94bd11bae2 typo fixups
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:23:03 +01:00
759af9f00c tfa api: return types and 'pub' structs/methods
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:23:03 +01:00
f58e5132aa tfa: entry access/iteration cleanup
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:23:03 +01:00
d831846706 tfa: r#type parameter name
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:23:03 +01:00
1fc9ac0433 tfa: _entry api method name suffix consistency
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:23:03 +01:00
5c48d0af1f tfa gui: fix adding recovery keys as user
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:22:32 +01:00
30fb19be35 tfa view: html-escape description text
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:22:32 +01:00
fbeac4ea28 gui: tfa support
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:22:32 +01:00
7f066a9b21 proxy: expose qrcodejs
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:22:32 +01:00
c5a767cd1d depend on libjs-qrcodejs
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:22:32 +01:00
027ef213aa api: tfa management and login
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:22:32 +01:00
dc1fdd6267 config: add tfa configuration
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:22:32 +01:00
96918252e5 buildcfg: add rundir helper macro
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:22:32 +01:00
014dc5f9d7 tools: add create_run_dir helper
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:22:32 +01:00
59e94227af add tools::serde_filter submodule
can be used to perform filtering at parse time

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-01-11 10:22:32 +01:00
e84b801c2e tape: improve retention period docu 2021-01-11 07:11:17 +01:00
6638c034d2 tape: remove unused eject_on_unload method 2021-01-10 16:20:18 +01:00
04df41cec1 tape: more MediaChange cleanups
Try to provide a generic implementation for complex operations:

- unload_to_free_slot
- load_media
- export media
- clean drive
- online_media_changer_ids
2021-01-10 15:32:52 +01:00
483da89d03 tape: improve export media to directly export from drive, add CLI 2021-01-10 13:44:44 +01:00
c92e3832bf tape: cleanup: s/transfer/transfer_media/, avoid compiler warnings 2021-01-10 12:18:30 +01:00
edb90f6afa tape: backup - implement export-media-set option 2021-01-10 11:59:55 +01:00
0057f0e580 tape: MediaChange - add transfer, implement export 2021-01-10 11:51:09 +01:00
e6217b8b36 tape: renamed src/tape/changer/linux_tape.rs -> src/tape/changer/mtx.rs 2021-01-10 10:07:40 +01:00
6fe16039b9 tape: simplify media changer implementation - new struct MtxMediaChanger 2021-01-10 10:02:01 +01:00
42967bf185 tape: backup - implement --eject-media option 2021-01-09 15:17:03 +01:00
5843268c47 tape: abort backup when we detect critical tape alert flags 2021-01-09 12:34:00 +01:00
7273ba3de2 tape: change default media set naming template to "%c" 2021-01-09 10:51:51 +01:00
0bf1c314da tape: show catalog status in media list 2021-01-09 10:24:48 +01:00
c7926d8e8c tape: split MediaSet into extra file 2021-01-09 08:54:58 +01:00
44ce25e7ac tape: docu - improve Administration section 2021-01-08 19:17:31 +01:00
3a2cc5c66e tape: minor docu update in retention policy 2021-01-08 19:01:38 +01:00
3838ce3330 tape: add retention policy docu 2021-01-08 17:34:58 +01:00
59217472aa tape: improve media set docu 2021-01-08 16:53:46 +01:00
df69a4fc59 tape: implement drive clean 2021-01-08 11:32:56 +01:00
25d3965769 tape: correctly skip cleaning tapes (not regular tapes) 2021-01-08 09:16:42 +01:00
08d8b2a4fd tape: add some media pool docu 2021-01-08 08:46:25 +01:00
879569d73f tape: changer transfer - make name parameter optional 2021-01-07 17:09:47 +01:00
b63f833d36 tape: fix parameter name - s/slot/source-slot/ 2021-01-07 15:39:25 +01:00
482c6e33dd tape: changer status command: make changer name optional 2021-01-07 15:12:19 +01:00
46a1863f88 tape: improve MediaChange trait
We expose the whole MtxStatus, and we can load/store from/to
specified slot numbers.
2021-01-07 14:26:43 +01:00
632756b6fb tape: more docs 2021-01-06 16:13:58 +01:00
04eba29c55 tape: document tape drive configuration 2021-01-06 16:00:31 +01:00
0912878ecf tape: document new export-slots feature 2021-01-06 14:11:35 +01:00
d5035c5600 tape: mtx_status - consider new export-slots property 2021-01-06 11:53:33 +01:00
38ae42b11a tape: changer - add export-slot config 2021-01-06 11:06:50 +01:00
a174854a0d tape: improve tape changer docs 2021-01-06 09:45:36 +01:00
c4b2b9ab41 tape: only query volume stats if we can read MAM 2021-01-06 09:20:36 +01:00
ef942e04c2 tape: add function to classify tape-alert-flags 2021-01-05 17:23:30 +01:00
f54cd66924 ui: running tasks: Use gettext for column labels
Signed-off-by: Dominic Jäger <d.jaeger@proxmox.com>
2021-01-05 13:53:33 +01:00
b40ab10d38 tape: add volume_mounts and medium_passes to LinuxDriveAndMediaStatus 2021-01-05 13:43:17 +01:00
f8ccbfdedd tape: implement read_volume_statistics 2021-01-05 12:58:18 +01:00
470f1c798a tape: status - show tape alert flags 2021-01-04 13:15:30 +01:00
5c012b392a tape: use LP 12h TapeAlert Response to query tape alert flags 2021-01-04 13:14:02 +01:00
165b641c1d tape: changer status - show full slots (for cartridge without barcode) 2021-01-04 12:06:05 +01:00
66e42bec05 tape: further PoolWriter cleanups 2021-01-03 12:08:40 +01:00
c503ea7045 tape: cleanup - rename 'info' to 'media_id'
Second try.
2021-01-03 11:38:00 +01:00
745ec187ce Revert "tape: cleanup - rename 'info' to 'media_id'"
This reverts commit f046313c0e.

media_id is already used as a parameter, so this commit is totally buggy.
2021-01-03 11:14:58 +01:00
f046313c0e tape: cleanup - rename 'info' to 'media_id' 2021-01-03 10:37:42 +01:00
74595b8821 tape: sg-tape-cmd tape-alert-flags 2021-01-03 10:09:43 +01:00
c9fdd142a4 tape: commit missing file 2021-01-02 13:39:34 +01:00
abaa6d0ac9 tape: decode TapeAlertFlags in cartridge-memory command 2021-01-02 10:55:30 +01:00
cfae8f0656 tape: merge MediaStateDatabase into Inventory 2021-01-01 16:15:13 +01:00
54f4ecd46a tape: implement MediaPool flag to consider offline media
For standalone tape drives.
2021-01-01 10:03:59 +01:00
1835d86e9d gui: update tape job descriptions 2020-12-31 10:37:09 +01:00
b9b4b31284 tape: add basic restore api/command 2020-12-31 10:26:48 +01:00
b4772d1c43 tape: new inventory helper - lookup_media_set_pool 2020-12-31 10:03:17 +01:00
9933dc3133 update TODO 2020-12-31 08:38:22 +01:00
08ac90f920 api: allow tokens to list users
i.e. their owner, or all users if they have the appropriate privileges.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-31 08:29:49 +01:00
13f5863561 api: improve error messages for restricted endpoints
the old variant attempted to parse a tokenid as userid and returned the
cryptic parsing error to the client, which is rather confusing.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-31 08:29:09 +01:00
81764111fe tape: media_change - log all errors 2020-12-30 19:17:18 +01:00
cb022525ff tape: only log to stdout in CLI environment 2020-12-30 19:01:39 +01:00
75656a78c6 tape: improve inline docu 2020-12-30 17:28:33 +01:00
284eb5daff tape: cleanup/simplify media_change code 2020-12-30 17:16:57 +01:00
ff58c51919 tape: improve media request/load 2020-12-30 13:09:28 +01:00
2fb1bdda20 verify-api: fix allOf duplicates check
it triggered with a wrongly-formatted message on schemas that did NOT
contain any duplicates..

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-30 12:36:00 +01:00
12299b333b tape: set minimal media label length to 2 2020-12-30 10:15:02 +01:00
b017bbc441 tape: add restore code, implement catalog api/command 2020-12-30 09:48:18 +01:00
9e8c0d2e33 tape: cleanup - remove debug messages 2020-12-30 08:41:30 +01:00
250c29edd2 tape: correctly sort media api entries 2020-12-29 12:09:51 +01:00
c431659d05 cleanup: remove debug output 2020-12-29 11:59:57 +01:00
a33389c391 tape: implement media content list api 2020-12-29 11:58:26 +01:00
3460565414 tape: create the MediaCatalog when we label a tape 2020-12-29 10:55:20 +01:00
26b62138ee cleanup: disable debug message when we detect a stopped worker task 2020-12-29 10:53:16 +01:00
afb0220642 tape: cleanup LinuxDriveStatus - make density optional 2020-12-29 09:10:30 +01:00
0993923ed5 tape: factor out get_drive_and_media_status 2020-12-29 08:39:06 +01:00
e0362b0d0f tape: correctly parse mtx import/export slots 2020-12-28 13:32:56 +01:00
df3a74d7e0 debian: correctly install sg-tape-cmd setuid binary 2020-12-28 13:22:17 +01:00
d5d457e667 fix typo in Makefile 2020-12-28 11:41:10 +01:00
b27c32821c tape: install new sg-tape-cmd setuid binary 2020-12-28 11:10:25 +01:00
76b15a035f tape: MediaCatalog: write magic number before content 2020-12-26 11:05:25 +01:00
eb8feb1281 tape: add LTO1 to TapeDensity 2020-12-26 10:48:32 +01:00
fc6ce9835b tape: fix non-rewinding tape device check 2020-12-25 15:38:29 +01:00
8ae9f4efc2 tape: minor cleanups 2020-12-25 13:45:26 +01:00
c9d13b0fc4 tape: expose check_tape_is_linux_tape_device 2020-12-24 15:51:49 +01:00
bfacc1d8c3 tape: cleanup - factor out open_linux_tape_device 2020-12-24 11:24:45 +01:00
02d484370f fix build depends 2020-12-23 11:54:44 +01:00
5ae86dfaa1 tape: return media usage info with status command 2020-12-23 11:24:34 +01:00
dbe7e556b0 tape: implement binding for libsgutils2
So that we can read cartridge memory without calling "sg_raw". In the future,
we may need further low-level commands to control the tape..
2020-12-23 09:44:53 +01:00
4799280ccd http_client: add timeouts for critical connects
Use timeout futures for sections that might hang in certain error
conditions. This is mostly intended to be used as a safeguard, not a
first line of defense - i.e. best-effort avoidance of total hangs.

Not every future used for the HttpClient/H2Client is changed, only those
where a quick response is to be expected. For example, the response
reading futures are left alone, so data transfer is never capped with
timeout, only the initial server connect.

It is also used for upgrading to H2 connections, as that can take a long
time on overloaded servers.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-12-22 13:31:10 +01:00
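A minimal Rust sketch of the timeout-wrapper idea described above, assuming tokio; the helper name and duration are illustrative, not the actual HttpClient code:

    use std::future::Future;
    use std::time::Duration;

    use anyhow::{bail, Result};

    // Wrap a connect-style future in a timeout as a best-effort safeguard
    // against total hangs; the 10 second value is only an example.
    async fn with_connect_timeout<F, T>(fut: F) -> Result<T>
    where
        F: Future<Output = Result<T>>,
    {
        match tokio::time::timeout(Duration::from_secs(10), fut).await {
            Ok(res) => res,
            Err(_) => bail!("connect timed out after 10 seconds"),
        }
    }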
cb4865466e depend on proxmox 0.9.1 2020-12-22 13:30:41 +01:00
cb80d900b3 tape: add drive status api 2020-12-22 10:42:22 +01:00
ee01737e87 tape: rename 'mam' api to 'cartridge-memory' 2020-12-22 09:27:34 +01:00
2012825913 depend on proxmox 0.9.0 2020-12-22 08:52:24 +01:00
eb5e3420ae tests: verify-api: check AllOf schemas
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-12-22 07:31:38 +01:00
b2362a1207 adaptions for proxmox 0.9 and proxmox-api-macro 0.3
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-12-22 07:31:05 +01:00
54d968664a tape: update user docu 2020-12-21 12:13:35 +01:00
1e20f819d5 tape: add command to read cartridge memory (MAM)
This adds an additional (small) dependency on sg3-utils.
2020-12-21 12:12:33 +01:00
8001c82e81 tape: update user docu - howto label tapes 2020-12-20 10:41:40 +01:00
baefbc444e tape: update user docu 2020-12-20 09:16:09 +01:00
4a227b54bf add LTO barcode generator App 2020-12-19 17:39:48 +01:00
8a192bedde tape: update user docu 2020-12-19 16:56:54 +01:00
d5efa18ae4 tape: update user docu 2020-12-19 15:13:38 +01:00
5f79dc2805 tape: start user documentation 2020-12-19 11:14:56 +01:00
9aa58f0143 cleanup: rename mtfsf into forward_space_count_files 2020-12-18 16:57:49 +01:00
8835664653 tape: add tape backup api 2020-12-18 15:32:12 +01:00
d37da6b7fc tape: add PoolWriter 2020-12-18 15:27:44 +01:00
b9ee86efe1 tape: use SnapshotReader to create snapshot archive 2020-12-18 12:11:29 +01:00
d108b610fd tape: fix write_media_set_label - move to correct position 2020-12-18 12:11:29 +01:00
0ec79339f7 tools/daemon: improve reload behaviour
it seems that sometimes the child process signal gets handled before the
parent process signal. Systemd then ignores the child's signal (finished
reloading) and only goes into the reloading state afterwards, because of
the parent; this will never finish.

Instead, wait for the state to change to 'reloading' after sending
that signal in the parent, and only fork afterwards. This way
we ensure that systemd knows about the reloading before we actually try
to do it.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
Tested-By: Fabian Ebner <f.ebner@proxmox.com>
2020-12-18 10:30:37 +01:00
2afdc7f27d tape: MediaPool::with_config() - remove name parameter
Not required, because config already contains the pool name.
2020-12-18 08:14:24 +01:00
26aa9aca40 tape: return current_file_number as u64 2020-12-18 07:44:50 +01:00
3e2984bcb9 tools/process_locker: Decrement writer count in drop handler
of ProcessLockSharedGuard.

We use a counter to determine if we can unlock the file again, but
we never actually decremented the writer count, so we held the
lock forever.

This fixes the issue that we could not start a garbage collect after
a reload, as long as the old process is still running, even when that
process has no active backup anymore but another long running task
(e.g. file download, terminal, etc.).

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-12-18 07:15:08 +01:00
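A rough sketch of the shape of this fix, with illustrative names rather than the real process_locker internals:

    use std::sync::{Arc, Mutex};

    struct LockState {
        writers: usize,
    }

    struct ProcessLockSharedGuard {
        state: Arc<Mutex<LockState>>,
    }

    impl Drop for ProcessLockSharedGuard {
        fn drop(&mut self) {
            // decrement the writer count so the file lock can actually be
            // released once the last shared guard goes away
            let mut state = self.state.lock().unwrap();
            if state.writers > 0 {
                state.writers -= 1;
            }
            // once writers reaches 0 the underlying file lock can be unlocked
        }
    }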
a7a5406c32 acl: rustfmt module
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-18 07:07:01 +01:00
4f727a783e acl: reformat privileges
for better readability, and tell rustfmt to leave those definitions
alone.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-18 07:05:45 +01:00
23dc68fdea acl: add docs and adapt visibility
document all public things, add some doc links and make some
previously-public things only available for test cases or within the
crate:

previously public, now private:
- AclTreeNode::extract_user_roles (we have extract_roles())
- AclTreeNode::extract_group_roles (same)
- AclTreeNode::delete_group_role (exists on AclTree)
- AclTreeNode::delete_user_role (same)
- AclTreeNode::insert_group_role (same)
- AclTreeNode::insert_user_role (same)
- AclTree::write_config (we have save_config())
- AclTree::load (we have config()/cached_config())

previously public, now crate-internal:
- AclTree::from_raw (only used by tests)
- split_acl_path (used by some test binaries)

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-18 07:05:11 +01:00
b532dd00c4 tape: add helper to read snapshot contents
- lock the snapshot for reading
- use openat to open files
- provides an iterator over all chunks
2020-12-17 13:07:52 +01:00
c01742855a KeyConfig: bail on wrong fingerprint
instead of just logging the error. this should never happen in practice
unless someone is messing with the keyfile, in which case, it's better
to abort.

update tests accordingly (wrong fingerprint should fail, no fingerprint
should get the expected one).

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-17 11:27:06 +01:00
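A small illustrative check along these lines; names are assumptions, not the actual KeyConfig API:

    use anyhow::{bail, Result};

    // Bail on a stored/calculated fingerprint mismatch instead of only
    // logging it - a mismatch means the keyfile was tampered with.
    fn check_fingerprint(stored: Option<&str>, calculated: &str) -> Result<()> {
        match stored {
            Some(stored) if stored != calculated => {
                bail!("wrong key fingerprint: {} != {}", stored, calculated)
            }
            _ => Ok(()),
        }
    }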
9c953dd260 tape: add code to write backup snapshot files (without chunks) to tape 2020-12-17 08:28:47 +01:00
3fbf2d2fcd tape: cleanup MediaCatalog 2020-12-17 08:05:53 +01:00
e0af222ec3 KeyConfig: always calculate fingerprint
and warn if stored and calculated fingerprint don't match.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-17 06:52:55 +01:00
73b5011786 KeyConfig: add encrypt/decrypt test
the RSA key and the encryption key itself are hard-coded to avoid
stalling the test runs because of lack of entropy, they have no special
significance otherwise.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-17 06:47:45 +01:00
2ea5abcd65 docs: replace openssl command with client
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-17 06:46:59 +01:00
7137630d43 client: add 'import-with-master-key' command
to import an encrypted encryption key using a master key.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-17 06:46:24 +01:00
8acfd15d6e key: move RSA-encryption to KeyConfig
since that is what gets encrypted, and not a CryptConfig.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-17 06:43:34 +01:00
48fbbfeb7e fix #3197: skip fingerprint check when restoring key
when restoring an encrypted key, the original one is obviously not
available to check the fingerprint with.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-17 06:37:54 +01:00
9990af3042 master key: store blob name in constant
since we will use it in more than one place.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-17 06:36:06 +01:00
fe6c19383b tape: remove MediaLabelInfo, use MediaId instead
The additional content_uuid was quite useless...
2020-12-16 13:31:32 +01:00
42150d263b update pxar dependency to 0.6.2
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-12-16 13:13:31 +01:00
9839d3f778 tape: improve docu 2020-12-16 12:43:51 +01:00
dd59e3c2a1 tape: improve docu 2020-12-16 12:23:52 +01:00
0b7432ae09 tape: add chunk archive reader/writer 2020-12-16 12:08:34 +01:00
c1c2c8f635 tape: cleanup MediaLocation type for direct use with API 2020-12-16 10:49:01 +01:00
7680525eec docs: prune-sim: followup: add missing semicolon
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-12-16 10:08:11 +01:00
42298d5896 tape: add magic number to identify media catalog files 2020-12-16 09:00:14 +01:00
39478aa52c prune sim: correctly keep track of already included backups
This needs to happen in a separate loop, because some time intervals are not
subsets of others, i.e. weeks and months. Previously, with a daily backup
schedule, having:
* a backup on Sun, 06 Dec 2020 kept by keep-daily
* a backup on Sun, 29 Nov 2020 kept by keep-weekly
would lead to the backup on Mon, 30 Nov 2020 to be selected for keep-monthly,
because the iteration did not yet reach the backup on Sun, 29 Nov 2020 that
would mark November as being covered.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-12-15 14:03:18 +01:00
6a99b930c4 followup: use arrow function for sorting
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-12-15 13:45:51 +01:00
f6ce45b373 prune sim: fix #3192: by fixing usage of sort()
Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-12-15 13:45:51 +01:00
205e187613 tape: add MediaCatalog implementation 2020-12-15 13:40:49 +01:00
a78348acbb tape: rename DriveLabel to MediaLabel 2020-12-14 17:37:16 +01:00
410611b4f2 tape: improve file format docu 2020-12-14 17:29:57 +01:00
af07ec8f29 tape: minor code cleanup 2020-12-14 16:56:26 +01:00
3f803af00b tape: scan - print more debug info 2020-12-14 13:16:18 +01:00
ac461bd651 tape: implement scan command (useful for debug) 2020-12-14 12:55:49 +01:00
ce955e1635 tape: implement eod cli command (debug tool) 2020-12-14 09:56:59 +01:00
e20d008c6a tape: rename cli 'media media-destroy' to 'media destroy' 2020-12-14 09:30:32 +01:00
fb657d8ee5 tape: implement destroy_media 2020-12-14 08:58:40 +01:00
fba0b77469 tape: add media api 2020-12-14 07:55:57 +01:00
b5c1296eaa tape: make changer get_status async 2020-12-14 07:14:24 +01:00
065df12872 tape: split api type definitions for changers into extra file 2020-12-13 09:31:02 +01:00
7e1d4712b8 tape: rename CHANGER_ID_SCHEMA to CHANGER_NAME_SCHEMA 2020-12-13 09:22:08 +01:00
49c965a497 tape: rename DRIVE_ID_SCHEMA to DRIVE_NAME_SCHEMA 2020-12-13 09:18:16 +01:00
6fe9aedd0b tape: correctly call Async handler in proxmox-tape 2020-12-12 09:58:47 +01:00
42cb9bd6a5 tape: avoid executor blocking in changer api 2020-12-12 09:45:08 +01:00
66dbe5639e tape: avoid executor blocking in drive API
By using tokio::task::spawn_blocking().
2020-12-12 09:20:04 +01:00
2d87f2fb73 bump version to 1.0.6-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-12-11 14:19:28 +01:00
4c81273274 debian: just install whole images directory
fixes build for recently added tape icon (and includes it for real)

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-12-11 14:19:28 +01:00
73b8f6793e tape: add svg icon 2020-12-11 13:02:23 +01:00
663ef85992 tape: use WorkerTask for erase and rewind 2020-12-11 11:19:33 +01:00
e92c75815b tape: split inventory api
inventory: sync, list labels with uuids,
update_inventory: WorkerTask, updates database
2020-12-11 10:42:29 +01:00
6dbad5b4b5 tape: run label commands as WorkerTask (threads) 2020-12-11 09:10:22 +01:00
bff7e3f3e4 tape: implement barcode-label-media 2020-12-11 07:50:19 +01:00
83abc7497d tape: implement inventory command 2020-12-11 07:39:28 +01:00
8bc5eebeb8 depend on package mt-st
We do not use the mt utility directly, but the package also provides
a udev helper to correctly initialize tape drives (stinit). Also,
the mt utility is helpful for debugging tape issues.
2020-12-11 06:38:45 +01:00
1433b96ba0 control.in: fix indentation
Signed-off-by: Oguz Bektas <o.bektas@proxmox.com>
2020-12-11 06:31:30 +01:00
be1a8c94ae fix build: add missing file 2020-12-10 13:40:20 +01:00
4606f34353 tape: implement read-label command 2020-12-10 13:20:39 +01:00
7bb720cb4d tape: implement label command 2020-12-10 12:30:27 +01:00
c4d8542ec1 tape: add media pool handling 2020-12-10 11:41:35 +01:00
9700d5374a tape: add media pool cli 2020-12-10 11:13:12 +01:00
05e90d6463 tape: add media pool config api 2020-12-10 10:52:27 +01:00
55118ca18e tape: correctly sort drive api subdir 2020-12-10 10:09:12 +01:00
f70d8091d3 tape: implement option changer-drive-id 2020-12-10 09:09:06 +01:00
a3c709ef21 tape: cli cleanup - avoid api redefinition 2020-12-10 08:35:11 +01:00
4917f1e2d4 tape: implement delete property for drive update command 2020-12-10 08:25:46 +01:00
93829fc680 tape: cleanup load-slot api 2020-12-10 08:04:55 +01:00
5605ca5619 tape: cli cleanup - rename scan-for-* into scan 2020-12-10 07:58:45 +01:00
e49f0c03d9 tape: implement load-media command 2020-12-10 07:52:56 +01:00
0098b712a5 tape: implement eject 2020-12-09 17:50:48 +01:00
5fb694e8c0 tape: implement rewind 2020-12-09 17:43:38 +01:00
583a68a446 tape: implement erase media 2020-12-09 17:35:31 +01:00
e6604cf391 tape: add command line interface proxmox-tape 2020-12-09 13:00:20 +01:00
43cfb3c35a tape: do not remove changer while still used 2020-12-09 12:55:54 +01:00
8a16c571d2 tape: add changer property to drive create api 2020-12-09 12:55:10 +01:00
314652a499 tape: set protected flag for configuration change api methods 2020-12-09 12:02:55 +01:00
6b68e5d597 client: move connect_to_localhost into client module 2020-12-09 11:59:50 +01:00
cafd51bf42 tape: add media state database 2020-12-09 11:21:56 +01:00
eaff09f483 update control file 2020-12-09 11:21:56 +01:00
9b93c62044 remove unused descriptions from api macros
these are now a hard error in the api macro

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-12-09 10:55:18 +01:00
5d90860688 tape: expose basic tape/changer functionality at api2/tape/ 2020-12-08 15:42:50 +01:00
5ba83ed099 tape: check digest on config update 2020-12-08 11:24:38 +01:00
50bf10ad56 tape: add changer configuration API 2020-12-08 09:04:56 +01:00
16d444c979 tape: add tape drive configuration API 2020-12-07 13:04:32 +01:00
fa9c9be737 tape: add tape device driver 2020-12-07 08:29:22 +01:00
2e7014e31d tape: add BlockedReader/BlockedWriter streams
This is the basic format used to write data to tapes.
2020-12-06 12:09:55 +01:00
a84050c1f0 tape: add BlockHeader impl 2020-12-06 10:26:24 +01:00
7c9835465e tape: add helpers to emulate tape read/write behavior 2020-12-06 09:41:16 +01:00
ec00200411 fix bug #3189: fix change_password permission checks, run protected 2020-12-05 16:20:29 +01:00
956e5fec1f depend on mtx (tape changer control)
A very small package with no additional dependencies.
2020-12-05 14:54:12 +01:00
b107fdb99a tape: add tape changer support using 'mtx' command 2020-12-05 14:54:12 +01:00
7320e9ff4b tape: add media inventory 2020-12-05 12:54:15 +01:00
c4d2d54a6d tape: define useful constants 2020-12-05 12:20:46 +01:00
1142350e8d tape: add media pool config 2020-12-05 11:59:38 +01:00
d735b31345 tape: add tape read trait 2020-12-05 10:54:38 +01:00
e211fee562 tape: add tape write trait 2020-12-05 10:51:34 +01:00
8c15560b68 tape: add file format definitions 2020-12-05 10:45:08 +01:00
327e93711f commit missing file: tape api type definitions 2020-12-04 16:00:52 +01:00
a076571470 tape support: add drive configuration 2020-12-04 15:42:32 +01:00
ff50c07ebf start experimental tape management GUI
You need to set the environment variable TEST_TAPE_GUI=1 to enable this.
The current GUI is only a placeholder.
2020-12-04 12:50:08 +01:00
179145dc24 backup/datastore: move manifest locking to /run
this fixes the issue that on some filesystems, you cannot recursively
remove a directory when you hold a lock on a file inside (e.g. nfs/cifs)

it is not really backwards compatible (so during an upgrade, two daemons
could hold the lock), but since the locking was broken before (see the
previous patch) it should not really matter (also it seems very unlikely
that someone will trigger this)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-12-03 09:56:42 +01:00
6bd0a00c46 backup/datastore: really lock manifest on delete
'lock_manifest' returns a Result<File, Error>, so we always got a result,
even when we did not get the lock, but we acted as if we had.

Bubble the locking error up.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-12-02 14:37:05 +01:00
f6e28f4e62 client/pull: log how many groups to pull were found
if no groups were found, the task log was very confusing as it
contained no real information about why nothing was synced, e.g.:

 Starting datastore sync job 'remote:datastore:local-datastore:s-79412799-e6ee'
 Sync datastore 'local-datastore' from 'remote/datastore'
 sync job 'remote:datastore:local-datastore:s-79412799-e6ee' end
 TASK OK

this patch simply logs how many groups were found and are about to be synced:

 Starting datastore sync job 'remote:datastore:local-datastore:s-79412799-e6ee'
 Sync datastore 'local-datastore' from 'remote/datastore'
 found 0 groups to sync
 sync job 'remote:datastore:local-datastore:s-79412799-e6ee' end
 TASK OK

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-12-02 07:22:50 +01:00
37f1b7dd8d docs: add more thoughts about chunk size 2020-12-01 10:28:06 +01:00
60e6ee46de doc: add some thoughts about large chunk sizes 2020-12-01 08:47:15 +01:00
2260f065d4 cleanup: use extra file for StoreProgress 2020-12-01 06:34:33 +01:00
6eff8dec4f cleanup: remove unnecessary StoreProgress clone() 2020-12-01 06:29:11 +01:00
7e25b9aaaa verify: use same progress as pull
percentage of verified groups, interpolating based on snapshot count
within the group. in most cases, this will also be closer to 'real'
progress since added snapshots (those which will be verified) in active
backup groups will be roughly evenly distributed, while number of total
snapshots per group will be heavily skewed towards those groups which
have existed the longest, even though most of those old snapshots will
only be re-verified very infrequently.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-01 06:22:55 +01:00
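A sketch of the interpolation described above; the function and parameter names are illustrative, not the actual progress code:

    // Percentage of verified groups, interpolated by the snapshot position
    // within the currently processed group.
    fn interpolated_progress(
        done_groups: u64,
        total_groups: u64,
        done_snapshots: u64,
        group_snapshots: u64,
    ) -> f64 {
        if total_groups == 0 {
            return 1.0;
        }
        let within = if group_snapshots == 0 {
            0.0
        } else {
            done_snapshots as f64 / group_snapshots as f64
        };
        (done_groups as f64 + within) / total_groups as f64
    }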
f867ef9c4a progress: add format variants
for iterating over a single group, or iterating just on the group level

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-01 06:22:12 +01:00
fc8920e35d pull: factor out interpolated progress
and add group/snapshot count info.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-01 06:13:11 +01:00
7f3b0f67e7 remove BackupGroup::list_groups
BackupInfo::list_backup_groups is identical code-wise, and makes more
sense as entry point for listing groups.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-01 06:09:44 +01:00
844660036b gc: don't limit index listing to same filesystem
WalkDir does not follow symlinks by default anyway, and this behaviour
is not documented anywhere. e.g., if a sysadmin mounts 'extra storage'
for some backup group or type (not knowing that only metadata is stored
in those directories), GC will ignore all the indices contained within
and happily garbage collect their chunks..

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-01 06:07:09 +01:00
efcac39d34 gc: remove duplicate variable
list_images already returns absolute paths, we don't need to prepend
anything.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-01 06:06:51 +01:00
cb4b721cb0 gc: log index files found outside of expected scheme
for safety reasons, GC finds and marks all index files below the
datastore base path. As a result of regular operations, only index files
within the expected scheme of <TYPE>/<ID>/<TIMESTAMP> should exist.

add a small check + warning if the index list contains index files
outside of this expected scheme, so that an admin with shell access can
investigate.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-01 06:06:17 +01:00
7956877f14 gc: shorten progress messages
we have messages starting the phases anyway, and limit the number of
progress updates so that context remains available at all times.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-12-01 06:04:13 +01:00
2241c6795f d/control bump
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-30 14:28:02 +01:00
43e60ceb41 file logger: remove test.log after test as well
and a doc formatting fixup

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-30 14:13:21 +01:00
b760d8a23f derive PartialEq for Userid
the manual implementation is equivalent

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-30 14:10:17 +01:00
2c1592263d tiny clippy hint
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-30 14:03:43 +01:00
616533823c don't enforce Vec and String in tools::join
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-30 13:56:59 +01:00
913dddea85 minor cleanup
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-30 13:56:21 +01:00
3530430365 tools: avoid unnecessary copying of parameters/properties
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-30 13:53:49 +01:00
a4ba60be8f minor cleanups
whitespace, formatting and superfluous lifetime annotations

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-30 13:47:31 +01:00
99e98f605c network helpers: fix fd leak in get_network_interfaces
This one always leaked.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-30 11:25:53 +01:00
935ee97b17 use fd_change_cloexec helper
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-30 11:25:53 +01:00
6b9bfd7fe9 minor cleanup
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-30 11:25:53 +01:00
dd519bbad1 pxar: stricter file descriptor guards
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-30 11:25:53 +01:00
35fe981c7d client: use tools::pipe instead of nix
nix::unistd::pipe returns unguarded RawFds which should be
avoided

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-30 11:25:53 +01:00
b6570abe79 changes for proxmox 0.8
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-30 11:25:53 +01:00
54813c650e bump proxmox dep to 0.8.0
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-30 11:25:53 +01:00
781106f8c5 ui: fix usage of findRecord
findRecord does not match exactly, but only at the beginning and
case-insensitively, by default. Change all calls to be case-sensitive
and an exact match (we never want the default behaviour afaics).

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-27 07:20:32 +01:00
96f35520a0 bump version to 1.0.5-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-25 15:30:06 +01:00
490560e0c6 restore: print to STDERR
else restoring to STDOUT is broken..

Reported-by: Dominic Jäger <d.jaeger@proxmox.com>

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-25 14:38:02 +01:00
52f53d8280 control: update versions 2020-11-25 10:35:51 +01:00
27b8a3f671 bump version to 1.0.4-1 2020-11-25 08:03:11 +01:00
abf9b6da42 docs: fix renamed commands 2020-11-25 08:03:11 +01:00
0c9209b04c cli: rename command "upload-log" to "snapshot upload-log" 2020-11-25 07:57:39 +01:00
edebd52374 cli: rename command "forget" to "snapshot forget" 2020-11-25 07:57:39 +01:00
61205f00fb cli: rename command "files" to "snapshot files" 2020-11-25 07:57:39 +01:00
a303e00289 fingerprint: add new() method 2020-11-25 07:57:39 +01:00
af9f72e9d8 fingerprint: add bytes() accessor
needed for libproxmox-backup-qemu0

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-25 06:34:34 +01:00
5176346b30 ui: fix broken gettext use
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-25 00:21:17 +01:00
731eeef25b cli: use new alias feature for "snapshots"
Now maps to "snapshot list".
2020-11-24 13:26:43 +01:00
a65e3e4bc0 client: add 'snapshot notes show/update' command
to show and update snapshot notes from the cli

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-24 11:44:19 +01:00
027eb2bbe6 bump version to 1.0.3-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-24 08:56:18 +01:00
6982a54701 gui: add snapshot/file fingerprint tooltip
display short key ID, like backend's Display trait.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 08:44:55 +01:00
035c40e638 list_snapshots: return manifest fingerprint
for display in clients.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 08:44:55 +01:00
79c535955d refactor BackupInfo -> SnapshotListItem helper
before adding more fields to the tuple, let's just create the struct
inside the match arms to improve readability.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 08:44:55 +01:00
8b7f8d3f3d expose previous backup time in backup env
and use this information to add more information to client backup log
and guide the download manifest decision.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 08:44:55 +01:00
866c859a1e bump version to 1.0.2-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-24 08:33:20 +01:00
23e4e90540 verification: fix message in notification mail
the errors Vec can contain failed groups as well (e.g., if a group has
no or an invalid owner).

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 08:33:20 +01:00
a4fa3fc241 verification job: log failed dirs
else users have to manually search through a potentially very long task
log to find the entries that are different.. this is the same summary
printed at the end of a manual verify task.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 08:33:20 +01:00
81d10c3b37 cleanup: remove dead code 2020-11-24 08:03:00 +01:00
f1e2904150 paperkey: refactor common code
from formatting functions to main function, and pass along the key data
lines instead of the full string.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 07:57:21 +01:00
23f9503a31 client: check fingerprint after downloading manifest
this is stricter than the check that happened on manifest load, as it
also fails if the manifest is signed but we don't have a key available.

add some additional output at the start of a backup to indicate whether
a previous manifest is available to base the backup on.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 07:55:12 +01:00
a0ef68b93c manifest: check fingerprint when loading with key
otherwise loading will run into the signature mismatch which is
technically true, but not the complete picture in this case.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 07:49:51 +01:00
6b127e6ea0 fix #3139: add key fingerprint to manifest
if the manifest is signed/the contained archives/blobs are encrypted.
stored in 'unprotected' area, since there is already a strong binding
between key and manifest via the signature, and this avoids breaking
backwards compatibility for a simple usability improvement.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-24 07:45:11 +01:00
5e17dbf2bb cli: cleanup 'key show' - use format_and_print_result_full
We now expose all key derivation functions on the cli, so users can
choose between scrypt or pbkdf2.
2020-11-24 07:32:34 +01:00
dfb04575ad client: add 'key show' command
for (pretty-)printing a keyfile.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-23 13:15:29 +01:00
6f2626ae19 client: print key fingerprint and master key
for operations where it makes sense.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-23 13:11:26 +01:00
37e60ddcde key: add fingerprint to key config
and set/generate it on
- key creation
- key passphrase change
- key decryption if not already set
- key encryption with master key

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-23 13:03:46 +01:00
05cdc05347 crypt config: add fingerprint mechanism
by computing the ID digest of a hash of a static string.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-23 13:03:16 +01:00
6364115b4b OnlineHelpInfo.js problems
Anybody know why I always get the following diff:
2020-11-23 12:57:41 +01:00
2133cd9103 update debian/control 2020-11-23 12:13:58 +01:00
01f84fcce1 ui: datastore content: use our keep field for group pruning
sets some defaults and provides the clear trigger, so less code and
slightly nicer UX.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-21 19:52:03 +01:00
08b3823025 bump dependency on proxmox to 0.7.1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-20 17:38:34 +01:00
968a0ab261 fix systemd-encoded upid strings in http client
since we systemd-encode parts of the upid string, and those can contain
characters that are invalid in urls (e.g. '\'), we have to percent encode
those

add a 'percent_encode_component' helper, so that we can maybe change
the AsciiSet for all uses at the same time

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-19 11:01:19 +01:00
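An illustrative version of such a helper using the percent-encoding crate; the exact AsciiSet of escaped characters is an assumption:

    use percent_encoding::{utf8_percent_encode, AsciiSet, CONTROLS};

    // One shared AsciiSet so every call site can be adjusted in a single
    // place; which characters to escape is illustrative here.
    const COMPONENT_SET: &AsciiSet = &CONTROLS.add(b'\\').add(b'%').add(b'/');

    fn percent_encode_component(comp: &str) -> String {
        utf8_percent_encode(comp, COMPONENT_SET).to_string()
    }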
21b552848a prune sim: make numberfields more similar to PBS's
by creating a new class that adds a clear trigger and also uses the
clear-trigger image. Code was taken from the one in PBS's prune window,
but we have default values here, so a bit of adapting was necessary. For
example, we don't want to reset to the original value (which might have
been one of the defaults) when clearing, but always to 'null'.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-19 09:47:51 +01:00
fd19256470 gc: treat .bad files like regular chunks
Simplify the phase 2 code by treating .bad files just like regular
chunks, with the exception of stat logging.

To facilitate, we need to touch .bad files in phase 1. We only do this
under the condition that 1) the original chunk is missing (as before),
and 2) the original chunk is still referenced somewhere (since the code
lives in the error handler for a failed chunk touch, it only gets called
for chunks we expect to be there, i.e. ones that are referenced).

Untouched they will then be cleaned up after 24 hours (or after the last
longer-running task finishes).

Reason 2) is also a fix for .bad files not being cleaned up at all if
the original is no longer referenced anywhere (e.g. a user deleting all
snapshots after seeing some corrupt chunks appear).

cond_touch_path is introduced to touch arbitrary paths in the chunk
store with the same logic as touching chunks.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-11-18 14:04:49 +01:00
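A hedged sketch of what cond_touch_path could look like, using the filetime crate for illustration rather than the actual chunk-store code:

    use std::path::Path;

    use anyhow::{Error, Result};

    // Update the mtime of an arbitrary path with the same "missing file is
    // not an error" logic used for touching chunks.
    fn cond_touch_path(path: &Path) -> Result<bool> {
        match filetime::set_file_mtime(path, filetime::FileTime::now()) {
            Ok(()) => Ok(true),
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(false),
            Err(err) => Err(Error::from(err)),
        }
    }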
1ed022576c api: include store in invalid owner errors
since a group might exist in plenty of stores

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-18 11:11:24 +01:00
f6aa7b38bf drop now unused BackupInfo::list_backups
all global backup listing now happens via BackupGroup

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-18 11:11:21 +01:00
fdfcb74d67 api: filter snapshot counts
unprivileged users should only see the counts related to their part of
the datastore.

while we're at it, switch to a list groups, filter groups, count
snapshots approach (like list_snapshots) to speed up calls to this
endpoint when many unprivileged users share a datastore.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-18 11:05:50 +01:00
98afc7b152 api: make expensive parts of datastore status opt-in
used in the PBS GUI, but also for PVE usage queries which don't need all
the extra expensive information..

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-18 11:05:47 +01:00
0d08fceeb9 improve group/snapshot listing
by listing groups first, then filtering, then listing group snapshots.

this cuts down the number of openat/getdirents calls for users that just
have a partial view of the datastore.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-18 10:37:04 +01:00
3c945d73c2 client/http_client: add put method
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-16 16:59:14 +01:00
58fcbf5ab7 client: expose all-file-systems option
Useful to avoid the need for a long (and possibly changing) list of include-dev
options in certain situations, e.g. nested ZFS file systems. The option is
already implemented and seems to work as expected. The checks for virtual
filesystems are not affected by this option.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-16 16:59:14 +01:00
3a3f31c947 ui: datastores: hide "no datastore" box by default
avoids showing it during store load; we do not know whether there are
no datastores at that point, and we already have a loading mask.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-16 16:59:14 +01:00
8fc63287df ui: improve comment behaviour for datastore Summary
when we could not load the config (e.g. missing permissions)
show the comment from the global datastore-list

also show a messagebox for a load error instead of setting
the text of the comment box

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-16 10:39:34 +01:00
172473e4de ui: DataStoreList: show message when there are no datastores
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-16 10:39:34 +01:00
76f549debb ui: DataStoreList: remove datastores also from hash
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-16 10:39:34 +01:00
c9097ff801 pxar: avoid including archive root's exclude patterns in .pxarexclude-cli
The patterns from the archive root's .pxarexclude file are already present in
self.patterns when encode_pxarexclude_cli is called. Pass along the number of
CLI patterns and slice accordingly.

Suggested-By: Wolfgang Bumiller <w.bumiller@proxmox.com>
Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-12 13:05:09 +01:00
fb01fd3af6 visibility cleanups
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-12 11:53:50 +01:00
fa4bcbcad0 pxar: only generate .pxarexclude-cli if there were CLI parameters
previously a .pxarexclude entry in the root of the archive caused the file to
be generated as well, because the patterns are read before calling
generate_directory_file_list and within the function it wasn't possible to
distinguish between a pattern coming from the CLI and a pattern coming from
archive/root/.pxarexclude

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-12 11:18:08 +01:00
189cdb7427 pxar: include .pxarexclude files in the archive
The documentation states:
.pxarexclude files are treated as regular files and will be included in the
backup archive.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-12 11:18:06 +01:00
874bd5454d pxar: fix anchored exclusion at archive root
There is no leading slash in an entry's full_path, causing an anchored
exclude at the root level to fail, e.g. having "/name" as the content of the
file archive/root/.pxarexclude didn't match the file archive/root/name

Fix this by prepending a leading slash before matching.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-12 11:18:04 +01:00
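A minimal sketch of the leading-slash normalization described above; the helper name is illustrative:

    // Match entry paths with a leading slash so anchored patterns like
    // "/name" apply at the archive root.
    fn path_for_matching(full_path: &str) -> String {
        if full_path.starts_with('/') {
            full_path.to_owned()
        } else {
            format!("/{}", full_path)
        }
    }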
b649887e9a remove unused function
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-12 11:15:15 +01:00
8c62c15f56 followup: whitespace cleanup
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-12 11:02:45 +01:00
51ac17b56e api: apt/versions: fix running_kernel string for unknown package case
Signed-off-by: Mira Limbeck <m.limbeck@proxmox.com>
2020-11-12 11:02:20 +01:00
fc5a012068 manager: versions: non-verbose should actually print server pkg info
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-12 10:28:03 +01:00
5e293f1315 apt: use typed response for get_versions
...and cleanup get_versions for manager CLI.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-11-12 10:15:32 +01:00
87367decf2 ui: tell ESLint to be strict in check target
the .lint-incremental target, which is implicitly used by the install
target, is still more forgiving to allow faster "change, build, test"
iteration when developing.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-12 09:54:39 +01:00
f792220dd4 d/control: update for new pin-project dependency
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-12 09:54:39 +01:00
97030c9407 cleanup clippy leftovers
this used to contain a pointer cast, now it doesn't

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-12 09:43:38 +01:00
5d1d0f5d6c use pin-project to remove more unsafe blocks
we already have it in our dependency tree, so use it

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-12 09:43:38 +01:00
294466ee61 manager: versions: unify printing
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 18:30:33 +01:00
c100fe9108 add versions command to proxmox-backup-manager
Add the versions command to proxmox-backup-manager with a similar output
to pveversion [-v]. It prints the packages line by line with only the
package name, followed by the version and, for proxmox-backup and
proxmox-backup-server, some additional information (running kernel,
running version).

In addition it supports the optional output-format parameter which can
be used to print the complete data in either json, json-pretty or text
format. If output-format is specified, the --verbose parameter is
ignored and the detailed list of packages is printed.

With the addition of the versions command, the report is extended as
well.

Signed-off-by: Mira Limbeck <m.limbeck@proxmox.com>
2020-11-11 18:30:33 +01:00
e754da3ac2 api: versions: add version also in server package unknown case
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 18:30:33 +01:00
bc1e52bc38 api: versions: rust fmt cleanups
line length limit is 100

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 18:30:33 +01:00
6f0073bbb5 api: apt update info: do not serialize extra info if none
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 18:30:33 +01:00
2decf85d6e add extra_info field to APTUpdateInfo
Add an optional string field to APTUpdateInfo which can be used for
extra information.

This is used for passing running kernel and running version information
in the versions API call together with proxmox-backup and
proxmox-backup-server.

Signed-off-by: Mira Limbeck <m.limbeck@proxmox.com>
2020-11-11 16:39:11 +01:00
1d8f849457 api2/node/syslog: use 'real_service_name' here also
for now this only does the 'postfix' -> 'postfix@-' conversion,
fixes the issue that we only showed the 'postfix' service syslog
(which is rather empty in a default setup) instead of the instance one

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-11 16:36:42 +01:00
beb07279b6 log source of encryption key
This patch prints the source of the encryption key when running
operations with proxmox-backup-client.

Signed-off-by: Stoiko Ivanov <s.ivanov@proxmox.com>
2020-11-11 16:35:20 +01:00
8c6854c8fd inform user when using default encryption key
Currently if you generate a default encryption key:
`proxmox-backup-client key create --kdf none`

all backup operations which don't explicitly disable encryption will be
encrypted with this key.

I found it quite surprising that my backups were all encrypted without
me explicitly specifying either a key or an encryption mode

This patch informs the user when the default key is used (and no
crypt-mode is provided explicitly)

Signed-off-by: Stoiko Ivanov <s.ivanov@proxmox.com>
2020-11-11 16:35:20 +01:00
57f472d9bb report: use '$' instead of '#' for showing commands
since some files can contain '#' character for comments. (i.e.,
/etc/hosts)

Signed-off-by: Oguz Bektas <o.bektas@proxmox.com>
2020-11-11 16:19:37 +01:00
94ffca10a2 report: fix grammar error
Signed-off-by: Oguz Bektas <o.bektas@proxmox.com>
2020-11-11 16:19:33 +01:00
0a274ab0a0 ui: UserView: render name as 'Firstname Lastname'
instead of only the firstname

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-11 14:09:40 +01:00
c0026563b0 make user properties deletable
by using our usual pattern for the update call

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-11 14:09:40 +01:00
e411924c7c rest: check for disabled token (user)
when authenticating a token, and not just when authenticating a
user/ticket.

Reported-By: Dominik Jäger <d.jaeger@proxmox.com>

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-11 12:21:29 +01:00
709c15abaa bump version to 1.0.1-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 10:21:30 +01:00
b404e4d930 d/control: check in new dependencies to generated control
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 10:21:30 +01:00
f507580c3f docs: faq: fix first releases
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 10:14:01 +01:00
291b786076 docs: fix prune retention example
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 10:14:01 +01:00
06c9059dac daemon: rename method, endless loop, bail on exec error
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 10:14:01 +01:00
d7c6ad60dd daemon: add hack for sd_notify
sd_notify is not synchronous, i.e. it only waits until the message
reaches the queue, not until it is processed by systemd

when the process that sent such a message exits before systemd could
process it, it cannot be associated with the correct pid

so in case of reloading, we send a message with 'MAINPID=<newpid>'
to signal that it will change. if the old process now exits before
systemd knows this, it will not accept the 'READY=1' message from the
child, since it rejects the MAINPID change

since there is no (AFAICS) library interface to check the unit status,
we use 'systemctl is-active <SERVICE_NAME>' to check the state until
it is not 'reloading' anymore.

on newer systemd versions, there is 'sd_notify_barrier' which would
allow us to wait for systemd to have processed all messages from the
current pid before acknowledging to the child, but on buster the
systemd version is too old...

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-11 09:43:00 +01:00
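A rough sketch of the polling described above; retry count and interval are assumptions, not the actual daemon code:

    use std::process::Command;
    use std::thread::sleep;
    use std::time::Duration;

    use anyhow::{bail, Result};

    // Poll `systemctl is-active <unit>` until the state is no longer
    // 'reloading', since sd_notify gives no synchronous acknowledgement.
    fn wait_until_not_reloading(unit: &str) -> Result<()> {
        for _ in 0..20 {
            let output = Command::new("systemctl")
                .args(&["is-active", unit])
                .output()?;
            if String::from_utf8_lossy(&output.stdout).trim() != "reloading" {
                return Ok(());
            }
            sleep(Duration::from_millis(500));
        }
        bail!("unit '{}' still reloading after timeout", unit);
    }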
0a0ba0785b prune sim: avoid colon to separate keep desc from count
hack for space issues for monthly keeps and >9 counts

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 08:20:13 +01:00
6ed79592f2 prune sim: make backup schedule a form, bind update button to its validity
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 08:11:46 +01:00
4c75ee3471 prune sim: do not use unnecessary variable, declare inline
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 08:11:16 +01:00
6f997da8cd prune sim: set min-height for calendar day cells
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 08:10:43 +01:00
03e40aa4ee ui: datastore add: set default schedule
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 07:49:01 +01:00
be1d6cbcc6 ui: shorten automatic ID length a bit
Without hyphens, we had 20 hex digits, so ~80 bits, which is probably overkill.
Use 12 (13 with hyphen); this is still 48 bits.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 07:40:23 +01:00
ffaca016ad ui: datastore summary: drop removed bytes display
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 07:27:21 +01:00
71f82a98d7 d/control: add missing dependencies for non ISO installations
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-11 07:26:05 +01:00
deef6fbc0c cargo: extend authors list
this was mostly selected by executing

and adding those with more than a handful of commits, so no hard
feelings here, this was definitely also a team effort to get stuff
polished!

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 14:47:48 +01:00
4ac529141f bump version to 1.0.0-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 14:47:48 +01:00
a108a2e967 ui: drop debug beta code
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 14:47:48 +01:00
ff7a29104c postinst: fix version check for remote.cfg cleanup
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-10 14:35:37 +01:00
240b2ffb9b ui: improve activeTab selection from fragment and state
handle invalid fragments for tabs, as well as not rendered tabpanels

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-10 14:21:54 +01:00
a86e703661 tools::runtime: pin_mut instead of unsafe block
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-10 14:18:45 +01:00
1ecf4e6d20 async_io: require Unpin for EitherStream and HyperAccept
We use it with Unpin types and this way we get rid of a lot
of `unsafe` blocks.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-10 14:18:45 +01:00
9f9a661b1a verify: cleanup logging order/messages
otherwise we end up printing warnings before the start message..

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-10 14:11:36 +01:00
1b1cab8321 verify: log/warn on invalid owner
in order to trigger a notification/make the problem more visible than
just in syslog.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-10 14:11:36 +01:00
f4f9a503de ui: add missing panel help buttons
add missing help buttons (question mark, top right) so that we are
consistent and each panel has it.

I chose the IMHO most fitting sections.

Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
2020-11-10 13:53:21 +01:00
a4971d5f90 docs: add ref for sysadmin host admin section
Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
2020-11-10 13:53:21 +01:00
477ebe6b78 docs: user management: avoid some inconsistencies
The space between '--' and 'path' in two of the commands was wrong. The other
changes make the names of the store and token consistent with the rest of the
section and should improve readability.

Also add the Datastore.Verify permission in the output of the command:
proxmox-backup-manager user permissions john@pbs --path /datastore/store1
A DatastoreAdmin now has this permission and that's what john@pbs is in the
example.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-10 13:47:52 +01:00
38efbfc148 ui: app: fix fixme
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 13:38:30 +01:00
10052ea644 remote.cfg: rename userid to 'auth-id'
and fixup config file on upgrades accordingly

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-10 13:25:24 +01:00
b57619ea29 ui: datastores sync: future proof and move local store column in front
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 13:22:54 +01:00
445b0043b2 ui: show (local)datastore column only in global sync/verifyview
it's rather hacky, but our cbind mixin does not support columns (yet).
if it does sometime in the future, we could use that instead

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-10 13:14:47 +01:00
8b62cbe752 docs: update package repositories
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 13:14:04 +01:00
81f99362d9 docs: installation: don't mention ext3 as an option anymore
Support for ext3 was removed by commit 0abf0d3683b74421eca24ba61d1d4e100d35211a
in pve-installer.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-10 13:13:44 +01:00
414c23facb fix #3060: improve get_owner error handling
log invalid owners to system log, and continue with next group just as
if permission checks fail for the following operations:
- verify store with limited permissions
- list store groups
- list store snapshots

all other call sites either handle it correctly already (sync/pull), or
operate on a single group/snapshot and can bubble up the error.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-10 12:58:44 +01:00
c5608cf86c encryption: add best practice for storing master key
Further clarify that the paperkey should be a last resort
recovery option, after a password manager and usb drive.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-10 12:51:30 +01:00
5d08c750ef HttpsConnector: include destination on connect errors
for more useful log output
old:
Nov 10 11:50:51 foo pvestatd[3378]: proxmox-backup-client failed: Error: error trying to connect: tcp connect error: No route to host (os error 113)
new:
Nov 10 11:55:21 foo pvestatd[3378]: proxmox-backup-client failed: Error: error trying to connect: error connecting to https://thebackuphost:8007/ - tcp connect error: No route to host (os error 113)

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-10 11:58:19 +01:00
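An illustrative helper for attaching the destination to the error, not the actual HttpsConnector code:

    use anyhow::{format_err, Error};

    // Wrap a low-level connect error so the log shows where the connection
    // attempt was going.
    fn connect_error_with_target(err: Error, target: &str) -> Error {
        format_err!("error connecting to {} - {}", target, err)
    }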
f3fde36beb client: error context when building HttpClient
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-10 11:58:19 +01:00
0c83e8891e ui: fix task description 2020-11-10 11:53:39 +01:00
133de2dd1f ui: add/fix help buttons
added a few more help buttons where appropriate:

* GC and Prune schedule windows
* Create Directory window
* API Tokens, link directly to token section
* verify jobs window

Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
2020-11-10 11:51:03 +01:00
c8219747f0 ui: add all online help refs found in docs
recommit the onlinehelp after the scanrefs script has been adapted and
the docs are up to date

Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
2020-11-10 11:50:56 +01:00
0247f794e9 docs: add network management reference
needed in order for the help button in the network edit window to work.

Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
2020-11-10 11:50:17 +01:00
710f787c41 docs: add maintenance chapter prefix to verification ref
Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
2020-11-10 11:50:12 +01:00
d8916a326c scanrefs: only scan docs, not JS files
This is a temporary hack until we find a sensible way to scan the
proxmox-widget-toolkit JS files as well.

Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
2020-11-10 11:50:09 +01:00
924d6d4072 prune sim: show count for rule
and rename 'all zero' to 'keep-all' to make it consistent with the prune dialog
in PBS.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-10 11:47:37 +01:00
984ac33d5c ui: subscription: usage chart: render date as ISO 8601
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 11:46:22 +01:00
0a4dfd63c9 ui: usage graph: show axis and set maximum
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 11:46:05 +01:00
a6e746f652 ui: datastore list summary: add more padding between elements
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 11:46:05 +01:00
30f73fa2e0 fix bug #3060: continue sync if we cannot acquire the group lock 2020-11-10 11:29:36 +01:00
9f0ee346e9 ui: Datastores Summary: change layout and chart
changes the layout to look a little bit more like the statistics panel
we have for ceph in pve, while changing to the UsageChart and adding
some more datastore infos (from the last garbage collection)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-10 10:43:07 +01:00
48d6dede4a ui: refactor calculate_dedup_factor
so that we can reuse this

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-10 10:43:07 +01:00
8432e4a655 ui: add panel/UsageChart
heavily inspired by pveRunningChart, without the dynamic adding
of data and specific to datastore usage

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-10 10:43:07 +01:00
b35eb0a175 api2/status/datastore-usage: add gc-status and history start and delta
so that we can show more info and calculate the points in time for the
history

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-10 10:43:07 +01:00
c3a1b34ed3 ui: subscription: add more button icons, small UX fix
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 10:42:45 +01:00
bb26843cd6 ui/docs: add get help onlineHelp
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 10:35:35 +01:00
ee0ab12dd0 ui: move disks/directory stuff to tab panel
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 10:15:44 +01:00
d5f7755467 docs: online help scanner: also include help tool links
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 10:15:08 +01:00
5c64e83b1e ui: datastore: set onlineHelp for changing group owner
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 09:53:05 +01:00
0f6f99b4ec ui: prune: set onlineHelp
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 09:51:30 +01:00
f668862ae0 ui: prune: add clear-trigger to keep fields
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 09:51:20 +01:00
c960d2b501 bail if mount point already exists for directories
similar to what we do for zfs. By bailing before partitioning, the disk is
still considered unused after a failed attempt.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-10 09:25:58 +01:00
f5d9f2534b mount zpools created via API under /mnt/datastore
as we do for other file systems

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-10 09:25:58 +01:00
9a3ddcea33 ui: utils: eslint format fixes
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 09:24:35 +01:00
030464d3a9 docs: s/DataStore/Datastore/
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 09:24:13 +01:00
3f30b32c2e ui: prune: show count for rule
Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-10 09:24:13 +01:00
5eafe6aabc ui: prune: show which rule keeps backup
and adjust layout so the description fits.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-10 09:24:13 +01:00
2c9f274efa ui: add help tool to user and remote config 2020-11-10 09:23:22 +01:00
31112c79ac ui: add help tool to datastore panel 2020-11-10 09:15:12 +01:00
d89f91b538 ui: acl editor: disallow path editing for datastore permission views
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 08:19:17 +01:00
a6310ec294 ui: fix widget height in dashboard 2020-11-10 08:12:35 +01:00
98d9323534 ui: add link to www.proxmox.com for subscription plans 2020-11-10 08:07:49 +01:00
09f1f28800 ui: ACL view: fix path filtering
and add some comments about actual behavior of those config
properties..

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-10 07:33:20 +01:00
e1da9ca4bb ui: datastore dashboard: use gauge for usage, rework layout a bit
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 19:26:48 +01:00
625c7bfc0b ui: task summary: enable grid mouse track over
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 19:25:43 +01:00
d9503950e3 ui: task summary: add pointer cursor if clickable
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 18:09:05 +01:00
376e927980 ui: datastore summary: increase usage graph height
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 17:55:59 +01:00
5204cbcf0f ui: datastore summary: add line chart icon to full-estimation
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 17:48:53 +01:00
e373dcc564 ui: datastore/content: improve action button layout
Fix font-size to 14px to improve font-awesome rendering, add some
slight margin between the buttons so that they are not glued
together, add a slight text-shadow on mouse over.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 17:45:08 +01:00
137a6ebcad apt: allow changelog retrieval from enterprise repo
If a package is or will be installed from the enterprise repo, retrieve
the changelog from there as well (securely via HTTPS and authenticated
with the subscription key).

Extends the get_string method to take additional headers, in this case
used for 'Authorization'. Hyper does not have built-in basic auth
support AFAICT but it's simple enough to just build the header manually.

Take the opportunity and also set the User-Agent sensibly for GET
requests, just like for POST.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-11-09 17:28:58 +01:00
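
A minimal Rust sketch of building such a header by hand (helper name and call site assumed, not the actual patch; the base64 crate is already a dependency):

    // Hypothetical sketch: build an HTTP Basic Authorization header manually,
    // since hyper does not ship a basic-auth helper.
    fn basic_auth_header(user: &str, password: &str) -> String {
        let credentials = base64::encode(format!("{}:{}", user, password));
        format!("Basic {}", credentials)
    }

    // The result would then be passed as the "Authorization" header of the
    // changelog GET request.
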
ed1329ecf7 ui: make Datastore clickable again
by showing the previously added pbsDataStores panel

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
2371c1e371 ui: add Panels necessary for Datastores Overview
a panel for a single datastore that gets updated from an external caller;
it shows the usage, estimated full date, history and task summary grid

a panel that dynamically generates the panel above for each datastore

and a tabpanel that includes the panel above, as well as a global
syncview, verifyview and aclview

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
63c07d950c ui: TaskSummary: handle less defined parameters of tasks
this makes it a little easier to provide good data, without
hardcoding all types in the source object

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
a3cdb19e33 ui: TaskSummary: add subPanelModal and datastore parameters
in preparation for the per-datastore grid

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
4623cd6497 ui: TaskSummary: move state/types/titles out of the controller
it seems that under certain circumstances, extjs does not initialize
or remove the content from objects in controllers

move them to the view, where they always exist

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
ab81bb13ad ui: make Sync/VerifyView and Edit usable without datastore
we want to use this panel again for a 'global' overview, without
any datastore preselected, so we have to handle that and add
a datastore selector in the edit window

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
616650a198 ui: Utils: add parse_datastore_worker_id
to parse the datastore out of a worker_id
for this we need some regexes that are the same as in the backend

for now we only parse out the datastore, but we can extend this
in the future to parse relevant info (e.g. remote for syncs,
id/type for backups)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
78763d21b1 ui: refactor render_size_usage to Utils
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
f2d6324958 ui: refactor render_estimate
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
6e880f19cc api2/node/tasks: add check_job_store and use it
to easily check the store of a worker_id
this fixes the issue that one could not filter by type 'syncjob' and
datastore simultaneously

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-09 16:37:24 +01:00
64623f329e ui: recommit onlinehelp
now that the last commit fixed the title generation

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 16:36:00 +01:00
407f3fb994 scanrefs: remove term prefix from title
It can happen that a title is defined as a term in the following way:
:term:`My title`

This patch checks for it and strips the leading part and the last `.

Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
2020-11-09 16:35:29 +01:00
0eb0c4bd63 proxy: fix log message for auth log rotation
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 16:34:03 +01:00
82422c115a ui: admin/summary: add versions button/window
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 16:33:22 +01:00
ed2beb334d api: node/apt: add versions call
very basic, based on the API/concepts of the PVE one.

Still missing: adding an extra_info string option to APTUpdateInfo
and passing along the running kernel/PBS version there.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 16:31:56 +01:00
f3b4820d06 www: show more ACLs in datastore panel
since just the ACLs defined on the exact datastore path don't give
anywhere near a complete picture of who has access to it.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-09 15:19:15 +01:00
8f7cd96df4 installation: minor wording fix
very minor but worthwhile edits

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-09 15:18:44 +01:00
4accbc5853 backup-client: encryption: discuss paperkey command
adds a paragraph to the encryption section about
encoding the master key into a qr code for printing

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-09 15:18:44 +01:00
2791318ff1 fix bug #3121: forbid removing used remotes 2020-11-09 12:48:29 +01:00
47208b4147 pxar: log when skipping mount points
Clippy complains about the number of parameters we have for
create_archive and it really does need to be made somewhat
less awkward and more usable. For now we just log to stderr
as we previously did. Added todo-comments for this.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-09 12:43:16 +01:00
b783591fb5 ui: datastore content: ensure action column is wide enough
with the "change owner" action added we now need more than the
default of 100 px, so increase to 120 px for now.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 12:31:14 +01:00
9dd6175808 ui: token selector: use same layout as auth id selector
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 12:24:54 +01:00
5e8b97178e ui: auth/token selector: tell ExtJS we injected data into the store
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 12:21:02 +01:00
38260cddf5 tools apt: include package name in filter data
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 08:55:08 +01:00
80b0423d54 bump version to 0.9.7-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-09 07:37:05 +01:00
b690bb69eb prune sim: align documentation style with sphinx/alabaster ones
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-08 14:02:27 +01:00
8a40e22691 docs: scroll navigation to current active section
Add a custom JavaScript file to all HTML rendered docs output.

For now it only hosts a small code snippet which gets the current
active section link and brings it into view.
Needs to be triggered after the DOM is initially loaded (which is still
before *all* resources like images, iframes, ... are necessarily
loaded), else the query cannot work.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-08 13:29:09 +01:00
f5c6a2c956 prune sim: slight layout adaptions
add some margin to the calendar table, to not make it seem glued to
the left and top; this follows what ExtJS does in general.

Further, adapt the layout flex so that the docs get 2/5 and the calendar 3/5
of the space on small screens (e.g., 720p), which makes it look much better
there.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-08 13:24:27 +01:00
6d5803399b ui: add some onlineHelp reference uses for pruning
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 16:03:07 +01:00
3896f80cb3 docs: expand prune section, mention simulator, add onlineHelp refs
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 15:51:09 +01:00
60d2a6157a prune sim: make prune options panel scrollable
Else it's cut off on 720p resolution

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 14:33:15 +01:00
b83b12cf80 prune sim: add daily 00:00 as predefined schedule in selector
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 14:08:41 +01:00
86847f487b prune sim: allow simulating up to 5 years
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 14:08:41 +01:00
1b03910dea prune sim: spell out PBS, add some flex to layout
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 14:08:41 +01:00
435a6c5e0a prune sim: fine tune calendar layout/style
Avoid black on white, too much contrast hurts the eye, use a dark grey
instead.

Highlight Sundays, and show month boundaries explicitly with strong
dashed border.

Factor out some manual set styles to classes and use them instead,
decoupling logic and styling a bit more.

Use span elements for plain text stuff, which should not be a block
(e.g., div) element.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 14:08:41 +01:00
1f4befe136 prune sim: enable calendar by default
it has a really good non-intrusive layout now, so show its glory by
default.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 13:36:58 +01:00
7f0f366675 prune sim: do not continue with reload if we caught an exception
as we then try to dereference hours which is null, for example.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 13:35:58 +01:00
362e69610c prune sim: set update button handler directly
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 13:35:26 +01:00
bad26df102 prune sim: factor out toggling color, and default to true
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 13:34:20 +01:00
790627b4bf prune sim: avoid unnecessary viewmodel formula
we set a reference on the checkbox, so we get this for free

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 13:33:08 +01:00
6de14a55ed prune sim: fix numberfield spinner scroll with firefox
copied over from widget toolkit

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 13:32:04 +01:00
8b24c6880a prune sim: eslint fixes, do not define console
really not required nowadays, and we do not use it anyway here..

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 13:31:14 +01:00
5174956548 prune sim: improve documentation layout
Better line height, some margin on the edges, and max width to avoid
very long lines on wide displays.

Avoid too much contrast from black on white, use a very dark grey
instead.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-07 13:28:50 +01:00
d669a739b2 ui: datastore: backup owner change: fix layout
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 19:48:08 +01:00
c7fa61619e ui: move backup group owner changer into window folder
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 19:47:45 +01:00
009a04f8d0 ui: auth-id selector: validity, code-style and layout fixes
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 19:46:08 +01:00
0953044cfb ui: use AuthidSelector for selecting new owner
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-06 19:06:35 +01:00
d923671a7b ui: use AuthidSelector for sync job owner
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-06 19:06:34 +01:00
db8a606707 proxmox-backup-proxy: remove unnecessary alias
the basedir is already /usr/share/javascript/proxmox-backup/
so adding a subdir of that as alias is not needed

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-06 18:08:18 +01:00
b614b29bea ui: datastore: add option view tab
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 17:52:15 +01:00
65595e169f ui: add NotifyOptions edit window
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 17:52:15 +01:00
10db4717f1 docs: maintenance: document notifications
can surely be improved, just to have anything..

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 17:52:15 +01:00
1d9d2f0f7c ui: utils: add property format string helpers from PVE
slightly adapted, i.e., the delete_if_default helper always sets the
delete property to an array if it does not exist.

Also, filter out undefined values when printing properties.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 17:52:15 +01:00
ad53c1d6dd api: datastore: allow to set "verify-new" option over API
Until now, one could only set this by editing the configuration file
manually.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 17:24:14 +01:00
beeadb8a4b Remove reference to backup@pam 2020-11-06 16:32:35 +01:00
b997524912 Add screenshots
For:
- api tokens
- new user management interface
- updated server administration

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-06 16:30:59 +01:00
cc4a9d250a maintenance: add verification and prune to section
Includes new screenshots of the interface

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-06 16:29:59 +01:00
6227b9bab0 Update where to find certain items since GUI update
- Sync jobs in datastore
- "User management" is now section of Access Control

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-06 16:28:47 +01:00
f608e74c8b datastore: description of new datastore view
- Add screenshots from new datastore view
- Add description of comment field in create datastore window
- Add description of each tab in the datastore panel
- Update instructions to add datastore from GUI

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-06 16:28:16 +01:00
08379a21d1 backup-client: add section on change-owner command
Add section "Changing the Owner of a Backup Group"

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-06 16:27:20 +01:00
8f1d972149 installation & gui: Formatting fixup
Fix some minor formatting errors in the docs

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-06 16:26:09 +01:00
b59c308219 Vec::new is Vec's default default
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-06 14:55:34 +01:00
0224c3c273 client: properly complete new-owner
with remote Authids, not local Userids.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-06 14:54:08 +01:00
f0609851fc www: add AuthidSelector
similar to TokenSelector, but with different fields / mapping of data.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-06 13:06:16 +01:00
dbd45a72c3 tasks: allow access to job tasks
if the user/token could have configured or manually executed the
task, but it was actually executed via the schedule (root@pam) or by
another user/token.

without this change, semi-privileged users (that cannot read all tasks
globally, but are DatastoreAdmin) could schedule jobs, but not read
their logs once the schedule executes them. it also makes sense for
multiple such users to see each other's manually executed jobs, as long as
the privilege level on the datastore (or remote/remote_store/local
store) itself is sufficient.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-06 12:56:06 +01:00
4c979d5450 verify: allow unprivileged access to admin API
which is the one used by the GUI.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-06 12:41:41 +01:00
35c80d696f verify: fix unprivileged verification jobs
since the store is not a path parameter, we need to do manual instead of
schema checks. also dropping Datastore.Backup here

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-06 12:39:06 +01:00
6823fdc7f9 ui: improve prune simulator layout 2020-11-06 12:12:59 +01:00
3323798b54 include prune simulator in build
Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
Signed-off-by: Dietmar Maurer <dietmar@proxmox.com>
2020-11-06 09:59:24 +01:00
67fd09791f create prune simulator
A stand-alone ExtJS app that allows experimenting with different backup
schedules and prune parameters.

The HTML for the documentation was taken from the PBS docs and adapted to the
context of the simulator.

For performance reasons, the week table does not use
subcomponents, but raw HTML.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-11-06 09:13:43 +01:00
1b37ebf6f6 ui: require owner for sync jobs 2020-11-06 08:48:07 +01:00
043406d662 ui: use pbsUserSelector for BackupGroupChangeOwner 2020-11-06 08:48:07 +01:00
61db0851d6 gui: Add button for changing backup group owner
Extension of fix #2847

Adds an action button to the datastore content view,
to change the owner of a backup.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-11-06 08:48:00 +01:00
ad54df3178 get rid of backup@pam 2020-11-06 08:39:30 +01:00
71103afd69 fixup: acutally commit all changes..
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 08:24:30 +01:00
6465d809cd ui: move datastore related files into own folder
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-06 08:11:22 +01:00
ae8635c307 www: add remote store selector
(hopefully) improved upon NFS export selection in PVE

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-05 12:56:20 +01:00
e0100d618e api: refactor remote client and add remote scan
to allow on-demand scanning of remote datastores accessible for the
configured remote user.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-05 12:56:20 +01:00
455e5f7110 types: extract DataStoreListItem
for reuse in remote scan API call

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-05 12:56:20 +01:00
c26c9390ff config: make notify a property string
For example "gc=never,verify=always,sync=error".
2020-11-05 11:35:14 +01:00
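
A rough sketch of how such a property string could be split into key/value pairs (illustrative only, not how the config code actually parses it):

    use std::collections::HashMap;

    // Parse "gc=never,verify=always,sync=error" into { gc: never, ... }.
    fn parse_notify_string(value: &str) -> HashMap<&str, &str> {
        value
            .split(',')
            .filter_map(|pair| {
                let mut parts = pair.splitn(2, '=');
                Some((parts.next()?.trim(), parts.next()?.trim()))
            })
            .collect()
    }
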
9e45e03aef tools/daemon: fix reload with open connections
instead of await'ing the result of 'create_service' directly,
poll it together with the shutdown_future

if we reached that, fork_restart the new daemon, and await
the open future from 'create_service'

this way the old process still handles open connections until they finish,
while we already start a new process that handles new incoming connections

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-05 11:14:56 +01:00
e144810d73 pxar: more concise EOF handling
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-05 10:32:48 +01:00
3c2dd8ad05 pxar/create: handle ErrorKind::Interrupted for file reads
they are not an error and we should retry the read

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-05 10:27:36 +01:00
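
A small sketch of that retry pattern (generic, not the pxar source):

    use std::io::{self, Read};

    // Retry reads that fail with ErrorKind::Interrupted instead of
    // propagating them as hard errors.
    fn read_retrying<R: Read>(reader: &mut R, buf: &mut [u8]) -> io::Result<usize> {
        loop {
            match reader.read(buf) {
                Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue,
                other => return other,
            }
        }
    }
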
91e3b38da4 pxar/create: fix endless loop for shrinking files
when a file shrank during backup, we endlessly looped, reading/copying 0 bytes.
we already have code that handles shrunk files, but we forgot to
break from the read loop

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-11-05 10:27:30 +01:00
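
A simplified sketch of the idea behind the fix (assumed shape, not the actual create_archive code): the copy loop has to stop once read() returns 0, otherwise a file that shrank keeps it spinning forever.

    use std::io::{self, Read, Write};

    fn copy_up_to<R: Read, W: Write>(src: &mut R, dst: &mut W, mut remaining: u64) -> io::Result<u64> {
        let mut buf = [0u8; 64 * 1024];
        let mut copied = 0;
        while remaining > 0 {
            let want = remaining.min(buf.len() as u64) as usize;
            let got = src.read(&mut buf[..want])?;
            if got == 0 {
                break; // file shrank during backup: stop instead of looping on 0-byte reads
            }
            dst.write_all(&buf[..got])?;
            copied += got as u64;
            remaining -= got as u64;
        }
        Ok(copied)
    }
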
9d79cec4d5 bump version to 0.9.6-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 19:13:04 +01:00
4935681cf4 ui: sync jobs: add tooltip for remove vanished
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 19:07:07 +01:00
669fa672d9 ui: sync jobs: reorder fields
group local ones together on the left side, and source + schedule
on the right side.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 19:05:48 +01:00
a797583535 ui: sync jobs: fix originalValue of owner and improve label
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 19:04:42 +01:00
54ed1b2a71 ui: sync jobs: only set default schedule when creating new jobs
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 19:04:06 +01:00
8e12e86f0b ui: add shell panel under administration
some users prefer an inline console
we still have the pop-out console in 'Administration'

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-04 18:16:49 +01:00
fe7bdc9d29 proxy: also rotate auth.log file
no need for triggering re-open here, we always re-open that file.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 17:26:34 +01:00
546b6a23df proxy: logrotate: do not serialize sending async log-reopen commands
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 17:26:34 +01:00
4fdf13f95f api: factor out auth logger and use for all API authentication failures
we have information here not available in the access log, especially
if the /api2/extjs formatter is used, which encapsulates errors in a
200 response.

So keep the auth log for now, but extend its use from create ticket
calls to all authentication failures for API calls; this ensures one
can also fail2ban tokens.

Do that logging in a central place, which makes it simple but means
that we do not have the user ID information available to include in
the log.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 17:26:34 +01:00
385681c9ab worker task: fix passing upid to send command
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 17:16:55 +01:00
be99df2767 log rotate: only add .zst to new file after second rotation
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 17:16:55 +01:00
30200b5c4a ui: fix task description for log rotate
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 14:20:44 +01:00
f47c1d3a2f proxy: use new datastore notify settings 2020-11-04 11:54:29 +01:00
6e545d0058 config: allow to configure who receives job notify emails 2020-11-04 11:54:29 +01:00
84006f98b2 ui: SyncJobEdit: fix sending 'delete' values on SyncJob creation
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-04 11:39:52 +01:00
42ca9e918a sync: improve log format 2020-11-04 09:10:56 +01:00
ea93bea7bf proxy: log if there are too many open connections 2020-11-04 08:49:35 +01:00
0081903f7c fix bug #2870: use updated tickets 2020-11-04 08:20:36 +01:00
c53797f627 ui: set default deduplication factor to 1.0 2020-11-04 07:12:55 +01:00
e1d367df47 proxy: use env PROXMOX_DEBUG to enable/disable debug output
We only print early connection errors when this env var is set.
2020-11-04 06:55:57 +01:00
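
Roughly, the gating looks like this (sketch with assumed helper names):

    // Only emit early connection errors when PROXMOX_DEBUG is set.
    fn debug_enabled() -> bool {
        std::env::var("PROXMOX_DEBUG").is_ok()
    }

    fn log_early_connect_error(err: &dyn std::fmt::Display) {
        if debug_enabled() {
            eprintln!("early connection error: {}", err);
        }
    }
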
71f413cd27 cleanup: use Arc to count open connections 2020-11-04 06:35:44 +01:00
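
The idea behind the Arc-based counting, as a sketch (assumed approach, simplified from the actual proxy code):

    use std::sync::Arc;

    // Each accepted connection clones the Arc; when the connection task ends
    // the clone is dropped, so the strong count tracks open connections.
    fn accept(counter: &Arc<()>) -> Arc<()> {
        Arc::clone(counter)
    }

    fn open_connections(counter: &Arc<()>) -> usize {
        Arc::strong_count(counter) - 1 // subtract the reference we keep ourselves
    }
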
48aa2b93b7 fix #3106: correctly queue incoming connections 2020-11-04 06:24:42 +01:00
641862ddad bump version to 0.9.5-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-03 17:41:26 +01:00
2f08ee1fe3 report: add more commands/files to check
add all of our configuration files in /etc/proxmox-backup/; further,
call some ZFS tools to get that status.

Also, use the subscription command from the manager, as we often require
more info than the status. Also, adapt formatting a bit.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-03 17:33:16 +01:00
93f077c5cf report: avoid lazy_static for command/files/.. definitions
those are not in a hot code path, and it is not really much work to
build them on the go..

It may not matter much, but it is unnecessary. Rust will probably
inline most of it anyway..

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-03 17:27:16 +01:00
941342f70e manager: report: call method directly, avoid HTTPS request
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-03 17:23:43 +01:00
9a556c8a30 manager: add report cli command
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
2020-11-03 15:16:42 +01:00
46dce62be6 report: add webui button for system report
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
2020-11-03 15:16:42 +01:00
b0ef9631e6 report: add api endpoint and function to generate report
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
2020-11-03 15:16:42 +01:00
fb0d9833af ui: task filter: add button icons
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-03 14:49:04 +01:00
bfe4b7d782 ui: task filter: reorder to avoid wasting vertical space
Includes some eslint fixes and label changes as well; it was too much
work to split that out into its own commit.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-03 14:48:20 +01:00
185dab7678 ui: add panel/Tasks and use it for the node tasks
this is a panel that is heavily inspired by the widget-toolkit's
node/Tasks panel, but is adapted to use the extended api calls of
pbs (e.g. since/until filter)

has a 'filter' panel (like pmg's log tracker gui), but it is collapsible

if we extend the api calls of the other projects, we can merge this
again into the widget-toolkit one and use that

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-03 11:35:21 +01:00
c1fa057cce api2/node/tasks: add optional until filter
so that users can select specific time ranges with 'since' and 'until'
(e.g. a single day)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-03 11:35:21 +01:00
f66565203a api2/status: remove list_task api call
we do not need it anymore, we can do everything with nodes/NODE/tasks
instead

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-03 11:35:21 +01:00
a2a7dd1535 api2/node/tasks: add optional since/typefilter/statusfilter
and change all users of the /status/tasks api call to this

with this change we can now delete the /status/tasks api call

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-03 11:35:21 +01:00
e7dd169fdf api2/node/tasks: change limit behaviour when it is 0
instead of returning 0 elements (which does not really make sense anyway),
change it so that there is no limit anymore (besides usize::MAX)

this is technically a breaking change for the api, but i guess
no one is using limit=0 for anything sensible anyway

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-03 11:35:21 +01:00
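
In effect the limit handling becomes something like this (sketch of the semantics, not the exact parameter code):

    // limit == 0 now means "effectively unlimited" instead of "return nothing".
    fn effective_limit(limit: u64) -> usize {
        if limit == 0 { usize::MAX } else { limit as usize }
    }
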
fa31f4c54c server/worker_task: add tasktype to return the api type of a taskstate
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-03 11:35:21 +01:00
038ee59960 cleanup: use const_regex, use BACKUP_ID_REGEX for api too 2020-11-03 06:36:50 +01:00
e1c1533790 fix #3039: use the same ID regex for info and api
in the api we use PROXMOX_SAFE_ID_REGEX for backup ids, but here
(where we use it to list them) we use a local regex

since the first is a superset of the one used here, simply extend
the local one

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-03 06:25:06 +01:00
9de7c71a81 docs: extend managing remotes
with information about required privileges and limitations

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-02 21:13:24 +01:00
aa64e06540 sync: add access check tests
should cover all the current scenarios. remote server-side checks can't
be meaningfully unit-tested, but they are simple enough that they should
hopefully never break.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-02 21:13:24 +01:00
18077ac633 user.cfg/user info: add test constructors
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-02 21:13:24 +01:00
a71a009313 proxy: drop now unused UPID import
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 21:08:38 +01:00
b6ba5acd29 proxmox-backup-proxy: use only jobstate for garbage_collection schedule
in case the garbage_collection errors out, we never set the in-memory
state, so if it failed, the last 'good' starttime was considered
for the schedule

this could lead to the job running every minute instead of the
correct schedule

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-02 21:08:38 +01:00
4fdf5ddf5b api2/admin/datastore: start the garbage_collection task with our helper
instead of manually, this has the advantage that we now set
the jobstate correctly and can return with an error if it is
currently running (instead of failing in the task)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-02 21:08:38 +01:00
c724f65805 server/gc_job: add 'to_stdout'
we will use this for the manual api call

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-02 21:08:38 +01:00
79c9bf55b9 backup/{dynamic, fixed}_index: improve error message for small index files
index files that were smaller than their respective header size
would fail with

"failed to fill whole buffer"

instead, now check explicitly for the size and fail with
"index too small (size)"

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-02 21:08:38 +01:00
788d82d9b7 gc: mark_used_chunks: reduce implementation noise
try to reduce some unnecessary lines, make match arms more precise so
one can see faster what's actually happening.

Also, avoid
> return Err(format_err!(...))
stuff, just use bail!()

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 21:08:38 +01:00
2f0b92352d garbage collect: improve index error messages
so that in case of a broken index file, the user knows which it is

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-02 20:08:50 +01:00
b7f2be5137 log rotate task: make task archive limits be binary based
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 19:53:30 +01:00
72aa1834dc log rotate task: adapt internal jobstate ID, set worker one to None for now
as we have only one logrotate task currently..

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 19:53:30 +01:00
fe4cc5b1a1 server: implement access log rotation with re-open via command socket
re-use the future we already have for task log rotation to trigger
it.

Move the FileLogger in ApiConfig into an Arc, so that we can actually
update it and have the REST server use the new one.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 19:53:30 +01:00
04b053d87e server: write main daemon PID to run directory
so that we can easily get the main PID of the most recently launched
daemon. Will be used to get the control socket of that one for access
log rotation in a future patch

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 19:50:24 +01:00
b469011fd1 command socket: make create_control_socket private
this is internal for now, use the commando socket struct
implementation, and ideally not a new one but the existing ones
created in the proxy and api daemons.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 19:50:24 +01:00
a68768cf31 server: use generalized commando socket for worker tasks commands
Allows to extend the use of that socket in the future, e.g., for log
rotate re-open signaling.

To reflect this we use a more general name, and move the commands
to a clearer namespace.

Both are actually somewhat of a breaking change, but the single real
world issue it should be able to cause is that one won't be able to
stop tasks from older daemons, which still use the older abstract
socket name format.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 19:48:04 +01:00
f3df613cb7 server: add CommandoSocket where multiple users can register commands
This is a preparatory step to replace the task control socket with it
and provide a "reopen log file" command for the rest server.

Kept it simple by disallowing to register new commands after the
socket gets spawned, this avoids the need for locking.

If we really need that we can always wrap it in an Arc<RWLock<..>> or
something like that, or even nicer, register at compile time.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 19:32:22 +01:00
056ee78567 config: network: use error message when parsing netmask failed
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 19:32:22 +01:00
3cd529ea51 tools: file logger: avoid some possible unwraps in log method
writing to a file can explode quite easily.
time formatting to rfc3339 should be more robust, but it has a few
conditions where it could fail, so catch that too (and only really
do it if required).

The writes to stdout are left as is, it normally is redirected to
the journal, which is in memory, and thus breaks later than most stuff,
and at that point we probably do not care anymore anyway.

It could make sense to actually return a result here..

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 19:32:22 +01:00
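
A minimal sketch of the unwrap-free write path (hypothetical, simplified):

    use std::fs::File;
    use std::io::Write;

    // Logging must not take the process down: report write errors on stderr
    // instead of unwrap()ing them.
    fn log_line(file: &mut File, line: &str) {
        if let Err(err) = file.write_all(line.as_bytes()) {
            eprintln!("could not write log line: {}", err);
        }
    }
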
3aade17125 tools: log rotate: compressing rotated files
We always renamed the last one to a file without a compression
extension, even if it was .zst previously. So always add the correct
ending to the new last one, if compress was true.

Further, we cannot detect whether compression would be required if we
already rotated (renamed) it to the file name with .zst included.

So check on rotation itself if it would be a "no .zst" -> ".zst"
transition, and call compress there.

it really should be OK now *knocking on wood*

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 18:35:13 +01:00
1dc2fe20dd tools: log rotate: fix file ending for compressed files
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 18:35:13 +01:00
645a47ff6e config: support netmask when parsing interfaces file 2020-11-02 14:32:35 +01:00
b1456a8ea7 ui: fix verificationjob task description 2020-11-02 10:15:52 +01:00
a9fcbec9dc file logger: allow reopening file
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 10:03:10 +01:00
346a488e35 pull out /run and /var/log directory constants to buildcfg
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 10:03:10 +01:00
3066f56481 notify: add link to server GUI 2020-11-02 09:12:14 +01:00
07ca4e3609 gc: remove extra empty lines in email notification template 2020-11-02 09:12:14 +01:00
dcd75edb72 ui: fix dashboard subscription
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-02 08:08:44 +01:00
59af9ca98e sync: allow sync for non-superusers
by requiring
- Datastore.Backup permission for target datastore
- Remote.Read permission for source remote/datastore
- Datastore.Prune if vanished snapshots should be removed
- Datastore.Modify if another user should own the freshly synced
snapshots

reading a sync job entry only requires knowing about both the source
remote and the target datastore.

note that this does not affect the Authid used to authenticate with the
remote, which of course also needs permissions to access the source
datastore.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-02 07:10:12 +01:00
f1694b062d fix #2864: add owner option to sync
instead of hard-coding 'backup@pam'. this allows a bit more flexibility
(e.g., syncing to a datastore that can directly be used as restore
source) without overly complicating things.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-11-02 07:08:05 +01:00
fa7aceeb15 manager: subscription commands s/delete/remove/
no idea why I added it as "delete", for all other such operations we
use the "remove" sub-command...

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-01 13:19:30 +01:00
0e16f57e37 apt: sort packages for update notification mail
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 22:58:52 +01:00
bc00289bce add daily update and maintenance task
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 22:51:26 +01:00
86d602457a api: apt: implement support to send notification email on new updates
again, base idea copied off PVE, but we save the information about
which pending version we already sent a mail for in a separate
object, to keep the api return type APTUpdateInfo clean.

This also makes a few things a bit easier, as we can update the
package status without saving/restoring the notify information.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 22:51:26 +01:00
33508b1237 api: implement apt pkg cache
based on the idea of PVE

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 21:42:49 +01:00
b282557563 api: apt: factor out and improve calling apt update
apt changes some of its state/cache also if it errors out, most of
the time, so we actually want to print both stderr and stdout.

Further, only warn if its exit code is non-zero; for the same
rationale, it may still report available updates even if it errors (e.g.,
because a future pbs-enterprise repo is additionally configured but
not accessible).

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 21:31:59 +01:00
e6513bd5de api/tools: split out apt helpers from api to own module
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 21:31:36 +01:00
5911f74096 api types: derive Debug for APTUpdateInfo
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 21:31:36 +01:00
0bb74e54b1 worker task: drop debug prints
they are not useful anymore, rather noisy

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 21:31:36 +01:00
f254a27071 tools: do not unnecessarily prefix module path
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 21:31:36 +01:00
d0abba3397 trivial: fix typo in comment
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 21:31:36 +01:00
54adea366c ui: ACL view: do not save grid state
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 11:36:48 +01:00
ba2e4b15da ui: improve ACL view layout
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 11:33:31 +01:00
0ccdd1b6a4 ui: bump sync/verify grid stateid
so that people get the improved view by default

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 10:58:57 +01:00
fb66c85363 ui: improve sync job view layout
Avoid overuse of flex, that is as bad as having everything at fixed widths.

In spirit similar to the previous commit for the verify panel, see
that for some rationale.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 10:56:51 +01:00
aae4c30ceb ui: improve verify job view layout, show job-id
Avoid overuse of flex, that is as bad as having everything at fixed widths.

* Set date-time fields to 150 px as they are fixed width text.
* Duration is maximal 3 units, so it can be made fixed too.
* Schedule is flex with lower and upper limits, this is useful as
  it's a field which can be both, quite short (daily) or long
  (mon..fri *-10..12-1..7 02:00/30:30)
* Status and comment is flex, this way we always get a filled grid

Move status after last verify date and duration field, increases
information density at the left of the grid - reducing need for eye
movement, also, it groups together the "information about last job"
nicer.

Show the job-id by default even if it is auto-generated when adding
over the gui, as it can help to find the respective job faster when
getting a mail with an error.

Reported-by: Dietmar Maurer <dietmar@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 10:56:51 +01:00
0656344ae4 ui: administration: set icons for tabs
orient on PVE, the ones for Updates, ServerStatus, should be
self-explanatory.

Services is named "System" in PVE, but reusing that cogs icon makes
similar sense here too, and seems in line with the search results of a
"service icons" query.

Syslog is the same as our general log icon, but as we also use this
normally for worker task logs and that is present here too, I
changed the worker task log icon to the alternative list, which
resembles a task view window - so IMO even better than before.

Sync that change also into the always present tasks button at the top
right.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-31 09:11:11 +01:00
1143f6ca93 cleanup: fix wording in GC status emails 2020-10-31 07:56:42 +01:00
90e94aa280 docs: client: avoid that repo gets detected as email address
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 17:08:08 +01:00
c0af05e143 docs: fixup bad RST table format
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 17:05:49 +01:00
4aef06f1b6 docs: add token example to client, and reformat a bit
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 17:01:22 +01:00
034cf70b72 docs: add API tokens to documentation
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 16:46:19 +01:00
8b600f9965 api: replace auth_id with auth-id
in parameters, and fix up the completion for the ACL update parameter.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 16:46:19 +01:00
e4e280183e privs: add some more comments explaining privileges
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 16:42:30 +01:00
2fc45a97a9 privs: remove PRIV_REMOVE_PRUNE
it's not used anywhere, and not needed either until the day we might
implement push syncs.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 16:42:26 +01:00
b7ce2e575f verify jobs: add permissions
equivalent to verifying a whole datastore, except for reading job
(entries), which is accessible to regular Datastore.Audit/Backup users
as well.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 16:36:52 +01:00
09f6a24078 verify: introduce & use new Datastore.Verify privilege
for verifying a whole datastore. Datastore.Backup now allows verifying
only backups owned by the triggering user.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 16:36:52 +01:00
b728a69e7d privs: use Datastore.Modify|Backup to set backup notes
Datastore.Backup is limited to owned groups, as usual.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 16:36:52 +01:00
1401f4be5f privs: allow reading notes with Datastore.Audit
they are returned when reading the manifest, which just requires
Datastore.Audit as well. Datastore.Read is for reading backup contents,
not metadata.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 16:36:52 +01:00
fdb4416bae ui: permission path selector: cbind typeAhead to editable
ExtJS throws an exception if 'typeAhead' is true but 'editable' is
false.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 16:31:53 +01:00
abe1edfc95 update d/control
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-10-30 16:11:50 +01:00
e4a864bd21 impl From<Authid> for Userid
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-10-30 15:19:07 +01:00
7a7368ee08 bump proxmox dependency to 0.7.0 for totp updates
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-10-30 15:19:07 +01:00
e707fd2b3b ui: Utils: add product specific task descriptions
and sort them alphabetically

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-10-30 14:05:17 +01:00
625a56b75e server/rest: accept also = as token separator
Like we do in Proxmox VE

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 13:34:26 +01:00
6d8a1ac9e4 server/rest: use constants for HTTP headers
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 13:33:36 +01:00
362739054e api tokens: add authorization method
and properly decode secret (which is a no-op with the current scheme).

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 13:15:14 +01:00
2762481cc8 proxmox-backup-manager: add subscription commands
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 13:03:58 +01:00
652506e6b8 api: define subscription module and methods as public
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 13:03:58 +01:00
926d253126 api: define subscription key schema and use it
nicer to have the correct regex checked in parameter verification
already

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 12:57:14 +01:00
1cd951c93e proxy: fix warnings
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-30 12:49:43 +01:00
3b707fbb8f proxy: split out code to run garbage collection job 2020-10-30 11:01:45 +01:00
b15751bf55 check_schedule cleanup: use &str instead of String
This way we can avoid many clone() calls.
2020-10-30 09:49:50 +01:00
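
The signature change boils down to something like this (names assumed, signature sketch only):

    // Taking &str lets callers pass borrowed data without clone()ing Strings.
    fn check_schedule(worker_type: &str, event_str: &str, id: &str) {
        // parse event_str, look up the last run for (worker_type, id), ...
        let _ = (worker_type, event_str, id);
    }
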
82c05b41fa proxy: extract commonly used logic for scheduling into new function
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
2020-10-30 09:49:50 +01:00
b8d9079835 proxy: move prune logic into new file
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
2020-10-30 09:49:50 +01:00
f8a682a873 ui: user menu: allow changing language while logged in
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-30 09:46:04 +01:00
b03a19b6e8 bump version to 0.9.4-2
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 20:25:37 +01:00
603a6bd183 d/postinst: followup: grep and sed use different regex escaping ..
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 20:25:37 +01:00
83b039af35 d/postinst: make more resilient
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 19:58:41 +01:00
c9299e76fc bump version to 0.9.3-2
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 17:20:04 +01:00
2f1a46f748 ui: move user, token and permissions into an access control tab panel
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 16:47:18 +01:00
2b38dfb456 d/control: update
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 16:18:40 +01:00
f487a622ce ui: datastore summary: handle missing snapshots of a type
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 15:52:53 +01:00
906ef6c5bd api2/access/user: fix return type schema
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-10-29 15:20:10 +01:00
ea1853a17b api2/access/user: drop Option, treat empty Vec as None
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-10-29 15:17:54 +01:00
221177ba41 fixup hardcoded paths
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-10-29 15:15:17 +01:00
184a37635b gui: add API token ACLs
and the needed API token selector.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
b2da7fbd1c acls: allow viewing/editing user's token ACLs
even for otherwise unprivileged users.

since effective privileges of an API token are always intersected with
those of their owning user, this does not allow an unprivileged user to
elevate their privileges in practice, but avoids the need to involve a
privileged user to deploy API tokens.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
7fe76d3491 gui: add API token UI
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
e6b5bf69a3 gui: add permissions button to user view
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
4615325f9e manager: add user permissions command
useful for debugging complex ACL setups.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
2156dec5a9 manager: add token commands
to generate, list and delete tokens. adding them to ACLs already works
out of the box.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
16245d540c tasks: allow unpriv users to read their tokens' tasks
and tighten down the return schema while we're at it.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
bff8557298 owner checks: handle backups owned by API tokens
a user should be allowed to read/list/overwrite backups owned by their
own tokens, but a token should not be able to read/list/overwrite
backups owned by their owning user.

when changing ownership of a backup group, a user should be able to
transfer ownership to/from their own tokens if the backup is owned by
them (or one of their tokens).

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
34aa8e13b6 client/remote: allow using ApiToken + secret
in place of user + password.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
babab85b56 api: add permissions endpoint
and adapt privilege calculation to return the propagate flag

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
6746bbb1a2 api: allow listing users + tokens
since it's not possible to extend existing structs, UserWithTokens
duplicates most of user::User.. to avoid duplicating user::ApiToken as
well, this returns full API token IDs, not just the token name part.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
942078c40b api: add API token endpoints
beneath the user endpoint.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
c30816c1f8 REST: extract and handle API tokens
and refactor handling of headers in the REST server while we're at it.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:14:27 +01:00
e6dc35acb8 replace Userid with Authid
in most generic places. this is accompanied by a change in
RpcEnvironment to purposefully break existing call sites.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:11:39 +01:00
e10c5c74f6 bump proxmox dependency to 0.6.0 for api tokens and tfa
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-10-29 15:11:39 +01:00
f8adf8f83f config: add token.shadow file
containing pairs of token ids and hashed secret values.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:11:39 +01:00
e0538349e2 api: add Authid as wrapper around Userid
with an optional Tokenname, appended with '!' as delimiter in the string
representation like for PVE.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-10-29 15:11:39 +01:00
0903403ce7 bump version to 0.9.3-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 14:58:21 +01:00
b6563f48ad GC: improve task logs
Make it more clear that removed files are chunks (not indexes or
something like that, user cannot know that we do not touch them here)

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 14:47:39 +01:00
932390bd46 GC: fix logging leftover bad chunks
fixes commit b4fb262335, which copied
over the "Removed bad files:" block, but only adapted the log text,
not the actual variable.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 14:40:29 +01:00
6b7688aa98 ui: datastore: fix sync/verify job removal prompt
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 14:34:31 +01:00
ab0cf7e6a1 ui: drop id field from verify/sync add window
the config is shared between multiple datastores with the ID as, well
the unique ID, but we only show those of a single datastore.

So if a user adds a new one with a fixed ID "12345" but a job with
that ID exists already on another store, they get an error about
duplicate IDs, but cannot see why, as that duplicate job is not visible
(filtered away)

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 14:22:43 +01:00
264779e704 server/worker_task: simplify task log writing
instead of prerotating 1000 tasks
(which resulted in 2 writes each time an active worker was finished)
simply append finished tasks to the archive (which will be rotated)

page cache should be good enough so that we can get the task logs fast

since existing installations might have an 'index' file, we
still have to read tasks from there, but only if it exists

this simplifies the TaskListInfoIterator a good amount

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-10-29 12:41:20 +01:00
7f3d91003c worker task: remove debug print, faster modulo
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 12:35:33 +01:00
14e0862509 api: datastore status: introduce proper structs and restore compatibility
by moving the properties of the storage status out again to the top
level object

also introduce proper structs for the types used, to get type-safety
and better documentation for the api calls

this changes the backup counts from an array of [groups,snapshots] to
an object/struct with { groups, snapshots } and includes 'other' types
(though we do not have any at this moment)

this way it is better documented

this also adapts the ui code to cope with the api changes

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-10-29 12:31:27 +01:00
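
The counts object described above could look roughly like this (field names assumed from the usual ct/vm/host backup types):

    // Named counts instead of a positional [groups, snapshots] array.
    #[derive(Debug, Default)]
    struct TypeCounts {
        groups: u64,
        snapshots: u64,
    }

    #[derive(Debug, Default)]
    struct Counts {
        ct: TypeCounts,
        vm: TypeCounts,
        host: TypeCounts,
        other: TypeCounts,
    }
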
9e733dae48 send sync job status emails 2020-10-29 12:22:50 +01:00
bfea476be2 schedule_datastore_sync_jobs: remove unnecessary clone() 2020-10-29 12:22:41 +01:00
385cf2bd9d send_job_status_mail: correctly escape html characters 2020-10-29 11:22:08 +01:00
d6373f3525 garbage_collection: log deduplication factor 2020-10-29 11:13:01 +01:00
01f37e01c3 ui: datastore: use pointer cursor for edit notes
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-29 10:45:37 +01:00
b4fb262335 garbage_collection: log bad chunks (still_bad value) 2020-10-29 10:24:31 +01:00
5499bd3dee fix #2998: encode mtime as i64 instead of u64
saves a file's mtime as i64 instead of u64, which enables backup of
files with negative mtime

the catalog_decode_i64 is compatible with encoded u64 values (if < 2^63)
but not the reverse, so all "old" catalogs can be read with the new
decoder, but catalogs that contain negative mtimes will decode wrongly
on older clients

also remove the arbitrary maximum value of 2^63 - 1 for
encode_u64 (we just use up to 10 bytes now) and correctly
decode them and update the comments accordingly

also adds tests for i64 encode/decode and for compatibility between
u64 encode and i64 decode

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-10-29 08:51:10 +01:00
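
Why the signed type matters, as a tiny sketch (not the catalog's actual byte encoding):

    use std::convert::TryFrom;

    // A u64 field cannot represent pre-1970 timestamps at all ...
    fn mtime_as_u64(mtime: i64) -> Option<u64> {
        u64::try_from(mtime).ok() // None for negative mtimes
    }

    // ... while an i64 mtime stores them directly.
    fn mtime_as_i64(mtime: i64) -> i64 {
        mtime
    }
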
d771a608f5 verify: directly pass manifest to filter function
In order to avoid loading the manifest twice during verify.
2020-10-29 07:59:19 +01:00
227a39b34b bump version to 0.9.2-2
re-use the changelog as this was not released publicly and it's just
a small fix

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-28 23:05:58 +01:00
f9beae9cc9 client: adapt to changed datastore status return schema
fixes commit 16f9f244cf which extended
the return schema of the status API but did not adapt the client
status command to that.

Simply define our own tiny return schema and use that.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-28 22:59:40 +01:00
389 changed files with 42528 additions and 5052 deletions


@@ -1,7 +1,16 @@
[package]
name = "proxmox-backup"
version = "0.9.2"
authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
version = "1.0.7"
authors = [
"Dietmar Maurer <dietmar@proxmox.com>",
"Dominik Csapak <d.csapak@proxmox.com>",
"Christian Ebner <c.ebner@proxmox.com>",
"Fabian Grünbichler <f.gruenbichler@proxmox.com>",
"Stefan Reiter <s.reiter@proxmox.com>",
"Thomas Lamprecht <t.lamprecht@proxmox.com>",
"Wolfgang Bumiller <w.bumiller@proxmox.com>",
"Proxmox Support Team <support@proxmox.com>",
]
edition = "2018"
license = "AGPL-3"
description = "Proxmox Backup"
@ -14,22 +23,22 @@ name = "proxmox_backup"
path = "src/lib.rs"
[dependencies]
apt-pkg-native = "0.3.1" # custom patched version
apt-pkg-native = "0.3.2"
base64 = "0.12"
bitflags = "1.2.1"
bytes = "0.5"
bytes = "1.0"
crc32fast = "1"
endian_trait = { version = "0.6", features = ["arrays"] }
anyhow = "1.0"
futures = "0.3"
h2 = { version = "0.2", features = ["stream"] }
h2 = { version = "0.3", features = [ "stream" ] }
handlebars = "3.0"
http = "0.2"
hyper = "0.13.6"
hyper = { version = "0.14", features = [ "full" ] }
lazy_static = "1.4"
libc = "0.2"
log = "0.4"
nix = "0.19"
nix = "0.19.1"
num-traits = "0.2"
once_cell = "1.3.1"
openssl = "0.10"
@ -37,31 +46,34 @@ pam = "0.7"
pam-sys = "0.5"
percent-encoding = "2.1"
pin-utils = "0.1.0"
pin-project = "1.0"
pathpatterns = "0.1.2"
proxmox = { version = "0.5.0", features = [ "sortable-macro", "api-macro", "websocket" ] }
proxmox = { version = "0.10.1", features = [ "sortable-macro", "api-macro", "websocket" ] }
#proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
proxmox-fuse = "0.1.0"
pxar = { version = "0.6.1", features = [ "tokio-io", "futures-io" ] }
#pxar = { path = "../pxar", features = [ "tokio-io", "futures-io" ] }
proxmox-fuse = "0.1.1"
pxar = { version = "0.8.0", features = [ "tokio-io" ] }
#pxar = { path = "../pxar", features = [ "tokio-io" ] }
regex = "1.2"
rustyline = "6"
rustyline = "7"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
siphasher = "0.3"
syslog = "4.0"
tokio = { version = "0.2.9", features = [ "blocking", "fs", "dns", "io-util", "macros", "process", "rt-threaded", "signal", "stream", "tcp", "time", "uds" ] }
tokio-openssl = "0.4.0"
tokio-util = { version = "0.3", features = [ "codec" ] }
tokio = { version = "1.0", features = [ "fs", "io-util", "macros", "net", "parking_lot", "process", "rt", "rt-multi-thread", "signal", "time" ] }
tokio-openssl = "0.6.1"
tokio-stream = "0.1.0"
tokio-util = { version = "0.6", features = [ "codec" ] }
tower-service = "0.3.0"
udev = ">= 0.3, <0.5"
url = "2.1"
#valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
walkdir = "2"
webauthn-rs = "0.2.5"
xdg = "2.2"
zstd = { version = "0.4", features = [ "bindgen" ] }
nom = "5.1"
crossbeam-channel = "0.4"
crossbeam-channel = "0.5"
[features]
default = []

View File

@ -8,8 +8,9 @@ SUBDIRS := etc www docs
# Binaries usable by users
USR_BIN := \
proxmox-backup-client \
pxar
proxmox-backup-client \
pxar \
pmtx
# Binaries usable by admins
USR_SBIN := \
@ -19,7 +20,8 @@ USR_SBIN := \
SERVICE_BIN := \
proxmox-backup-api \
proxmox-backup-banner \
proxmox-backup-proxy
proxmox-backup-proxy \
proxmox-daily-update
ifeq ($(BUILD_MODE), release)
CARGO_BUILD_ARGS += --release
@ -140,6 +142,8 @@ install: $(COMPILED_BINS)
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(SBINDIR)/ ; \
install -m644 zsh-completions/_$(i) $(DESTDIR)$(ZSH_COMPL_DEST)/ ;)
install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup
# install sg-tape-cmd as setuid binary
install -m4755 -o root -g root $(COMPILEDIR)/sg-tape-cmd $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/sg-tape-cmd
$(foreach i,$(SERVICE_BIN), \
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
$(MAKE) -C www install

View File

@ -53,3 +53,83 @@ Setup:
Note: 2. may be skipped if you already added the PVE or PBS package repository
You are now able to build using the Makefile or cargo itself.
Design Notes
============
Here are some random thoughts about the software design (unless I find a better place).
Large chunk sizes
-----------------
It is important to notice that large chunk sizes are crucial for
performance. We have a multi-user system, where different people can do
different operations on a datastore at the same time, and most operations
involve reading a series of chunks.
So what is the maximal theoretical speed we can get when reading a
series of chunks? Reading a chunk sequence needs the following steps:
- seek to the first chunk start location
- read the chunk data
- seek to the next chunk start location
- read the chunk data
- ...
Let's use the following disk performance metrics:
:AST: Average Seek Time (second)
:MRS: Maximum sequential Read Speed (bytes/second)
:ACS: Average Chunk Size (bytes)
The maximum performance you can get is::
MAX(ACS) = ACS / (AST + ACS/MRS)
Please note that chunk data is likely to be sequentially arranged on disk, but
this is sort of a best-case assumption.
For a typical rotational disk, we assume the following values::
AST: 10ms
MRS: 170MB/s
MAX(4MB) = 115.37 MB/s
MAX(1MB) = 61.85 MB/s;
MAX(64KB) = 6.02 MB/s;
MAX(4KB) = 0.39 MB/s;
MAX(1KB) = 0.10 MB/s;
Modern SSDs are much faster, let's assume the following::
max IOPS: 20000 => AST = 0.00005
MRS: 500MB/s
MAX(4MB) = 474 MB/s
MAX(1MB) = 465 MB/s;
MAX(64KB) = 354 MB/s;
MAX(4KB) = 67 MB/s;
MAX(1KB) = 18 MB/s;
Also, the average chunk size directly relates to the number of chunks produced
by a backup::
CHUNK_COUNT = BACKUP_SIZE / ACS
Here are some statistics from my developer workstation::
Disk Usage: 65 GB
Directories: 58971
Files: 726314
Files < 64KB: 617541
As you can see, there are a lot of small files. If we did file-level
deduplication, i.e. generated one chunk per file, we would end up with
more than 700000 chunks.
Instead, our current algorithm only produces large chunks with an
average chunk size of 4MB. With the above data, this produces about 15000
chunks (a factor of 50 fewer chunks).
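The formula is easy to play with; here is a small, self-contained Rust snippet (illustrative only, using decimal megabytes, so the results differ slightly from the rounded figures above) that evaluates it for the chunk sizes listed::

    // MAX(ACS) = ACS / (AST + ACS/MRS)
    // ACS: average chunk size (bytes), AST: average seek time (seconds),
    // MRS: maximum sequential read speed (bytes/second)
    fn max_throughput(acs: f64, ast: f64, mrs: f64) -> f64 {
        acs / (ast + acs / mrs)
    }

    fn main() {
        let (ast, mrs) = (0.010, 170.0e6); // rotational disk: 10ms seek, 170 MB/s
        for &acs in &[4.0e6, 1.0e6, 64.0e3, 4.0e3, 1.0e3] {
            println!(
                "MAX({} B) = {:.2} MB/s",
                acs as u64,
                max_throughput(acs, ast, mrs) / 1.0e6
            );
        }
    }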

View File

@ -35,3 +35,4 @@ Chores:
Suggestions
===========

297
debian/changelog vendored
View File

@ -1,4 +1,297 @@
rust-proxmox-backup (0.9.2-1) unstable; urgency=medium
rust-proxmox-backup (1.0.7-1) unstable; urgency=medium
* fix #3197: skip fingerprint check when restoring key
* client: add 'import-with-master-key' command
* fix #3192: correct sort in prune sim
* tools/daemon: improve reload behaviour
* http client: add timeouts for critical connects
* api: improve error messages for restricted endpoints
* api: allow tokens to list users
* ui: running tasks: Use gettext for column labels
* login: add two-factor authentication (TFA) and integrate it into the web interface
* login: support WebAuthn, recovery keys and TOTP as TFA methods
* make it possible to abort tasks with CTRL-C
* fix #3245: only use default schedule for new jobs
* manager CLI: user/token list: fix rendering 0 (never) expire date
* update the event-driven, non-blocking I/O tokio platform to 1.0
* access: limit editing all pam credentials to superuser
* access: restrict password changes on @pam realm to superuser
* patch out wrongly linked libraries from ELFs to avoid extra, bogus
dependencies in resulting package
* add "password hint" to encryption key config
* improve GC error handling
* cli: make it possible to abort tasks with CTRL-C
-- Proxmox Support Team <support@proxmox.com> Wed, 03 Feb 2021 10:34:23 +0100
rust-proxmox-backup (1.0.6-1) unstable; urgency=medium
* stricter handling of file-descriptors, fixes some cases where some could
leak
* ui: fix various usages of the findRecord method, ensuring it matches exactly
* garbage collection: improve task log format
* verification: improve progress log, make it similar to what's logged on
pull (sync)
* datastore: move manifest locking to /run. This avoids issues with
filesystems which cannot natively handle removing in-use files ("delete on
last close") and instead create a virtual, internal replacement file to work
around that. This is done, for example, by NFS or CIFS (Samba).
-- Proxmox Support Team <support@proxmox.com> Fri, 11 Dec 2020 12:51:33 +0100
rust-proxmox-backup (1.0.5-1) unstable; urgency=medium
* client: restore: print meta information exclusively to standard error
-- Proxmox Support Team <support@proxmox.com> Wed, 25 Nov 2020 15:29:58 +0100
rust-proxmox-backup (1.0.4-1) unstable; urgency=medium
* fingerprint: add bytes() accessor
* ui: fix broken gettext use
* cli: move more commands into "snapshot" sub-command
-- Proxmox Support Team <support@proxmox.com> Wed, 25 Nov 2020 06:37:41 +0100
rust-proxmox-backup (1.0.3-1) unstable; urgency=medium
* client: inform user when automatically using the default encryption key
* ui: UserView: render name as 'Firstname Lastname'
* proxmox-backup-manager: add versions command
* pxar: fix anchored exclusion at archive root
* pxar: include .pxarexclude files in the archive
* client: expose all-file-systems option
* api: make expensive parts of datastore status opt-in
* api: include datastore ID in invalid owner errors
* garbage collection: treat .bad files like regular chunks to ensure they
are removed if not referenced anymore
* client: fix issues with encoded UPID strings
* encryption: add fingerprint to key config
* client: add 'key show' command
* fix #3139: add key fingerprint to backup snapshot manifest and check it
when loading with a key
* ui: add snapshot/file fingerprint tooltip
-- Proxmox Support Team <support@proxmox.com> Tue, 24 Nov 2020 08:55:47 +0100
rust-proxmox-backup (1.0.1-1) unstable; urgency=medium
* ui: datastore summary: drop 'removed bytes' display
* ui: datastore add: set default schedule
* prune sim: make backup schedule a form, bind update button to its validity
* daemon: add workaround for race in reloading and systemd 'ready' notification
-- Proxmox Support Team <support@proxmox.com> Wed, 11 Nov 2020 10:18:12 +0100
rust-proxmox-backup (1.0.0-1) unstable; urgency=medium
* fix #3121: forbid removing used remotes
* docs: backup-client: encryption: discuss paperkey command
* pxar: log when skipping mount points
* ui: show also parent ACLs which affect a datastore in its panel
* api: node/apt: add versions call
* ui: make Datastore a selectable panel again. Show a datastore summary
list, and provide unfiltered access to all sync and verify jobs.
* ui: add help tool-button to various panels
* ui: set various onlineHelp buttons
* zfs: mount new zpools created via API under /mnt/datastore/<id>
* ui: move disks/directory views to its own tab panel
* fix #3060: continue sync if we cannot acquire the group lock
* HttpsConnector: include destination on connect errors
* fix #3060: improve get_owner error handling
* remote.cfg: rename userid to 'auth-id'
* verify: log/warn on invalid owner
-- Proxmox Support Team <support@proxmox.com> Tue, 10 Nov 2020 14:36:13 +0100
rust-proxmox-backup (0.9.7-1) unstable; urgency=medium
* ui: add remote store selector
* tools/daemon: fix reload with open connections
* pxar/create: fix endless loop for shrinking files
* pxar/create: handle ErrorKind::Interrupted for file reads
* ui: add action-button for changing backup group owner
* docs: add interactive prune simulator
* verify: fix unprivileged verification jobs
* tasks: allow access to job tasks
* drop internal 'backup@pam' owner, sync jobs need to set an explicit owner
* api: datastore: allow to set "verify-new" option over API
* ui: datastore: add Options tab, allowing one to change per-datastore
notification and verify-new options
* docs: scroll navigation bar to current active section
-- Proxmox Support Team <support@proxmox.com> Mon, 09 Nov 2020 07:36:58 +0100
rust-proxmox-backup (0.9.6-1) unstable; urgency=medium
* fix #3106: improve queueing new incoming connections
* fix #2870: sync: ensure an updated ticket is used, if available
* proxy: log if there are too many open connections
* ui: SyncJobEdit: fix sending 'delete' values on SyncJob creation
* datastore config: allow to configure who receives job notify emails
* ui: fix task description for log rotate
* proxy: also rotate auth.log file
* ui: add shell panel under administration
* ui: sync jobs: only set default schedule when creating new jobs and some
other small fixes
-- Proxmox Support Team <support@proxmox.com> Wed, 04 Nov 2020 19:12:57 +0100
rust-proxmox-backup (0.9.5-1) unstable; urgency=medium
* ui: user menu: allow one to change the language while staying logged in
* proxmox-backup-manager: add subscription commands
* server/rest: also accept = as token separator
* privs: allow reading snapshot notes with Datastore.Audit
* privs: enforce Datastore.Modify|Backup to set backup notes
* verify: introduce and use new Datastore.Verify privilege
* docs: add API tokens to documentation
* ui: various smaller layout and icon improvements
* api: implement apt pkg cache for caching pending updates
* api: apt: implement support to send notification email on new updates
* add daily update and maintenance task
* fix #2864: add owner option to sync
* sync: allow sync for non-superusers under special conditions
* config: support deprecated netmask when parsing interfaces file
* server: implement access log rotation with re-open via command socket
* garbage collect: improve index error messages
* fix #3039: use the same ID regex for info and api
* ui: administration: allow extensive filtering of the worker tasks
* report: add api endpoint and function to generate report
-- Proxmox Support Team <support@proxmox.com> Tue, 03 Nov 2020 17:41:17 +0100
rust-proxmox-backup (0.9.4-2) unstable; urgency=medium
* make postinst (update) script more resilient
-- Proxmox Support Team <support@proxmox.com> Thu, 29 Oct 2020 20:09:30 +0100
rust-proxmox-backup (0.9.4-1) unstable; urgency=medium
* implement API-token
* client/remote: allow using API-token + secret
* ui/cli: implement API-token management interface and commands
* ui: add widget to view the effective permissions of a user or token
* ui: datastore summary: handle error when having zero snapshots of any type
* ui: move user, token and permissions into an access control tab panel
-- Proxmox Support Team <support@proxmox.com> Thu, 29 Oct 2020 17:19:13 +0100
rust-proxmox-backup (0.9.3-1) unstable; urgency=medium
* fix #2998: encode mtime as i64 instead of u64
* GC: log the number of leftover bad chunks we could not yet cleanup, as no
valid one replaced them. Also log deduplication factor.
* send sync job status emails
* api: datastore status: introduce proper structs and restore compatibility
to 0.9.1
* ui: drop id field from verify/sync add window, they are now seen as internal
-- Proxmox Support Team <support@proxmox.com> Thu, 29 Oct 2020 14:58:13 +0100
rust-proxmox-backup (0.9.2-2) unstable; urgency=medium
* rework server web-interface, move more datastore related panels as tabs
inside the datastore view
@ -76,7 +369,7 @@ rust-proxmox-backup (0.9.2-1) unstable; urgency=medium
* ui: datastore: show snapshot manifest comment and allow to edit them
-- Proxmox Support Team <support@proxmox.com> Wed, 28 Oct 2020 21:27:02 +0100
-- Proxmox Support Team <support@proxmox.com> Wed, 28 Oct 2020 23:05:41 +0100
rust-proxmox-backup (0.9.1-1) unstable; urgency=medium

89
debian/control vendored
View File

@ -7,24 +7,25 @@ Build-Depends: debhelper (>= 11),
rustc:native,
libstd-rust-dev,
librust-anyhow-1+default-dev,
librust-apt-pkg-native-0.3+default-dev (>= 0.3.1-~~),
librust-apt-pkg-native-0.3+default-dev (>= 0.3.2-~~),
librust-base64-0.12+default-dev,
librust-bitflags-1+default-dev (>= 1.2.1-~~),
librust-bytes-0.5+default-dev,
librust-bytes-1+default-dev,
librust-crc32fast-1+default-dev,
librust-crossbeam-channel-0.4+default-dev,
librust-crossbeam-channel-0.5+default-dev,
librust-endian-trait-0.6+arrays-dev,
librust-endian-trait-0.6+default-dev,
librust-futures-0.3+default-dev,
librust-h2-0.2+default-dev,
librust-h2-0.2+stream-dev,
librust-h2-0.3+default-dev,
librust-h2-0.3+stream-dev,
librust-handlebars-3+default-dev,
librust-http-0.2+default-dev,
librust-hyper-0.13+default-dev (>= 0.13.6-~~),
librust-hyper-0.14+default-dev,
librust-hyper-0.14+full-dev,
librust-lazy-static-1+default-dev (>= 1.4-~~),
librust-libc-0.2+default-dev,
librust-log-0.4+default-dev,
librust-nix-0.19+default-dev,
librust-nix-0.19+default-dev (>= 0.19.1-~~),
librust-nom-5+default-dev (>= 5.1-~~),
librust-num-traits-0.2+default-dev,
librust-once-cell-1+default-dev (>= 1.3.1-~~),
@ -33,42 +34,42 @@ Build-Depends: debhelper (>= 11),
librust-pam-sys-0.5+default-dev,
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
librust-percent-encoding-2+default-dev (>= 2.1-~~),
librust-pin-project-1+default-dev,
librust-pin-utils-0.1+default-dev,
librust-proxmox-0.5+api-macro-dev,
librust-proxmox-0.5+default-dev,
librust-proxmox-0.5+sortable-macro-dev,
librust-proxmox-0.5+websocket-dev,
librust-proxmox-fuse-0.1+default-dev,
librust-pxar-0.6+default-dev (>= 0.6.1-~~),
librust-pxar-0.6+futures-io-dev (>= 0.6.1-~~),
librust-pxar-0.6+tokio-io-dev (>= 0.6.1-~~),
librust-proxmox-0.10+api-macro-dev (>= 0.10.1-~~),
librust-proxmox-0.10+default-dev (>= 0.10.1-~~),
librust-proxmox-0.10+sortable-macro-dev (>= 0.10.1-~~),
librust-proxmox-0.10+websocket-dev (>= 0.10.1-~~),
librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
librust-pxar-0.8+default-dev,
librust-pxar-0.8+tokio-io-dev,
librust-regex-1+default-dev (>= 1.2-~~),
librust-rustyline-6+default-dev,
librust-rustyline-7+default-dev,
librust-serde-1+default-dev,
librust-serde-1+derive-dev,
librust-serde-json-1+default-dev,
librust-siphasher-0.3+default-dev,
librust-syslog-4+default-dev,
librust-tokio-0.2+blocking-dev (>= 0.2.9-~~),
librust-tokio-0.2+default-dev (>= 0.2.9-~~),
librust-tokio-0.2+dns-dev (>= 0.2.9-~~),
librust-tokio-0.2+fs-dev (>= 0.2.9-~~),
librust-tokio-0.2+io-util-dev (>= 0.2.9-~~),
librust-tokio-0.2+macros-dev (>= 0.2.9-~~),
librust-tokio-0.2+process-dev (>= 0.2.9-~~),
librust-tokio-0.2+rt-threaded-dev (>= 0.2.9-~~),
librust-tokio-0.2+signal-dev (>= 0.2.9-~~),
librust-tokio-0.2+stream-dev (>= 0.2.9-~~),
librust-tokio-0.2+tcp-dev (>= 0.2.9-~~),
librust-tokio-0.2+time-dev (>= 0.2.9-~~),
librust-tokio-0.2+uds-dev (>= 0.2.9-~~),
librust-tokio-openssl-0.4+default-dev,
librust-tokio-util-0.3+codec-dev,
librust-tokio-util-0.3+default-dev,
librust-tokio-1+default-dev,
librust-tokio-1+fs-dev,
librust-tokio-1+io-util-dev,
librust-tokio-1+macros-dev,
librust-tokio-1+net-dev,
librust-tokio-1+parking-lot-dev,
librust-tokio-1+process-dev,
librust-tokio-1+rt-dev,
librust-tokio-1+rt-multi-thread-dev,
librust-tokio-1+signal-dev,
librust-tokio-1+time-dev,
librust-tokio-openssl-0.6+default-dev (>= 0.6.1-~~),
librust-tokio-stream-0.1+default-dev,
librust-tokio-util-0.6+codec-dev,
librust-tokio-util-0.6+default-dev,
librust-tower-service-0.3+default-dev,
librust-udev-0.4+default-dev | librust-udev-0.3+default-dev,
librust-url-2+default-dev (>= 2.1-~~),
librust-walkdir-2+default-dev,
librust-webauthn-rs-0.2+default-dev (>= 0.2.5-~~),
librust-xdg-2+default-dev (>= 2.2-~~),
librust-zstd-0.4+bindgen-dev,
librust-zstd-0.4+default-dev,
@ -76,43 +77,53 @@ Build-Depends: debhelper (>= 11),
libfuse3-dev,
libsystemd-dev,
uuid-dev,
debhelper (>= 12~),
libsgutils2-dev,
bash-completion,
pve-eslint,
python3-docutils,
python3-pygments,
rsync,
debhelper (>= 12~),
fonts-dejavu-core <!nodoc>,
fonts-lato <!nodoc>,
fonts-open-sans <!nodoc>,
graphviz <!nodoc>,
latexmk <!nodoc>,
patchelf,
pve-eslint (>= 7.18.0-1),
python3-docutils,
python3-pygments,
python3-sphinx <!nodoc>,
rsync,
texlive-fonts-extra <!nodoc>,
texlive-fonts-recommended <!nodoc>,
texlive-xetex <!nodoc>,
xindy <!nodoc>
Maintainer: Proxmox Support Team <support@proxmox.com>
Standards-Version: 4.4.1
Vcs-Git:
Vcs-Browser:
Vcs-Git: git://git.proxmox.com/git/proxmox-backup.git
Vcs-Browser: https://git.proxmox.com/?p=proxmox-backup.git;a=summary
Homepage: https://www.proxmox.com
Package: proxmox-backup-server
Architecture: any
Depends: fonts-font-awesome,
libjs-extjs (>= 6.0.1),
libjs-qrcodejs (>= 1.20201119),
libsgutils2-2,
libzstd1 (>= 1.3.8),
lvm2,
mt-st,
mtx,
openssh-server,
pbs-i18n,
postfix | mail-transport-agent,
proxmox-backup-docs,
proxmox-mini-journalreader,
proxmox-widget-toolkit (>= 2.3-6),
pve-xtermjs (>= 4.7.0-1),
sg3-utils,
smartmontools,
${misc:Depends},
${shlibs:Depends},
Recommends: zfsutils-linux,
ifupdown2,
Description: Proxmox Backup Server daemon with tools and GUI
This package contains the Proxmox Backup Server daemons and related
tools. This includes a web-based graphical user interface.

8
debian/control.in vendored
View File

@ -2,17 +2,25 @@ Package: proxmox-backup-server
Architecture: any
Depends: fonts-font-awesome,
libjs-extjs (>= 6.0.1),
libjs-qrcodejs (>= 1.20201119),
libsgutils2-2,
libzstd1 (>= 1.3.8),
lvm2,
mt-st,
mtx,
openssh-server,
pbs-i18n,
postfix | mail-transport-agent,
proxmox-backup-docs,
proxmox-mini-journalreader,
proxmox-widget-toolkit (>= 2.3-6),
pve-xtermjs (>= 4.7.0-1),
sg3-utils,
smartmontools,
${misc:Depends},
${shlibs:Depends},
Recommends: zfsutils-linux,
ifupdown2,
Description: Proxmox Backup Server daemon with tools and GUI
This package contains the Proxmox Backup Server daemons and related
tools. This includes a web-based graphical user interface.

22
debian/debcargo.toml vendored
View File

@ -2,33 +2,32 @@ overlay = "."
crate_src_path = ".."
whitelist = ["tests/*.c"]
# needed for pinutils alpha
allow_prerelease_deps = true
maintainer = "Proxmox Support Team <support@proxmox.com>"
[source]
# TODO: update once public
vcs_git = ""
vcs_browser = ""
maintainer = "Proxmox Support Team <support@proxmox.com>"
vcs_git = "git://git.proxmox.com/git/proxmox-backup.git"
vcs_browser = "https://git.proxmox.com/?p=proxmox-backup.git;a=summary"
section = "admin"
build_depends = [
"debhelper (>= 12~)",
"bash-completion",
"pve-eslint",
"python3-docutils",
"python3-pygments",
"rsync",
"debhelper (>= 12~)",
"fonts-dejavu-core <!nodoc>",
"fonts-lato <!nodoc>",
"fonts-open-sans <!nodoc>",
"graphviz <!nodoc>",
"latexmk <!nodoc>",
"patchelf",
"pve-eslint (>= 7.18.0-1)",
"python3-docutils",
"python3-pygments",
"python3-sphinx <!nodoc>",
"rsync",
"texlive-fonts-extra <!nodoc>",
"texlive-fonts-recommended <!nodoc>",
"texlive-xetex <!nodoc>",
"xindy <!nodoc>",
]
build_depends_excludes = [
"debhelper (>=11)",
]
@ -39,4 +38,5 @@ depends = [
"libfuse3-dev",
"libsystemd-dev",
"uuid-dev",
"libsgutils2-dev",
]

View File

@ -1,2 +1,2 @@
proxmox-backup-server: package-installs-apt-sources etc/apt/sources.list.d/pbstest-beta.list
proxmox-backup-server: package-installs-apt-sources etc/apt/sources.list.d/pbs-enterprise.list
proxmox-backup-server: systemd-service-file-refers-to-unusual-wantedby-target lib/systemd/system/proxmox-backup-banner.service getty.target

3
debian/pmtx.bc vendored Normal file
View File

@ -0,0 +1,3 @@
# pmtx bash completion
complete -C 'pmtx bashcomplete' pmtx

30
debian/postinst vendored
View File

@ -6,6 +6,9 @@ set -e
case "$1" in
configure)
# need to have user backup in the tape group
usermod -a -G tape backup
# modeled after dh_systemd_start output
systemctl --system daemon-reload >/dev/null || true
if [ -n "$2" ]; then
@ -15,12 +18,33 @@ case "$1" in
fi
deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
flock -w 30 /etc/proxmox-backup/.datastore.lck sed -i '/^\s\+verify-schedule /d' /etc/proxmox-backup/datastore.cfg
# FIXME: Remove with 1.1
if test -n "$2"; then
if dpkg --compare-versions "$2" 'lt' '0.9.4-1'; then
if grep -s -q -P -e '^\s+verify-schedule ' /etc/proxmox-backup/datastore.cfg; then
echo "NOTE: drop all verify schedules from datastore config."
echo "You can now add more flexible verify jobs"
flock -w 30 /etc/proxmox-backup/.datastore.lck \
sed -i '/^\s\+verify-schedule /d' /etc/proxmox-backup/datastore.cfg || true
fi
fi
if dpkg --compare-versions "$2" 'le' '0.9.5-1'; then
chown --quiet backup:backup /var/log/proxmox-backup/api/auth.log || true
fi
if dpkg --compare-versions "$2" 'le' '0.9.7-1'; then
if [ -e /etc/proxmox-backup/remote.cfg ]; then
echo "NOTE: Switching over remote.cfg to new field names.."
flock -w 30 /etc/proxmox-backup/.remote.lck \
sed -i \
-e 's/^\s\+userid /\tauth-id /g' \
/etc/proxmox-backup/remote.cfg || true
fi
fi
fi
# FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
echo "Fixing up termproxy user id in task log..."
flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active
flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active || true
fi
;;

3
debian/prerm vendored
View File

@ -6,5 +6,6 @@ set -e
# modeled after dh_systemd_start output
if [ -d /run/systemd/system ] && [ "$1" = remove ]; then
deb-systemd-invoke stop 'proxmox-backup-banner.service' 'proxmox-backup-proxy.service' 'proxmox-backup.service' >/dev/null || true
deb-systemd-invoke stop 'proxmox-backup-banner.service' 'proxmox-backup-proxy.service' \
'proxmox-backup.service' 'proxmox-backup-daily-update.timer' >/dev/null || true
fi

View File

@ -1 +1,3 @@
/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/html/proxmox-backup.pdf
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/prune-simulator/extjs
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/lto-barcode/extjs

View File

@ -1 +1,2 @@
debian/proxmox-backup-manager.bc proxmox-backup-manager
debian/pmtx.bc pmtx

View File

@ -1,16 +1,22 @@
etc/proxmox-backup-proxy.service /lib/systemd/system/
etc/proxmox-backup.service /lib/systemd/system/
etc/proxmox-backup-banner.service /lib/systemd/system/
etc/pbstest-beta.list /etc/apt/sources.list.d/
etc/proxmox-backup-daily-update.service /lib/systemd/system/
etc/proxmox-backup-daily-update.timer /lib/systemd/system/
etc/pbs-enterprise.list /etc/apt/sources.list.d/
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-api
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-daily-update
usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd
usr/sbin/proxmox-backup-manager
usr/bin/pmtx
usr/share/javascript/proxmox-backup/index.hbs
usr/share/javascript/proxmox-backup/css/ext6-pbs.css
usr/share/javascript/proxmox-backup/images/logo-128.png
usr/share/javascript/proxmox-backup/images/proxmox_logo.png
usr/share/javascript/proxmox-backup/images
usr/share/javascript/proxmox-backup/js/proxmox-backup-gui.js
usr/share/man/man1/proxmox-backup-manager.1
usr/share/man/man1/proxmox-backup-proxy.1
usr/share/man/man1/pmtx.1
usr/share/zsh/vendor-completions/_proxmox-backup-manager
usr/share/zsh/vendor-completions/_pmtx

View File

@ -0,0 +1 @@
rm_conffile /etc/apt/sources.list.d/pbstest-beta.list 1.0.0~ proxmox-backup-server

11
debian/rules vendored
View File

@ -38,13 +38,24 @@ override_dh_auto_install:
LIBDIR=/usr/lib/$(DEB_HOST_MULTIARCH)
override_dh_installsystemd:
dh_installsystemd -pproxmox-backup-server proxmox-backup-daily-update.timer
# note: we start/try-reload-restart services manually in postinst
dh_installsystemd --no-start --no-restart-after-upgrade
override_dh_fixperms:
dh_fixperms --exclude sg-tape-cmd
# workaround https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=933541
# TODO: remove once available (Debian 11 ?)
override_dh_dwz:
dh_dwz --no-dwz-multifile
override_dh_strip:
dh_strip
for exe in $$(find debian/proxmox-backup-client/usr \
debian/proxmox-backup-server/usr -executable -type f); do \
debian/scripts/elf-strip-unused-dependencies.sh "$$exe" || true; \
done
override_dh_compress:
dh_compress -X.pdf

View File

@ -0,0 +1,20 @@
#!/bin/bash
binary=$1
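# 'ldd -u' prints the direct dependencies this binary never actually uses;
# keep only their basenames (last path component) so patchelf can drop the
# corresponding DT_NEEDED entries below.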
exec 3< <(ldd -u "$binary" | grep -oP '[^/:]+$')
patchargs=""
dropped=""
while read -r dep; do
dropped="$dep $dropped"
patchargs="--remove-needed $dep $patchargs"
done <&3
exec 3<&-
if [[ $dropped == "" ]]; then
exit 0
fi
echo -e "patchelf '$binary' - removing unused dependencies:\n $dropped"
patchelf $patchargs $binary

View File

@ -5,15 +5,35 @@ GENERATED_SYNOPSIS := \
proxmox-backup-client/catalog-shell-synopsis.rst \
proxmox-backup-manager/synopsis.rst \
pxar/synopsis.rst \
pmtx/synopsis.rst \
backup-protocol-api.rst \
reader-protocol-api.rst
MANUAL_PAGES := \
pxar.1 \
pmtx.1 \
proxmox-backup-proxy.1 \
proxmox-backup-client.1 \
proxmox-backup-manager.1
PRUNE_SIMULATOR_FILES := \
prune-simulator/index.html \
prune-simulator/documentation.html \
prune-simulator/clear-trigger.png \
prune-simulator/prune-simulator.js
LTO_BARCODE_FILES := \
lto-barcode/index.html \
lto-barcode/code39.js \
lto-barcode/prefix-field.js \
lto-barcode/label-style.js \
lto-barcode/tape-type.js \
lto-barcode/paper-size.js \
lto-barcode/page-layout.js \
lto-barcode/page-calibration.js \
lto-barcode/label-list.js \
lto-barcode/label-setup.js \
lto-barcode/lto-barcode.js
# Sphinx documentation setup
SPHINXOPTS =
@ -49,6 +69,14 @@ pxar/synopsis.rst: ${COMPILEDIR}/pxar
pxar.1: pxar/man1.rst pxar/description.rst pxar/synopsis.rst
rst2man $< >$@
pmtx/synopsis.rst: ${COMPILEDIR}/pmtx
${COMPILEDIR}/pmtx printdoc > pmtx/synopsis.rst
pmtx.1: pmtx/man1.rst pmtx/description.rst pmtx/synopsis.rst
rst2man $< >$@
proxmox-backup-client/synopsis.rst: ${COMPILEDIR}/proxmox-backup-client
${COMPILEDIR}/proxmox-backup-client printdoc > proxmox-backup-client/synopsis.rst
@ -74,10 +102,13 @@ onlinehelpinfo:
@echo "Build finished. OnlineHelpInfo.js is in $(BUILDDIR)/scanrefs."
.PHONY: html
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES}
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
cp images/proxmox-logo.svg $(BUILDDIR)/html/_static/
cp custom.css $(BUILDDIR)/html/_static/
install -m 0644 custom.js custom.css images/proxmox-logo.svg $(BUILDDIR)/html/_static/
install -dm 0755 $(BUILDDIR)/html/prune-simulator
install -m 0644 ${PRUNE_SIMULATOR_FILES} $(BUILDDIR)/html/prune-simulator
install -dm 0755 $(BUILDDIR)/html/lto-barcode
install -m 0644 ${LTO_BARCODE_FILES} $(BUILDDIR)/html/lto-barcode
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

View File

@ -44,7 +44,7 @@ def scan_extjs_files(wwwdir="../www"): # a bit rough i know, but we can optimize
js_files.append(os.path.join(root, filename))
for js_file in js_files:
fd = open(js_file).read()
allmatch = re.findall("onlineHelp:\s*[\'\"](.*?)[\'\"]", fd, re.M)
allmatch = re.findall("(?:onlineHelp:|get_help_tool\s*\()\s*[\'\"](.*?)[\'\"]", fd, re.M)
for match in allmatch:
anchor = match
anchor = re.sub('_', '-', anchor) # normalize labels
@ -73,7 +73,9 @@ class ReflabelMapper(Builder):
'link': '/docs/index.html',
'title': 'Proxmox Backup Server Documentation Index',
}
self.env.used_anchors = scan_extjs_files()
# Disabled until we find a sensible way to scan proxmox-widget-toolkit
# as well
#self.env.used_anchors = scan_extjs_files()
if not os.path.isdir(self.outdir):
os.mkdir(self.outdir)
@ -93,6 +95,9 @@ class ReflabelMapper(Builder):
logger.info('traversing section {}'.format(title.astext()))
ref_name = getattr(title, 'rawsource', title.astext())
if (ref_name[:7] == ':term:`'):
ref_name = ref_name[7:-1]
self.env.online_help[labelid] = {'link': '', 'title': ''}
self.env.online_help[labelid]['link'] = "/docs/" + os.path.basename(filename_html) + "#{}".format(labelid)
self.env.online_help[labelid]['title'] = ref_name
@ -112,15 +117,18 @@ class ReflabelMapper(Builder):
def validate_anchors(self):
#pprint(self.env.online_help)
to_remove = []
for anchor in self.env.used_anchors:
if anchor not in self.env.online_help:
logger.info("[-] anchor {} is missing from onlinehelp!".format(anchor))
for anchor in self.env.online_help:
if anchor not in self.env.used_anchors and anchor != 'pbs_documentation_index':
logger.info("[*] anchor {} not used! deleting...".format(anchor))
to_remove.append(anchor)
for anchor in to_remove:
self.env.online_help.pop(anchor, None)
# Disabled until we find a sensible way to scan proxmox-widget-toolkit
# as well
#for anchor in self.env.used_anchors:
# if anchor not in self.env.online_help:
# logger.info("[-] anchor {} is missing from onlinehelp!".format(anchor))
#for anchor in self.env.online_help:
# if anchor not in self.env.used_anchors and anchor != 'pbs_documentation_index':
# logger.info("[*] anchor {} not used! deleting...".format(anchor))
# to_remove.append(anchor)
#for anchor in to_remove:
# self.env.online_help.pop(anchor, None)
return
def finish(self):

View File

@ -12,31 +12,31 @@ on the backup server.
[[username@]server[:port]:]datastore
The default value for ``username`` is ``root@pam``. If no server is specified,
The default value for ``username`` is ``root@pam``. If no server is specified,
the default is the local host (``localhost``).
You can specify a port if your backup server is only reachable on a different
port (e.g. with NAT and port forwarding).
Note that if the server is an IPv6 address, you have to write it with
square brackets (e.g. [fe80::01]).
Note that if the server is an IPv6 address, you have to write it with square
brackets (for example, `[fe80::01]`).
You can pass the repository with the ``--repository`` command
line option, or by setting the ``PBS_REPOSITORY`` environment
variable.
You can pass the repository with the ``--repository`` command line option, or
by setting the ``PBS_REPOSITORY`` environment variable.
Here are some examples of valid repositories and their real values
================================ ============ ================== ===========
Example User Host:Port Datastore
================================ ============ ================== ===========
mydatastore ``root@pam`` localhost:8007 mydatastore
myhostname:mydatastore ``root@pam`` myhostname:8007 mydatastore
user@pbs@myhostname:mydatastore ``user@pbs`` myhostname:8007 mydatastore
192.168.55.55:1234:mydatastore ``root@pam`` 192.168.55.55:1234 mydatastore
[ff80::51]:mydatastore ``root@pam`` [ff80::51]:8007 mydatastore
[ff80::51]:1234:mydatastore ``root@pam`` [ff80::51]:1234 mydatastore
================================ ============ ================== ===========
================================ ================== ================== ===========
Example User Host:Port Datastore
================================ ================== ================== ===========
mydatastore ``root@pam`` localhost:8007 mydatastore
myhostname:mydatastore ``root@pam`` myhostname:8007 mydatastore
user@pbs@myhostname:mydatastore ``user@pbs`` myhostname:8007 mydatastore
user\@pbs!token@host:store ``user@pbs!token`` myhostname:8007 mydatastore
192.168.55.55:1234:mydatastore ``root@pam`` 192.168.55.55:1234 mydatastore
[ff80::51]:mydatastore ``root@pam`` [ff80::51]:8007 mydatastore
[ff80::51]:1234:mydatastore ``root@pam`` [ff80::51]:1234 mydatastore
================================ ================== ================== ===========
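For example, to list the snapshots of a remote datastore, you can either pass
the repository on the command line or export it first (the host and datastore
names below are placeholders):

.. code-block:: console

  # proxmox-backup-client snapshot list --repository user@pbs@myhostname:mydatastore
  # export PBS_REPOSITORY=user@pbs@myhostname:mydatastore
  # proxmox-backup-client snapshot list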
Environment Variables
---------------------
@ -45,16 +45,16 @@ Environment Variables
The default backup repository.
``PBS_PASSWORD``
When set, this value is used for the password required for the
backup server.
When set, this value is used for the password required for the backup server.
You can also set this to an API token secret.
``PBS_ENCRYPTION_PASSWORD``
When set, this value is used to access the secret encryption key (if
protected by password).
``PBS_FINGERPRINT`` When set, this value is used to verify the server
certificate (only used if the system CA certificates cannot
validate the certificate).
certificate (only used if the system CA certificates cannot validate the
certificate).
Output Format
@ -353,8 +353,10 @@ To set up a master key:
.. code-block:: console
# openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out /path/to/target
Enter pass phrase for ./master-private.pem: *********
# proxmox-backup-client key import-with-master-key /path/to/target --master-keyfile /path/to/master-private.pem --encrypted-keyfile /path/to/rsa-encrypted.key
Master Key Password: ******
New Password: ******
Verify Password: ******
7. The target file will now contain the encryption key information in plain
text. The success of this can be confirmed by passing the resulting ``json``
@ -365,9 +367,22 @@ To set up a master key:
backed up. It can happen, for example, that you back up an entire system, using
a key on that system. If the system then becomes inaccessible for any reason
and needs to be restored, this will not be possible as the encryption key will be
lost along with the broken system. In preparation for the worst case scenario,
you should consider keeping a paper copy of this key locked away in
a safe place.
lost along with the broken system.
It is recommended that you keep your master key safe, but easily accessible, in
order to allow for quick disaster recovery. For this reason, the best place to store it
is in your password manager, where it is immediately recoverable. As a backup to
this, you should also save the key to a USB drive and store that in a secure
place. This way, it is detached from any system, but is still easy to recover
from, in case of emergency. Finally, in preparation for the worst case scenario,
you should also consider keeping a paper copy of your master key locked away in
a safe place. The ``paperkey`` subcommand can be used to create a QR encoded
version of your master key. The following command sends the output of the
``paperkey`` command to a text file, for easy printing.
.. code-block:: console
proxmox-backup-client key paperkey --output-format text > qrkey.txt
Restoring Data
@ -379,11 +394,11 @@ periodic recovery tests to ensure that you can access the data in
case of problems.
First, you need to find the snapshot which you want to restore. The snapshot
command provides a list of all the snapshots on the server:
list command provides a list of all the snapshots on the server:
.. code-block:: console
# proxmox-backup-client snapshots
# proxmox-backup-client snapshot list
┌────────────────────────────────┬─────────────┬────────────────────────────────────┐
│ snapshot │ size │ files │
╞════════════════════════════════╪═════════════╪════════════════════════════════════╡
@ -535,6 +550,29 @@ To remove the ticket, issue a logout:
# proxmox-backup-client logout
.. _changing-backup-owner:
Changing the Owner of a Backup Group
------------------------------------
By default, the owner of a backup group is the user which was used to originally
create that backup group (or in the case of sync jobs, ``root@pam``). This
means that if a user ``mike@pbs`` created a backup, another user ``john@pbs``
cannot be used to create backups in that same backup group. In case you want
to change the owner of a backup, you can do so with the below command, using a
user that has ``Datastore.Modify`` privileges on the datastore.
.. code-block:: console
# proxmox-backup-client change-owner vm/103 john@pbs
This can also be done from within the web interface, by navigating to the
`Content` section of the datastore that contains the backup group and
selecting the user icon under the `Actions` column. Common cases for this could
be to change the owner of a sync job from ``root@pam``, or to repurpose a
backup group.
.. _backup-pruning:
Pruning and Removing Backups
@ -545,7 +583,7 @@ command:
.. code-block:: console
# proxmox-backup-client forget <snapshot>
# proxmox-backup-client snapshot forget <snapshot>
.. caution:: This command removes all archives in this backup

View File

@ -171,6 +171,8 @@ html_theme_options = {
'extra_nav_links': {
'Proxmox Homepage': 'https://proxmox.com',
'PDF': 'proxmox-backup.pdf',
'Prune Simulator' : 'prune-simulator/index.html',
'LTO Barcode Generator' : 'lto-barcode/index.html',
},
'sidebar_width': '320px',
@ -228,6 +230,10 @@ html_favicon = 'images/favicon.ico'
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_js_files = [
'custom.js',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.

7
docs/custom.js Normal file
View File

@ -0,0 +1,7 @@
window.addEventListener('DOMContentLoaded', (event) => {
let activeSection = document.querySelector("a.current");
if (activeSection) {
// https://developer.mozilla.org/en-US/docs/Web/API/Element/scrollIntoView
activeSection.scrollIntoView({ block: 'center' });
}
});

View File

@ -27,7 +27,7 @@ How long will my Proxmox Backup Server version be supported?
+-----------------------+--------------------+---------------+------------+--------------------+
|Proxmox Backup Version | Debian Version | First Release | Debian EOL | Proxmox Backup EOL |
+=======================+====================+===============+============+====================+
|Proxmox Backup 1.x | Debian 10 (Buster) | tba | tba | tba |
|Proxmox Backup 1.x | Debian 10 (Buster) | 2020-11 | tba | tba |
+-----------------------+--------------------+---------------+------------+--------------------+
@ -53,9 +53,12 @@ checksums. This manifest file is used to verify the integrity of each backup.
When backing up to remote servers, do I have to trust the remote server?
------------------------------------------------------------------------
Proxmox Backup Server supports client-side encryption, meaning your data is
encrypted before it reaches the server. Thus, in the event that an attacker
gains access to the server, they will not be able to read the data.
Proxmox Backup Server transfers data via `Transport Layer Security (TLS)
<https://en.wikipedia.org/wiki/Transport_Layer_Security>`_ and additionally
supports client-side encryption. This means that data is transferred securely
and can be encrypted before it reaches the server. Thus, in the event that an
attacker gains access to the server or any point of the network, they will not
be able to read the data.
.. note:: Encryption is not enabled by default. To set up encryption, see the
`Encryption

View File

@ -4,7 +4,7 @@ Graphical User Interface
Proxmox Backup Server offers an integrated, web-based interface to manage the
server. This means that you can carry out all administration tasks through your
web browser, and that you don't have to worry about installing extra management
tools. The web interface also provides a built in console, so if you prefer the
tools. The web interface also provides a built-in console, so if you prefer the
command line or need some extra control, you have this option.
The web interface can be accessed via https://youripaddress:8007. The default
@ -28,7 +28,6 @@ Login
-----
.. image:: images/screenshots/pbs-gui-login-window.png
:width: 250
:align: right
:alt: PBS login window
@ -44,14 +43,13 @@ GUI Overview
------------
.. image:: images/screenshots/pbs-gui-dashboard.png
:width: 250
:align: right
:alt: PBS GUI Dashboard
The Proxmox Backup Server web interface consists of 3 main sections:
* **Header**: At the top. This shows version information, and contains buttons to view
documentation, monitor running tasks, and logout.
documentation, monitor running tasks, set the language and logout.
* **Sidebar**: On the left. This contains the configuration options for
the server.
* **Configuration Panel**: In the center. This contains the control interface for the
@ -79,18 +77,17 @@ Configuration
The Configuration section contains some system configuration options, such as
time and network configuration. It also contains the following subsections:
* **User Management**: Add users and manage accounts
* **Permissions**: Manage permissions for various users
* **Access Control**: Add and manage users, API tokens, and the permissions
associated with these items
* **Remotes**: Add, edit and remove remotes (see :term:`Remote`)
* **Sync Jobs**: Manage and run sync jobs to remotes
* **Subscription**: Upload a subscription key and view subscription status
* **Subscription**: Upload a subscription key, view subscription status and
access a text-based system report.
Administration
^^^^^^^^^^^^^^
.. image:: images/screenshots/pbs-gui-administration-serverstatus.png
:width: 250
:align: right
:alt: Administration: Server Status overview
@ -105,7 +102,6 @@ tasks and information. These are:
* **Tasks**: Task history with multiple filter options
.. image:: images/screenshots/pbs-gui-disks.png
:width: 250
:align: right
:alt: Administration: Disks
@ -120,16 +116,21 @@ The administration menu item also contains a disk management subsection:
Datastore
^^^^^^^^^
.. image:: images/screenshots/pbs-gui-datastore.png
:width: 250
.. image:: images/screenshots/pbs-gui-datastore-summary.png
:align: right
:alt: Datastore Configuration
The Datastore section provides an interface for creating and managing
datastores. It contains a subsection for each datastore on the system, in
which you can use the top panel to view:
The Datastore section contains interfaces for creating and managing
datastores. It contains a button to create a new datastore on the server, as
well as a subsection for each datastore on the system, in which you can use the
top panel to view:
* **Summary**: Access a range of datastore usage statistics
* **Content**: Information on the datastore's backup groups and their respective
contents
* **Statistics**: Usage statistics for the datastore
* **Permissions**: View and manage permissions for the datastore
* **Prune & GC**: Schedule :ref:`pruning <backup-pruning>` and :ref:`garbage
collection <garbage-collection>` operations, and run garbage collection
manually
* **Sync Jobs**: Create, manage and run :ref:`syncjobs` from remote servers
* **Verify Jobs**: Create, manage and run :ref:`maintenance_verification` jobs on the
datastore

(Binary image files changed in this range: several added, a few updated; contents not shown.)

View File

@ -9,7 +9,7 @@ Debian_ from the provided package repository.
.. include:: package-repositories.rst
Server installation
Server Installation
-------------------
The backup server stores the actual backed up data and provides a web based GUI
@ -37,22 +37,21 @@ Download the ISO from |DOWNLOADS|.
It includes the following:
* The `Proxmox Backup`_ server installer, which partitions the local
disk(s) with ext4, ext3, xfs or ZFS, and installs the operating
system
disk(s) with ext4, xfs or ZFS, and installs the operating system
* Complete operating system (Debian Linux, 64-bit)
* Our Linux kernel with ZFS support
* Proxmox Linux kernel with ZFS support
* Complete tool-set to administer backups and all necessary resources
* Web based GUI management interface
* Web based management interface
.. note:: During the installation process, the complete server
is used by default and all existing data is removed.
Install `Proxmox Backup`_ server on Debian
Install `Proxmox Backup`_ Server on Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox ships as a set of Debian packages which can be installed on top of a
@ -84,11 +83,11 @@ support, and a set of common and useful packages.
when LVM_ or ZFS_ is used. The network configuration is completely up to you
as well.
.. note:: You can access the web interface of the Proxmox Backup Server with
your web browser, using HTTPS on port 8007. For example at
``https://<ip-or-dns-name>:8007``
.. Note:: You can access the web interface of the Proxmox Backup Server with
your web browser, using HTTPS on port 8007. For example at
``https://<ip-or-dns-name>:8007``
Install Proxmox Backup server on `Proxmox VE`_
Install Proxmox Backup Server on `Proxmox VE`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
After configuring the
@ -104,14 +103,14 @@ After configuring the
server to store backups. Should the hypervisor server fail, you can
still access the backups.
.. note::
You can access the web interface of the Proxmox Backup Server with your web
browser, using HTTPS on port 8007. For example at ``https://<ip-or-dns-name>:8007``
.. Note:: You can access the web interface of the Proxmox Backup Server with
your web browser, using HTTPS on port 8007. For example at
``https://<ip-or-dns-name>:8007``
Client installation
Client Installation
-------------------
Install `Proxmox Backup`_ client on Debian
Install `Proxmox Backup`_ Client on Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox ships as a set of Debian packages to be installed on

View File

@ -14,11 +14,12 @@ It supports deduplication, compression, and authenticated
encryption (AE_). Using :term:`Rust` as the implementation language guarantees high
performance, low resource usage, and a safe, high-quality codebase.
Proxmox Backup uses state of the art cryptography for client communication and
backup content :ref:`encryption <encryption>`. Encryption is done on the
client side, making it safer to back up data to targets that are not fully
trusted.
Proxmox Backup uses state of the art cryptography for both client-server
communication and backup content :ref:`encryption <encryption>`. All
client-server communication uses `TLS
<https://en.wikipedia.org/wiki/Transport_Layer_Security>`_, and backup data can
be encrypted on the client-side before sending, making it safer to back up data
to targets that are not fully trusted.
Architecture
------------
@ -65,8 +66,9 @@ Main Features
several gigabytes of data per second.
:Encryption: Backups can be encrypted on the client-side, using AES-256 in
Galois/Counter Mode (GCM_) mode. This authenticated encryption (AE_) mode
provides very high performance on modern hardware.
Galois/Counter Mode (GCM_). This authenticated encryption (AE_) mode
provides very high performance on modern hardware. In addition to client-side
encryption, all data is transferred via a secure TLS connection.
:Web interface: Manage the Proxmox Backup Server with the integrated, web-based
user interface.
@ -127,8 +129,7 @@ language.
-- `The Rust Programming Language <https://doc.rust-lang.org/book/ch00-00-introduction.html>`_
.. todo:: further explain the software stack
.. _get_help:
Getting Help
------------

351
docs/lto-barcode/code39.js Normal file
View File

@ -0,0 +1,351 @@
// Code39 barcode generator
// see https://en.wikipedia.org/wiki/Code_39
// IBM LTO Ultrium Cartridge Label Specification
// http://www-01.ibm.com/support/docview.wss?uid=ssg1S7000429
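// Each Code 39 symbol is nine elements (five bars and four spaces), three of
// them wide. Notation below: B = wide bar, b = narrow bar, S = wide space,
// s = narrow space.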
let code39_codes = {
"1": ['B', 's', 'b', 'S', 'b', 's', 'b', 's', 'B'],
"A": ['B', 's', 'b', 's', 'b', 'S', 'b', 's', 'B'],
"K": ['B', 's', 'b', 's', 'b', 's', 'b', 'S', 'B'],
"U": ['B', 'S', 'b', 's', 'b', 's', 'b', 's', 'B'],
"2": ['b', 's', 'B', 'S', 'b', 's', 'b', 's', 'B'],
"B": ['b', 's', 'B', 's', 'b', 'S', 'b', 's', 'B'],
"L": ['b', 's', 'B', 's', 'b', 's', 'b', 'S', 'B'],
"V": ['b', 'S', 'B', 's', 'b', 's', 'b', 's', 'B'],
"3": ['B', 's', 'B', 'S', 'b', 's', 'b', 's', 'b'],
"C": ['B', 's', 'B', 's', 'b', 'S', 'b', 's', 'b'],
"M": ['B', 's', 'B', 's', 'b', 's', 'b', 'S', 'b'],
"W": ['B', 'S', 'B', 's', 'b', 's', 'b', 's', 'b'],
"4": ['b', 's', 'b', 'S', 'B', 's', 'b', 's', 'B'],
"D": ['b', 's', 'b', 's', 'B', 'S', 'b', 's', 'B'],
"N": ['b', 's', 'b', 's', 'B', 's', 'b', 'S', 'B'],
"X": ['b', 'S', 'b', 's', 'B', 's', 'b', 's', 'B'],
"5": ['B', 's', 'b', 'S', 'B', 's', 'b', 's', 'b'],
"E": ['B', 's', 'b', 's', 'B', 'S', 'b', 's', 'b'],
"O": ['B', 's', 'b', 's', 'B', 's', 'b', 'S', 'b'],
"Y": ['B', 'S', 'b', 's', 'B', 's', 'b', 's', 'b'],
"6": ['b', 's', 'B', 'S', 'B', 's', 'b', 's', 'b'],
"F": ['b', 's', 'B', 's', 'B', 'S', 'b', 's', 'b'],
"P": ['b', 's', 'B', 's', 'B', 's', 'b', 'S', 'b'],
"Z": ['b', 'S', 'B', 's', 'B', 's', 'b', 's', 'b'],
"7": ['b', 's', 'b', 'S', 'b', 's', 'B', 's', 'B'],
"G": ['b', 's', 'b', 's', 'b', 'S', 'B', 's', 'B'],
"Q": ['b', 's', 'b', 's', 'b', 's', 'B', 'S', 'B'],
"-": ['b', 'S', 'b', 's', 'b', 's', 'B', 's', 'B'],
"8": ['B', 's', 'b', 'S', 'b', 's', 'B', 's', 'b'],
"H": ['B', 's', 'b', 's', 'b', 'S', 'B', 's', 'b'],
"R": ['B', 's', 'b', 's', 'b', 's', 'B', 'S', 'b'],
".": ['B', 'S', 'b', 's', 'b', 's', 'B', 's', 'b'],
"9": ['b', 's', 'B', 'S', 'b', 's', 'B', 's', 'b'],
"I": ['b', 's', 'B', 's', 'b', 'S', 'B', 's', 'b'],
"S": ['b', 's', 'B', 's', 'b', 's', 'B', 'S', 'b'],
" ": ['b', 'S', 'B', 's', 'b', 's', 'B', 's', 'b'],
"0": ['b', 's', 'b', 'S', 'B', 's', 'B', 's', 'b'],
"J": ['b', 's', 'b', 's', 'B', 'S', 'B', 's', 'b'],
"T": ['b', 's', 'b', 's', 'B', 's', 'B', 'S', 'b'],
"*": ['b', 'S', 'b', 's', 'B', 's', 'B', 's', 'b']
};
let colors = [
'#BB282E',
'#FAE54A',
'#9AC653',
'#01A5E2',
'#9EAAB6',
'#D97E35',
'#E27B99',
'#67A945',
'#F6B855',
'#705A81'
];
let lto_label_width = 70;
let lto_label_height = 17;
function foreach_label(page_layout, callback) {
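// Walk the label grid row by row, calling callback(column, row, count, x, y)
// for every label position that still fits on the page.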
let count = 0;
let row = 0;
let height = page_layout.margin_top;
while ((height + page_layout.label_height) <= page_layout.page_height) {
let column = 0;
let width = page_layout.margin_left;
while ((width + page_layout.label_width) <= page_layout.page_width) {
callback(column, row, count, width, height);
count += 1;
column += 1;
width += page_layout.label_width;
width += page_layout.column_spacing;
}
row += 1;
height += page_layout.label_height;
height += page_layout.row_spacing;
}
}
function compute_max_labels(page_layout) {
let max_labels = 0;
foreach_label(page_layout, function() { max_labels += 1; });
return max_labels;
}
function svg_label(mode, label, label_type, pagex, pagey, label_borders) {
let svg = "";
if (label.length != 6) {
throw "wrong label length";
}
if (label_type.length != 2) {
throw "wrong label_type length";
}
let ratio = 2.75;
let parts = 3*ratio + 6; // 3*wide + 6*small;
let barcode_width = (lto_label_width/12)*10; // 10*code + 2margin
let small = barcode_width/(parts*10 + 9);
let code_width = small*parts;
let wide = small*ratio;
let xpos = pagex + code_width;
let height = 12;
if (mode === 'placeholder') {
if (label_borders) {
svg += `<rect class='unprintable' x='${pagex}' y='${pagey}' width='${lto_label_width}' height='${lto_label_height}' fill='none' style='stroke:black;stroke-width:0.1;'/>`;
}
return svg;
}
if (label_borders) {
svg += `<rect x='${pagex}' y='${pagey}' width='${lto_label_width}' height='${lto_label_height}' fill='none' style='stroke:black;stroke-width:0.1;'/>`;
}
if (mode === "color" || mode == "frame") {
let w = lto_label_width/8;
let h = lto_label_height - height;
for (var i = 0; i < 7; i++) {
let textx = w/2 + pagex + i*w;
let texty = pagey;
let fill = "none";
if (mode === "color" && (i < 6)) {
let letter = label.charAt(i);
if (letter >= '0' && letter <= '9') {
fill = colors[parseInt(letter, 10)];
}
}
svg += `<rect x='${textx}' y='${texty}' width='${w}' height='${h}' style='stroke:black;stroke-width:0.2;fill:${fill};'/>`;
if (i == 6) {
textx += 3;
texty += 3.7;
svg += `<text x='${textx}' y='${texty}' style='font-weight:bold;font-size:3px;font-family:sans-serif;'>${label_type}</text>`;
} else {
let letter = label.charAt(i);
textx += 3.5;
texty += 4;
svg += `<text x='${textx}' y='${texty}' style='font-weight:bold;font-size:4px;font-family:sans-serif;'>${letter}</text>`;
}
}
}
let raw_label = `*${label}${label_type}*`;
for (var i = 0; i < raw_label.length; i++) {
let letter = raw_label.charAt(i);
let code = code39_codes[letter];
if (code === undefined) {
throw `unable to encode letter '${letter}' with code39`;
}
if (mode === "simple") {
let textx = xpos + code_width/2;
let texty = pagey + 4;
if (i > 0 && (i+1) < raw_label.length) {
svg += `<text x='${textx}' y='${texty}' style='font-weight:bold;font-size:4px;font-family:sans-serif;'>${letter}</text>`;
}
}
for (let c of code) {
if (c === 's') {
xpos += small;
continue;
}
if (c === 'S') {
xpos += wide;
continue;
}
let w = c === 'B' ? wide : small;
let ypos = pagey + lto_label_height - height;
svg += `<rect x='${xpos}' y='${ypos}' width='${w}' height='${height}' style='fill:black'/>`;
xpos = xpos + w;
}
xpos += small;
}
return svg;
}
function html_page_header() {
let html = "<html5>";
html += "<style>";
/* no page margins */
html += "@page{margin-left: 0px;margin-right: 0px;margin-top: 0px;margin-bottom: 0px;}";
/* to hide things on printed page */
html += "@media print { .unprintable { visibility: hidden; } }";
html += "</style>";
//html += "<body onload='window.print()'>";
html += "<body style='background-color: white;'>";
return html;
}
function svg_page_header(page_width, page_height) {
let svg = "<svg version='1.1' xmlns='http://www.w3.org/2000/svg'";
svg += ` width='${page_width}mm' height='${page_height}mm' viewBox='0 0 ${page_width} ${page_height}'>`;
return svg;
}
function printBarcodePage() {
let frame = document.getElementById("print_frame");
let window = frame.contentWindow;
window.print();
}
function generate_barcode_page(target_id, page_layout, label_list, calibration) {
let svg = svg_page_header(page_layout.page_width, page_layout.page_height);
let c = calibration;
console.log(calibration);
svg += "<g id='barcode_page'";
if (c !== undefined) {
svg += ` transform='scale(${c.scalex}, ${c.scaley}),translate(${c.offsetx}, ${c.offsety})'`;
}
svg += '>';
foreach_label(page_layout, function(column, row, count, xpos, ypos) {
if (count >= label_list.length) { return; }
let item = label_list[count];
svg += svg_label(item.mode, item.label, item.tape_type, xpos, ypos, page_layout.label_borders);
});
svg += "</g>";
svg += "</svg>";
let html = html_page_header();
html += svg;
html += "</body>";
html += "</html>";
let frame = document.getElementById(target_id);
setupPrintFrame(frame, page_layout.page_width, page_layout.page_height);
let fwindow = frame.contentWindow;
fwindow.document.open();
fwindow.document.write(html);
fwindow.document.close();
}
function setupPrintFrame(frame, page_width, page_height) {
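// Size the preview iframe to the page dimensions: convert mm to screen
// pixels via an assumed base DPI, scaled by the device pixel ratio.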
let dpi = 98;
let dpr = window.devicePixelRatio;
if (dpr !== undefined) {
dpi = dpi*dpr;
}
let ppmm = dpi/25.4;
frame.width = page_width*ppmm;
frame.height = page_height*ppmm;
}
function generate_calibration_page(target_id, page_layout, calibration) {
let frame = document.getElementById(target_id);
setupPrintFrame(frame, page_layout.page_width, page_layout.page_height);
let svg = svg_page_header( page_layout.page_width, page_layout.page_height);
svg += "<defs>";
svg += "<marker id='endarrow' markerWidth='10' markerHeight='7' ";
svg += "refX='10' refY='3.5' orient='auto'><polygon points='0 0, 10 3.5, 0 7' />";
svg += "</marker>";
svg += "<marker id='startarrow' markerWidth='10' markerHeight='7' ";
svg += "refX='0' refY='3.5' orient='auto'><polygon points='10 0, 10 7, 0 3.5' />";
svg += "</marker>";
svg += "</defs>";
svg += "<rect x='50' y='50' width='100' height='100' style='fill:none;stroke-width:0.05;stroke:rgb(0,0,0)'/>";
let text_style = "style='font-weight:bold;font-size:4;font-family:sans-serif;'";
svg += `<text x='10' y='99' ${text_style}>Sx = 50mm</text>`;
svg += "<line x1='0' y1='100' x2='50' y2='100' stroke='#000' marker-end='url(#endarrow)' stroke-width='.25'/>";
svg += `<text x='60' y='99' ${text_style}>Dx = 100mm</text>`;
svg += "<line x1='50' y1='100' x2='150' y2='100' stroke='#000' marker-start='url(#startarrow)' marker-end='url(#endarrow)' stroke-width='.25'/>";
svg += `<text x='142' y='10' ${text_style} writing-mode='tb'>Sy = 50mm</text>`;
svg += "<line x1='140' y1='0' x2='140' y2='50' stroke='#000' marker-end='url(#endarrow)' stroke-width='.25'/>";
svg += `<text x='142' y='60' ${text_style} writing-mode='tb'>Dy = 100mm</text>`;
svg += "<line x1='140' y1='50' x2='140' y2='150' stroke='#000' marker-start='url(#startarrow)' marker-end='url(#endarrow)' stroke-width='.25'/>";
let c = calibration;
if (c !== undefined) {
svg += `<rect x='50' y='50' width='100' height='100' style='fill:none;stroke-width:0.05;stroke:rgb(255,0,0)' `;
svg += `transform='scale(${c.scalex}, ${c.scaley}),translate(${c.offsetx}, ${c.offsety})'/>`;
}
svg += "</svg>";
let html = html_page_header();
html += svg;
html += "</body>";
html += "</html>";
let fwindow = frame.contentWindow;
fwindow.document.open();
fwindow.document.write(html);
fwindow.document.close();
}

View File

@ -0,0 +1,51 @@
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no">
<title>Proxmox LTO Barcode Label Generator</title>
<link rel="stylesheet" type="text/css" href="extjs/theme-crisp/resources/theme-crisp-all.css">
<style>
/* fix action column icons */
.x-action-col-icon {
font-size: 13px;
height: 13px;
}
.x-grid-cell-inner-action-col {
padding: 6px 10px 5px;
}
.x-action-col-icon:before {
color: #555;
}
.x-action-col-icon {
color: #21BF4B;
}
.x-action-col-icon {
margin: 0 1px;
font-size: 14px;
}
.x-action-col-icon:before, .x-action-col-icon:after {
font-size: 14px;
}
.x-action-col-icon:hover:before, .x-action-col-icon:hover:after {
text-shadow: 1px 1px 1px #AAA;
font-weight: 800;
}
</style>
<link rel="stylesheet" type="text/css" href="font-awesome/css/font-awesome.css"/>
<script type="text/javascript" src="extjs/ext-all.js"></script>
<script type="text/javascript" src="code39.js"></script>
<script type="text/javascript" src="prefix-field.js"></script>
<script type="text/javascript" src="label-style.js"></script>
<script type="text/javascript" src="tape-type.js"></script>
<script type="text/javascript" src="paper-size.js"></script>
<script type="text/javascript" src="page-layout.js"></script>
<script type="text/javascript" src="page-calibration.js"></script>
<script type="text/javascript" src="label-list.js"></script>
<script type="text/javascript" src="label-setup.js"></script>
<script type="text/javascript" src="lto-barcode.js"></script>
</head>
<body>
</body>
</html>

View File

@ -0,0 +1,140 @@
Ext.define('LabelList', {
extend: 'Ext.grid.Panel',
alias: 'widget.labelList',
plugins: {
ptype: 'cellediting',
clicksToEdit: 1
},
selModel: 'cellmodel',
store: {
field: [
'prefix',
'tape_type',
{
type: 'integer',
name: 'start',
},
{
type: 'integer',
name: 'end',
},
],
data: [],
},
listeners: {
validateedit: function(editor, context) {
console.log(context.field);
console.log(context.value);
context.record.set(context.field, context.value);
context.record.commit();
return true;
},
},
columns: [
{
text: 'Prefix',
dataIndex: 'prefix',
flex: 1,
editor: {
xtype: 'prefixfield',
allowBlank: false,
},
renderer: function (value, metaData, record) {
console.log(record);
if (record.data.mode === 'placeholder') {
return "-";
}
return value;
},
},
{
text: 'Type',
dataIndex: 'tape_type',
flex: 1,
editor: {
xtype: 'ltoTapeType',
allowBlank: false,
},
renderer: function (value, metaData, record) {
console.log(record);
if (record.data.mode === 'placeholder') {
return "-";
}
return value;
},
},
{
text: 'Mode',
dataIndex: 'mode',
flex: 1,
editor: {
xtype: 'ltoLabelStyle',
allowBlank: false,
},
},
{
text: 'Start',
dataIndex: 'start',
flex: 1,
editor: {
xtype: 'numberfield',
allowBlank: false,
},
},
{
text: 'End',
dataIndex: 'end',
flex: 1,
editor: {
xtype: 'numberfield',
},
renderer: function(value) {
if (value === null || value === '' || value === undefined) {
return "Fill";
}
return value;
},
},
{
xtype: 'actioncolumn',
width: 75,
items: [
{
tooltip: 'Move Up',
iconCls: 'fa fa-arrow-up',
handler: function(grid, rowIndex) {
if (rowIndex < 1) { return; }
let store = grid.getStore();
let record = store.getAt(rowIndex);
store.removeAt(rowIndex);
store.insert(rowIndex - 1, record);
},
},
{
tooltip: 'Move Down',
iconCls: 'fa fa-arrow-down',
handler: function(grid, rowIndex) {
let store = grid.getStore();
if (rowIndex >= store.getCount()) { return; }
let record = store.getAt(rowIndex);
store.removeAt(rowIndex);
store.insert(rowIndex + 1, record);
},
},
{
tooltip: 'Delete',
iconCls: 'fa fa-scissors',
//iconCls: 'fa critical fa-trash-o',
handler: function(grid, rowIndex) {
grid.getStore().removeAt(rowIndex);
},
}
],
},
],
});

View File

@ -0,0 +1,107 @@
Ext.define('LabelSetupPanel', {
extend: 'Ext.panel.Panel',
alias: 'widget.labelSetupPanel',
layout: {
type: 'hbox',
align: 'stretch',
pack: 'start',
},
getValues: function() {
let me = this;
let values = {};
Ext.Array.each(me.query('[isFormField]'), function(field) {
let data = field.getSubmitData();
Ext.Object.each(data, function(name, val) {
let parsed = parseInt(val, 10);
values[name] = isNaN(parsed) ? val : parsed;
});
});
return values;
},
controller: {
xclass: 'Ext.app.ViewController',
init: function() {
let me = this;
let view = me.getView();
let list = view.down("labelList");
let store = list.getStore();
store.on('datachanged', function(store) {
view.fireEvent("listchanged", store);
});
store.on('update', function(store) {
view.fireEvent("listchanged", store);
});
},
onAdd: function() {
let list = this.lookupReference('label_list');
let view = this.getView();
let params = view.getValues();
list.getStore().add(params);
},
},
items: [
{
border: false,
layout: {
type: 'vbox',
align: 'stretch',
pack: 'start',
},
items: [
{
xtype: 'prefixfield',
name: 'prefix',
value: 'TEST',
fieldLabel: 'Prefix',
},
{
xtype: 'ltoTapeType',
name: 'tape_type',
fieldLabel: 'Type',
value: 'L8',
},
{
xtype: 'ltoLabelStyle',
name: 'mode',
fieldLabel: 'Mode',
value: 'color',
},
{
xtype: 'numberfield',
name: 'start',
fieldLabel: 'Start',
minValue: 0,
allowBlank: false,
value: 0,
},
{
xtype: 'numberfield',
name: 'end',
fieldLabel: 'End',
minValue: 0,
emptyText: 'Fill',
},
{
xtype: 'button',
text: 'Add',
handler: 'onAdd',
},
],
},
{
margin: "0 0 0 10",
xtype: 'labelList',
reference: 'label_list',
flex: 1,
},
],
});

View File

@ -0,0 +1,20 @@
Ext.define('LtoLabelStyle', {
extend: 'Ext.form.field.ComboBox',
alias: 'widget.ltoLabelStyle',
editable: false,
displayField: 'text',
valueField: 'value',
queryMode: 'local',
store: {
field: ['value', 'text'],
data: [
{ value: 'simple', text: "Simple" },
{ value: 'color', text: 'Color (frames with color)' },
{ value: 'frame', text: 'Frame (no color)' },
{ value: 'placeholder', text: 'Placeholder (empty)' },
],
},
});

View File

@ -0,0 +1,214 @@
// FIXME: HACK! Makes scrolling in number spinner work again. fixed in ExtJS >= 6.1
if (Ext.isFirefox) {
Ext.$eventNameMap.DOMMouseScroll = 'DOMMouseScroll';
}
function draw_labels(target_id, label_list, page_layout, calibration) {
let max_labels = compute_max_labels(page_layout);
let count_fixed = 0;
let count_fill = 0;
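// Count labels with a fixed start..end range separately from "fill" entries
// (no end given); fill entries share the label positions left over on the page.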
for (let i = 0; i < label_list.length; i++) {
let item = label_list[i];
if (item.end === null || item.end === '' || item.end === undefined) {
count_fill += 1;
continue;
}
if (item.end <= item.start) {
count_fixed += 1;
continue;
}
count_fixed += (item.end - item.start) + 1;
}
let rest = max_labels - count_fixed;
let fill_size = 1;
if (rest >= count_fill) {
fill_size = Math.floor(rest/count_fill);
}
let list = [];
let count_fill_2 = 0;
for (let i = 0; i < label_list.length; i++) {
let item = label_list[i];
let count;
if (item.end === null || item.end === '' || item.end === undefined) {
count_fill_2 += 1;
if (count_fill_2 === count_fill) {
count = rest;
} else {
count = fill_size;
}
rest -= count;
} else {
if (item.end <= item.start) {
count = 1;
} else {
count = (item.end - item.start) + 1;
}
}
for (let j = 0; j < count; j++) {
let id = item.start + j;
if (item.prefix.length == 6) {
list.push({
label: item.prefix,
tape_type: item.tape_type,
mode: item.mode,
id: id,
});
rest += count - j - 1;
break;
} else {
let pad_len = 6-item.prefix.length;
let label = item.prefix + id.toString().padStart(pad_len, '0');
if (label.length != 6) {
rest += count - j;
break;
}
list.push({
label: label,
tape_type: item.tape_type,
mode: item.mode,
id: id,
});
}
}
}
generate_barcode_page(target_id, page_layout, list, calibration);
}
Ext.define('MainView', {
extend: 'Ext.container.Viewport',
alias: 'widget.mainview',
layout: {
type: 'vbox',
align: 'stretch',
pack: 'start',
},
width: 800,
controller: {
xclass: 'Ext.app.ViewController',
update_barcode_preview: function() {
let me = this;
let view = me.getView();
let list_view = view.down("labelList");
let store = list_view.getStore();
let label_list = [];
store.each((record) => {
label_list.push(record.data);
});
let page_layout_view = view.down("pageLayoutPanel");
let page_layout = page_layout_view.getValues();
let calibration_view = view.down("pageCalibration");
let page_calibration = calibration_view.getValues();
draw_labels("print_frame", label_list, page_layout, page_calibration);
},
update_calibration_preview: function() {
let me = this;
let view = me.getView();
let page_layout_view = view.down("pageLayoutPanel");
let page_layout = page_layout_view.getValues();
let calibration_view = view.down("pageCalibration");
let page_calibration = calibration_view.getValues();
console.log(page_calibration);
generate_calibration_page('print_frame', page_layout, page_calibration);
},
control: {
labelSetupPanel: {
listchanged: function(store) {
this.update_barcode_preview();
},
activate: function() {
this.update_barcode_preview();
},
},
pageLayoutPanel: {
pagechanged: function(layout) {
this.update_barcode_preview();
},
activate: function() {
this.update_barcode_preview();
},
},
pageCalibration: {
calibrationchanged: function() {
this.update_calibration_preview();
},
activate: function() {
this.update_calibration_preview();
},
},
},
},
items: [
{
xtype: 'tabpanel',
items: [
{
xtype: 'labelSetupPanel',
title: 'Proxmox LTO Barcode Label Generator',
bodyPadding: 10,
},
{
xtype: 'pageLayoutPanel',
title: 'Page Layout',
bodyPadding: 10,
},
{
xtype: 'pageCalibration',
title: 'Printer Calibration',
bodyPadding: 10,
},
],
},
{
xtype: 'panel',
layout: "center",
title: 'Print Preview',
bodyStyle: "background-color: grey;",
bodyPadding: 10,
html: '<center><iframe id="print_frame" frameBorder="0"></iframe></center>',
border: false,
flex: 1,
scrollable: true,
tools:[{
type: 'print',
tooltip: 'Open Print Dialog',
handler: function(event, toolEl, panelHeader) {
printBarcodePage();
}
}],
},
],
});
Ext.onReady(function() {
Ext.create('MainView', {
renderTo: Ext.getBody(),
});
});

View File

@ -0,0 +1,142 @@
Ext.define('PageCalibration', {
extend: 'Ext.panel.Panel',
alias: 'widget.pageCalibration',
layout: {
type: 'hbox',
align: 'stretch',
pack: 'start',
},
getValues: function() {
let me = this;
let values = {};
Ext.Array.each(me.query('[isFormField]'), function(field) {
if (field.isValid()) {
let data = field.getSubmitData();
Ext.Object.each(data, function(name, val) {
let parsed = parseFloat(val, 10);
values[name] = isNaN(parsed) ? val : parsed;
});
}
});
if (values.d_x === undefined) { return; }
if (values.d_y === undefined) { return; }
if (values.s_x === undefined) { return; }
if (values.s_y === undefined) { return; }
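// Derive the printer correction from the measured calibration page: scale so
// the 100mm reference length prints as 100mm again, and shift so the
// reference square starts at the expected 50mm offset.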
let scalex = 100/values.d_x;
let scaley = 100/values.d_y;
let offsetx = ((50*scalex) - values.s_x)/scalex;
let offsety = ((50*scaley) - values.s_y)/scaley;
return {
scalex: scalex,
scaley: scaley,
offsetx: offsetx,
offsety: offsety,
};
},
controller: {
xclass: 'Ext.app.ViewController',
control: {
'field': {
change: function() {
let view = this.getView();
let param = view.getValues();
view.fireEvent("calibrationchanged", param);
},
},
},
},
items: [
{
border: false,
layout: {
type: 'vbox',
align: 'stretch',
pack: 'start',
},
items: [
{
xtype: 'displayfield',
fieldLabel: 'Start Offset Sx (mm)',
labelWidth: 150,
value: 50,
},
{
xtype: 'displayfield',
fieldLabel: 'Length Dx (mm)',
labelWidth: 150,
value: 100,
},
{
xtype: 'displayfield',
fieldLabel: 'Start Offset Sy (mm)',
labelWidth: 150,
value: 50,
},
{
xtype: 'displayfield',
fieldLabel: 'Length Dy (mm)',
labelWidth: 150,
value: 100,
},
],
},
{
border: false,
margin: '0 0 0 20',
layout: {
type: 'vbox',
align: 'stretch',
pack: 'start',
},
items: [
{
xtype: 'numberfield',
name: 's_x',
fieldLabel: 'Measured Start Offset Sx (mm)',
allowBlank: false,
labelWidth: 200,
},
{
xtype: 'numberfield',
name: 'd_x',
fieldLabel: 'Measured Length Dx (mm)',
allowBlank: false,
labelWidth: 200,
},
{
xtype: 'numberfield',
name: 's_y',
fieldLabel: 'Measured Start Offset Sy (mm)',
allowBlank: false,
labelWidth: 200,
},
{
xtype: 'numberfield',
name: 'd_y',
fieldLabel: 'Measured Length Dy (mm)',
allowBlank: false,
labelWidth: 200,
},
],
},
],
});

View File

@ -0,0 +1,167 @@
Ext.define('PageLayoutPanel', {
extend: 'Ext.panel.Panel',
alias: 'widget.pageLayoutPanel',
layout: {
type: 'hbox',
align: 'stretch',
pack: 'start',
},
getValues: function() {
let me = this;
let values = {};
Ext.Array.each(me.query('[isFormField]'), function(field) {
if (field.isValid()) {
let data = field.getSubmitData();
Ext.Object.each(data, function(name, val) {
values[name] = val;
});
}
});
let paper_size = values.paper_size || 'a4';
let param = Ext.apply({}, paper_sizes[paper_size]);
if (param === undefined) {
throw `unknown paper size ${paper_size}`;
}
param.paper_size = paper_size;
Ext.Object.each(values, function(name, val) {
let parsed = parseFloat(val, 10);
param[name] = isNaN(parsed) ? val : parsed;
});
return param;
},
controller: {
xclass: 'Ext.app.ViewController',
control: {
'paperSize': {
change: function(field, paper_size) {
let view = this.getView();
let defaults = paper_sizes[paper_size];
let names = [
'label_width',
'label_height',
'margin_left',
'margin_top',
'column_spacing',
'row_spacing',
];
for (let i = 0; i < names.length; i++) {
let name = names[i];
let f = view.down(`field[name=${name}]`);
let v = defaults[name];
if (v != undefined) {
f.setValue(v);
f.setDisabled(defaults.fixed);
} else {
f.setDisabled(false);
}
}
},
},
'field': {
change: function() {
let view = this.getView();
let param = view.getValues();
view.fireEvent("pagechanged", param);
},
},
},
},
items: [
{
border: false,
layout: {
type: 'vbox',
align: 'stretch',
pack: 'start',
},
items: [
{
xtype: 'paperSize',
name: 'paper_size',
value: 'a4',
fieldLabel: 'Paper Size',
},
{
xtype: 'numberfield',
name: 'label_width',
fieldLabel: 'Label width',
minValue: 70,
allowBlank: false,
value: 70,
},
{
xtype: 'numberfield',
name: 'label_height',
fieldLabel: 'Label height',
minValue: 17,
allowBlank: false,
value: 17,
},
{
xtype: 'checkbox',
name: 'label_borders',
fieldLabel: 'Label borders',
value: true,
inputValue: true,
},
],
},
{
border: false,
margin: '0 0 0 10',
layout: {
type: 'vbox',
align: 'stretch',
pack: 'start',
},
items: [
{
xtype: 'numberfield',
name: 'margin_left',
fieldLabel: 'Left margin',
minValue: 0,
allowBlank: false,
value: 0,
},
{
xtype: 'numberfield',
name: 'margin_top',
fieldLabel: 'Top margin',
minValue: 0,
allowBlank: false,
value: 4,
},
{
xtype: 'numberfield',
name: 'column_spacing',
fieldLabel: 'Column spacing',
minValue: 0,
allowBlank: false,
value: 0,
},
{
xtype: 'numberfield',
name: 'row_spacing',
fieldLabel: 'Row spacing',
minValue: 0,
allowBlank: false,
value: 0,
},
],
},
],
});

View File

@ -0,0 +1,49 @@
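// Known paper formats and (fixed) label sheet layouts; all dimensions in mm.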
let paper_sizes = {
a4: {
comment: 'A4 (plain)',
page_width: 210,
page_height: 297,
},
letter: {
comment: 'Letter (plain)',
page_width: 215.9,
page_height: 279.4,
},
avery3420: {
fixed: true,
comment: 'Avery Zweckform 3420',
page_width: 210,
page_height: 297,
label_width: 70,
label_height: 17,
margin_left: 0,
margin_top: 4,
column_spacing: 0,
row_spacing: 0,
},
}
function paper_size_combo_data() {
let data = [];
for (let [key, value] of Object.entries(paper_sizes)) {
data.push({ value: key, text: value.comment });
}
return data;
}
Ext.define('PaperSize', {
extend: 'Ext.form.field.ComboBox',
alias: 'widget.paperSize',
editable: false,
displayField: 'text',
valueField: 'value',
queryMode: 'local',
store: {
field: ['value', 'text'],
data: paper_size_combo_data(),
},
});

View File

@ -0,0 +1,15 @@
Ext.define('PrefixField', {
extend: 'Ext.form.field.Text',
alias: 'widget.prefixfield',
maxLength: 6,
allowBlank: false,
maskRe: /([A-Za-z]+)$/,
listeners: {
change: function(field) {
field.setValue(field.getValue().toUpperCase());
},
},
});

View File

@ -0,0 +1,23 @@
Ext.define('LtoTapeType', {
extend: 'Ext.form.field.ComboBox',
alias: 'widget.ltoTapeType',
editable: false,
displayField: 'text',
valueField: 'value',
queryMode: 'local',
store: {
field: ['value', 'text'],
data: [
{ value: 'L8', text: "LTO-8" },
{ value: 'L7', text: "LTO-7" },
{ value: 'L6', text: "LTO-6" },
{ value: 'L5', text: "LTO-5" },
{ value: 'L4', text: "LTO-4" },
{ value: 'L3', text: "LTO-3" },
{ value: 'CU', text: "Cleaning Unit" },
],
},
});

View File

@ -1,13 +1,184 @@
Maintenance Tasks
=================
.. _maintenance_pruning:
Pruning
-------
Prune lets you specify which backup snapshots you want to keep. The
following retention options are available:
``keep-last <N>``
Keep the last ``<N>`` backup snapshots.
``keep-hourly <N>``
Keep backups for the last ``<N>`` hours. If there is more than one
backup for a single hour, only the latest is kept.
``keep-daily <N>``
Keep backups for the last ``<N>`` days. If there is more than one
backup for a single day, only the latest is kept.
``keep-weekly <N>``
Keep backups for the last ``<N>`` weeks. If there is more than one
backup for a single week, only the latest is kept.
.. note:: Weeks start on Monday and end on Sunday. The software
uses the `ISO week date`_ system and handles weeks at
the end of the year correctly.
``keep-monthly <N>``
Keep backups for the last ``<N>`` months. If there is more than one
backup for a single month, only the latest is kept.
``keep-yearly <N>``
Keep backups for the last ``<N>`` years. If there is more than one
backup for a single year, only the latest is kept.
The retention options are processed in the order given above. Each option
only covers backups within its time period. The next option does not take care
of already covered backups. It will only consider older backups.
Unfinished and incomplete backups will be removed by the prune command unless
they are newer than the last successful backup. In this case, the last failed
backup is retained.
Prune Simulator
^^^^^^^^^^^^^^^
You can use the built-in `prune simulator <prune-simulator/index.html>`_
to explore the effect of different retention options with various backup
schedules.
Manual Pruning
^^^^^^^^^^^^^^
.. image:: images/screenshots/pbs-gui-datastore-content-prune-group.png
:target: _images/pbs-gui-datastore-content-prune-group.png
:align: right
:alt: Prune and garbage collection options
To access pruning functionality for a specific backup group, you can use the
prune command line option discussed in :ref:`backup-pruning`, or navigate to
the **Content** tab of the datastore and click the scissors icon in the
**Actions** column of the relevant backup group.
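As an illustrative sketch (``host/myhost`` and ``store1`` are placeholder names,
and the exact snapshots kept depend on your backup times), the same retention
options can be passed to the client's prune command, here as a dry run:
.. code-block:: console
# proxmox-backup-client prune host/myhost --dry-run --keep-last 3 --keep-daily 13 --keep-weekly 8 --repository store1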
Prune Schedules
^^^^^^^^^^^^^^^
To prune on a datastore level, scheduling options can be found under the
**Prune & GC** tab of the datastore. Here you can set retention settings and
edit the interval at which pruning takes place.
.. image:: images/screenshots/pbs-gui-datastore-prunegc.png
:target: _images/pbs-gui-datastore-prunegc.png
:align: right
:alt: Prune and garbage collection options
Retention Settings Example
^^^^^^^^^^^^^^^^^^^^^^^^^^
The backup frequency and retention of old backups may depend on how often data
changes, and how important an older state may be, in a specific workload.
When backups act as a company's document archive, there may also be legal
requirements for how long backup snapshots must be kept.
For this example, we assume that you are doing daily backups, have a retention
period of 10 years, and that the interval between stored backups gradually grows.
- **keep-last:** ``3`` - even if only daily backups are taken, an admin may want
to create an extra one just before or after a big upgrade. Setting keep-last
ensures this.
- **keep-hourly:** not set - for daily backups this is not relevant. You already
cover extra manual backups with keep-last.
- **keep-daily:** ``13`` - together with keep-last, which covers at least one
day, this ensures that you have at least two weeks of backups.
- **keep-weekly:** ``8`` - ensures that you have at least two full months of
weekly backups.
- **keep-monthly:** ``11`` - together with the previous keep settings, this
ensures that you have at least a year of monthly backups.
- **keep-yearly:** ``9`` - this is for the long term archive. As you covered the
current year with the previous options, you would set this to nine for the
remaining ones, giving you a total of at least 10 years of coverage.
We recommend that you use a higher retention period than is minimally required
by your environment; you can always reduce it if you find it is unnecessarily
high, but you cannot recreate backup snapshots from the past.
.. _maintenance_gc:
Garbage Collection
------------------
You can monitor and run :ref:`garbage collection <garbage-collection>` on the
Proxmox Backup Server using the ``garbage-collection`` subcommand of
``proxmox-backup-manager``. You can use the ``start`` subcommand to manually start garbage
collection on an entire datastore and the ``status`` subcommand to see
attributes relating to the :ref:`garbage collection <garbage-collection>`.
.. todo:: Add section on verification
This functionality can also be accessed in the GUI, by navigating to **Prune &
GC** from the top panel. From here, you can edit the schedule at which garbage
collection runs and manually start the operation.
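For example, assuming a datastore named ``store1``, garbage collection can be
started and monitored from the shell:
.. code-block:: console
# proxmox-backup-manager garbage-collection start store1
# proxmox-backup-manager garbage-collection status store1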
.. _maintenance_verification:
Verification
------------
.. image:: images/screenshots/pbs-gui-datastore-verifyjob-add.png
:target: _images/pbs-gui-datastore-verifyjob-add.png
:align: right
:alt: Adding a verify job
Proxmox Backup offers various verification options to ensure that backup data is
intact. Verification is generally carried out through the creation of verify
jobs. These are scheduled tasks that run verification at a given interval (see
:ref:`calendar-events`). With these, you can set whether already verified
snapshots are ignored, as well as set a time period, after which verified
snapshots are checked again. The interface for creating verify jobs can be found under the
**Verify Jobs** tab of the datastore.
.. Note:: It is recommended that you reverify all backups at least monthly, even
if a previous verification was successful. This is because physical drives
are susceptible to damage over time, which can cause an old, working backup
to become corrupted in a process known as `bit rot/data degradation
<https://en.wikipedia.org/wiki/Data_degradation>`_. It is good practice to
have a regularly recurring (hourly/daily) verification job, which checks new
and expired backups, then another weekly/monthly job that will reverify
everything. This way, there will be no surprises when it comes to restoring
data.
Aside from using verify jobs, you can also run verification manually on entire
datastores, backup groups, or snapshots. To do this, navigate to the **Content**
tab of the datastore and either click *Verify All*, or select the *V.* icon from
the *Actions* column in the table.
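Manual verification can also be triggered from the command line; as a sketch,
assuming the ``verify`` subcommand of ``proxmox-backup-manager`` is available in
your version and using ``store1`` as a placeholder datastore name:
.. code-block:: console
# proxmox-backup-manager verify store1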
.. _maintenance_notification:
Notifications
-------------
Proxmox Backup Server can send you notification emails about the results of
automatically scheduled verification, garbage-collection and synchronization
tasks. By default, notifications are sent to the email address configured for
the `root@pam` user. You can instead set this user for each datastore.
You can also change the level of notifications received per task type; the
following options are available:
* Always: send a notification for any scheduled task, independent of the
outcome
* Errors: send a notification for any scheduled task resulting in an error
* Never: do not send any notification at all

View File

@ -59,13 +59,13 @@ Sync Jobs
:alt: Add a Sync Job
Sync jobs are configured to pull the contents of a datastore on a **Remote** to
a local datastore. You can manage sync jobs in the web interface, from the
**Sync Jobs** tab of the datastore which you'd like to set one up for, or using
the ``proxmox-backup-manager sync-job`` command. The configuration information
for sync jobs is stored at ``/etc/proxmox-backup/sync.cfg``. To create a new
sync job, click the add button in the GUI, or use the ``create`` subcommand.
After creating a sync job, you can either start it manually from the GUI or
provide it with a schedule (see :ref:`calendar-events`) to run regularly.
.. code-block:: console
@ -79,4 +79,17 @@ either start it manually on the GUI or provide it with a schedule (see
└────────────┴───────┴────────┴──────────────┴───────────┴─────────┘
# proxmox-backup-manager sync-job remove pbs2-local
For setting up sync jobs, the configuring user needs the following permissions:
#. ``Remote.Read`` on the ``/remote/{remote}/{remote-store}`` path
#. at least ``Datastore.Backup`` on the local target datastore (``/datastore/{store}``)
If the ``remove-vanished`` option is set, ``Datastore.Prune`` is required on
the local datastore as well. If the ``owner`` option is not set (defaulting to
``root@pam``) or set to something other than the configuring user,
``Datastore.Modify`` is required as well.
.. note:: A sync job can only sync backup groups that the configured remote's
user/API token can read. If a remote is configured with a user/API token that
only has ``Datastore.Backup`` privileges, only the limited set of accessible
snapshots owned by that user/API token can be synced.

View File

@ -1,3 +1,5 @@
.. _sysadmin_network_configuration:
Network Management
==================

View File

@ -26,11 +26,8 @@ update``.
.. FIXME for 7.0: change security update suite to bullseye-security
In addition, you need a package repository from Proxmox to get Proxmox Backup
updates.
SecureApt
~~~~~~~~~
@ -72,68 +69,63 @@ Here, the output should be:
f3f6c5a3a67baf38ad178e5ff1ee270c /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
.. comment
`Proxmox Backup`_ Enterprise Repository
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This will be the default, stable, and recommended repository. It is available for
all `Proxmox Backup`_ subscription users. It contains the most stable packages,
and is suitable for production use. The ``pbs-enterprise`` repository is
enabled by default:
.. code-block:: sources.list
:caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
To never miss important security fixes, the superuser (``root@pam`` user) is
notified via email about new packages as soon as they are available. The
change-log and details of each package can be viewed in the GUI (if available).
Please note that you need a valid subscription key to access this
repository. More information regarding subscription levels and pricing can be
found at https://www.proxmox.com/en/proxmox-backup-server/pricing
.. note:: You can disable this repository by commenting out the above line
using a `#` (at the start of the line). This prevents error messages if you do
not have a subscription key. Please configure the ``pbs-no-subscription``
repository in that case.
`Proxmox Backup`_ No-Subscription Repository
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As the name suggests, you do not need a subscription key to access
this repository. It can be used for testing and non-production
use. It is not recommended to use it on production servers, because these
packages are not always heavily tested and validated.
We recommend to configure this repository in ``/etc/apt/sources.list``.
.. code-block:: sources.list
:caption: File: ``/etc/apt/sources.list``
deb http://ftp.debian.org/debian buster main contrib
deb http://ftp.debian.org/debian buster-updates main contrib
# PBS pbs-no-subscription repository provided by proxmox.com,
# NOT recommended for production use
deb http://download.proxmox.com/debian/pbs buster pbs-no-subscription
# security updates
deb http://security.debian.org/debian-security buster/updates main contrib
`Proxmox Backup`_ Test Repository
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This repository contains the latest packages and is heavily used by developers
to test new features.
.. .. warning:: the ``pbstest`` repository should (as the name implies)
only be used to test new features or bug fixes.
@ -145,7 +137,3 @@ You can access this repository by adding the following line to
:caption: sources.list entry for ``pbstest``
deb http://download.proxmox.com/debian/pbs buster pbstest
If you installed Proxmox Backup Server from the official beta ISO, you should
have this repository already configured in
``/etc/apt/sources.list.d/pbstest-beta.list``

View File

@ -0,0 +1,6 @@
Description
^^^^^^^^^^^
The ``pmtx`` command controls SCSI media changer devices (tape
autoloader).

docs/pmtx/man1.rst (new file, 28 lines)
View File

@ -0,0 +1,28 @@
==========================
pmtx
==========================
.. include:: ../epilog.rst
-------------------------------------------------------------
Control SCSI media changer devices (tape autoloaders)
-------------------------------------------------------------
:Author: |AUTHOR|
:Version: Version |VERSION|
:Manual section: 1
Synopsis
==========
.. include:: synopsis.rst
Description
============
.. include:: description.rst
.. include:: ../pbs-copyright.rst

View File

@ -5,7 +5,7 @@ proxmox-backup-client
.. include:: ../epilog.rst
-------------------------------------------------------------
Command line tool for Backup and Restore
-------------------------------------------------------------
:Author: |AUTHOR|

Binary file not shown (new image, 11 KiB).

View File

@ -0,0 +1,102 @@
<!DOCTYPE html>
<html>
<head>
<style>
/* similar to sphinx alabaster theme ones */
body {
max-width: 90ch;
margin-left: 2ch;
margin-right: 2ch;
line-height: 1.4em;
/* avoid the very high contrast of black on white, tone it down a bit */
color: #3E4349;
hyphens: auto;
text-align: left;
font-family: 'Open Sans', sans-serif;
font-size: 17px;
}
h1, h2, h3 {
font-family: Lato, sans-serif;
font-size: 150%;
line-height:1.2
}
tt, code {
background-color: #ecf0f3;
color: #222;
}
pre, tt, code {
font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
font-size: 0.9em;
}
div.note {
background-color: #EEE;
border: 1px solid #CCC;
margin: 10px 0;
padding: 0px 20px;
}
p.note-title {
font-weight: bolder;
padding: 0;
margin: 10px 0 0 0;
}
div.note > p.last {
margin: 5px 0 10px 0;
}
</style>
</head>
<body>
<p>A simulator to experiment with different backup schedules and prune
options.</p>
<h3>Schedule</h3>
<p>Select weekdays with the combobox and input hour and minute
specification separated by a colon, i.e. <code>HOUR:MINUTE</code>. Each of
<code>HOUR</code> and <code>MINUTE</code> can be either a single value or
one of the following:</p>
<ul class="simple">
<li>a comma-separated list: e.g., <code>01,02,03</code></li>
<li>a range: e.g., <code>01..10</code></li>
<li>a repetition: e.g., <code>05/10</code> (means starting at <code>5</code> every <code>10</code>)</li>
<li>a combination of the above: e.g., <code>01,05..10,12/02</code></li>
<li>a <code>*</code> for every possible value</li>
</ul>
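<p>For example, <code>0/6:30</code> matches 00:30, 06:30, 12:30 and 18:30,
while <code>*:00</code> matches every full hour.</p>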
<h3>Pruning</h3>
<p>Prune lets you systematically delete older backups, retaining backups for
the last given number of time intervals. The following retention options are
available:</p>
<dl class="docutils">
<dt><code class="docutils literal notranslate"><span class="pre">keep-last</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> backup snapshots.</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-hourly</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> hours. If there is more than one
backup for a single hour, only the latest is kept.</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-daily</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> days. If there is more than one
backup for a single day, only the latest is kept.</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-weekly</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> weeks. If there is more than one
backup for a single week, only the latest is kept.
<div class="last admonition note">
<p class="note-title">Note:</p>
<p class="last">Weeks start on Monday and end on Sunday. The software
uses the <a class="reference external" href="https://en.wikipedia.org/wiki/ISO_week_date">ISO week date</a> system and handles weeks at
the end of the year correctly.</p>
</div>
</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-monthly</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> months. If there is more than one
backup for a single month, only the latest is kept.</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-yearly</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> years. If there is more than one
backup for a single year, only the latest is kept.</dd>
</dl>
<p>The retention options are processed in the order given above. Each option
only covers backups within its time period. The next option does not take care
of already covered backups. It will only consider older backups.</p>
<p>For example, in a week covered by <code>keep-weekly</code>, one backup is
kept while all others are removed; <code>keep-monthly</code> then does not
consider backups from that week anymore, even if part of the week is part of
an earlier month.</p>
</body>
</html>

View File

@ -0,0 +1,45 @@
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no">
<title>PBS Prune Simulator</title>
<link rel="stylesheet" type="text/css" href="extjs/theme-crisp/resources/theme-crisp-all.css">
<style>
.cal {
margin: 5px;
}
.cal-day {
vertical-align: top;
width: 150px;
height: 75px; /* this is like min-height when used in tables */
border: #939393 1px solid;
color: #454545;
}
.cal-day-date {
border-bottom: #444 1px solid;
color: #000;
}
.strikethrough {
text-decoration: line-through;
}
.black {
color: #000;
}
.sun {
background-color: #ededed;
}
.first-of-month {
border-right: dashed black 4px;
}
.clear-trigger {
background-image: url(./clear-trigger.png);
}
</style>
<script type="text/javascript" src="extjs/ext-all.js"></script>
<script type="text/javascript" src="prune-simulator.js"></script>
</head>
<body></body>
</html>

View File

@ -0,0 +1,788 @@
// FIXME: HACK! Makes scrolling in number spinner work again. fixed in ExtJS >= 6.1
if (Ext.isFirefox) {
Ext.$eventNameMap.DOMMouseScroll = 'DOMMouseScroll';
}
Ext.onReady(function() {
const NOW = new Date();
const COLORS = {
'keep-last': 'orange',
'keep-hourly': 'purple',
'keep-daily': 'yellow',
'keep-weekly': 'green',
'keep-monthly': 'blue',
'keep-yearly': 'red',
'all zero': 'white',
};
const TEXT_COLORS = {
'keep-last': 'black',
'keep-hourly': 'white',
'keep-daily': 'black',
'keep-weekly': 'white',
'keep-monthly': 'white',
'keep-yearly': 'white',
'all zero': 'black',
};
Ext.define('PBS.prunesimulator.Documentation', {
extend: 'Ext.Panel',
alias: 'widget.prunesimulatorDocumentation',
html: '<iframe style="width:100%;height:100%;border:0px;" src="./documentation.html"/>',
});
Ext.define('PBS.prunesimulator.CalendarEvent', {
extend: 'Ext.form.field.ComboBox',
alias: 'widget.prunesimulatorCalendarEvent',
editable: true,
displayField: 'text',
valueField: 'value',
queryMode: 'local',
store: {
field: ['value', 'text'],
data: [
{ value: '0/2:00', text: "Every two hours" },
{ value: '0/6:00', text: "Every six hours" },
{ value: '2,22:30', text: "At 02:30 and 22:30" },
{ value: '00:00', text: "At 00:00" },
{ value: '08..17:00/30', text: "From 08:00 to 17:30 every 30 minutes" },
{ value: 'HOUR:MINUTE', text: "Custom schedule" },
],
},
tpl: [
'<ul class="x-list-plain"><tpl for=".">',
'<li role="option" class="x-boundlist-item">{text}</li>',
'</tpl></ul>',
],
displayTpl: [
'<tpl for=".">',
'{value}',
'</tpl>',
],
});
Ext.define('PBS.prunesimulator.DayOfWeekSelector', {
extend: 'Ext.form.field.ComboBox',
alias: 'widget.prunesimulatorDayOfWeekSelector',
editable: false,
displayField: 'text',
valueField: 'value',
queryMode: 'local',
store: {
field: ['value', 'text'],
data: [
{ value: 'mon', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[1]) },
{ value: 'tue', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[2]) },
{ value: 'wed', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[3]) },
{ value: 'thu', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[4]) },
{ value: 'fri', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[5]) },
{ value: 'sat', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[6]) },
{ value: 'sun', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[0]) },
],
},
});
Ext.define('pbs-prune-list', {
extend: 'Ext.data.Model',
fields: [
{
name: 'backuptime',
type: 'date',
dateFormat: 'timestamp',
},
{
name: 'mark',
type: 'string',
},
{
name: 'keepName',
type: 'string',
},
],
});
Ext.define('PBS.prunesimulator.PruneList', {
extend: 'Ext.panel.Panel',
alias: 'widget.prunesimulatorPruneList',
initComponent: function() {
let me = this;
if (!me.store) {
throw "no store specified";
}
me.items = [
{
xtype: 'grid',
store: me.store,
border: false,
columns: [
{
header: 'Backup Time',
dataIndex: 'backuptime',
renderer: function(value, metaData, record) {
let text = Ext.Date.format(value, 'Y-m-d H:i:s');
if (record.data.mark === 'keep') {
if (me.useColors) {
let bgColor = COLORS[record.data.keepName];
let textColor = TEXT_COLORS[record.data.keepName];
return '<div style="background-color: ' + bgColor + '; ' +
'color: ' + textColor + ';">' + text + '</div>';
} else {
return text;
}
} else {
return '<div style="text-decoration: line-through;">' + text + '</div>';
}
},
flex: 1,
sortable: false,
},
{
header: 'Keep (reason)',
dataIndex: 'mark',
renderer: function(value, metaData, record) {
if (record.data.mark === 'keep') {
if (record.data.keepCount) {
return 'keep (' + record.data.keepName +
': ' + record.data.keepCount + ')';
} else {
return 'keep (' + record.data.keepName + ')';
}
} else {
return value;
}
},
width: 200,
sortable: false,
},
],
},
];
me.callParent();
},
});
Ext.define('PBS.prunesimulator.WeekTable', {
extend: 'Ext.panel.Panel',
alias: 'widget.prunesimulatorWeekTable',
reload: function() {
let me = this;
let backups = me.store.data.items;
let html = '<table class="cal">';
let now = new Date(NOW.getTime());
let skip = 7 - parseInt(Ext.Date.format(now, 'N'), 10);
let tableStartDate = Ext.Date.add(now, Ext.Date.DAY, skip);
let bIndex = 0;
for (let i = 0; bIndex < backups.length; i++) {
html += '<tr>';
for (let j = 0; j < 7; j++) {
let date = Ext.Date.subtract(tableStartDate, Ext.Date.DAY, j + 7 * i);
let currentDay = Ext.Date.format(date, 'd/m/Y');
let dayOfWeekCls = Ext.Date.format(date, 'D').toLowerCase();
let firstOfMonthCls = Ext.Date.format(date, 'd') === '01'
? 'first-of-month'
: '';
html += `<td class="cal-day ${dayOfWeekCls} ${firstOfMonthCls}">`;
const isBackupOnDay = function(backup, day) {
return backup && Ext.Date.format(backup.data.backuptime, 'd/m/Y') === day;
};
let backup = backups[bIndex];
html += '<table><tr>';
html += `<th class="cal-day-date">${Ext.Date.format(date, 'D, d M Y')}</th>`;
while (isBackupOnDay(backup, currentDay)) {
html += '<tr><td>';
let text = Ext.Date.format(backup.data.backuptime, 'H:i');
if (backup.data.mark === 'remove') {
html += `<span class="strikethrough">${text}</span>`;
} else {
if (backup.data.keepCount) {
text += ` (${backup.data.keepName} ${backup.data.keepCount})`;
} else {
text += ` (${backup.data.keepName})`;
}
if (me.useColors) {
let bgColor = COLORS[backup.data.keepName];
let textColor = TEXT_COLORS[backup.data.keepName];
html += `<span style="background-color: ${bgColor};
color: ${textColor};">${text}</span>`;
} else {
html += `<span class="black">${text}</span>`;
}
}
html += '</td></tr>';
backup = backups[++bIndex];
}
html += '</table>';
html += '</div>';
html += '</td>';
}
html += '</tr>';
}
me.setHtml(html);
},
initComponent: function() {
let me = this;
if (!me.store) {
throw "no store specified";
}
let reload = function() {
me.reload();
};
me.store.on("datachanged", reload);
me.callParent();
me.reload();
},
});
Ext.define('PBS.PruneSimulatorKeepInput', {
extend: 'Ext.form.field.Number',
alias: 'widget.prunesimulatorKeepInput',
allowBlank: true,
fieldGroup: 'keep',
minValue: 1,
listeners: {
afterrender: function(field) {
this.triggers.clear.setVisible(field.value !== null);
},
change: function(field, newValue, oldValue) {
this.triggers.clear.setVisible(newValue !== null);
},
},
triggers: {
clear: {
cls: 'clear-trigger',
weight: -1,
handler: function() {
this.triggers.clear.setVisible(false);
this.setValue(null);
},
},
},
});
Ext.define('PBS.PruneSimulatorPanel', {
extend: 'Ext.panel.Panel',
alias: 'widget.prunesimulatorPanel',
viewModel: {
},
getValues: function() {
let me = this;
let values = {};
Ext.Array.each(me.query('[isFormField]'), function(field) {
let data = field.getSubmitData();
Ext.Object.each(data, function(name, val) {
values[name] = val;
});
});
return values;
},
controller: {
xclass: 'Ext.app.ViewController',
init: function(view) {
this.reloadFull(); // initial load
this.switchColor(true);
},
control: {
'field[fieldGroup=keep]': { change: 'reloadPrune' },
},
reloadFull: function() {
let me = this;
let view = me.getView();
let params = view.getValues();
let [hourSpec, minuteSpec] = params['schedule-time'].split(':');
if (!hourSpec || !minuteSpec) {
Ext.Msg.alert('Error', 'Invalid schedule');
return;
}
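// Expand a time specification (single values, comma lists, ranges "a..b",
// steps "a/b" and "*") into the list of matching values within
// [rangeMin, rangeMax]; throws on invalid or out-of-range input.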
let matchTimeSpec = function(timeSpec, rangeMin, rangeMax) {
let specValues = timeSpec.split(',');
let matches = {};
let assertValid = function(value) {
let num = Number(value);
if (isNaN(num)) {
throw value + " is not an integer";
} else if (value < rangeMin || value > rangeMax) {
throw "number '" + value + "' is not in the range '" + rangeMin + ".." + rangeMax + "'";
}
return num;
};
specValues.forEach(function(value) {
if (value.includes('..')) {
let [start, end] = value.split('..');
start = assertValid(start);
end = assertValid(end);
if (start > end) {
throw "interval start is bigger then interval end '" + start + " > " + end + "'";
}
for (let i = start; i <= end; i++) {
matches[i] = 1;
}
} else if (value.includes('/')) {
let [start, step] = value.split('/');
start = assertValid(start);
step = assertValid(step);
for (let i = start; i <= rangeMax; i += step) {
matches[i] = 1;
}
} else if (value === '*') {
for (let i = rangeMin; i <= rangeMax; i++) {
matches[i] = 1;
}
} else {
value = assertValid(value);
matches[value] = 1;
}
});
return Object.keys(matches);
};
let hours, minutes;
try {
hours = matchTimeSpec(hourSpec, 0, 23);
minutes = matchTimeSpec(minuteSpec, 0, 59);
} catch (err) {
Ext.Msg.alert('Error', err);
return;
}
let backups = me.populateFromSchedule(
params['schedule-weekdays'],
hours,
minutes,
params.numberOfWeeks,
);
me.pruneSelect(backups, params);
view.pruneStore.setData(backups);
},
reloadPrune: function() {
let me = this;
let view = me.getView();
let params = view.getValues();
let backups = [];
view.pruneStore.getData().items.forEach(function(item) {
backups.push({
backuptime: item.data.backuptime,
});
});
me.pruneSelect(backups, params);
view.pruneStore.setData(backups);
},
// backups are sorted descending by date
populateFromSchedule: function(weekdays, hours, minutes, weekCount) {
let weekdayFlags = [
weekdays.includes('sun'),
weekdays.includes('mon'),
weekdays.includes('tue'),
weekdays.includes('wed'),
weekdays.includes('thu'),
weekdays.includes('fri'),
weekdays.includes('sat'),
];
let todaysDate = new Date(NOW.getTime());
let timesOnSingleDay = [];
hours.forEach(function(hour) {
minutes.forEach(function(minute) {
todaysDate.setHours(hour);
todaysDate.setMinutes(minute);
timesOnSingleDay.push(todaysDate.getTime());
});
});
// sort recent times first, backups array below is ordered now -> past
timesOnSingleDay.sort((a, b) => b - a);
let backups = [];
for (let i = 0; i < 7 * weekCount; i++) {
let daysDate = Ext.Date.subtract(todaysDate, Ext.Date.DAY, i);
let weekday = parseInt(Ext.Date.format(daysDate, 'w'), 10);
if (weekdayFlags[weekday]) {
timesOnSingleDay.forEach(function(time) {
backups.push({
backuptime: Ext.Date.subtract(new Date(time), Ext.Date.DAY, i),
});
});
}
}
return backups;
},
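// Mark the newest backup of up to 'keepCount' distinct groups (as computed by
// idFunc, e.g. one group per day for keep-daily) as 'keep'; further backups in
// an already kept group are marked 'remove'. Backups that already carry a mark
// are skipped.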
pruneMark: function(backups, keepCount, keepName, idFunc) {
if (!keepCount) {
return;
}
let alreadyIncluded = {};
let newlyIncluded = {};
let newlyIncludedCount = 0;
let finished = false;
backups.forEach(function(backup) {
let mark = backup.mark;
if (mark && mark === 'keep') {
let id = idFunc(backup);
alreadyIncluded[id] = true;
}
});
backups.forEach(function(backup) {
let mark = backup.mark;
let id = idFunc(backup);
if (finished || alreadyIncluded[id] || mark) {
return;
}
if (!newlyIncluded[id]) {
if (newlyIncludedCount >= keepCount) {
finished = true;
return;
}
newlyIncluded[id] = true;
newlyIncludedCount++;
backup.mark = 'keep';
backup.keepName = keepName;
backup.keepCount = newlyIncludedCount;
} else {
backup.mark = 'remove';
}
});
},
// backups need to be sorted descending by date
pruneSelect: function(backups, keepParams) {
let me = this;
if (Number(keepParams['keep-last']) +
Number(keepParams['keep-hourly']) +
Number(keepParams['keep-daily']) +
Number(keepParams['keep-weekly']) +
Number(keepParams['keep-monthly']) +
Number(keepParams['keep-yearly']) === 0) {
backups.forEach(function(backup) {
backup.mark = 'keep';
backup.keepName = 'keep-all';
});
return;
}
me.pruneMark(backups, keepParams['keep-last'], 'keep-last', function(backup) {
return backup.backuptime;
});
me.pruneMark(backups, keepParams['keep-hourly'], 'keep-hourly', function(backup) {
return Ext.Date.format(backup.backuptime, 'H/d/m/Y');
});
me.pruneMark(backups, keepParams['keep-daily'], 'keep-daily', function(backup) {
return Ext.Date.format(backup.backuptime, 'd/m/Y');
});
me.pruneMark(backups, keepParams['keep-weekly'], 'keep-weekly', function(backup) {
// ISO-8601 week and week-based year
return Ext.Date.format(backup.backuptime, 'W/o');
});
me.pruneMark(backups, keepParams['keep-monthly'], 'keep-monthly', function(backup) {
return Ext.Date.format(backup.backuptime, 'm/Y');
});
me.pruneMark(backups, keepParams['keep-yearly'], 'keep-yearly', function(backup) {
return Ext.Date.format(backup.backuptime, 'Y');
});
backups.forEach(function(backup) {
backup.mark = backup.mark || 'remove';
});
},
toggleColors: function(checkbox, checked) {
this.switchColor(checked);
},
switchColor: function(useColors) {
let me = this;
let view = me.getView();
const getStyle = name =>
`background-color: ${COLORS[name]}; color: ${TEXT_COLORS[name]};`;
for (const field of view.query('[isFormField]')) {
if (field.fieldGroup !== 'keep') {
continue;
}
if (useColors) {
field.setFieldStyle(getStyle(field.name));
} else {
field.setFieldStyle('background-color: white; color: #444;');
}
}
me.lookup('weekTable').useColors = useColors;
me.lookup('pruneList').useColors = useColors;
me.reloadPrune();
},
},
keepItems: [
{
xtype: 'prunesimulatorKeepInput',
name: 'keep-last',
fieldLabel: 'keep-last',
value: 4,
},
{
xtype: 'prunesimulatorKeepInput',
name: 'keep-hourly',
fieldLabel: 'keep-hourly',
},
{
xtype: 'prunesimulatorKeepInput',
name: 'keep-daily',
fieldLabel: 'keep-daily',
value: 5,
},
{
xtype: 'prunesimulatorKeepInput',
name: 'keep-weekly',
fieldLabel: 'keep-weekly',
value: 2,
},
{
xtype: 'prunesimulatorKeepInput',
name: 'keep-monthly',
fieldLabel: 'keep-monthly',
},
{
xtype: 'prunesimulatorKeepInput',
name: 'keep-yearly',
fieldLabel: 'keep-yearly',
},
],
initComponent: function() {
var me = this;
me.pruneStore = Ext.create('Ext.data.Store', {
model: 'pbs-prune-list',
sorters: { property: 'backuptime', direction: 'DESC' },
});
me.items = [
{
xtype: 'panel',
layout: {
type: 'hbox',
align: 'stretch',
},
border: false,
items: [
{
title: 'View',
layout: 'anchor',
flex: 1,
border: false,
bodyPadding: 10,
items: [
{
xtype: 'checkbox',
name: 'showCalendar',
reference: 'showCalendar',
fieldLabel: 'Show Calendar:',
checked: true,
},
{
xtype: 'checkbox',
name: 'showColors',
reference: 'showColors',
fieldLabel: 'Show Colors:',
checked: true,
handler: 'toggleColors',
},
],
},
{ xtype: "panel", width: 1, border: 1 },
{
xtype: 'form',
layout: 'anchor',
flex: 1,
border: false,
title: 'Simulated Backup Schedule',
defaults: {
labelWidth: 120,
},
bodyPadding: 10,
items: [
{
xtype: 'prunesimulatorDayOfWeekSelector',
name: 'schedule-weekdays',
fieldLabel: 'Day of week',
value: ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'],
allowBlank: false,
multiSelect: true,
padding: '0 0 0 10',
},
{
xtype: 'prunesimulatorCalendarEvent',
name: 'schedule-time',
allowBlank: false,
value: '0/6:00',
fieldLabel: 'Backup schedule',
padding: '0 0 0 10',
},
{
xtype: 'numberfield',
name: 'numberOfWeeks',
allowBlank: false,
fieldLabel: 'Number of weeks',
minValue: 1,
value: 15,
maxValue: 260, // five years
padding: '0 0 0 10',
},
{
xtype: 'button',
name: 'schedule-button',
text: 'Update Schedule',
formBind: true,
handler: 'reloadFull',
},
],
},
],
},
{
xtype: 'panel',
layout: {
type: 'hbox',
align: 'stretch',
},
flex: 1,
border: false,
items: [
{
layout: 'anchor',
title: 'Prune Options',
border: false,
bodyPadding: 10,
scrollable: true,
items: me.keepItems,
flex: 1,
},
{ xtype: "panel", width: 1, border: 1 },
{
layout: 'fit',
title: 'Backups',
border: false,
xtype: 'prunesimulatorPruneList',
store: me.pruneStore,
reference: 'pruneList',
flex: 1,
},
],
},
{
layout: 'anchor',
title: 'Calendar',
autoScroll: true,
flex: 2,
xtype: 'prunesimulatorWeekTable',
reference: 'weekTable',
store: me.pruneStore,
bind: {
hidden: '{!showCalendar.checked}',
},
},
];
me.callParent();
},
});
Ext.create('Ext.container.Viewport', {
layout: 'border',
renderTo: Ext.getBody(),
items: [
{
xtype: 'prunesimulatorPanel',
title: 'Proxmox Backup Server - Prune Simulator',
region: 'west',
layout: {
type: 'vbox',
align: 'stretch',
pack: 'start',
},
flex: 3,
maxWidth: 1090,
},
{
xtype: 'prunesimulatorDocumentation',
title: 'Usage',
border: false,
flex: 2,
region: 'center',
},
],
});
});

View File

@ -1,6 +1,8 @@
Storage
=======
.. _storage_disk_management:
Disk Management
---------------
@ -57,7 +59,7 @@ create a datastore at the location ``/mnt/datastore/store1``:
You can also create a ``zpool`` with various raid levels from **Administration
-> Disks -> Zpool** in the web interface, or by using ``zpool create``. The command
below creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and
mounts it on the root directory (default):
mounts it under ``/mnt/datastore/zpool1``:
.. code-block:: console
@ -85,7 +87,7 @@ display S.M.A.R.T. attributes from the web interface or by using the command:
.. _datastore_intro:
:term:`DataStore`
:term:`Datastore`
-----------------
A datastore refers to a location at which backups are stored. The current
@ -107,7 +109,7 @@ is stored in the file ``/etc/proxmox-backup/datastore.cfg``.
Datastore Configuration
~~~~~~~~~~~~~~~~~~~~~~~
.. image:: images/screenshots/pbs-gui-datastore.png
.. image:: images/screenshots/pbs-gui-datastore-content.png
:align: right
:alt: Datastore Overview
@ -121,14 +123,17 @@ number of backups to keep in that store. :ref:`backup-pruning` and
periodically based on a configured schedule (see :ref:`calendar-events`) per datastore.
.. _storage_datastore_create:
Creating a Datastore
^^^^^^^^^^^^^^^^^^^^
.. image:: images/screenshots/pbs-gui-datastore-create-general.png
:align: right
:alt: Create a datastore
You can create a new datastore from the web GUI, by navigating to **Datastore** in
the menu tree and clicking **Create**. Here:
You can create a new datastore from the web interface, by clicking **Add
Datastore** in the side menu, under the **Datastore** section. In the setup
window:
* *Name* refers to the name of the datastore
* *Backing Path* is the path to the directory upon which you want to create the
@ -136,7 +141,9 @@ the menu tree and clicking **Create**. Here:
* *GC Schedule* refers to the time and intervals at which garbage collection
runs
* *Prune Schedule* refers to the frequency at which pruning takes place
* *Prune Options* set the amount of backups which you would like to keep (see :ref:`backup-pruning`).
* *Prune Options* set the amount of backups which you would like to keep (see
:ref:`backup-pruning`).
* *Comment* can be used to add some contextual information to the datastore.
Alternatively you can create a new datastore from the command line. The
following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`

View File

@ -1,3 +1,5 @@
.. _sysadmin_host_administration:
Host System Administration
==========================

732
docs/tape-backup.rst Normal file
View File

@ -0,0 +1,732 @@
Tape Backup
===========
Proxmox tape backup provides an easy way to store datastore content
onto magnetic tapes. This increases data safety because you get:
- an additional copy of the data
- to a different media type (tape)
- to an additional location (you can move tapes offsite)
In most restore jobs, only data from the last backup job is restored.
Restore requests further decline the older the data
gets. Considering this, tape backup may also help to reduce disk
usage, because you can safely remove data from disk once archived on
tape. This is especially true if you need to keep data for several
years.
Tape backups do not provide random access to the stored data. Instead,
you need to restore the data to disk before you can access it
again. Also, if you store your tapes offsite (using some kind of tape
vaulting service), you need to bring them onsite before you can do any
restore. So please consider that restores from tapes can take much
longer than restores from disk.
Tape Technology Primer
----------------------
.. _Linear Tape Open: https://en.wikipedia.org/wiki/Linear_Tape-Open
As of 2021, the only broadly available tape technology standard is
`Linear Tape Open`_, and different vendors offer LTO Ultrium tape
drives, autoloaders and LTO tape cartridges.
There are a few vendors offering proprietary drives with
slight advantages in performance and capacity, but they have
significant disadvantages:
- proprietary (single vendor)
- a much higher purchase cost
So we currently do not test such drives.
In general, LTO tapes offer the following advantages:
- Durable (30 years)
- High Capacity (12 TB)
- Relatively low cost per TB
- Cold Media
- Movable (storable inside vault)
- Multiple vendors (for both media and drives)
- Built-in AES-GCM encryption engine
Please note that `Proxmox Backup Server` already stores compressed
data, so we do not need/use the tape compression feature.
Supported Hardware
------------------
Proxmox Backup Server supports `Linear Tape Open`_ generation 4 (LTO4)
or later. In general, all SCSI2 tape drives supported by the Linux
kernel should work, but features like hardware encryption need LTO4
or later.
Tape changer support is done using the Linux 'mtx' command line
tool. So any changer device supported by that tool should work.
Drive Performance
~~~~~~~~~~~~~~~~~
Current LTO-8 tapes provide read/write speeds of up to 360 MB/s. This means
that it still takes a minimum of 9 hours to completely write or
read a single tape (even at maximum speed).
The only way to speed up that data rate is to use more than one
drive. That way you can run several backup jobs in parallel, or run
restore jobs while the other drives are used for backups.
Also consider that you need to read data first from your datastore
(disk). But a single spinning disk is unable to deliver data at this
rate. We measured a maximum rate of about 60MB/s to 100MB/s in practice,
so it takes 33 hours to read 12TB to fill up an LTO-8 tape. If you want
to run your tape at full speed, please make sure that the source
datastore is able to deliver that performance (e.g., by using SSDs).
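As a rough sanity check of these numbers (assuming 12 TB of native LTO-8
capacity and the sustained rates mentioned above)::
12 TB / 360 MB/s ≈ 33,000 s ≈ 9 hours (tape running at full speed)
12 TB / 100 MB/s ≈ 120,000 s ≈ 33 hours (single spinning disk as source)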
Terminology
-----------
:Tape Labels: are used to uniquely identify a tape. You normally use
some sticky paper labels and apply them on the front of the
cartridge. We additionally store the label text magnetically on the
tape (first file on tape).
.. _Code 39: https://en.wikipedia.org/wiki/Code_39
.. _LTO Ultrium Cartridge Label Specification: https://www.ibm.com/support/pages/ibm-lto-ultrium-cartridge-label-specification
.. _LTO Barcode Generator: lto-barcode/index.html
:Barcodes: are a special form of tape labels, which are electronically
readable. Most LTO tape robots use an 8 character string encoded as
`Code 39`_, as defined in the `LTO Ultrium Cartridge Label
Specification`_.
You can either buy such barcode labels from your cartridge vendor,
or print them yourself. You can use our `LTO Barcode Generator`_ App
for that.
.. Note:: Physical labels and the associated adhesive shall have an
environmental performance to match or exceed the environmental
specifications of the cartridge to which they are applied.
:Media Pools: A media pool is a logical container for tapes. A backup
job targets one media pool, so a job only uses tapes from that
pool. The pool additionally defines how long a backup job can
append data to tapes (allocation policy) and how long you want to
keep the data (retention policy).
:Media Set: A group of continuously written tapes (all from the same
media pool).
:Tape drive: The device used to read and write data to the tape. There
are standalone drives, but drives often ship within tape libraries.
:Tape changer: A device which can change the tapes inside a tape drive
(tape robot). They are usually part of a tape library.
.. _Tape Library: https://en.wikipedia.org/wiki/Tape_library
:`Tape library`_: A storage device that contains one or more tape drives,
a number of slots to hold tape cartridges, a barcode reader to
identify tape cartridges and an automated method for loading tapes
(a robot).
People also call this 'autoloader', 'tape robot' or 'tape jukebox'.
:Inventory: The inventory stores the list of known tapes (with
additional status information).
:Catalog: A media catalog stores information about the media content.
Tape Quickstart
---------------
1. Configure your tape hardware (drives and changers)
2. Configure one or more media pools
3. Label your tape cartridges.
4. Start your first tape backup job ...
Configuration
-------------
Please note that you can configure anything using the graphical user
interface or the command line interface. Both methods result in the
same configuration.
Tape changers
~~~~~~~~~~~~~
Tape changers (robots) are part of a `Tape Library`_. You can skip
this step if you are using a standalone drive.
Linux is able to auto detect those devices, and you can get a list
of available devices using::
# proxmox-tape changer scan
┌─────────────────────────────┬─────────┬──────────────┬────────┐
│ path │ vendor │ model │ serial │
╞═════════════════════════════╪═════════╪══════════════╪════════╡
│ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
└─────────────────────────────┴─────────┴──────────────┴────────┘
In order to use that device with Proxmox, you need to create a
configuration entry::
# proxmox-tape changer create sl3 --path /dev/tape/by-id/scsi-CC2C52
Where ``sl3`` is an arbitrary name you can choose.
.. Note:: Please use stable device path names from inside
``/dev/tape/by-id/``. Names like ``/dev/sg0`` may point to a
different device after reboot, and that is not what you want.
You can show the final configuration with::
# proxmox-tape changer config sl3
┌──────┬─────────────────────────────┐
│ Name │ Value │
╞══════╪═════════════════════════════╡
│ name │ sl3 │
├──────┼─────────────────────────────┤
│ path │ /dev/tape/by-id/scsi-CC2C52 │
└──────┴─────────────────────────────┘
Or simply list all configured changer devices::
# proxmox-tape changer list
┌──────┬─────────────────────────────┬─────────┬──────────────┬────────────┐
│ name │ path │ vendor │ model │ serial │
╞══════╪═════════════════════════════╪═════════╪══════════════╪════════════╡
│ sl3 │ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
└──────┴─────────────────────────────┴─────────┴──────────────┴────────────┘
The Vendor, Model and Serial number are auto detected, but only shown
if the device is online.
To test your setup, please query the status of the changer device with::
# proxmox-tape changer status sl3
┌───────────────┬──────────┬────────────┬─────────────┐
│ entry-kind │ entry-id │ changer-id │ loaded-slot │
╞═══════════════╪══════════╪════════════╪═════════════╡
│ drive │ 0 │ vtape1 │ 1 │
├───────────────┼──────────┼────────────┼─────────────┤
│ slot │ 1 │ │ │
├───────────────┼──────────┼────────────┼─────────────┤
│ slot │ 2 │ vtape2 │ │
├───────────────┼──────────┼────────────┼─────────────┤
│ ... │ ... │ │ │
├───────────────┼──────────┼────────────┼─────────────┤
│ slot │ 16 │ │ │
└───────────────┴──────────┴────────────┴─────────────┘
Tape libraries usually provide some special import/export slots (also
called "mail slots"). Tapes inside those slots are acessible from
outside, making it easy to add/remove tapes to/from the library. Those
tapes are considered to be "offline", so backup jobs will not use
them. Those special slots are auto-detected and marked as
``import-export`` slot in the status command.
It's worth noting that some of the smaller tape libraries don't have
such slots. While they have something called "Mail Slot", that slot
is just a way to grab the tape from the gripper. But they are unable
to hold media while the robot does other things. They also do not
expose that "Mail Slot" over the SCSI interface, so you wont see them in
the status output.
As a workaround, you can mark some of the normal slots as export
slots. The software treats those slots like real ``import-export``
slots, and the media inside those slots is considered to be 'offline'
(not available for backup)::
# proxmox-tape changer update sl3 --export-slots 15,16
After that, you can see those artificial ``import-export`` slots in
the status output::
# proxmox-tape changer status sl3
┌───────────────┬──────────┬────────────┬─────────────┐
│ entry-kind │ entry-id │ changer-id │ loaded-slot │
╞═══════════════╪══════════╪════════════╪═════════════╡
│ drive │ 0 │ vtape1 │ 1 │
├───────────────┼──────────┼────────────┼─────────────┤
│ import-export │ 15 │ │ │
├───────────────┼──────────┼────────────┼─────────────┤
│ import-export │ 16 │ │ │
├───────────────┼──────────┼────────────┼─────────────┤
│ slot │ 1 │ │ │
├───────────────┼──────────┼────────────┼─────────────┤
│ slot │ 2 │ vtape2 │ │
├───────────────┼──────────┼────────────┼─────────────┤
│ ... │ ... │ │ │
├───────────────┼──────────┼────────────┼─────────────┤
│ slot │ 14 │ │ │
└───────────────┴──────────┴────────────┴─────────────┘
Tape drives
~~~~~~~~~~~
Linux is able to auto detect tape drives, and you can get a list
of available tape drives using::
# proxmox-tape drive scan
┌────────────────────────────────┬────────┬─────────────┬────────┐
│ path │ vendor │ model │ serial │
╞════════════════════════════════╪════════╪═════════════╪════════╡
│ /dev/tape/by-id/scsi-12345-nst │ IBM │ ULT3580-TD4 │ 12345 │
└────────────────────────────────┴────────┴─────────────┴────────┘
In order to use that drive with Proxmox, you need to create a
configuration entry::
# proxmox-tape drive create mydrive --path /dev/tape/by-id/scsi-12345-nst
.. Note:: Please use stable device path names from inside
``/dev/tape/by-id/``. Names like ``/dev/nst0`` may point to a
different device after reboot, and that is not what you want.
If you have a tape library, you also need to set the associated
changer device::
# proxmox-tape drive update mydrive --changer sl3 --changer-drivenum 0
The ``--changer-drivenum`` is only necessary if the tape library
includes more than one drive (The changer status command lists all
drivenums).
You can show the final configuration with::
# proxmox-tape drive config mydrive
┌─────────┬────────────────────────────────┐
│ Name │ Value │
╞═════════╪════════════════════════════════╡
│ name │ mydrive │
├─────────┼────────────────────────────────┤
│ path │ /dev/tape/by-id/scsi-12345-nst │
├─────────┼────────────────────────────────┤
│ changer │ sl3 │
└─────────┴────────────────────────────────┘
.. NOTE:: The ``changer-drivenum`` value 0 is not stored in the
configuration, because that is the default.
To list all configured drives use::
# proxmox-tape drive list
┌──────────┬────────────────────────────────┬─────────┬────────┬─────────────┬────────┐
│ name │ path │ changer │ vendor │ model │ serial │
╞══════════╪════════════════════════════════╪═════════╪════════╪═════════════╪════════╡
│ mydrive │ /dev/tape/by-id/scsi-12345-nst │ sl3 │ IBM │ ULT3580-TD4 │ 12345 │
└──────────┴────────────────────────────────┴─────────┴────────┴─────────────┴────────┘
The Vendor, Model and Serial number are auto detected, but only shown
if the device is online.
For testing, you can simply query the drive status with::
# proxmox-tape status --drive mydrive
┌───────────┬────────────────────────┐
│ Name │ Value │
╞═══════════╪════════════════════════╡
│ blocksize │ 0 │
├───────────┼────────────────────────┤
│ status │ DRIVE_OPEN | IM_REP_EN │
└───────────┴────────────────────────┘
.. NOTE:: Blocksize should always be 0 (variable block size
mode). This is the default anyway.
Media Pools
~~~~~~~~~~~
A media pool is a logical container for tapes. A backup job targets
one media pool, so a job only uses tapes from that pool.
.. topic:: Media Set
A media set is a group of continuously written tapes, used to split
the larger pool into smaller, restorable units. One or more backup
jobs write to a media set, producing an ordered group of
tapes. Media sets are identified by a unique ID. That ID and the
sequence number are stored on each tape of that set (tape label).
Media sets are the basic unit for restore tasks, i.e. you need all
tapes in the set to restore the media set content. Data is fully
deduplicated inside a media set.
.. topic:: Media Set Allocation Policy
The pool additionally defines how long backup jobs can append data
to a media set. The following settings are possible:
- Try to use the current media set.
This setting produces one large media set. While this is very
space efficient (deduplication, no unused space), it can lead to
long restore times, because restore jobs need to read all tapes in the
set.
.. NOTE:: Data is fully deduplicated inside a media set. That
also means that data is randomly distributed over the tapes in
the set. So even if you restore a single VM, data may have to be
read from all tapes inside the media set.
Larger media sets are also more error-prone, because a single
damaged tape makes the restore fail.
Usage scenario: Mostly used with tape libraries, where you manually
trigger new set creation by running a backup job with the
``--export-media-set`` option (see below).
.. NOTE:: Retention period starts with the existence of a newer
media set.
- Always create a new media set.
With this setting, each backup job creates a new media set. This
is less space efficient, because the last tape of each set
may not be fully written, leaving the remaining space unused.
The advantage is that this produces media sets of minimal
size. Small sets are easier to handle, can be moved to an
off-site vault, and restores are much faster.
.. NOTE:: Retention period starts with the creation time of the
media set.
- Create a new set when the specified Calendar Event triggers.
.. _systemd.time manpage: https://manpages.debian.org/buster/systemd/systemd.time.7.en.html
This allows you to specify points in time by using systemd-like
Calendar Event specifications (see the `systemd.time manpage`_).
For example, the value ``weekly`` (or ``Mon *-*-* 00:00:00``)
will create a new set each week.
This balances space efficiency against media count.
.. NOTE:: Retention period starts when the calendar event
triggers.
Additionally, the following events may allocate a new media set:
- Required tape is offline (and you use a tape library).
- Current set contains damaged or retired tapes.
- Media pool encryption changed
- Database consistency errors, e.g. if the inventory does not
contain the required media info, or contains conflicting
information (outdated data).
.. topic:: Retention Policy
Defines how long we want to keep the data.
- Always overwrite media.
- Protect data for the duration specified.
We use systemd-like time spans to specify durations, e.g. ``2
weeks`` (see the `systemd.time manpage`_).
- Never overwrite data.
.. topic:: Hardware Encryption
LTO4 (or later) tape drives support hardware encryption. If you
configure the media pool to use encryption, all data written to the
tapes is encrypted using the configured key.
That way, unauthorized users cannot read data from the media,
e.g. if you lose a tape while shipping it to an offsite location.
.. Note:: If the backup client also encrypts data, data on tape
will be double encrypted.
The password-protected key is stored on each medium, so it is
possible to `restore the key <restore_encryption_key_>`_ using the password. Please make sure
you remember the password in case you need to restore the key.
.. NOTE:: FIXME: Add note about global content namespace. (We do not store
the source datastore, so it is impossible to distinguish
store1:/vm/100 from store2:/vm/100. Please use different media
pools if the source is from a different name space)
The following command creates a new media pool::
// proxmox-tape pool create <name> --drive <string> [OPTIONS]
# proxmox-tape pool create daily --drive mydrive
Additional options can be set later using the update command::
# proxmox-tape pool update daily --allocation daily --retention 7days
To list all configured pools use::
# proxmox-tape pool list
┌───────┬──────────┬────────────┬───────────┬──────────┐
│ name │ drive │ allocation │ retention │ template │
╞═══════╪══════════╪════════════╪═══════════╪══════════╡
│ daily │ mydrive │ daily │ 7days │ │
└───────┴──────────┴────────────┴───────────┴──────────┘
Tape Jobs
~~~~~~~~~
Administration
--------------
Many sub-commands of the ``proxmox-tape`` command line tool take a
parameter called ``--drive``, which specifies the tape drive you want
to work on. For convenience, you can set that in an environment
variable::
# export PROXMOX_TAPE_DRIVE=mydrive
You can then omit the ``--drive`` parameter from the command. If the
drive has an associated changer device, you may also omit the changer
parameter from commands that need a changer device, for example::
# proxmox-tape changer status
This should display the status of the changer device associated with
drive ``mydrive``.
Label Tapes
~~~~~~~~~~~
By default, tape cartridges all look the same, so you need to put a
label on them for unique identification. First, put a sticky paper
label with some human-readable text on the cartridge.
If you use a `Tape Library`_, you should use an 8 character string
encoded as `Code 39`_, as defined in the `LTO Ultrium Cartridge Label
Specification`_. You can either buy such barcode labels from your
cartridge vendor, or print them yourself. You can use our `LTO Barcode
Generator`_ App for that.
Next, you need to write that same label text to the tape, so that the
software can uniquely identify the tape too.
For a standalone drive, manually insert the new tape cartridge into the
drive and run::
# proxmox-tape label --changer-id <label-text> [--pool <pool-name>]
You may omit the ``--pool`` argument to allow the tape to be used by any pool.
.. Note:: For safety reasons, this command fails if the tape contains
any data. If you want to overwrite it anyway, erase the tape first.
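As a minimal sketch of that (assuming the ``erase`` subcommand and that the
correct drive is selected via ``--drive`` or the ``PROXMOX_TAPE_DRIVE``
environment variable described above)::
# proxmox-tape erase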
You can verify success by reading back the label::
# proxmox-tape read-label
┌─────────────────┬──────────────────────────────────────┐
│ Name │ Value │
╞═════════════════╪══════════════════════════════════════╡
│ changer-id │ vtape1 │
├─────────────────┼──────────────────────────────────────┤
│ uuid │ 7f42c4dd-9626-4d89-9f2b-c7bc6da7d533 │
├─────────────────┼──────────────────────────────────────┤
│ ctime │ Wed Jan 6 09:07:51 2021 │
├─────────────────┼──────────────────────────────────────┤
│ pool │ daily │
├─────────────────┼──────────────────────────────────────┤
│ media-set-uuid │ 00000000-0000-0000-0000-000000000000 │
├─────────────────┼──────────────────────────────────────┤
│ media-set-ctime │ Wed Jan 6 09:07:51 2021 │
└─────────────────┴──────────────────────────────────────┘
.. NOTE:: The ``media-set-uuid`` using all zeros indicates an empty
tape (not used by any media set).
If you have a tape library, apply the sticky barcode label to the tape
cartridges first. Then load those empty tapes into the library. You
can then label all unlabeled tapes with a single command::
# proxmox-tape barcode-label [--pool <pool-name>]
Run Tape Backups
~~~~~~~~~~~~~~~~
To manually run a backup job use::
# proxmox-tape backup <store> <pool> [OPTIONS]
The following options are available:
--eject-media Eject media upon job completion.
It is normally good practice to eject the tape after use. This unmounts the
tape from the drive and prevents the tape from getting dirty with dust.
--export-media-set Export media set upon job completion.
After a successful backup job, this moves all tapes from the used
media set into import-export slots. The operator can then pick up
those tapes and move them to a media vault.
Restore from Tape
~~~~~~~~~~~~~~~~~
Restore is done at media-set granularity, so you first need to find
out which media set contains the data you want to restore. This
information is stored in the media catalog. If you do not have media
catalogs, you need to restore them first. Please note that you need
the catalog to find your data, but restoring a complete media-set does
not need media catalogs.
The following command shows the media content (from catalog)::
# proxmox-tape media content
┌────────────┬──────┬──────────────────────────┬────────┬────────────────────────────────┬──────────────────────────────────────┐
│ label-text │ pool │ media-set-name │ seq-nr │ snapshot │ media-set-uuid │
╞════════════╪══════╪══════════════════════════╪════════╪════════════════════════════════╪══════════════════════════════════════╡
│ TEST01L8 │ p2 │ Wed Jan 13 13:55:55 2021 │ 0 │ vm/201/2021-01-11T10:43:48Z │ 9da37a55-aac7-4deb-91c6-482b3b675f30 │
├────────────┼──────┼──────────────────────────┼────────┼────────────────────────────────┼──────────────────────────────────────┤
│ ... │ ... │ ... │ ... │ ... │ ... │
└────────────┴──────┴──────────────────────────┴────────┴────────────────────────────────┴──────────────────────────────────────┘
A restore job reads the data from the media set and writes it back
to disk (into the datastore)::
// proxmox-tape restore <media-set-uuid> <datastore>
# proxmox-tape restore 9da37a55-aac7-4deb-91c6-482b3b675f30 mystore
Update Inventory
~~~~~~~~~~~~~~~~
Restore Catalog
~~~~~~~~~~~~~~~
Encryption Key Management
~~~~~~~~~~~~~~~~~~~~~~~~~
Creating a new encryption key::
# proxmox-tape key create --hint "tape pw 2020"
Tape Encryption Key Password: **********
Verify Password: **********
"14:f8:79:b9:f5:13:e5:dc:bf:b6:f9:88:48:51:81:dc:79:bf:a0:22:68:47:d1:73:35:2d:b6:20:e1:7f:f5:0f"
List existing encryption keys::
# proxmox-tape key list
┌───────────────────────────────────────────────────┬───────────────┐
│ fingerprint │ hint │
╞═══════════════════════════════════════════════════╪═══════════════╡
│ 14:f8:79:b9:f5:13:e5:dc: ... :b6:20:e1:7f:f5:0f │ tape pw 2020 │
└───────────────────────────────────────────────────┴───────────────┘
To show encryption key details::
# proxmox-tape key show 14:f8:79:b9:f5:13:e5:dc:...:b6:20:e1:7f:f5:0f
┌─────────────┬───────────────────────────────────────────────┐
│ Name │ Value │
╞═════════════╪═══════════════════════════════════════════════╡
│ kdf │ scrypt │
├─────────────┼───────────────────────────────────────────────┤
│ created │ Sat Jan 23 14:47:21 2021 │
├─────────────┼───────────────────────────────────────────────┤
│ modified │ Sat Jan 23 14:47:21 2021 │
├─────────────┼───────────────────────────────────────────────┤
│ fingerprint │ 14:f8:79:b9:f5:13:e5:dc:...:b6:20:e1:7f:f5:0f │
├─────────────┼───────────────────────────────────────────────┤
│ hint │ tape pw 2020 │
└─────────────┴───────────────────────────────────────────────┘
The ``paperkey`` subcommand can be used to create a QR encoded
version of a tape encryption key. The following command sends the output of the
``paperkey`` command to a text file, for easy printing::
proxmox-tape key paperkey <fingerprint> --output-format text > qrkey.txt
.. _restore_encryption_key:
Restoring Encryption Keys
^^^^^^^^^^^^^^^^^^^^^^^^^
You can restore the encryption key from the tape, using the password
used to generate the key. First, load the tape from which you want to
restore the key into the drive. Then run::
# proxmox-tape key restore
Tape Encryption Key Password: ***********
If the password is correct, the key will be imported into the
database. Further restore jobs automatically use any available key.
Tape Cleaning
~~~~~~~~~~~~~
LTO tape drives require regular cleaning. This is done by loading a
cleaning cartridge into the drive, which is a manual task for
standalone drives.
For tape libraries, cleaning cartridges are identified using special
labels starting with letters "CLN". For example, our tape library has a
cleaning cartridge inside slot 3::
# proxmox-tape changer status sl3
┌───────────────┬──────────┬────────────┬─────────────┐
│ entry-kind │ entry-id │ changer-id │ loaded-slot │
╞═══════════════╪══════════╪════════════╪═════════════╡
│ drive │ 0 │ vtape1 │ 1 │
├───────────────┼──────────┼────────────┼─────────────┤
│ slot │ 1 │ │ │
├───────────────┼──────────┼────────────┼─────────────┤
│ slot │ 2 │ vtape2 │ │
├───────────────┼──────────┼────────────┼─────────────┤
│ slot │ 3 │ CLN001CU │ │
├───────────────┼──────────┼────────────┼─────────────┤
│ ... │ ... │ │ │
└───────────────┴──────────┴────────────┴─────────────┘
To initiate a cleaning operation simply run::
# proxmox-tape clean
This command does the following:
- find the cleaning tape (in slot 3)
- unload the current media from the drive (back to slot 1)
- load the cleaning tape into the drive
- run drive cleaning operation
- unload the cleaning tape (to slot 3)

View File

@ -41,11 +41,12 @@ users:
:alt: Add a new user
The superuser has full administration rights on everything, so you
normally want to add other users with less privileges. You can create a new
user with the ``user create`` subcommand or through the web interface, under
**Configuration -> User Management**. The ``create`` subcommand lets you specify
many options like ``--email`` or ``--password``. You can update or change any
user properties using the ``update`` subcommand later (**Edit** in the GUI):
normally want to add other users with less privileges. You can add a new
user with the ``user create`` subcommand or through the web
interface, under the **User Management** tab of **Configuration -> Access
Control**. The ``create`` subcommand lets you specify many options like
``--email`` or ``--password``. You can update or change any user properties
using the ``update`` subcommand later (**Edit** in the GUI):
.. code-block:: console
@ -70,7 +71,7 @@ The resulting user list looks like this:
│ root@pam │ 1 │ │ │ │ │ Superuser │
└──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘
Newly created users do not have any permissions. Please read the next
Newly created users do not have any permissions. Please read the Access Control
section to learn how to set access permissions.
If you want to disable a user account, you can do that by setting ``--enable`` to ``0``
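For example, a minimal sketch using the ``update`` subcommand mentioned above:
.. code-block:: console
# proxmox-backup-manager user update john@pbs --enable 0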
@ -85,15 +86,77 @@ Or completely remove the user with:
# proxmox-backup-manager user remove john@pbs
.. _user_tokens:
API Tokens
----------
.. image:: images/screenshots/pbs-gui-apitoken-overview.png
:align: right
:alt: API Token Overview
Any authenticated user can generate API tokens which can in turn be used to
configure various clients, instead of directly providing the username and
password.
API tokens serve two purposes:
#. Easy revocation in case a client gets compromised
#. Limit permissions for each client/token within the user's permissions
An API token consists of two parts: an identifier consisting of the user name,
the realm and a tokenname (``user@realm!tokenname``), and a secret value. Both
need to be provided to the client in place of the user ID (``user@realm``) and
the user password, respectively.
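For example, a hypothetical client invocation using such a token in place of a
user/password pair (assuming a datastore named ``store1`` and passing the token
secret via the ``PBS_PASSWORD`` environment variable) could look like:
.. code-block:: console
# PBS_PASSWORD=d63e505a-e3ec-449a-9bc7-1da610d4ccde proxmox-backup-client list --repository 'john@pbs!client1@localhost:store1'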
.. image:: images/screenshots/pbs-gui-apitoken-secret-value.png
:align: right
:alt: API secret value
The API token is passed from the client to the server by setting the
``Authorization`` HTTP header with method ``PBSAPIToken`` to the value
``TOKENID:TOKENSECRET``.
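As a minimal illustration (assuming the header value takes the exact form
``PBSAPIToken=TOKENID:TOKENSECRET`` and reusing the example token shown below;
``-k`` merely skips certificate verification on self-signed setups), a raw API
call could look like:
.. code-block:: console
# curl -k -H 'Authorization: PBSAPIToken=john@pbs!client1:d63e505a-e3ec-449a-9bc7-1da610d4ccde' https://localhost:8007/api2/json/version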
Generating new tokens can be done using ``proxmox-backup-manager`` or the GUI:
.. code-block:: console
# proxmox-backup-manager user generate-token john@pbs client1
Result: {
"tokenid": "john@pbs!client1",
"value": "d63e505a-e3ec-449a-9bc7-1da610d4ccde"
}
.. note:: The displayed secret value needs to be saved, since it cannot be
displayed again after generating the API token.
The ``user list-tokens`` sub-command can be used to display tokens and their
metadata:
.. code-block:: console
# proxmox-backup-manager user list-tokens john@pbs
┌──────────────────┬────────┬────────┬─────────┐
│ tokenid │ enable │ expire │ comment │
╞══════════════════╪════════╪════════╪═════════╡
│ john@pbs!client1 │ 1 │ │ │
└──────────────────┴────────┴────────┴─────────┘
Similarly, the ``user delete-token`` subcommand can be used to delete a token
again.
Newly generated API tokens don't have any permissions. Please read the next
section to learn how to set access permissions.
.. _user_acl:
Access Control
--------------
By default new users do not have any permission. Instead you need to
specify what is allowed and what is not. You can do this by assigning
roles to users on specific objects like datastores or remotes. The
By default new users and API tokens do not have any permission. Instead you
need to specify what is allowed and what is not. You can do this by assigning
roles to users/tokens on specific objects like datastores or remotes. The
following roles exist:
**NoAccess**
@ -130,7 +193,7 @@ following roles exist:
**RemoteSyncOperator**
Is allowed to read data from a remote.
.. image:: images/screenshots/pbs-gui-permissions-add.png
.. image:: images/screenshots/pbs-gui-user-management-add-user.png
:align: right
:alt: Add permissions for user
@ -148,31 +211,32 @@ The data represented in each field is as follows:
#. The object on which the permission is set. This can be a specific object
(single datastore, remote, etc.) or a top level object, which with
propagation enabled, represents all children of the object also.
#. The user for which the permission is set
#. The user(s)/token(s) for which the permission is set
#. The role being set
You can manage datastore permissions from **Configuration -> Permissions** in the
web interface. Likewise, you can use the ``acl`` subcommand to manage and
monitor user permissions from the command line. For example, the command below
will add the user ``john@pbs`` as a **DatastoreAdmin** for the datastore
``store1``, located at ``/backup/disk1/store1``:
You can manage permissions via **Configuration -> Access Control ->
Permissions** in the web interface. Likewise, you can use the ``acl``
subcommand to manage and monitor user permissions from the command line. For
example, the command below will add the user ``john@pbs`` as a
**DatastoreAdmin** for the datastore ``store1``, located at
``/backup/disk1/store1``:
.. code-block:: console
# proxmox-backup-manager acl update /datastore/store1 DatastoreAdmin --userid john@pbs
# proxmox-backup-manager acl update /datastore/store1 DatastoreAdmin --auth-id john@pbs
You can monitor the roles of each user using the following command:
You can list the ACLs of each user/token using the following command:
.. code-block:: console
# proxmox-backup-manager acl list
┌──────────┬──────────────────┬───────────┬────────────────┐
│ ugid │ path │ propagate │ roleid │
╞══════════╪══════════════════╪═══════════╪════════════════╡
│ john@pbs │ /datastore/disk1 │ 1 │ DatastoreAdmin │
└──────────┴──────────────────┴───────────┴────────────────┘
┌──────────┬──────────────────┬───────────┬────────────────┐
│ ugid │ path │ propagate │ roleid │
╞══════════╪══════════════════╪═══════════╪════════════════╡
│ john@pbs │ /datastore/store1 │ 1 │ DatastoreAdmin │
└──────────┴──────────────────┴───────────┴────────────────┘
A single user can be assigned multiple permission sets for different datastores.
A single user/token can be assigned multiple permission sets for different datastores.
.. Note::
Naming convention is important here. For datastores on the host,
@ -183,4 +247,141 @@ A single user can be assigned multiple permission sets for different datastores.
remote (see `Remote` below) and ``{storename}`` is the name of the datastore on
the remote.
API Token permissions
~~~~~~~~~~~~~~~~~~~~~
API token permissions are calculated based on ACLs containing their ID
independent of those of their corresponding user. The resulting permission set
on a given path is then intersected with that of the corresponding user.
In practice this means:
#. API tokens require their own ACL entries
#. API tokens can never do more than their corresponding user
Effective permissions
~~~~~~~~~~~~~~~~~~~~~
To calculate and display the effective permission set of a user or API token
you can use the ``proxmox-backup-manager user permission`` command:
.. code-block:: console
# proxmox-backup-manager user permissions john@pbs --path /datastore/store1
Privileges with (*) have the propagate flag set
Path: /datastore/store1
- Datastore.Audit (*)
- Datastore.Backup (*)
- Datastore.Modify (*)
- Datastore.Prune (*)
- Datastore.Read (*)
- Datastore.Verify (*)
# proxmox-backup-manager acl update /datastore/store1 DatastoreBackup --auth-id 'john@pbs!client1'
# proxmox-backup-manager user permissions 'john@pbs!client1' --path /datastore/store1
Privileges with (*) have the propagate flag set
Path: /datastore/store1
- Datastore.Backup (*)
.. _user_tfa:
Two-factor authentication
-------------------------
Introduction
~~~~~~~~~~~~
Simple authentication requires only one secret piece of evidence (one factor) for
a user to successfully claim an identity (authenticate), for example, that you
are allowed to log in as `root@pam` on a specific Proxmox Backup Server.
If the password gets stolen, or is leaked in another way, anybody can use it to
log in - even if they should not be allowed to do so.
With two-factor authentication (TFA), a user is asked for an additional factor
to prove their authenticity. The extra factor is different from a password
(something only the user knows); it is something only the user has, for example,
a piece of hardware (security key) or a secret saved on the user's smartphone.
This means that a remote attacker can never get hold of such a physical object.
So, even if they knew your password, they could not successfully
authenticate as you, as your second factor is missing.
.. image:: images/screenshots/pbs-gui-tfa-login.png
:align: right
:alt: Add a new user
Available Second Factors
~~~~~~~~~~~~~~~~~~~~~~~~
You can set up more than one second factor, so that losing your smartphone
or security key does not permanently lock you out of your account.
There are three different two-factor authentication methods supported:
* TOTP (`Time-based One-Time Password <https://en.wikipedia.org/wiki/Time-based_One-Time_Password>`_).
A short code derived from a shared secret and the current time; it changes
every 30 seconds.
* WebAuthn (`Web Authentication <https://en.wikipedia.org/wiki/WebAuthn>`_).
A general standard for authentication. It is implemented by various security
devices like hardware keys or trusted platform modules (TPM) from a computer
or smart phone.
* Single-use Recovery Keys. A list of keys which should either be printed out
and locked in a secure vault or saved digitally in an electronic vault.
Each key can be used only once; they are perfect for ensuring that you are not
locked out, even if all of your other second factors are lost or corrupt.
Setup
~~~~~
.. _user_tfa_setup_totp:
TOTP
^^^^
.. image:: images/screenshots/pbs-gui-tfa-add-totp.png
:align: right
:alt: Add a new user
There is no server setup required; simply install a TOTP app on your
smartphone (for example, `FreeOTP <https://freeotp.github.io/>`_) and use the
Proxmox Backup Server web-interface to add a TOTP factor.
.. _user_tfa_setup_webauthn:
WebAuthn
^^^^^^^^
For WebAuthn to work, you need two things:
* a trusted HTTPS certificate (for example, by using `Let's Encrypt
<https://pbs.proxmox.com/wiki/index.php/HTTPS_Certificate_Configuration>`_)
* set up the WebAuthn configuration (see *Configuration -> Authentication* in the
Proxmox Backup Server web-interface). This can be auto-filled in most setups.
Once you have fulfilled both of these requirements, you can add a WebAuthn
configuration in the *Access Control* panel.
.. _user_tfa_setup_recovery_keys:
Recovery Keys
^^^^^^^^^^^^^
.. image:: images/screenshots/pbs-gui-tfa-add-recovery-keys.png
:align: right
:alt: Add a new user
Recovery key codes do not need any preparation; you can simply create a set of
recovery keys in the *Access Control* panel.
.. note:: There can only be one set of single-use recovery keys per user at any
time.
TFA and Automated Access
~~~~~~~~~~~~~~~~~~~~~~~~
Two-factor authentication is only implemented for the web interface. You should
use :ref:`API Tokens <user_tokens>` for all other use cases, especially
non-interactive ones (for example, adding a Proxmox Backup Server to Proxmox VE
as a storage).

View File

@ -1,13 +1,15 @@
include ../defines.mk
UNITS :=
UNITS := \
proxmox-backup-daily-update.timer \
DYNAMIC_UNITS := \
proxmox-backup-banner.service \
proxmox-backup-daily-update.service \
proxmox-backup.service \
proxmox-backup-proxy.service
all: $(UNITS) $(DYNAMIC_UNITS) pbstest-beta.list
all: $(UNITS) $(DYNAMIC_UNITS) pbs-enterprise.list
clean:
rm -f $(DYNAMIC_UNITS)

1
etc/pbs-enterprise.list Normal file
View File

@ -0,0 +1 @@
deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise

View File

@ -1 +0,0 @@
deb http://download.proxmox.com/debian/pbs buster pbstest

View File

@ -0,0 +1,8 @@
[Unit]
Description=Daily Proxmox Backup Server update and maintenance activities
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
ExecStart=%LIBEXECDIR%/proxmox-backup/proxmox-daily-update

View File

@ -0,0 +1,10 @@
[Unit]
Description=Daily Proxmox Backup Server update and maintenance activities
[Timer]
OnCalendar=*-*-* 1:00
RandomizedDelaySec=5h
Persistent=true
[Install]
WantedBy=timers.target

View File

@ -9,6 +9,7 @@ After=proxmox-backup.service
Type=notify
ExecStart=%LIBEXECDIR%/proxmox-backup/proxmox-backup-proxy
ExecReload=/bin/kill -HUP $MAINPID
PIDFile=/run/proxmox-backup/proxy.pid
Restart=on-failure
User=%PROXY_USER%
Group=%PROXY_USER%

View File

@ -7,6 +7,7 @@ After=network.target
Type=notify
ExecStart=%LIBEXECDIR%/proxmox-backup/proxmox-backup-api
ExecReload=/bin/kill -HUP $MAINPID
PIDFile=/run/proxmox-backup/api.pid
Restart=on-failure
[Install]

View File

@ -2,7 +2,7 @@ use std::io::Write;
use anyhow::{Error};
use proxmox_backup::api2::types::Userid;
use proxmox_backup::api2::types::Authid;
use proxmox_backup::client::{HttpClient, HttpClientOptions, BackupReader};
pub struct DummyWriter {
@ -26,13 +26,13 @@ async fn run() -> Result<(), Error> {
let host = "localhost";
let username = Userid::root_userid();
let auth_id = Authid::root_auth_id();
let options = HttpClientOptions::new()
let options = HttpClientOptions::default()
.interactive(true)
.ticket_cache(true);
let client = HttpClient::new(host, 8007, username, options)?;
let client = HttpClient::new(host, 8007, auth_id, options)?;
let backup_time = proxmox::tools::time::parse_rfc3339("2019-06-28T10:49:48Z")?;

View File

@ -2,7 +2,7 @@ use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use anyhow::{Error};
use anyhow::Error;
use futures::future::TryFutureExt;
use futures::stream::Stream;
use tokio::net::TcpStream;
@ -38,11 +38,11 @@ impl Future for Process {
this.body.flow_control().release_capacity(chunk.len())?;
this.bytes += chunk.len();
// println!("GOT FRAME {}", chunk.len());
},
}
Some(Err(err)) => return Poll::Ready(Err(Error::from(err))),
None => {
this.trailers = true;
},
}
}
}
}
@ -52,7 +52,6 @@ impl Future for Process {
fn send_request(
mut client: h2::client::SendRequest<bytes::Bytes>,
) -> impl Future<Output = Result<usize, Error>> {
println!("sending request");
let request = http::Request::builder()
@ -62,11 +61,11 @@ fn send_request(
let (response, _stream) = client.send_request(request, true).unwrap();
response
.map_err(Error::from)
.and_then(|response| {
Process { body: response.into_body(), trailers: false, bytes: 0 }
})
response.map_err(Error::from).and_then(|response| Process {
body: response.into_body(),
trailers: false,
bytes: 0,
})
}
fn main() -> Result<(), Error> {
@ -74,16 +73,15 @@ fn main() -> Result<(), Error> {
}
async fn run() -> Result<(), Error> {
let start = std::time::SystemTime::now();
let conn = TcpStream::connect(std::net::SocketAddr::from(([127,0,0,1], 8008)))
.await?;
let conn = TcpStream::connect(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
conn.set_nodelay(true).unwrap();
let (client, h2) = h2::client::Builder::new()
.initial_connection_window_size(1024*1024*1024)
.initial_window_size(1024*1024*1024)
.max_frame_size(4*1024*1024)
.initial_connection_window_size(1024 * 1024 * 1024)
.initial_window_size(1024 * 1024 * 1024)
.max_frame_size(4 * 1024 * 1024)
.handshake(conn)
.await?;
@ -99,10 +97,13 @@ async fn run() -> Result<(), Error> {
}
let elapsed = start.elapsed().unwrap();
let elapsed = (elapsed.as_secs() as f64) +
(elapsed.subsec_millis() as f64)/1000.0;
let elapsed = (elapsed.as_secs() as f64) + (elapsed.subsec_millis() as f64) / 1000.0;
println!("Downloaded {} bytes, {} MB/s", bytes, (bytes as f64)/(elapsed*1024.0*1024.0));
println!(
"Downloaded {} bytes, {} MB/s",
bytes,
(bytes as f64) / (elapsed * 1024.0 * 1024.0)
);
Ok(())
}

View File

@ -5,6 +5,7 @@ use std::task::{Context, Poll};
use anyhow::{format_err, Error};
use futures::future::TryFutureExt;
use futures::stream::Stream;
use tokio::net::TcpStream;
// Simple H2 client to test H2 download speed using h2s-server.rs
@ -37,11 +38,11 @@ impl Future for Process {
this.body.flow_control().release_capacity(chunk.len())?;
this.bytes += chunk.len();
// println!("GOT FRAME {}", chunk.len());
},
}
Some(Err(err)) => return Poll::Ready(Err(Error::from(err))),
None => {
this.trailers = true;
},
}
}
}
}
@ -60,11 +61,11 @@ fn send_request(
let (response, _stream) = client.send_request(request, true).unwrap();
response
.map_err(Error::from)
.and_then(|response| {
Process { body: response.into_body(), trailers: false, bytes: 0 }
})
response.map_err(Error::from).and_then(|response| Process {
body: response.into_body(),
trailers: false,
bytes: 0,
})
}
fn main() -> Result<(), Error> {
@ -74,57 +75,51 @@ fn main() -> Result<(), Error> {
async fn run() -> Result<(), Error> {
let start = std::time::SystemTime::now();
let conn =
tokio::net::TcpStream::connect(std::net::SocketAddr::from(([127,0,0,1], 8008))).await?;
let conn = TcpStream::connect(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
conn.set_nodelay(true).unwrap();
conn.set_recv_buffer_size(1024*1024).unwrap();
use openssl::ssl::{SslConnector, SslMethod};
let mut ssl_connector_builder = SslConnector::builder(SslMethod::tls()).unwrap();
ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE);
let conn =
tokio_openssl::connect(
ssl_connector_builder.build().configure()?,
"localhost",
conn,
)
let ssl = ssl_connector_builder
.build()
.configure()?
.into_ssl("localhost")?;
let conn = tokio_openssl::SslStream::new(ssl, conn)?;
let mut conn = Box::pin(conn);
conn.as_mut()
.connect()
.await
.map_err(|err| format_err!("connect failed - {}", err))?;
let (client, h2) = h2::client::Builder::new()
.initial_connection_window_size(1024*1024*1024)
.initial_window_size(1024*1024*1024)
.max_frame_size(4*1024*1024)
.initial_connection_window_size(1024 * 1024 * 1024)
.initial_window_size(1024 * 1024 * 1024)
.max_frame_size(4 * 1024 * 1024)
.handshake(conn)
.await?;
// Spawn a task to run the conn...
tokio::spawn(async move {
if let Err(e) = h2.await {
println!("GOT ERR={:?}", e);
if let Err(err) = h2.await {
println!("GOT ERR={:?}", err);
}
});
let mut bytes = 0;
for _ in 0..100 {
match send_request(client.clone()).await {
Ok(b) => {
bytes += b;
}
Err(e) => {
println!("ERROR {}", e);
return Ok(());
}
}
for _ in 0..2000 {
bytes += send_request(client.clone()).await?;
}
let elapsed = start.elapsed().unwrap();
let elapsed = (elapsed.as_secs() as f64) +
(elapsed.subsec_millis() as f64)/1000.0;
let elapsed = (elapsed.as_secs() as f64) + (elapsed.subsec_millis() as f64) / 1000.0;
println!("Downloaded {} bytes, {} MB/s", bytes, (bytes as f64)/(elapsed*1024.0*1024.0));
println!(
"Downloaded {} bytes, {} MB/s",
bytes,
(bytes as f64) / (elapsed * 1024.0 * 1024.0)
);
Ok(())
}

View File

@ -2,14 +2,12 @@ use std::sync::Arc;
use anyhow::{format_err, Error};
use futures::*;
use hyper::{Request, Response, Body};
use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
use hyper::{Body, Request, Response};
use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
use tokio::net::{TcpListener, TcpStream};
use proxmox_backup::configdir;
// Simple H2 server to test H2 speed with h2s-client.rs
fn main() -> Result<(), Error> {
proxmox_backup::tools::runtime::main(run())
}
@ -19,38 +17,38 @@ async fn run() -> Result<(), Error> {
let cert_path = configdir!("/proxy.pem");
let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
acceptor.set_private_key_file(key_path, SslFiletype::PEM)
acceptor
.set_private_key_file(key_path, SslFiletype::PEM)
.map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
acceptor.set_certificate_chain_file(cert_path)
acceptor
.set_certificate_chain_file(cert_path)
.map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
acceptor.check_private_key().unwrap();
let acceptor = Arc::new(acceptor.build());
let mut listener = TcpListener::bind(std::net::SocketAddr::from(([127,0,0,1], 8008))).await?;
let listener = TcpListener::bind(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
println!("listening on {:?}", listener.local_addr());
loop {
let (socket, _addr) = listener.accept().await?;
tokio::spawn(handle_connection(socket, Arc::clone(&acceptor))
.map(|res| {
if let Err(err) = res {
eprintln!("Error: {}", err);
}
}));
tokio::spawn(handle_connection(socket, Arc::clone(&acceptor)).map(|res| {
if let Err(err) = res {
eprintln!("Error: {}", err);
}
}));
}
}
async fn handle_connection(
socket: TcpStream,
acceptor: Arc<SslAcceptor>,
) -> Result<(), Error> {
async fn handle_connection(socket: TcpStream, acceptor: Arc<SslAcceptor>) -> Result<(), Error> {
socket.set_nodelay(true).unwrap();
socket.set_send_buffer_size(1024*1024).unwrap();
socket.set_recv_buffer_size(1024*1024).unwrap();
let socket = tokio_openssl::accept(acceptor.as_ref(), socket).await?;
let ssl = openssl::ssl::Ssl::new(acceptor.context())?;
let stream = tokio_openssl::SslStream::new(ssl, socket)?;
let mut stream = Box::pin(stream);
stream.as_mut().accept().await?;
let mut http = hyper::server::conn::Http::new();
http.http2_only(true);
@ -61,7 +59,7 @@ async fn handle_connection(
let service = hyper::service::service_fn(|_req: Request<Body>| {
println!("Got request");
let buffer = vec![65u8; 1024*1024]; // nonsense [A,A,A,A...]
let buffer = vec![65u8; 4 * 1024 * 1024]; // nonsense [A,A,A,A...]
let body = Body::from(buffer);
let response = Response::builder()
@ -72,7 +70,7 @@ async fn handle_connection(
future::ok::<_, Error>(response)
});
http.serve_connection(socket, service)
http.serve_connection(stream, service)
.map_err(Error::from)
.await?;

View File

@ -1,51 +1,55 @@
use anyhow::{Error};
use anyhow::Error;
use futures::*;
use hyper::{Body, Request, Response};
// Simple H2 server to test H2 speed with h2client.rs
use tokio::net::TcpListener;
use tokio::io::{AsyncRead, AsyncWrite};
use proxmox_backup::client::pipe_to_stream::PipeToSendStream;
use tokio::net::{TcpListener, TcpStream};
fn main() -> Result<(), Error> {
proxmox_backup::tools::runtime::main(run())
}
async fn run() -> Result<(), Error> {
let mut listener = TcpListener::bind(std::net::SocketAddr::from(([127,0,0,1], 8008))).await?;
let listener = TcpListener::bind(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
println!("listening on {:?}", listener.local_addr());
loop {
let (socket, _addr) = listener.accept().await?;
tokio::spawn(handle_connection(socket)
.map(|res| {
if let Err(err) = res {
eprintln!("Error: {}", err);
}
}));
tokio::spawn(handle_connection(socket).map(|res| {
if let Err(err) = res {
eprintln!("Error: {}", err);
}
}));
}
}
async fn handle_connection<T: AsyncRead + AsyncWrite + Unpin>(socket: T) -> Result<(), Error> {
let mut conn = h2::server::handshake(socket).await?;
async fn handle_connection(socket: TcpStream) -> Result<(), Error> {
socket.set_nodelay(true).unwrap();
println!("H2 connection bound");
let mut http = hyper::server::conn::Http::new();
http.http2_only(true);
// increase window size: todo - find optimal size
let max_window_size = (1 << 31) - 2;
http.http2_initial_stream_window_size(max_window_size);
http.http2_initial_connection_window_size(max_window_size);
while let Some((request, mut respond)) = conn.try_next().await? {
println!("GOT request: {:?}", request);
let service = hyper::service::service_fn(|_req: Request<Body>| {
println!("Got request");
let buffer = vec![65u8; 4 * 1024 * 1024]; // nonsense [A,A,A,A...]
let body = Body::from(buffer);
let response = http::Response::builder()
let response = Response::builder()
.status(http::StatusCode::OK)
.body(())
.header(http::header::CONTENT_TYPE, "application/octet-stream")
.body(body)
.unwrap();
future::ok::<_, Error>(response)
});
let send = respond.send_response(response, false).unwrap();
let data = vec![65u8; 1024*1024];
PipeToSendStream::new(bytes::Bytes::from(data), send).await?;
println!("DATA SENT");
}
http.serve_connection(socket, service)
.map_err(Error::from)
.await?;
println!("H2 connection CLOSE !");
Ok(())
}

View File

@ -1,6 +1,6 @@
use anyhow::{Error};
use proxmox_backup::api2::types::Userid;
use proxmox_backup::api2::types::Authid;
use proxmox_backup::client::*;
async fn upload_speed() -> Result<f64, Error> {
@ -8,13 +8,13 @@ async fn upload_speed() -> Result<f64, Error> {
let host = "localhost";
let datastore = "store2";
let username = Userid::root_userid();
let auth_id = Authid::root_auth_id();
let options = HttpClientOptions::new()
let options = HttpClientOptions::default()
.interactive(true)
.ticket_cache(true);
let client = HttpClient::new(host, 8007, username, options)?;
let client = HttpClient::new(host, 8007, auth_id, options)?;
let backup_time = proxmox::tools::time::epoch_i64();

View File

@ -1,3 +1,5 @@
//! The Proxmox Backup Server API
pub mod access;
pub mod admin;
pub mod backup;
@ -9,6 +11,7 @@ pub mod types;
pub mod version;
pub mod ping;
pub mod pull;
pub mod tape;
mod helpers;
use proxmox::api::router::SubdirMap;
@ -17,7 +20,7 @@ use proxmox::list_subdirs_api_method;
const NODES_ROUTER: Router = Router::new().match_all("node", &node::ROUTER);
pub const SUBDIRS: SubdirMap = &[
const SUBDIRS: SubdirMap = &[
("access", &access::ROUTER),
("admin", &admin::ROUTER),
("backup", &backup::ROUTER),
@ -27,6 +30,7 @@ pub const SUBDIRS: SubdirMap = &[
("pull", &pull::ROUTER),
("reader", &reader::ROUTER),
("status", &status::ROUTER),
("tape", &tape::ROUTER),
("version", &version::ROUTER),
];

View File

@ -1,46 +1,69 @@
//! Access control (Users, Permissions and Authentication)
use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};
use std::collections::HashMap;
use std::collections::HashSet;
use proxmox::api::{api, RpcEnvironment, Permission};
use proxmox::api::router::{Router, SubdirMap};
use proxmox::{sortable, identity};
use proxmox::api::{api, Permission, RpcEnvironment};
use proxmox::{http_err, list_subdirs_api_method};
use proxmox::{identity, sortable};
use crate::tools::ticket::{self, Empty, Ticket};
use crate::auth_helpers::*;
use crate::api2::types::*;
use crate::tools::{FileLogOptions, FileLogger};
use crate::auth_helpers::*;
use crate::server::ticket::ApiTicket;
use crate::tools::ticket::{self, Empty, Ticket};
use crate::config::acl as acl_config;
use crate::config::acl::{PRIVILEGES, PRIV_PERMISSIONS_MODIFY, PRIV_SYS_AUDIT};
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::acl::{PRIVILEGES, PRIV_PERMISSIONS_MODIFY};
use crate::config::tfa::TfaChallenge;
pub mod user;
pub mod domain;
pub mod acl;
pub mod domain;
pub mod role;
pub mod tfa;
pub mod user;
#[allow(clippy::large_enum_variant)]
enum AuthResult {
/// Successful authentication which does not require a new ticket.
Success,
/// Successful authentication which requires a ticket to be created.
CreateTicket,
/// A partial ticket which requires a 2nd factor will be created.
Partial(TfaChallenge),
}
/// returns Ok(true) if a ticket has to be created
/// and Ok(false) if not
fn authenticate_user(
userid: &Userid,
password: &str,
path: Option<String>,
privs: Option<String>,
port: Option<u16>,
) -> Result<bool, Error> {
tfa_challenge: Option<String>,
) -> Result<AuthResult, Error> {
let user_info = CachedUserInfo::new()?;
if !user_info.is_active_user(&userid) {
let auth_id = Authid::from(userid.clone());
if !user_info.is_active_auth_id(&auth_id) {
bail!("user account disabled or expired.");
}
if let Some(tfa_challenge) = tfa_challenge {
return authenticate_2nd(userid, &tfa_challenge, password);
}
if password.starts_with("PBS:") {
if let Ok(ticket_userid) = Ticket::<Userid>::parse(password)
.and_then(|ticket| ticket.verify(public_auth_key(), "PBS", None))
{
if *userid == ticket_userid {
return Ok(true);
return Ok(AuthResult::CreateTicket);
}
bail!("ticket login failed - wrong userid");
}
@ -50,17 +73,17 @@ fn authenticate_user(
}
let path = path.ok_or_else(|| format_err!("missing path for termproxy ticket"))?;
let privilege_name = privs
.ok_or_else(|| format_err!("missing privilege name for termproxy ticket"))?;
let privilege_name =
privs.ok_or_else(|| format_err!("missing privilege name for termproxy ticket"))?;
let port = port.ok_or_else(|| format_err!("missing port for termproxy ticket"))?;
if let Ok(Empty) = Ticket::parse(password)
.and_then(|ticket| ticket.verify(
if let Ok(Empty) = Ticket::parse(password).and_then(|ticket| {
ticket.verify(
public_auth_key(),
ticket::TERM_PREFIX,
Some(&ticket::term_aad(userid, &path, port)),
))
{
)
}) {
for (name, privilege) in PRIVILEGES {
if *name == privilege_name {
let mut path_vec = Vec::new();
@ -69,9 +92,8 @@ fn authenticate_user(
path_vec.push(part);
}
}
user_info.check_privs(userid, &path_vec, *privilege, false)?;
return Ok(false);
user_info.check_privs(&auth_id, &path_vec, *privilege, false)?;
return Ok(AuthResult::Success);
}
}
@ -79,8 +101,26 @@ fn authenticate_user(
}
}
let _ = crate::auth::authenticate_user(userid, password)?;
Ok(true)
let _: () = crate::auth::authenticate_user(userid, password)?;
Ok(match crate::config::tfa::login_challenge(userid)? {
None => AuthResult::CreateTicket,
Some(challenge) => AuthResult::Partial(challenge),
})
}
fn authenticate_2nd(
userid: &Userid,
challenge_ticket: &str,
response: &str,
) -> Result<AuthResult, Error> {
let challenge: TfaChallenge = Ticket::<ApiTicket>::parse(&challenge_ticket)?
.verify_with_time_frame(public_auth_key(), "PBS", Some(userid.as_str()), -60..600)?
.require_partial()?;
let _: () = crate::config::tfa::verify_challenge(userid, &challenge, response.parse()?)?;
Ok(AuthResult::CreateTicket)
}
#[api(
@ -107,6 +147,11 @@ fn authenticate_user(
description: "Port for verifying terminal tickets.",
optional: true,
},
"tfa-challenge": {
type: String,
description: "The signed TFA challenge string the user wants to respond to.",
optional: true,
},
},
},
returns: {
@ -121,7 +166,9 @@ fn authenticate_user(
},
CSRFPreventionToken: {
type: String,
description: "Cross Site Request Forgery Prevention Token.",
description:
"Cross Site Request Forgery Prevention Token. \
For partial tickets this is the string \"invalid\".",
},
},
},
@ -133,28 +180,24 @@ fn authenticate_user(
/// Create or verify authentication ticket.
///
/// Returns: An authentication ticket with additional infos.
fn create_ticket(
pub fn create_ticket(
username: Userid,
password: String,
path: Option<String>,
privs: Option<String>,
port: Option<u16>,
tfa_challenge: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let logger_options = FileLogOptions {
append: true,
prefix_time: true,
..Default::default()
};
let mut auth_log = FileLogger::new("/var/log/proxmox-backup/api/auth.log", logger_options)?;
match authenticate_user(&username, &password, path, privs, port) {
Ok(true) => {
let ticket = Ticket::new("PBS", &username)?.sign(private_auth_key(), None)?;
match authenticate_user(&username, &password, path, privs, port, tfa_challenge) {
Ok(AuthResult::Success) => Ok(json!({ "username": username })),
Ok(AuthResult::CreateTicket) => {
let api_ticket = ApiTicket::full(username.clone());
let ticket = Ticket::new("PBS", &api_ticket)?.sign(private_auth_key(), None)?;
let token = assemble_csrf_prevention_token(csrf_secret(), &username);
auth_log.log(format!("successful auth for user '{}'", username));
crate::server::rest::auth_logger()?
.log(format!("successful auth for user '{}'", username));
Ok(json!({
"username": username,
@ -162,9 +205,16 @@ fn create_ticket(
"CSRFPreventionToken": token,
}))
}
Ok(false) => Ok(json!({
"username": username,
})),
Ok(AuthResult::Partial(challenge)) => {
let api_ticket = ApiTicket::partial(challenge);
let ticket = Ticket::new("PBS", &api_ticket)?
.sign(private_auth_key(), Some(username.as_str()))?;
Ok(json!({
"username": username,
"ticket": ticket,
"CSRFPreventionToken": "invalid",
}))
}
Err(err) => {
let client_ip = match rpcenv.get_client_ip().map(|addr| addr.ip()) {
Some(ip) => format!("{}", ip),
@ -177,7 +227,7 @@ fn create_ticket(
username,
err.to_string()
);
auth_log.log(&msg);
crate::server::rest::auth_logger()?.log(&msg);
log::error!("{}", msg);
Err(http_err!(UNAUTHORIZED, "permission check failed."))
@ -186,6 +236,7 @@ fn create_ticket(
}
#[api(
protected: true,
input: {
properties: {
userid: {
@ -197,35 +248,42 @@ fn create_ticket(
},
},
access: {
description: "Anybody is allowed to change there own password. In addition, users with 'Permissions:Modify' privilege may change any password.",
description: "Everybody is allowed to change their own password. In addition, users with 'Permissions:Modify' privilege may change any password on @pbs realm.",
permission: &Permission::Anybody,
},
)]
/// Change user password
///
/// Each user is allowed to change his own password. Superuser
/// can change all passwords.
fn change_password(
pub fn change_password(
userid: Userid,
password: String,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let current_user: Userid = rpcenv
.get_user()
.ok_or_else(|| format_err!("unknown user"))?
let current_auth: Authid = rpcenv
.get_auth_id()
.ok_or_else(|| format_err!("no authid available"))?
.parse()?;
let mut allowed = userid == current_user;
if current_auth.is_token() {
bail!("API tokens cannot access this API endpoint");
}
if userid == "root@pam" { allowed = true; }
let current_user = current_auth.user();
let mut allowed = userid == *current_user;
if !allowed {
let user_info = CachedUserInfo::new()?;
let privs = user_info.lookup_privs(&current_user, &[]);
if (privs & PRIV_PERMISSIONS_MODIFY) != 0 { allowed = true; }
}
let privs = user_info.lookup_privs(&current_auth, &[]);
if user_info.is_superuser(&current_auth) {
allowed = true;
}
if (privs & PRIV_PERMISSIONS_MODIFY) != 0 && userid.realm() != "pam" {
allowed = true;
}
};
if !allowed {
bail!("you are not authorized to change the password.");
@ -237,20 +295,138 @@ fn change_password(
Ok(Value::Null)
}
#[api(
input: {
properties: {
"auth-id": {
type: Authid,
optional: true,
},
path: {
schema: ACL_PATH_SCHEMA,
optional: true,
},
},
},
access: {
permission: &Permission::Anybody,
description: "Requires Sys.Audit on '/access', limited to own privileges otherwise.",
},
returns: {
description: "Map of ACL path to Map of privilege to propagate bit",
type: Object,
properties: {},
additional_properties: true,
},
)]
/// List permissions of given or currently authenticated user / API token.
///
/// Optionally limited to specific path.
pub fn list_permissions(
auth_id: Option<Authid>,
path: Option<String>,
rpcenv: &dyn RpcEnvironment,
) -> Result<HashMap<String, HashMap<String, bool>>, Error> {
let current_auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&current_auth_id, &["access"]);
let auth_id = match auth_id {
Some(auth_id) if auth_id == current_auth_id => current_auth_id,
Some(auth_id) => {
if user_privs & PRIV_SYS_AUDIT != 0
|| (auth_id.is_token()
&& !current_auth_id.is_token()
&& auth_id.user() == current_auth_id.user())
{
auth_id
} else {
bail!("not allowed to list permissions of {}", auth_id);
}
},
None => current_auth_id,
};
fn populate_acl_paths(
mut paths: HashSet<String>,
node: acl_config::AclTreeNode,
path: &str,
) -> HashSet<String> {
for (sub_path, child_node) in node.children {
let sub_path = format!("{}/{}", path, &sub_path);
paths = populate_acl_paths(paths, child_node, &sub_path);
paths.insert(sub_path);
}
paths
}
let paths = match path {
Some(path) => {
let mut paths = HashSet::new();
paths.insert(path);
paths
}
None => {
let mut paths = HashSet::new();
let (acl_tree, _) = acl_config::config()?;
paths = populate_acl_paths(paths, acl_tree.root, "");
// default paths, returned even if no ACL exists
paths.insert("/".to_string());
paths.insert("/access".to_string());
paths.insert("/datastore".to_string());
paths.insert("/remote".to_string());
paths.insert("/system".to_string());
paths
}
};
let map = paths.into_iter().fold(
HashMap::new(),
|mut map: HashMap<String, HashMap<String, bool>>, path: String| {
let split_path = acl_config::split_acl_path(path.as_str());
let (privs, propagated_privs) = user_info.lookup_privs_details(&auth_id, &split_path);
match privs {
0 => map, // Don't leak ACL paths where we don't have any privileges
_ => {
let priv_map =
PRIVILEGES
.iter()
.fold(HashMap::new(), |mut priv_map, (name, value)| {
if value & privs != 0 {
priv_map
.insert(name.to_string(), value & propagated_privs != 0);
}
priv_map
});
map.insert(path, priv_map);
map
}
}
},
);
Ok(map)
}
#[sortable]
const SUBDIRS: SubdirMap = &sorted!([
("acl", &acl::ROUTER),
("password", &Router::new().put(&API_METHOD_CHANGE_PASSWORD)),
(
"password", &Router::new()
.put(&API_METHOD_CHANGE_PASSWORD)
),
(
"ticket", &Router::new()
.post(&API_METHOD_CREATE_TICKET)
"permissions",
&Router::new().get(&API_METHOD_LIST_PERMISSIONS)
),
("ticket", &Router::new().post(&API_METHOD_CREATE_TICKET)),
("domains", &domain::ROUTER),
("roles", &role::ROUTER),
("users", &user::ROUTER),
("tfa", &tfa::ROUTER),
]);
pub const ROUTER: Router = Router::new()

View File

@ -1,5 +1,6 @@
//! Manage Access Control Lists
use anyhow::{bail, Error};
use ::serde::{Deserialize, Serialize};
use proxmox::api::{api, Router, RpcEnvironment, Permission};
use proxmox::tools::fs::open_file_locked;
@ -7,44 +8,30 @@ use proxmox::tools::fs::open_file_locked;
use crate::api2::types::*;
use crate::config::acl;
use crate::config::acl::{Role, PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
#[api(
properties: {
propagate: {
schema: ACL_PROPAGATE_SCHEMA,
},
path: {
schema: ACL_PATH_SCHEMA,
},
ugid_type: {
schema: ACL_UGID_TYPE_SCHEMA,
},
ugid: {
type: String,
description: "User or Group ID.",
},
roleid: {
type: Role,
}
}
)]
#[derive(Serialize, Deserialize)]
/// ACL list entry.
pub struct AclListItem {
path: String,
ugid: String,
ugid_type: String,
propagate: bool,
roleid: String,
}
use crate::config::cached_user_info::CachedUserInfo;
fn extract_acl_node_data(
node: &acl::AclTreeNode,
path: &str,
list: &mut Vec<AclListItem>,
exact: bool,
token_user: &Option<Authid>,
) {
// tokens can't have tokens, so we can early return
if let Some(token_user) = token_user {
if token_user.is_token() {
return;
}
}
for (user, roles) in &node.users {
if let Some(token_user) = token_user {
if !user.is_token()
|| user.user() != token_user.user() {
continue;
}
}
for (role, propagate) in roles {
list.push(AclListItem {
path: if path.is_empty() { String::from("/") } else { path.to_string() },
@ -56,6 +43,10 @@ fn extract_acl_node_data(
}
}
for (group, roles) in &node.groups {
if token_user.is_some() {
continue;
}
for (role, propagate) in roles {
list.push(AclListItem {
path: if path.is_empty() { String::from("/") } else { path.to_string() },
@ -71,7 +62,7 @@ fn extract_acl_node_data(
}
for (comp, child) in &node.children {
let new_path = format!("{}/{}", path, comp);
extract_acl_node_data(child, &new_path, list, exact);
extract_acl_node_data(child, &new_path, list, exact, token_user);
}
}
@ -98,7 +89,8 @@ fn extract_acl_node_data(
}
},
access: {
permission: &Permission::Privilege(&["access", "acl"], PRIV_SYS_AUDIT, false),
permission: &Permission::Anybody,
description: "Returns all ACLs if user has Sys.Audit on '/access/acl', or just the ACLs containing the user's API tokens.",
},
)]
/// Read Access Control List (ACLs).
@ -107,18 +99,26 @@ pub fn read_acl(
exact: bool,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<AclListItem>, Error> {
let auth_id = rpcenv.get_auth_id().unwrap().parse()?;
//let auth_user = rpcenv.get_user().unwrap();
let user_info = CachedUserInfo::new()?;
let top_level_privs = user_info.lookup_privs(&auth_id, &["access", "acl"]);
let auth_id_filter = if (top_level_privs & PRIV_SYS_AUDIT) == 0 {
Some(auth_id)
} else {
None
};
let (mut tree, digest) = acl::config()?;
let mut list: Vec<AclListItem> = Vec::new();
if let Some(path) = &path {
if let Some(node) = &tree.find_node(path) {
extract_acl_node_data(&node, path, &mut list, exact);
extract_acl_node_data(&node, path, &mut list, exact, &auth_id_filter);
}
} else {
extract_acl_node_data(&tree.root, "", &mut list, exact);
extract_acl_node_data(&tree.root, "", &mut list, exact, &auth_id_filter);
}
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
@ -140,9 +140,9 @@ pub fn read_acl(
optional: true,
schema: ACL_PROPAGATE_SCHEMA,
},
userid: {
"auth-id": {
optional: true,
type: Userid,
type: Authid,
},
group: {
optional: true,
@ -160,20 +160,45 @@ pub fn read_acl(
},
},
access: {
permission: &Permission::Privilege(&["access", "acl"], PRIV_PERMISSIONS_MODIFY, false),
permission: &Permission::Anybody,
description: "Requires Permissions.Modify on '/access/acl', limited to updating ACLs of the user's API tokens otherwise."
},
)]
/// Update Access Control List (ACLs).
#[allow(clippy::too_many_arguments)]
pub fn update_acl(
path: String,
role: String,
propagate: Option<bool>,
userid: Option<Userid>,
auth_id: Option<Authid>,
group: Option<String>,
delete: Option<bool>,
digest: Option<String>,
_rpcenv: &mut dyn RpcEnvironment,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let current_auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let top_level_privs = user_info.lookup_privs(&current_auth_id, &["access", "acl"]);
if top_level_privs & PRIV_PERMISSIONS_MODIFY == 0 {
if group.is_some() {
bail!("Unprivileged users are not allowed to create group ACL item.");
}
match &auth_id {
Some(auth_id) => {
if current_auth_id.is_token() {
bail!("Unprivileged API tokens can't set ACL items.");
} else if !auth_id.is_token() {
bail!("Unprivileged users can only set ACL items for API tokens.");
} else if auth_id.user() != current_auth_id.user() {
bail!("Unprivileged users can only set ACL items for their own API tokens.");
}
},
None => { bail!("Unprivileged user needs to provide auth_id to update ACL item."); },
};
}
let _lock = open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
@ -190,11 +215,12 @@ pub fn update_acl(
if let Some(ref _group) = group {
bail!("parameter 'group' - groups are currently not supported.");
} else if let Some(ref userid) = userid {
} else if let Some(ref auth_id) = auth_id {
if !delete { // Note: we allow deleting non-existent users
let user_cfg = crate::config::user::cached_config()?;
if user_cfg.sections.get(&userid.to_string()).is_none() {
bail!("no such user.");
if user_cfg.sections.get(&auth_id.to_string()).is_none() {
bail!(format!("no such {}.",
if auth_id.is_token() { "API token" } else { "user" }));
}
}
} else {
@ -205,11 +231,11 @@ pub fn update_acl(
acl::check_acl_path(&path)?;
}
if let Some(userid) = userid {
if let Some(auth_id) = auth_id {
if delete {
tree.delete_user_role(&path, &userid, &role);
tree.delete_user_role(&path, &auth_id, &role);
} else {
tree.insert_user_role(&path, &userid, &role, propagate);
tree.insert_user_role(&path, &auth_id, &role, propagate);
}
} else if let Some(group) = group {
if delete {

View File

@ -1,3 +1,5 @@
//! List Authentication domains/realms
use anyhow::{Error};
use serde_json::{json, Value};

View File

@ -1,3 +1,5 @@
//! Manage Roles with privileges
use anyhow::Error;
use serde_json::{json, Value};
@ -46,7 +48,7 @@ fn list_roles() -> Result<Value, Error> {
let mut priv_list = Vec::new();
for (name, privilege) in PRIVILEGES.iter() {
if privs & privilege > 0 {
priv_list.push(name.clone());
priv_list.push(name);
}
}
list.push(json!({ "roleid": role, "privs": priv_list, "comment": comment }));

src/api2/access/tfa.rs (new file, 594 lines)
View File

@ -0,0 +1,594 @@
//! Two Factor Authentication
use anyhow::{bail, format_err, Error};
use serde::{Deserialize, Serialize};
use proxmox::api::{api, Permission, Router, RpcEnvironment};
use proxmox::tools::tfa::totp::Totp;
use proxmox::{http_bail, http_err};
use crate::api2::types::{Authid, Userid, PASSWORD_SCHEMA};
use crate::config::acl::{PRIV_PERMISSIONS_MODIFY, PRIV_SYS_AUDIT};
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::tfa::{TfaInfo, TfaUserData};
/// Perform first-factor (password) authentication only. Ignore password for the root user.
/// Otherwise check the current user's password.
///
/// This means that user admins need to type in their own password while editing a user, and
/// regular users, which can only change their own TFA settings (checked at the API level), can
/// change their own settings using their own password.
fn tfa_update_auth(
rpcenv: &mut dyn RpcEnvironment,
userid: &Userid,
password: Option<String>,
must_exist: bool,
) -> Result<(), Error> {
let authid: Authid = rpcenv.get_auth_id().unwrap().parse()?;
if authid.user() != Userid::root_userid() {
let password = password.ok_or_else(|| http_err!(UNAUTHORIZED, "missing password"))?;
let _: () = crate::auth::authenticate_user(authid.user(), &password)
.map_err(|err| http_err!(UNAUTHORIZED, "{}", err))?;
}
// After authentication, verify that the to-be-modified user actually exists:
if must_exist && authid.user() != userid {
let (config, _digest) = crate::config::user::config()?;
if config
.lookup::<crate::config::user::User>("user", userid.as_str())
.is_err()
{
http_bail!(UNAUTHORIZED, "user '{}' does not exist.", userid);
}
}
Ok(())
}
#[api]
/// A TFA entry type.
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
enum TfaType {
/// A TOTP entry type.
Totp,
/// A U2F token entry.
U2f,
/// A Webauthn token entry.
Webauthn,
/// Recovery tokens.
Recovery,
}
#[api(
properties: {
type: { type: TfaType },
info: { type: TfaInfo },
},
)]
/// A TFA entry for a user.
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
struct TypedTfaInfo {
#[serde(rename = "type")]
pub ty: TfaType,
#[serde(flatten)]
pub info: TfaInfo,
}
fn to_data(data: TfaUserData) -> Vec<TypedTfaInfo> {
let mut out = Vec::with_capacity(
data.totp.len()
+ data.u2f.len()
+ data.webauthn.len()
+ if data.recovery().is_some() { 1 } else { 0 },
);
if let Some(recovery) = data.recovery() {
out.push(TypedTfaInfo {
ty: TfaType::Recovery,
info: TfaInfo::recovery(recovery.created),
})
}
for entry in data.totp {
out.push(TypedTfaInfo {
ty: TfaType::Totp,
info: entry.info,
});
}
for entry in data.webauthn {
out.push(TypedTfaInfo {
ty: TfaType::Webauthn,
info: entry.info,
});
}
for entry in data.u2f {
out.push(TypedTfaInfo {
ty: TfaType::U2f,
info: entry.info,
});
}
out
}
/// Iterate through tuples of `(type, index, id)`.
fn tfa_id_iter(data: &TfaUserData) -> impl Iterator<Item = (TfaType, usize, &str)> {
data.totp
.iter()
.enumerate()
.map(|(i, entry)| (TfaType::Totp, i, entry.info.id.as_str()))
.chain(
data.webauthn
.iter()
.enumerate()
.map(|(i, entry)| (TfaType::Webauthn, i, entry.info.id.as_str())),
)
.chain(
data.u2f
.iter()
.enumerate()
.map(|(i, entry)| (TfaType::U2f, i, entry.info.id.as_str())),
)
.chain(
data.recovery
.iter()
.map(|_| (TfaType::Recovery, 0, "recovery")),
)
}
#[api(
protected: true,
input: {
properties: { userid: { type: Userid } },
},
access: {
permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
&Permission::UserParam("userid"),
]),
},
)]
/// List TFA entries for the given user.
fn list_user_tfa(userid: Userid) -> Result<Vec<TypedTfaInfo>, Error> {
let _lock = crate::config::tfa::read_lock()?;
Ok(match crate::config::tfa::read()?.users.remove(&userid) {
Some(data) => to_data(data),
None => Vec::new(),
})
}
#[api(
protected: true,
input: {
properties: {
userid: { type: Userid },
id: { description: "the tfa entry id" }
},
},
access: {
permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
&Permission::UserParam("userid"),
]),
},
)]
/// Get a single TFA entry.
fn get_tfa_entry(userid: Userid, id: String) -> Result<TypedTfaInfo, Error> {
let _lock = crate::config::tfa::read_lock()?;
if let Some(user_data) = crate::config::tfa::read()?.users.remove(&userid) {
match {
// scope to prevent the temporary iter from borrowing across the whole match
let entry = tfa_id_iter(&user_data).find(|(_ty, _index, entry_id)| id == *entry_id);
entry.map(|(ty, index, _)| (ty, index))
} {
Some((TfaType::Recovery, _)) => {
if let Some(recovery) = user_data.recovery() {
return Ok(TypedTfaInfo {
ty: TfaType::Recovery,
info: TfaInfo::recovery(recovery.created),
});
}
}
Some((TfaType::Totp, index)) => {
return Ok(TypedTfaInfo {
ty: TfaType::Totp,
// `into_iter().nth()` to *move* out of it
info: user_data.totp.into_iter().nth(index).unwrap().info,
});
}
Some((TfaType::Webauthn, index)) => {
return Ok(TypedTfaInfo {
ty: TfaType::Webauthn,
info: user_data.webauthn.into_iter().nth(index).unwrap().info,
});
}
Some((TfaType::U2f, index)) => {
return Ok(TypedTfaInfo {
ty: TfaType::U2f,
info: user_data.u2f.into_iter().nth(index).unwrap().info,
});
}
None => (),
}
}
http_bail!(NOT_FOUND, "no such tfa entry: {}/{}", userid, id);
}
#[api(
protected: true,
input: {
properties: {
userid: { type: Userid },
id: {
description: "the tfa entry id",
},
password: {
schema: PASSWORD_SCHEMA,
optional: true,
},
},
},
access: {
permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
&Permission::UserParam("userid"),
]),
},
)]
/// Delete a single TFA entry.
fn delete_tfa(
userid: Userid,
id: String,
password: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
tfa_update_auth(rpcenv, &userid, password, false)?;
let _lock = crate::config::tfa::write_lock()?;
let mut data = crate::config::tfa::read()?;
let user_data = data
.users
.get_mut(&userid)
.ok_or_else(|| http_err!(NOT_FOUND, "no such entry: {}/{}", userid, id))?;
match {
// scope to prevent the temporary iter from borrowing across the whole match
let entry = tfa_id_iter(&user_data).find(|(_, _, entry_id)| id == *entry_id);
entry.map(|(ty, index, _)| (ty, index))
} {
Some((TfaType::Recovery, _)) => user_data.recovery = None,
Some((TfaType::Totp, index)) => drop(user_data.totp.remove(index)),
Some((TfaType::Webauthn, index)) => drop(user_data.webauthn.remove(index)),
Some((TfaType::U2f, index)) => drop(user_data.u2f.remove(index)),
None => http_bail!(NOT_FOUND, "no such tfa entry: {}/{}", userid, id),
}
if user_data.is_empty() {
data.users.remove(&userid);
}
crate::config::tfa::write(&data)?;
Ok(())
}
#[api(
properties: {
"userid": { type: Userid },
"entries": {
type: Array,
items: { type: TypedTfaInfo },
},
},
)]
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
/// Over the API we only provide the descriptions for TFA data.
struct TfaUser {
/// The user this entry belongs to.
userid: Userid,
/// TFA entries.
entries: Vec<TypedTfaInfo>,
}
#[api(
protected: true,
input: {
properties: {},
},
access: {
permission: &Permission::Anybody,
description: "Returns all or just the logged-in user, depending on privileges.",
},
returns: {
description: "The list tuples of user and TFA entries.",
type: Array,
items: { type: TfaUser }
},
)]
/// List user TFA configuration.
fn list_tfa(rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<TfaUser>, Error> {
let authid: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let top_level_privs = user_info.lookup_privs(&authid, &["access", "users"]);
let top_level_allowed = (top_level_privs & PRIV_SYS_AUDIT) != 0;
let _lock = crate::config::tfa::read_lock()?;
let tfa_data = crate::config::tfa::read()?.users;
let mut out = Vec::<TfaUser>::new();
if top_level_allowed {
for (user, data) in tfa_data {
out.push(TfaUser {
userid: user,
entries: to_data(data),
});
}
} else if let Some(data) = { tfa_data }.remove(authid.user()) {
out.push(TfaUser {
userid: authid.into(),
entries: to_data(data),
});
}
Ok(out)
}
#[api(
properties: {
recovery: {
description: "A list of recovery codes as integers.",
type: Array,
items: {
type: Integer,
description: "A one-time usable recovery code entry.",
},
},
},
)]
/// The result returned when adding TFA entries to a user.
#[derive(Default, Serialize)]
struct TfaUpdateInfo {
/// The id of a newly added TFA entry.
id: Option<String>,
/// When adding u2f entries, this contains a challenge the user must respond to in order to
/// finish the registration.
#[serde(skip_serializing_if = "Option::is_none")]
challenge: Option<String>,
/// When adding recovery codes, this contains the list of codes to be displayed to the user
/// this one time.
#[serde(skip_serializing_if = "Vec::is_empty", default)]
recovery: Vec<String>,
}
impl TfaUpdateInfo {
fn id(id: String) -> Self {
Self {
id: Some(id),
..Default::default()
}
}
}
#[api(
protected: true,
input: {
properties: {
userid: { type: Userid },
description: {
description: "A description to distinguish multiple entries from one another",
type: String,
max_length: 255,
optional: true,
},
"type": { type: TfaType },
totp: {
description: "A totp URI.",
optional: true,
},
value: {
description:
"The current value for the provided totp URI, or a Webauthn/U2F challenge response",
optional: true,
},
challenge: {
description: "When responding to a u2f challenge: the original challenge string",
optional: true,
},
password: {
schema: PASSWORD_SCHEMA,
optional: true,
},
},
},
returns: { type: TfaUpdateInfo },
access: {
permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
&Permission::UserParam("userid"),
]),
},
)]
/// Add a TFA entry to the user.
#[allow(clippy::too_many_arguments)]
fn add_tfa_entry(
userid: Userid,
description: Option<String>,
totp: Option<String>,
value: Option<String>,
challenge: Option<String>,
password: Option<String>,
r#type: TfaType,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<TfaUpdateInfo, Error> {
tfa_update_auth(rpcenv, &userid, password, true)?;
let need_description =
move || description.ok_or_else(|| format_err!("'description' is required for new entries"));
match r#type {
TfaType::Totp => match (totp, value) {
(Some(totp), Some(value)) => {
if challenge.is_some() {
bail!("'challenge' parameter is invalid for 'totp' entries");
}
let description = need_description()?;
let totp: Totp = totp.parse()?;
if totp
.verify(&value, std::time::SystemTime::now(), -1..=1)?
.is_none()
{
bail!("failed to verify TOTP challenge");
}
crate::config::tfa::add_totp(&userid, description, totp).map(TfaUpdateInfo::id)
}
_ => bail!("'totp' type requires both 'totp' and 'value' parameters"),
},
TfaType::Webauthn => {
if totp.is_some() {
bail!("'totp' parameter is invalid for 'totp' entries");
}
match challenge {
None => crate::config::tfa::add_webauthn_registration(&userid, need_description()?)
.map(|c| TfaUpdateInfo {
challenge: Some(c),
..Default::default()
}),
Some(challenge) => {
let value = value.ok_or_else(|| {
format_err!(
"missing 'value' parameter (webauthn challenge response missing)"
)
})?;
crate::config::tfa::finish_webauthn_registration(&userid, &challenge, &value)
.map(TfaUpdateInfo::id)
}
}
}
TfaType::U2f => {
if totp.is_some() {
bail!("'totp' parameter is invalid for 'totp' entries");
}
match challenge {
None => crate::config::tfa::add_u2f_registration(&userid, need_description()?).map(
|c| TfaUpdateInfo {
challenge: Some(c),
..Default::default()
},
),
Some(challenge) => {
let value = value.ok_or_else(|| {
format_err!("missing 'value' parameter (u2f challenge response missing)")
})?;
crate::config::tfa::finish_u2f_registration(&userid, &challenge, &value)
.map(TfaUpdateInfo::id)
}
}
}
TfaType::Recovery => {
if totp.or(value).or(challenge).is_some() {
bail!("generating recovery tokens does not allow additional parameters");
}
let recovery = crate::config::tfa::add_recovery(&userid)?;
Ok(TfaUpdateInfo {
id: Some("recovery".to_string()),
recovery,
..Default::default()
})
}
}
}
#[api(
protected: true,
input: {
properties: {
userid: { type: Userid },
id: {
description: "the tfa entry id",
},
description: {
description: "A description to distinguish multiple entries from one another",
type: String,
max_length: 255,
optional: true,
},
enable: {
description: "Whether this entry should currently be enabled or disabled",
optional: true,
},
password: {
schema: PASSWORD_SCHEMA,
optional: true,
},
},
},
access: {
permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
&Permission::UserParam("userid"),
]),
},
)]
/// Update user's TFA entry description.
fn update_tfa_entry(
userid: Userid,
id: String,
description: Option<String>,
enable: Option<bool>,
password: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
tfa_update_auth(rpcenv, &userid, password, true)?;
let _lock = crate::config::tfa::write_lock()?;
let mut data = crate::config::tfa::read()?;
let mut entry = data
.users
.get_mut(&userid)
.and_then(|user| user.find_entry_mut(&id))
.ok_or_else(|| http_err!(NOT_FOUND, "no such entry: {}/{}", userid, id))?;
if let Some(description) = description {
entry.description = description;
}
if let Some(enable) = enable {
entry.enable = enable;
}
crate::config::tfa::write(&data)?;
Ok(())
}
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_TFA)
.match_all("userid", &USER_ROUTER);
const USER_ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_USER_TFA)
.post(&API_METHOD_ADD_TFA_ENTRY)
.match_all("id", &ITEM_ROUTER);
const ITEM_ROUTER: Router = Router::new()
.get(&API_METHOD_GET_TFA_ENTRY)
.put(&API_METHOD_UPDATE_TFA_ENTRY)
.delete(&API_METHOD_DELETE_TFA);

View File

@ -1,12 +1,18 @@
use anyhow::{bail, Error};
use serde_json::Value;
//! User Management
use anyhow::{bail, format_err, Error};
use serde::{Serialize, Deserialize};
use serde_json::{json, Value};
use std::collections::HashMap;
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::{Schema, StringSchema};
use proxmox::tools::fs::open_file_locked;
use crate::api2::types::*;
use crate::config::user;
use crate::config::token_shadow;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
use crate::config::cached_user_info::CachedUserInfo;
@ -16,44 +22,160 @@ pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
.max_length(64)
.schema();
#[api(
properties: {
userid: {
type: Userid,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
enable: {
optional: true,
schema: user::ENABLE_USER_SCHEMA,
},
expire: {
optional: true,
schema: user::EXPIRE_USER_SCHEMA,
},
firstname: {
optional: true,
schema: user::FIRST_NAME_SCHEMA,
},
lastname: {
schema: user::LAST_NAME_SCHEMA,
optional: true,
},
email: {
schema: user::EMAIL_SCHEMA,
optional: true,
},
tokens: {
type: Array,
optional: true,
description: "List of user's API tokens.",
items: {
type: user::ApiToken
},
},
}
)]
#[derive(Serialize,Deserialize)]
/// User properties with added list of ApiTokens
pub struct UserWithTokens {
pub userid: Userid,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub enable: Option<bool>,
#[serde(skip_serializing_if="Option::is_none")]
pub expire: Option<i64>,
#[serde(skip_serializing_if="Option::is_none")]
pub firstname: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub lastname: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub email: Option<String>,
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub tokens: Vec<user::ApiToken>,
}
impl UserWithTokens {
fn new(user: user::User) -> Self {
Self {
userid: user.userid,
comment: user.comment,
enable: user.enable,
expire: user.expire,
firstname: user.firstname,
lastname: user.lastname,
email: user.email,
tokens: Vec::new(),
}
}
}
#[api(
input: {
properties: {},
properties: {
include_tokens: {
type: bool,
description: "Include user's API tokens in returned list.",
optional: true,
default: false,
},
},
},
returns: {
description: "List users (with config digest).",
type: Array,
items: { type: user::User },
items: { type: UserWithTokens },
},
access: {
permission: &Permission::Anybody,
description: "Returns all or just the logged-in user, depending on privileges.",
description: "Returns all or just the logged-in user (/API token owner), depending on privileges.",
},
)]
/// List users
pub fn list_users(
_param: Value,
include_tokens: bool,
_info: &ApiMethod,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<user::User>, Error> {
) -> Result<Vec<UserWithTokens>, Error> {
let (config, digest) = user::config()?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv
.get_auth_id()
.ok_or_else(|| format_err!("no authid available"))?
.parse()?;
let userid = auth_id.user();
let user_info = CachedUserInfo::new()?;
let top_level_privs = user_info.lookup_privs(&userid, &["access", "users"]);
let top_level_privs = user_info.lookup_privs(&auth_id, &["access", "users"]);
let top_level_allowed = (top_level_privs & PRIV_SYS_AUDIT) != 0;
let filter_by_privs = |user: &user::User| {
top_level_allowed || user.userid == userid
top_level_allowed || user.userid == *userid
};
let list:Vec<user::User> = config.convert_to_typed_array("user")?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(list.into_iter().filter(filter_by_privs).collect())
let iter = list.into_iter().filter(filter_by_privs);
let list = if include_tokens {
let tokens: Vec<user::ApiToken> = config.convert_to_typed_array("token")?;
let mut user_to_tokens = tokens
.into_iter()
.fold(
HashMap::new(),
|mut map: HashMap<Userid, Vec<user::ApiToken>>, token: user::ApiToken| {
if token.tokenid.is_token() {
map
.entry(token.tokenid.user().clone())
.or_default()
.push(token);
}
map
});
iter
.map(|user: user::User| {
let mut user = UserWithTokens::new(user);
user.tokens = user_to_tokens.remove(&user.userid).unwrap_or_default();
user
})
.collect()
} else {
iter.map(UserWithTokens::new)
.collect()
};
Ok(list)
}
#[api(
@ -98,7 +220,11 @@ pub fn list_users(
},
)]
/// Create new user.
pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error> {
pub fn create_user(
password: Option<String>,
param: Value,
rpcenv: &mut dyn RpcEnvironment
) -> Result<(), Error> {
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
@ -106,17 +232,25 @@ pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error>
let (mut config, _digest) = user::config()?;
if let Some(_) = config.sections.get(user.userid.as_str()) {
if config.sections.get(user.userid.as_str()).is_some() {
bail!("user '{}' already exists.", user.userid);
}
let authenticator = crate::auth::lookup_authenticator(&user.userid.realm())?;
config.set_data(user.userid.as_str(), "user", &user)?;
let realm = user.userid.realm();
// Fails if realm does not exist!
let authenticator = crate::auth::lookup_authenticator(realm)?;
user::save_config(&config)?;
if let Some(password) = password {
let user_info = CachedUserInfo::new()?;
let current_auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
if realm == "pam" && !user_info.is_superuser(&current_auth_id) {
bail!("only superuser can edit pam credentials!");
}
authenticator.store_password(user.userid.name(), &password)?;
}
@ -131,10 +265,7 @@ pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error>
},
},
},
returns: {
description: "The user configuration (with config digest).",
type: user::User,
},
returns: { type: user::User },
access: {
permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
@ -150,6 +281,21 @@ pub fn read_user(userid: Userid, mut rpcenv: &mut dyn RpcEnvironment) -> Result<
Ok(user)
}
#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
#[allow(non_camel_case_types)]
pub enum DeletableProperty {
/// Delete the comment property.
comment,
/// Delete the firstname property.
firstname,
/// Delete the lastname property.
lastname,
/// Delete the email property.
email,
}
#[api(
protected: true,
input: {
@ -185,6 +331,14 @@ pub fn read_user(userid: Userid, mut rpcenv: &mut dyn RpcEnvironment) -> Result<
schema: user::EMAIL_SCHEMA,
optional: true,
},
delete: {
description: "List of properties to delete.",
type: Array,
optional: true,
items: {
type: DeletableProperty,
}
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
@ -199,6 +353,7 @@ pub fn read_user(userid: Userid, mut rpcenv: &mut dyn RpcEnvironment) -> Result<
},
)]
/// Update user configuration.
#[allow(clippy::too_many_arguments)]
pub fn update_user(
userid: Userid,
comment: Option<String>,
@ -208,7 +363,9 @@ pub fn update_user(
firstname: Option<String>,
lastname: Option<String>,
email: Option<String>,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
@ -222,6 +379,17 @@ pub fn update_user(
let mut data: user::User = config.lookup("user", userid.as_str())?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::comment => data.comment = None,
DeletableProperty::firstname => data.firstname = None,
DeletableProperty::lastname => data.lastname = None,
DeletableProperty::email => data.email = None,
}
}
}
if let Some(comment) = comment {
let comment = comment.trim().to_string();
if comment.is_empty() {
@ -240,6 +408,13 @@ pub fn update_user(
}
if let Some(password) = password {
let user_info = CachedUserInfo::new()?;
let current_auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let self_service = current_auth_id.user() == &userid;
let target_realm = userid.realm();
if !self_service && target_realm == "pam" && !user_info.is_superuser(&current_auth_id) {
bail!("only superuser can edit pam credentials!");
}
let authenticator = crate::auth::lookup_authenticator(userid.realm())?;
authenticator.store_password(userid.name(), &password)?;
}
@ -285,6 +460,7 @@ pub fn update_user(
/// Remove a user from the configuration file.
pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error> {
let _tfa_lock = crate::config::tfa::write_lock()?;
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
let (mut config, expected_digest) = user::config()?;
@ -301,15 +477,353 @@ pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error>
user::save_config(&config)?;
match crate::config::tfa::read().and_then(|mut cfg| {
let _: bool = cfg.remove_user(&userid);
crate::config::tfa::write(&cfg)
}) {
Ok(()) => (),
Err(err) => {
eprintln!(
"error updating TFA config after deleting user {:?}: {}",
userid, err
);
}
}
Ok(())
}
const ITEM_ROUTER: Router = Router::new()
#[api(
input: {
properties: {
userid: {
type: Userid,
},
tokenname: {
type: Tokenname,
},
},
},
returns: { type: user::ApiToken },
access: {
permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
&Permission::UserParam("userid"),
]),
},
)]
/// Read user's API token metadata
pub fn read_token(
userid: Userid,
tokenname: Tokenname,
_info: &ApiMethod,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<user::ApiToken, Error> {
let (config, digest) = user::config()?;
let tokenid = Authid::from((userid, Some(tokenname)));
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
config.lookup("token", &tokenid.to_string())
}
#[api(
protected: true,
input: {
properties: {
userid: {
type: Userid,
},
tokenname: {
type: Tokenname,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
enable: {
schema: user::ENABLE_USER_SCHEMA,
optional: true,
},
expire: {
schema: user::EXPIRE_USER_SCHEMA,
optional: true,
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
access: {
permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
&Permission::UserParam("userid"),
]),
},
returns: {
description: "API token identifier + generated secret.",
properties: {
value: {
type: String,
description: "The API token secret",
},
tokenid: {
type: String,
description: "The API token identifier",
},
},
},
)]
/// Generate a new API token with given metadata
pub fn generate_token(
userid: Userid,
tokenname: Tokenname,
comment: Option<String>,
enable: Option<bool>,
expire: Option<i64>,
digest: Option<String>,
) -> Result<Value, Error> {
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
let (mut config, expected_digest) = user::config()?;
if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let tokenid = Authid::from((userid.clone(), Some(tokenname.clone())));
let tokenid_string = tokenid.to_string();
if config.sections.get(&tokenid_string).is_some() {
bail!("token '{}' for user '{}' already exists.", tokenname.as_str(), userid);
}
let secret = format!("{:x}", proxmox::tools::uuid::Uuid::generate());
token_shadow::set_secret(&tokenid, &secret)?;
let token = user::ApiToken {
tokenid,
comment,
enable,
expire,
};
config.set_data(&tokenid_string, "token", &token)?;
user::save_config(&config)?;
Ok(json!({
"tokenid": tokenid_string,
"value": secret
}))
}
#[api(
protected: true,
input: {
properties: {
userid: {
type: Userid,
},
tokenname: {
type: Tokenname,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
enable: {
schema: user::ENABLE_USER_SCHEMA,
optional: true,
},
expire: {
schema: user::EXPIRE_USER_SCHEMA,
optional: true,
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
access: {
permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
&Permission::UserParam("userid"),
]),
},
)]
/// Update user's API token metadata
pub fn update_token(
userid: Userid,
tokenname: Tokenname,
comment: Option<String>,
enable: Option<bool>,
expire: Option<i64>,
digest: Option<String>,
) -> Result<(), Error> {
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
let (mut config, expected_digest) = user::config()?;
if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let tokenid = Authid::from((userid, Some(tokenname)));
let tokenid_string = tokenid.to_string();
let mut data: user::ApiToken = config.lookup("token", &tokenid_string)?;
if let Some(comment) = comment {
let comment = comment.trim().to_string();
if comment.is_empty() {
data.comment = None;
} else {
data.comment = Some(comment);
}
}
if let Some(enable) = enable {
data.enable = if enable { None } else { Some(false) };
}
if let Some(expire) = expire {
data.expire = if expire > 0 { Some(expire) } else { None };
}
config.set_data(&tokenid_string, "token", &data)?;
user::save_config(&config)?;
Ok(())
}
#[api(
protected: true,
input: {
properties: {
userid: {
type: Userid,
},
tokenname: {
type: Tokenname,
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
access: {
permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
&Permission::UserParam("userid"),
]),
},
)]
/// Delete a user's API token
pub fn delete_token(
userid: Userid,
tokenname: Tokenname,
digest: Option<String>,
) -> Result<(), Error> {
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
let (mut config, expected_digest) = user::config()?;
if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let tokenid = Authid::from((userid.clone(), Some(tokenname.clone())));
let tokenid_string = tokenid.to_string();
match config.sections.get(&tokenid_string) {
Some(_) => { config.sections.remove(&tokenid_string); },
None => bail!("token '{}' of user '{}' does not exist.", tokenname.as_str(), userid),
}
token_shadow::delete_secret(&tokenid)?;
user::save_config(&config)?;
Ok(())
}
#[api(
input: {
properties: {
userid: {
type: Userid,
},
},
},
returns: {
description: "List user's API tokens (with config digest).",
type: Array,
items: { type: user::ApiToken },
},
access: {
permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
&Permission::UserParam("userid"),
]),
},
)]
/// List user's API tokens
pub fn list_tokens(
userid: Userid,
_info: &ApiMethod,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<user::ApiToken>, Error> {
let (config, digest) = user::config()?;
let list:Vec<user::ApiToken> = config.convert_to_typed_array("token")?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
let filter_by_owner = |token: &user::ApiToken| {
if token.tokenid.is_token() {
token.tokenid.user() == &userid
} else {
false
}
};
Ok(list.into_iter().filter(filter_by_owner).collect())
}
const TOKEN_ITEM_ROUTER: Router = Router::new()
.get(&API_METHOD_READ_TOKEN)
.put(&API_METHOD_UPDATE_TOKEN)
.post(&API_METHOD_GENERATE_TOKEN)
.delete(&API_METHOD_DELETE_TOKEN);
const TOKEN_ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_TOKENS)
.match_all("tokenname", &TOKEN_ITEM_ROUTER);
const USER_SUBDIRS: SubdirMap = &[
("token", &TOKEN_ROUTER),
];
const USER_ROUTER: Router = Router::new()
.get(&API_METHOD_READ_USER)
.put(&API_METHOD_UPDATE_USER)
.delete(&API_METHOD_DELETE_USER);
.delete(&API_METHOD_DELETE_USER)
.subdirs(USER_SUBDIRS);
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_USERS)
.post(&API_METHOD_CREATE_USER)
.match_all("userid", &ITEM_ROUTER);
.match_all("userid", &USER_ROUTER);

View File

@ -1,3 +1,5 @@
//! Backup Server Administration
use proxmox::api::router::{Router, SubdirMap};
use proxmox::list_subdirs_api_method;

File diff suppressed because it is too large.

View File

@ -1,12 +1,17 @@
use anyhow::{format_err, Error};
//! Datastore Synchronization Job Management
use anyhow::{bail, format_err, Error};
use serde_json::Value;
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{api, ApiMethod, Permission, Router, RpcEnvironment};
use proxmox::api::router::SubdirMap;
use proxmox::{list_subdirs_api_method, sortable};
use crate::api2::types::*;
use crate::api2::pull::do_sync_job;
use crate::api2::config::sync::{check_sync_job_modify_access, check_sync_job_read_access};
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
use crate::server::UPID;
use crate::server::jobstate::{Job, JobState};
@ -27,6 +32,10 @@ use crate::tools::systemd::time::{
type: Array,
items: { type: sync::SyncJobStatus },
},
access: {
description: "Limited to sync jobs where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
permission: &Permission::Anybody,
},
)]
/// List all sync jobs
pub fn list_sync_jobs(
@ -35,6 +44,9 @@ pub fn list_sync_jobs(
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SyncJobStatus>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let (config, digest) = sync::config()?;
let mut list: Vec<SyncJobStatus> = config
@ -46,6 +58,10 @@ pub fn list_sync_jobs(
} else {
true
}
})
.filter(|job: &SyncJobStatus| {
let as_config: SyncJobConfig = job.into();
check_sync_job_read_access(&user_info, &auth_id, &as_config)
}).collect();
for job in &mut list {
@ -67,13 +83,13 @@ pub fn list_sync_jobs(
job.last_run_state = state;
job.last_run_endtime = endtime;
let last = job.last_run_endtime.unwrap_or_else(|| starttime);
let last = job.last_run_endtime.unwrap_or(starttime);
job.next_run = (|| -> Option<i64> {
let schedule = job.schedule.as_ref()?;
let event = parse_calendar_event(&schedule).ok()?;
// ignore errors
compute_next_event(&event, last, false).unwrap_or_else(|_| None)
compute_next_event(&event, last, false).unwrap_or(None)
})();
}
@ -89,23 +105,31 @@ pub fn list_sync_jobs(
schema: JOB_ID_SCHEMA,
}
}
}
},
access: {
description: "User needs Datastore.Backup on target datastore, and Remote.Read on source remote. Additionally, remove_vanished requires Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify",
permission: &Permission::Anybody,
},
)]
/// Runs the sync jobs manually.
fn run_sync_job(
pub fn run_sync_job(
id: String,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let (config, _digest) = sync::config()?;
let sync_job: SyncJobConfig = config.lookup("sync", &id)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job) {
bail!("permission check failed");
}
let job = Job::new("syncjob", &id)?;
let upid_str = do_sync_job(job, sync_job, &userid, None)?;
let upid_str = do_sync_job(job, sync_job, &auth_id, None)?;
Ok(upid_str)
}

View File

@ -1,12 +1,19 @@
//! Datastore Verify Job Management
use anyhow::{format_err, Error};
use proxmox::api::router::SubdirMap;
use proxmox::{list_subdirs_api_method, sortable};
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{api, ApiMethod, Permission, Router, RpcEnvironment};
use crate::api2::types::*;
use crate::server::do_verification_job;
use crate::server::jobstate::{Job, JobState};
use crate::config::acl::{
PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_VERIFY,
};
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::verify;
use crate::config::verify::{VerificationJobConfig, VerificationJobStatus};
use serde_json::Value;
@ -23,10 +30,14 @@ use crate::server::UPID;
},
},
returns: {
description: "List configured jobs and their status.",
description: "List configured jobs and their status (filtered by access)",
type: Array,
items: { type: verify::VerificationJobStatus },
},
access: {
permission: &Permission::Anybody,
description: "Requires Datastore.Audit or Datastore.Verify on datastore.",
},
)]
/// List all verification jobs
pub fn list_verification_jobs(
@ -34,6 +45,10 @@ pub fn list_verification_jobs(
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<VerificationJobStatus>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY;
let (config, digest) = verify::config()?;
@ -41,6 +56,11 @@ pub fn list_verification_jobs(
.convert_to_typed_array("verification")?
.into_iter()
.filter(|job: &VerificationJobStatus| {
let privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
if privs & required_privs == 0 {
return false;
}
if let Some(store) = &store {
&job.store == store
} else {
@ -68,13 +88,13 @@ pub fn list_verification_jobs(
job.last_run_state = state;
job.last_run_endtime = endtime;
let last = job.last_run_endtime.unwrap_or_else(|| starttime);
let last = job.last_run_endtime.unwrap_or(starttime);
job.next_run = (|| -> Option<i64> {
let schedule = job.schedule.as_ref()?;
let event = parse_calendar_event(&schedule).ok()?;
// ignore errors
compute_next_event(&event, last, false).unwrap_or_else(|_| None)
compute_next_event(&event, last, false).unwrap_or(None)
})();
}
@ -90,22 +110,29 @@ pub fn list_verification_jobs(
schema: JOB_ID_SCHEMA,
}
}
}
},
access: {
permission: &Permission::Anybody,
description: "Requires Datastore.Verify on job's datastore.",
},
)]
/// Runs a verification job manually.
fn run_verification_job(
pub fn run_verification_job(
id: String,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let (config, _digest) = verify::config()?;
let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
user_info.check_privs(&auth_id, &["datastore", &verification_job.store], PRIV_DATASTORE_VERIFY, true)?;
let job = Job::new("verificationjob", &id)?;
let upid_str = do_verification_job(job, verification_job, &userid, None)?;
let upid_str = do_verification_job(job, verification_job, &auth_id, None)?;
Ok(upid_str)
}

View File

@ -1,8 +1,10 @@
//! Backup protocol (HTTP2 upgrade)
use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::header::{HeaderValue, UPGRADE};
use hyper::http::request::Parts;
use hyper::{Body, Response, StatusCode};
use hyper::{Body, Response, Request, StatusCode};
use serde_json::{json, Value};
use proxmox::{sortable, identity, list_subdirs_api_method};
@ -59,12 +61,12 @@ async move {
let debug = param["debug"].as_bool().unwrap_or(false);
let benchmark = param["benchmark"].as_bool().unwrap_or(false);
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = tools::required_string_param(&param, "store")?.to_owned();
let user_info = CachedUserInfo::new()?;
user_info.check_privs(&userid, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
user_info.check_privs(&auth_id, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
let datastore = DataStore::lookup_datastore(&store)?;
@ -105,12 +107,15 @@ async move {
};
// lock backup group to only allow one backup per group at a time
let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &userid)?;
let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &auth_id)?;
// permission check
if owner != userid && worker_type != "benchmark" {
let correct_owner = owner == auth_id
|| (owner.is_token()
&& Authid::from(owner.user().clone()) == auth_id);
if !correct_owner && worker_type != "benchmark" {
// only the owner is allowed to create additional snapshots
bail!("backup owner check failed ({} != {})", userid, owner);
bail!("backup owner check failed ({} != {})", auth_id, owner);
}
let last_backup = {
@ -135,7 +140,7 @@ async move {
}
};
let backup_dir = BackupDir::with_group(backup_group.clone(), backup_time)?;
let backup_dir = BackupDir::with_group(backup_group, backup_time)?;
let _last_guard = if let Some(last) = &last_backup {
if backup_dir.backup_time() <= last.backup_dir.backup_time() {
@ -153,9 +158,9 @@ async move {
if !is_new { bail!("backup directory already exists."); }
WorkerTask::spawn(worker_type, Some(worker_id), userid.clone(), true, move |worker| {
WorkerTask::spawn(worker_type, Some(worker_id), auth_id.clone(), true, move |worker| {
let mut env = BackupEnvironment::new(
env_type, userid, worker.clone(), datastore, backup_dir);
env_type, auth_id, worker.clone(), datastore, backup_dir);
env.debug = debug;
env.last_backup = last_backup;
@ -168,8 +173,7 @@ async move {
let env2 = env.clone();
let mut req_fut = req_body
.on_upgrade()
let mut req_fut = hyper::upgrade::on(Request::from_parts(parts, req_body))
.map_err(Error::from)
.and_then(move |conn| {
env2.debug("protocol upgrade done");
@ -308,6 +312,10 @@ pub const BACKUP_API_SUBDIRS: SubdirMap = &[
"previous", &Router::new()
.download(&API_METHOD_DOWNLOAD_PREVIOUS)
),
(
"previous_backup_time", &Router::new()
.get(&API_METHOD_GET_PREVIOUS_BACKUP_TIME)
),
(
"speedtest", &Router::new()
.upload(&API_METHOD_UPLOAD_SPEEDTEST)
@ -691,6 +699,28 @@ fn finish_backup (
Ok(Value::Null)
}
#[sortable]
pub const API_METHOD_GET_PREVIOUS_BACKUP_TIME: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&get_previous_backup_time),
&ObjectSchema::new(
"Get previous backup time.",
&[],
)
);
fn get_previous_backup_time(
_param: Value,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let env: &BackupEnvironment = rpcenv.as_ref();
let backup_time = env.last_backup.as_ref().map(|info| info.backup_dir.backup_time());
Ok(json!(backup_time))
}
#[sortable]
pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
&ApiHandler::AsyncHttp(&download_previous),

View File

@ -1,6 +1,6 @@
use anyhow::{bail, format_err, Error};
use std::sync::{Arc, Mutex};
use std::collections::{HashMap, HashSet};
use std::collections::HashMap;
use nix::dir::Dir;
use ::serde::{Serialize};
@ -10,7 +10,7 @@ use proxmox::tools::digest_to_hex;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
use crate::api2::types::Userid;
use crate::api2::types::Authid;
use crate::backup::*;
use crate::server::WorkerTask;
use crate::server::formatter::*;
@ -104,7 +104,7 @@ impl SharedBackupState {
pub struct BackupEnvironment {
env_type: RpcEnvironmentType,
result_attributes: Value,
user: Userid,
auth_id: Authid,
pub debug: bool,
pub formatter: &'static OutputFormatter,
pub worker: Arc<WorkerTask>,
@ -117,7 +117,7 @@ pub struct BackupEnvironment {
impl BackupEnvironment {
pub fn new(
env_type: RpcEnvironmentType,
user: Userid,
auth_id: Authid,
worker: Arc<WorkerTask>,
datastore: Arc<DataStore>,
backup_dir: BackupDir,
@ -137,7 +137,7 @@ impl BackupEnvironment {
Self {
result_attributes: json!({}),
env_type,
user,
auth_id,
worker,
datastore,
debug: false,
@ -185,7 +185,9 @@ impl BackupEnvironment {
if size > data.chunk_size {
bail!("fixed writer '{}' - got large chunk ({} > {}", data.name, size, data.chunk_size);
} else if size < data.chunk_size {
}
if size < data.chunk_size {
data.small_chunk_count += 1;
if data.small_chunk_count > 1 {
bail!("fixed writer '{}' - detected multiple end chunks (chunk size too small)");
@ -465,7 +467,7 @@ impl BackupEnvironment {
state.ensure_unfinished()?;
// test if all writer are correctly closed
if state.dynamic_writers.len() != 0 || state.fixed_writers.len() != 0 {
if !state.dynamic_writers.is_empty() || !state.fixed_writers.is_empty() {
bail!("found open index writer - unable to finish backup");
}
@ -518,21 +520,18 @@ impl BackupEnvironment {
WorkerTask::new_thread(
"verify",
Some(worker_id),
self.user.clone(),
self.auth_id.clone(),
false,
move |worker| {
worker.log("Automatically verifying newly added snapshot");
let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
if !verify_backup_dir_with_lock(
datastore,
&verify_worker,
&backup_dir,
verified_chunks,
corrupt_chunks,
worker.clone(),
worker.upid().clone(),
None,
snap_lock,
)? {
bail!("verification failed - please check the log for details");
@ -598,12 +597,12 @@ impl RpcEnvironment for BackupEnvironment {
self.env_type
}
fn set_user(&mut self, _user: Option<String>) {
panic!("unable to change user");
fn set_auth_id(&mut self, _auth_id: Option<String>) {
panic!("unable to change auth_id");
}
fn get_user(&self) -> Option<String> {
Some(self.user.to_string())
fn get_auth_id(&self) -> Option<String> {
Some(self.auth_id.to_string())
}
}


@ -1,16 +1,28 @@
//! Backup Server Configuration
use proxmox::api::router::{Router, SubdirMap};
use proxmox::list_subdirs_api_method;
pub mod access;
pub mod datastore;
pub mod remote;
pub mod sync;
pub mod verify;
pub mod drive;
pub mod changer;
pub mod media_pool;
pub mod tape_encryption_keys;
const SUBDIRS: SubdirMap = &[
("access", &access::ROUTER),
("changer", &changer::ROUTER),
("datastore", &datastore::ROUTER),
("drive", &drive::ROUTER),
("media-pool", &media_pool::ROUTER),
("remote", &remote::ROUTER),
("sync", &sync::ROUTER),
("verify", &verify::ROUTER)
("tape-encryption-keys", &tape_encryption_keys::ROUTER),
("verify", &verify::ROUTER),
];
pub const ROUTER: Router = Router::new()


@ -0,0 +1,10 @@
use proxmox::api::{Router, SubdirMap};
use proxmox::list_subdirs_api_method;
pub mod tfa;
const SUBDIRS: SubdirMap = &[("tfa", &tfa::ROUTER)];
pub const ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SUBDIRS))
.subdirs(SUBDIRS);


@ -0,0 +1,84 @@
//! For now this only has the TFA subdir, which is in this file.
//! If we add more, it should be moved into a sub module.
use anyhow::Error;
use crate::api2::types::PROXMOX_CONFIG_DIGEST_SCHEMA;
use proxmox::api::{api, Permission, Router, RpcEnvironment, SubdirMap};
use proxmox::list_subdirs_api_method;
use crate::config::tfa::{self, WebauthnConfig, WebauthnConfigUpdater};
pub const ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SUBDIRS))
.subdirs(SUBDIRS);
const SUBDIRS: SubdirMap = &[("webauthn", &WEBAUTHN_ROUTER)];
const WEBAUTHN_ROUTER: Router = Router::new()
.get(&API_METHOD_GET_WEBAUTHN_CONFIG)
.put(&API_METHOD_UPDATE_WEBAUTHN_CONFIG);
#[api(
protected: true,
input: {
properties: {},
},
returns: {
type: WebauthnConfig,
optional: true,
},
access: {
permission: &Permission::Anybody,
},
)]
/// Get the TFA configuration.
pub fn get_webauthn_config(
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Option<WebauthnConfig>, Error> {
let (config, digest) = match tfa::webauthn_config()? {
Some(c) => c,
None => return Ok(None),
};
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(Some(config))
}
#[api(
protected: true,
input: {
properties: {
webauthn: {
flatten: true,
type: WebauthnConfigUpdater,
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
)]
/// Update the TFA configuration.
pub fn update_webauthn_config(
webauthn: WebauthnConfigUpdater,
digest: Option<String>,
) -> Result<(), Error> {
let _lock = tfa::write_lock();
let mut tfa = tfa::read()?;
if let Some(wa) = &mut tfa.webauthn {
if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &wa.digest()?)?;
}
webauthn.apply_to(wa);
} else {
tfa.webauthn = Some(webauthn.build()?);
}
tfa::write(&tfa)?;
Ok(())
}

src/api2/config/changer.rs (new file, 295 lines)

@ -0,0 +1,295 @@
use anyhow::{bail, Error};
use ::serde::{Deserialize, Serialize};
use serde_json::Value;
use proxmox::api::{
api,
Router,
RpcEnvironment,
schema::parse_property_string,
};
use crate::{
config,
api2::types::{
PROXMOX_CONFIG_DIGEST_SCHEMA,
CHANGER_NAME_SCHEMA,
LINUX_DRIVE_PATH_SCHEMA,
SLOT_ARRAY_SCHEMA,
EXPORT_SLOT_LIST_SCHEMA,
ScsiTapeChanger,
LinuxTapeDrive,
},
tape::{
linux_tape_changer_list,
check_drive_path,
},
};
#[api(
protected: true,
input: {
properties: {
name: {
schema: CHANGER_NAME_SCHEMA,
},
path: {
schema: LINUX_DRIVE_PATH_SCHEMA,
},
"export-slots": {
schema: EXPORT_SLOT_LIST_SCHEMA,
optional: true,
},
},
},
)]
/// Create a new changer device
pub fn create_changer(
name: String,
path: String,
export_slots: Option<String>,
) -> Result<(), Error> {
let _lock = config::drive::lock()?;
let (mut config, _digest) = config::drive::config()?;
let linux_changers = linux_tape_changer_list();
check_drive_path(&linux_changers, &path)?;
let existing: Vec<ScsiTapeChanger> = config.convert_to_typed_array("changer")?;
for changer in existing {
if changer.name == name {
bail!("Entry '{}' already exists", name);
}
if changer.path == path {
bail!("Path '{}' already in use by '{}'", path, changer.name);
}
}
let item = ScsiTapeChanger {
name: name.clone(),
path,
export_slots,
};
config.set_data(&name, "changer", &item)?;
config::drive::save_config(&config)?;
Ok(())
}
#[api(
input: {
properties: {
name: {
schema: CHANGER_NAME_SCHEMA,
},
},
},
returns: {
type: ScsiTapeChanger,
},
)]
/// Get tape changer configuration
pub fn get_config(
name: String,
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<ScsiTapeChanger, Error> {
let (config, digest) = config::drive::config()?;
let data: ScsiTapeChanger = config.lookup("changer", &name)?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(data)
}
#[api(
input: {
properties: {},
},
returns: {
description: "The list of configured changers (with config digest).",
type: Array,
items: {
type: ScsiTapeChanger,
},
},
)]
/// List changers
pub fn list_changers(
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ScsiTapeChanger>, Error> {
let (config, digest) = config::drive::config()?;
let list: Vec<ScsiTapeChanger> = config.convert_to_typed_array("changer")?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(list)
}
#[api()]
#[derive(Serialize, Deserialize)]
#[allow(non_camel_case_types)]
#[serde(rename_all = "kebab-case")]
/// Deletable property name
pub enum DeletableProperty {
/// Delete export-slots.
export_slots,
}
#[api(
protected: true,
input: {
properties: {
name: {
schema: CHANGER_NAME_SCHEMA,
},
path: {
schema: LINUX_DRIVE_PATH_SCHEMA,
optional: true,
},
"export-slots": {
schema: EXPORT_SLOT_LIST_SCHEMA,
optional: true,
},
delete: {
description: "List of properties to delete.",
type: Array,
optional: true,
items: {
type: DeletableProperty,
},
},
digest: {
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
optional: true,
},
},
},
)]
/// Update a tape changer configuration
pub fn update_changer(
name: String,
path: Option<String>,
export_slots: Option<String>,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
_param: Value,
) -> Result<(), Error> {
let _lock = config::drive::lock()?;
let (mut config, expected_digest) = config::drive::config()?;
if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let mut data: ScsiTapeChanger = config.lookup("changer", &name)?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::export_slots => {
data.export_slots = None;
}
}
}
}
if let Some(path) = path {
let changers = linux_tape_changer_list();
check_drive_path(&changers, &path)?;
data.path = path;
}
if let Some(export_slots) = export_slots {
let slots: Value = parse_property_string(
&export_slots, &SLOT_ARRAY_SCHEMA
)?;
let mut slots: Vec<String> = slots
.as_array()
.unwrap()
.iter()
.map(|v| v.to_string())
.collect();
slots.sort();
if slots.is_empty() {
data.export_slots = None;
} else {
let slots = slots.join(",");
data.export_slots = Some(slots);
}
}
config.set_data(&name, "changer", &data)?;
config::drive::save_config(&config)?;
Ok(())
}
#[api(
protected: true,
input: {
properties: {
name: {
schema: CHANGER_NAME_SCHEMA,
},
},
},
)]
/// Delete a tape changer configuration
pub fn delete_changer(name: String, _param: Value) -> Result<(), Error> {
let _lock = config::drive::lock()?;
let (mut config, _digest) = config::drive::config()?;
match config.sections.get(&name) {
Some((section_type, _)) => {
if section_type != "changer" {
bail!("Entry '{}' exists, but is not a changer device", name);
}
config.sections.remove(&name);
},
None => bail!("Delete changer '{}' failed - no such entry", name),
}
let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
for drive in drive_list {
if let Some(changer) = drive.changer {
if changer == name {
bail!("Delete changer '{}' failed - used by drive '{}'", name, drive.name);
}
}
}
config::drive::save_config(&config)?;
Ok(())
}
const ITEM_ROUTER: Router = Router::new()
.get(&API_METHOD_GET_CONFIG)
.put(&API_METHOD_UPDATE_CHANGER)
.delete(&API_METHOD_DELETE_CHANGER);
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_CHANGERS)
.post(&API_METHOD_CREATE_CHANGER)
.match_all("name", &ITEM_ROUTER);


@ -5,6 +5,7 @@ use serde_json::Value;
use ::serde::{Deserialize, Serialize};
use proxmox::api::{api, Router, RpcEnvironment, Permission};
use proxmox::api::schema::parse_property_string;
use proxmox::tools::fs::open_file_locked;
use crate::api2::types::*;
@ -35,14 +36,14 @@ pub fn list_datastores(
let (config, digest) = datastore::config()?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
let list:Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
let filter_by_privs = |store: &DataStoreConfig| {
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store.name]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store.name]);
(user_privs & PRIV_DATASTORE_AUDIT) != 0
};
@ -68,6 +69,14 @@ pub fn list_datastores(
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
"notify-user": {
optional: true,
type: Userid,
},
"notify": {
optional: true,
schema: DATASTORE_NOTIFY_STRING_SCHEMA,
},
"gc-schedule": {
optional: true,
schema: GC_SCHEDULE_SCHEMA,
@ -111,11 +120,11 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {
let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
let datastore: datastore::DataStoreConfig = serde_json::from_value(param.clone())?;
let datastore: datastore::DataStoreConfig = serde_json::from_value(param)?;
let (mut config, _digest) = datastore::config()?;
if let Some(_) = config.sections.get(&datastore.name) {
if config.sections.get(&datastore.name).is_some() {
bail!("datastore '{}' already exists.", datastore.name);
}
@ -142,10 +151,7 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {
},
},
},
returns: {
description: "The datastore configuration (with config digest).",
type: datastore::DataStoreConfig,
},
returns: { type: datastore::DataStoreConfig },
access: {
permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_AUDIT, false),
},
@ -187,6 +193,12 @@ pub enum DeletableProperty {
keep_monthly,
/// Delete the keep-yearly property
keep_yearly,
/// Delete the verify-new property
verify_new,
/// Delete the notify-user property
notify_user,
/// Delete the notify property
notify,
}
#[api(
@ -200,6 +212,14 @@ pub enum DeletableProperty {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
"notify-user": {
optional: true,
type: Userid,
},
"notify": {
optional: true,
schema: DATASTORE_NOTIFY_STRING_SCHEMA,
},
"gc-schedule": {
optional: true,
schema: GC_SCHEDULE_SCHEMA,
@ -232,6 +252,12 @@ pub enum DeletableProperty {
optional: true,
schema: PRUNE_SCHEMA_KEEP_YEARLY,
},
"verify-new": {
description: "If enabled, all new backups will be verified right after completion.",
type: bool,
optional: true,
default: false,
},
delete: {
description: "List of properties to delete.",
type: Array,
@ -251,6 +277,7 @@ pub enum DeletableProperty {
},
)]
/// Update datastore config.
#[allow(clippy::too_many_arguments)]
pub fn update_datastore(
name: String,
comment: Option<String>,
@ -262,6 +289,9 @@ pub fn update_datastore(
keep_weekly: Option<u64>,
keep_monthly: Option<u64>,
keep_yearly: Option<u64>,
verify_new: Option<bool>,
notify: Option<String>,
notify_user: Option<Userid>,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
) -> Result<(), Error> {
@ -290,6 +320,9 @@ pub fn update_datastore(
DeletableProperty::keep_weekly => { data.keep_weekly = None; },
DeletableProperty::keep_monthly => { data.keep_monthly = None; },
DeletableProperty::keep_yearly => { data.keep_yearly = None; },
DeletableProperty::verify_new => { data.verify_new = None; },
DeletableProperty::notify => { data.notify = None; },
DeletableProperty::notify_user => { data.notify_user = None; },
}
}
}
@ -322,6 +355,19 @@ pub fn update_datastore(
if keep_monthly.is_some() { data.keep_monthly = keep_monthly; }
if keep_yearly.is_some() { data.keep_yearly = keep_yearly; }
if let Some(notify_str) = notify {
let value = parse_property_string(&notify_str, &DatastoreNotify::API_SCHEMA)?;
let notify: DatastoreNotify = serde_json::from_value(value)?;
if let DatastoreNotify { gc: None, verify: None, sync: None } = notify {
data.notify = None;
} else {
data.notify = Some(notify_str);
}
}
if verify_new.is_some() { data.verify_new = verify_new; }
if notify_user.is_some() { data.notify_user = notify_user; }
config.set_data(&name, "datastore", &data)?;
datastore::save_config(&config)?;

src/api2/config/drive.rs (new file, 281 lines)

@ -0,0 +1,281 @@
use anyhow::{bail, Error};
use ::serde::{Deserialize, Serialize};
use serde_json::Value;
use proxmox::api::{api, Router, RpcEnvironment};
use crate::{
config,
api2::types::{
PROXMOX_CONFIG_DIGEST_SCHEMA,
DRIVE_NAME_SCHEMA,
CHANGER_NAME_SCHEMA,
CHANGER_DRIVENUM_SCHEMA,
LINUX_DRIVE_PATH_SCHEMA,
LinuxTapeDrive,
ScsiTapeChanger,
},
tape::{
linux_tape_device_list,
check_drive_path,
},
};
#[api(
protected: true,
input: {
properties: {
name: {
schema: DRIVE_NAME_SCHEMA,
},
path: {
schema: LINUX_DRIVE_PATH_SCHEMA,
},
changer: {
schema: CHANGER_NAME_SCHEMA,
optional: true,
},
"changer-drivenum": {
schema: CHANGER_DRIVENUM_SCHEMA,
optional: true,
},
},
},
)]
/// Create a new drive
pub fn create_drive(param: Value) -> Result<(), Error> {
let _lock = config::drive::lock()?;
let (mut config, _digest) = config::drive::config()?;
let item: LinuxTapeDrive = serde_json::from_value(param)?;
let linux_drives = linux_tape_device_list();
check_drive_path(&linux_drives, &item.path)?;
let existing: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
for drive in existing {
if drive.name == item.name {
bail!("Entry '{}' already exists", item.name);
}
if drive.path == item.path {
bail!("Path '{}' already used in drive '{}'", item.path, drive.name);
}
}
config.set_data(&item.name, "linux", &item)?;
config::drive::save_config(&config)?;
Ok(())
}
#[api(
input: {
properties: {
name: {
schema: DRIVE_NAME_SCHEMA,
},
},
},
returns: {
type: LinuxTapeDrive,
},
)]
/// Get drive configuration
pub fn get_config(
name: String,
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<LinuxTapeDrive, Error> {
let (config, digest) = config::drive::config()?;
let data: LinuxTapeDrive = config.lookup("linux", &name)?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(data)
}
#[api(
input: {
properties: {},
},
returns: {
description: "The list of configured drives (with config digest).",
type: Array,
items: {
type: LinuxTapeDrive,
},
},
)]
/// List drives
pub fn list_drives(
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<LinuxTapeDrive>, Error> {
let (config, digest) = config::drive::config()?;
let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(drive_list)
}
#[api()]
#[derive(Serialize, Deserialize)]
#[allow(non_camel_case_types)]
#[serde(rename_all = "kebab-case")]
/// Deletable property name
pub enum DeletableProperty {
/// Delete the changer property.
changer,
/// Delete the changer-drivenum property.
changer_drivenum,
}
#[api(
protected: true,
input: {
properties: {
name: {
schema: DRIVE_NAME_SCHEMA,
},
path: {
schema: LINUX_DRIVE_PATH_SCHEMA,
optional: true,
},
changer: {
schema: CHANGER_NAME_SCHEMA,
optional: true,
},
"changer-drivenum": {
schema: CHANGER_DRIVENUM_SCHEMA,
optional: true,
},
delete: {
description: "List of properties to delete.",
type: Array,
optional: true,
items: {
type: DeletableProperty,
}
},
digest: {
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
optional: true,
},
},
},
)]
/// Update a drive configuration
pub fn update_drive(
name: String,
path: Option<String>,
changer: Option<String>,
changer_drivenum: Option<u64>,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
_param: Value,
) -> Result<(), Error> {
let _lock = config::drive::lock()?;
let (mut config, expected_digest) = config::drive::config()?;
if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let mut data: LinuxTapeDrive = config.lookup("linux", &name)?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::changer => {
data.changer = None;
data.changer_drivenum = None;
},
DeletableProperty::changer_drivenum => { data.changer_drivenum = None; },
}
}
}
if let Some(path) = path {
let linux_drives = linux_tape_device_list();
check_drive_path(&linux_drives, &path)?;
data.path = path;
}
if let Some(changer) = changer {
let _: ScsiTapeChanger = config.lookup("changer", &changer)?;
data.changer = Some(changer);
}
if let Some(changer_drivenum) = changer_drivenum {
if changer_drivenum == 0 {
data.changer_drivenum = None;
} else {
if data.changer.is_none() {
bail!("Option 'changer-drivenum' requires option 'changer'.");
}
data.changer_drivenum = Some(changer_drivenum);
}
}
config.set_data(&name, "linux", &data)?;
config::drive::save_config(&config)?;
Ok(())
}
#[api(
protected: true,
input: {
properties: {
name: {
schema: DRIVE_NAME_SCHEMA,
},
},
},
)]
/// Delete a drive configuration
pub fn delete_drive(name: String, _param: Value) -> Result<(), Error> {
let _lock = config::drive::lock()?;
let (mut config, _digest) = config::drive::config()?;
match config.sections.get(&name) {
Some((section_type, _)) => {
if section_type != "linux" {
bail!("Entry '{}' exists, but is not a linux tape drive", name);
}
config.sections.remove(&name);
},
None => bail!("Delete drive '{}' failed - no such drive", name),
}
config::drive::save_config(&config)?;
Ok(())
}
const ITEM_ROUTER: Router = Router::new()
.get(&API_METHOD_GET_CONFIG)
.put(&API_METHOD_UPDATE_DRIVE)
.delete(&API_METHOD_DELETE_DRIVE);
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_DRIVES)
.post(&API_METHOD_CREATE_DRIVE)
.match_all("name", &ITEM_ROUTER);


@ -0,0 +1,251 @@
use anyhow::{bail, Error};
use ::serde::{Deserialize, Serialize};
use proxmox::{
api::{
api,
Router,
RpcEnvironment,
},
};
use crate::{
api2::types::{
MEDIA_POOL_NAME_SCHEMA,
MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
MEDIA_RETENTION_POLICY_SCHEMA,
TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
MediaPoolConfig,
},
config,
};
#[api(
protected: true,
input: {
properties: {
name: {
schema: MEDIA_POOL_NAME_SCHEMA,
},
allocation: {
schema: MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
optional: true,
},
retention: {
schema: MEDIA_RETENTION_POLICY_SCHEMA,
optional: true,
},
template: {
schema: MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
optional: true,
},
encrypt: {
schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
optional: true,
},
},
},
)]
/// Create a new media pool
pub fn create_pool(
name: String,
allocation: Option<String>,
retention: Option<String>,
template: Option<String>,
encrypt: Option<String>,
) -> Result<(), Error> {
let _lock = config::media_pool::lock()?;
let (mut config, _digest) = config::media_pool::config()?;
if config.sections.get(&name).is_some() {
bail!("Media pool '{}' already exists", name);
}
let item = MediaPoolConfig {
name: name.clone(),
allocation,
retention,
template,
encrypt,
};
config.set_data(&name, "pool", &item)?;
config::media_pool::save_config(&config)?;
Ok(())
}
#[api(
returns: {
description: "The list of configured media pools (with config digest).",
type: Array,
items: {
type: MediaPoolConfig,
},
},
)]
/// List media pools
pub fn list_pools(
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<MediaPoolConfig>, Error> {
let (config, digest) = config::media_pool::config()?;
let list = config.convert_to_typed_array("pool")?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(list)
}
#[api(
input: {
properties: {
name: {
schema: MEDIA_POOL_NAME_SCHEMA,
},
},
},
returns: {
type: MediaPoolConfig,
},
)]
/// Get media pool configuration
pub fn get_config(name: String) -> Result<MediaPoolConfig, Error> {
let (config, _digest) = config::media_pool::config()?;
let data: MediaPoolConfig = config.lookup("pool", &name)?;
Ok(data)
}
#[api()]
#[derive(Serialize, Deserialize)]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
/// Delete media set allocation policy.
allocation,
/// Delete pool retention policy
retention,
/// Delete media set naming template
template,
/// Delete encryption fingerprint
encrypt,
}
#[api(
protected: true,
input: {
properties: {
name: {
schema: MEDIA_POOL_NAME_SCHEMA,
},
allocation: {
schema: MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
optional: true,
},
retention: {
schema: MEDIA_RETENTION_POLICY_SCHEMA,
optional: true,
},
template: {
schema: MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
optional: true,
},
encrypt: {
schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
optional: true,
},
delete: {
description: "List of properties to delete.",
type: Array,
optional: true,
items: {
type: DeletableProperty,
}
},
},
},
)]
/// Update media pool settings
pub fn update_pool(
name: String,
allocation: Option<String>,
retention: Option<String>,
template: Option<String>,
encrypt: Option<String>,
delete: Option<Vec<DeletableProperty>>,
) -> Result<(), Error> {
let _lock = config::media_pool::lock()?;
let (mut config, _digest) = config::media_pool::config()?;
let mut data: MediaPoolConfig = config.lookup("pool", &name)?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::allocation => { data.allocation = None; },
DeletableProperty::retention => { data.retention = None; },
DeletableProperty::template => { data.template = None; },
DeletableProperty::encrypt => { data.encrypt = None; },
}
}
}
if allocation.is_some() { data.allocation = allocation; }
if retention.is_some() { data.retention = retention; }
if template.is_some() { data.template = template; }
if encrypt.is_some() { data.encrypt = encrypt; }
config.set_data(&name, "pool", &data)?;
config::media_pool::save_config(&config)?;
Ok(())
}
#[api(
protected: true,
input: {
properties: {
name: {
schema: MEDIA_POOL_NAME_SCHEMA,
},
},
},
)]
/// Delete a media pool configuration
pub fn delete_pool(name: String) -> Result<(), Error> {
let _lock = config::media_pool::lock()?;
let (mut config, _digest) = config::media_pool::config()?;
match config.sections.get(&name) {
Some(_) => { config.sections.remove(&name); },
None => bail!("delete pool '{}' failed - no such pool", name),
}
config::media_pool::save_config(&config)?;
Ok(())
}
const ITEM_ROUTER: Router = Router::new()
.get(&API_METHOD_GET_CONFIG)
.put(&API_METHOD_UPDATE_POOL)
.delete(&API_METHOD_DELETE_POOL);
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_POOLS)
.post(&API_METHOD_CREATE_POOL)
.match_all("name", &ITEM_ROUTER);


@ -1,11 +1,14 @@
use anyhow::{bail, Error};
use anyhow::{bail, format_err, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::http_err;
use proxmox::tools::fs::open_file_locked;
use crate::api2::types::*;
use crate::client::{HttpClient, HttpClientOptions};
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::remote;
use crate::config::acl::{PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY};
@ -16,13 +19,11 @@ use crate::config::acl::{PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY};
returns: {
description: "The list of configured remotes (with config digest).",
type: Array,
items: {
type: remote::Remote,
description: "Remote configuration (without password).",
},
items: { type: remote::Remote },
},
access: {
permission: &Permission::Privilege(&["remote"], PRIV_REMOTE_AUDIT, false),
description: "List configured remotes filtered by Remote.Audit privileges",
permission: &Permission::Anybody,
},
)]
/// List all remotes
@ -31,16 +32,25 @@ pub fn list_remotes(
_info: &ApiMethod,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<remote::Remote>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let (config, digest) = remote::config()?;
let mut list: Vec<remote::Remote> = config.convert_to_typed_array("remote")?;
// don't return password in api
for remote in &mut list {
remote.password = "".to_string();
}
let list = list
.into_iter()
.filter(|remote| {
let privs = user_info.lookup_privs(&auth_id, &["remote", &remote.name]);
privs & PRIV_REMOTE_AUDIT != 0
})
.collect();
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(list)
}
@ -65,8 +75,8 @@ pub fn list_remotes(
optional: true,
default: 8007,
},
userid: {
type: Userid,
"auth-id": {
type: Authid,
},
password: {
schema: remote::REMOTE_PASSWORD_SCHEMA,
@ -86,13 +96,13 @@ pub fn create_remote(password: String, param: Value) -> Result<(), Error> {
let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
let mut data = param.clone();
let mut data = param;
data["password"] = Value::from(base64::encode(password.as_bytes()));
let remote: remote::Remote = serde_json::from_value(data)?;
let (mut config, _digest) = remote::config()?;
if let Some(_) = config.sections.get(&remote.name) {
if config.sections.get(&remote.name).is_some() {
bail!("remote '{}' already exists.", remote.name);
}
@ -111,10 +121,7 @@ pub fn create_remote(password: String, param: Value) -> Result<(), Error> {
},
},
},
returns: {
description: "The remote configuration (with config digest).",
type: remote::Remote,
},
returns: { type: remote::Remote },
access: {
permission: &Permission::Privilege(&["remote", "{name}"], PRIV_REMOTE_AUDIT, false),
}
@ -165,9 +172,9 @@ pub enum DeletableProperty {
type: u16,
optional: true,
},
userid: {
"auth-id": {
optional: true,
type: Userid,
type: Authid,
},
password: {
optional: true,
@ -196,12 +203,13 @@ pub enum DeletableProperty {
},
)]
/// Update remote configuration.
#[allow(clippy::too_many_arguments)]
pub fn update_remote(
name: String,
comment: Option<String>,
host: Option<String>,
port: Option<u16>,
userid: Option<Userid>,
auth_id: Option<Authid>,
password: Option<String>,
fingerprint: Option<String>,
delete: Option<Vec<DeletableProperty>>,
@ -239,7 +247,7 @@ pub fn update_remote(
}
if let Some(host) = host { data.host = host; }
if port.is_some() { data.port = port; }
if let Some(userid) = userid { data.userid = userid; }
if let Some(auth_id) = auth_id { data.auth_id = auth_id; }
if let Some(password) = password { data.password = password; }
if let Some(fingerprint) = fingerprint { data.fingerprint = Some(fingerprint); }
@ -271,6 +279,17 @@ pub fn update_remote(
/// Remove a remote from the configuration file.
pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error> {
use crate::config::sync::{self, SyncJobConfig};
let (sync_jobs, _) = sync::config()?;
let job_list: Vec<SyncJobConfig> = sync_jobs.convert_to_typed_array("sync")?;
for job in job_list {
if job.remote == name {
bail!("remote '{}' is used by sync job '{}' (datastore '{}')", name, job.id, job.store);
}
}
let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
let (mut config, expected_digest) = remote::config()?;
@ -290,10 +309,78 @@ pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error>
Ok(())
}
/// Helper to get client for remote.cfg entry
pub async fn remote_client(remote: remote::Remote) -> Result<HttpClient, Error> {
let options = HttpClientOptions::new_non_interactive(remote.password.clone(), remote.fingerprint.clone());
let client = HttpClient::new(
&remote.host,
remote.port.unwrap_or(8007),
&remote.auth_id,
options)?;
let _auth_info = client.login() // make sure we can auth
.await
.map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;
Ok(client)
}
#[api(
input: {
properties: {
name: {
schema: REMOTE_ID_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["remote", "{name}"], PRIV_REMOTE_AUDIT, false),
},
returns: {
description: "List the accessible datastores.",
type: Array,
items: { type: DataStoreListItem },
},
)]
/// List datastores of a remote.cfg entry
pub async fn scan_remote_datastores(name: String) -> Result<Vec<DataStoreListItem>, Error> {
let (remote_config, _digest) = remote::config()?;
let remote: remote::Remote = remote_config.lookup("remote", &name)?;
let map_remote_err = |api_err| {
http_err!(INTERNAL_SERVER_ERROR,
"failed to scan remote '{}' - {}",
&name,
api_err)
};
let client = remote_client(remote)
.await
.map_err(map_remote_err)?;
let api_res = client
.get("api2/json/admin/datastore", None)
.await
.map_err(map_remote_err)?;
let parse_res = match api_res.get("data") {
Some(data) => serde_json::from_value::<Vec<DataStoreListItem>>(data.to_owned()),
None => bail!("remote {} did not return any datastore list data", &name),
};
match parse_res {
Ok(parsed) => Ok(parsed),
Err(_) => bail!("Failed to parse remote scan api result."),
}
}
const SCAN_ROUTER: Router = Router::new()
.get(&API_METHOD_SCAN_REMOTE_DATASTORES);
const ITEM_ROUTER: Router = Router::new()
.get(&API_METHOD_READ_REMOTE)
.put(&API_METHOD_UPDATE_REMOTE)
.delete(&API_METHOD_DELETE_REMOTE);
.delete(&API_METHOD_DELETE_REMOTE)
.subdirs(&[("scan", &SCAN_ROUTER)]);
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_REMOTES)

Some files were not shown because too many files have changed in this diff.