Compare commits


244 Commits

Author SHA1 Message Date
c002d48b0c bump version to 1.1.3-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-22 20:15:18 +02:00
15998ed12a file-restore: support encrypted VM backups
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-22 17:55:30 +02:00
9d8ab62769 client-tools: add crypto_parameters_keep_fd
same functionality as crypto_parameters, except it keeps the file
descriptor passed as "keyfd" open (and seeks to the beginning after
reading), if one is given.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-22 17:55:30 +02:00
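
A minimal sketch of the keep-open behavior, with an illustrative name (the real crypto_parameters_keep_fd signature differs): read the key material from the already-open file, then rewind so later readers see the same content.

    use std::fs::File;
    use std::io::{Read, Result, Seek, SeekFrom};

    // Read the encryption key from an already-open "keyfd" file without
    // consuming it: seek back to the beginning after reading.
    fn read_key_keep_open(keyfd: &mut File) -> Result<Vec<u8>> {
        let mut key = Vec::new();
        keyfd.read_to_end(&mut key)?;
        keyfd.seek(SeekFrom::Start(0))?; // leave the fd usable for the next reader
        Ok(key)
    }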
3526a76ef3 file-restore: don't force PBS_FINGERPRINT env var
It is valid to not set it, in case the server has a valid certificate.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-22 17:55:30 +02:00
b9e0fcbdcd tape: implement report_density 2021-04-22 13:54:31 +02:00
a7188b3a75 tape: fix FORMAT for LTO-4 drives
FORMAT requires LTO-5 or newer, so we do a rewind/erase if FORMAT fails.
2021-04-22 11:44:49 +02:00
b6c06dce9d http proxy: implement read_connect_response()
Limit memory usage in case we get strange data from proxy.
2021-04-22 10:06:14 +02:00
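
A sketch of such a bounded header read; the 8 KiB cap is an assumption, not the actual limit used:

    use std::io::{Error, ErrorKind, Result};
    use tokio::io::{AsyncRead, AsyncReadExt};

    const MAX_CONNECT_RESPONSE: usize = 8 * 1024; // assumed cap

    // Read the proxy's CONNECT response headers byte by byte, refusing to
    // buffer more than MAX_CONNECT_RESPONSE bytes from a misbehaving peer.
    async fn read_connect_response<R: AsyncRead + Unpin>(stream: &mut R) -> Result<Vec<u8>> {
        let mut data = Vec::new();
        let mut byte = [0u8; 1];
        while !data.ends_with(b"\r\n\r\n") {
            if stream.read(&mut byte).await? == 0 {
                return Err(Error::new(ErrorKind::UnexpectedEof, "eof before end of headers"));
            }
            data.push(byte[0]);
            if data.len() > MAX_CONNECT_RESPONSE {
                return Err(Error::new(ErrorKind::Other, "proxy response too large"));
            }
        }
        Ok(data)
    }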
4adf47b606 file-restore: allow extracting a full pxar archive
If the path within the archive is empty, assume "/" to extract all
of it.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-21 17:20:54 +02:00
4d0dc29951 file-restore: Add 'v' (Virtual) ArchiveEntry type
For the actual partitions and block devices in a backup, which the
user sees as folders in the file-restore UI.

Encoded as "None", to avoid cluttering DirEntryAttribute, where it
wouldn't make any sense.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-21 17:19:40 +02:00
1011fb552b file-restore: print warnings on stderr
as we print JSON on stdout to be parsed

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-21 17:18:12 +02:00
2fd2d29281 file-restore: don't list non-pxar/-img *idx archives
These can't be entered or restored anyway, and cause issues with catalog
files for example.

Also a clippy fix.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-21 17:18:06 +02:00
9104152a83 HttpsConnector: add proxy support 2021-04-21 15:29:17 +02:00
02a58862dd HttpsConnector: code cleanup 2021-04-21 15:29:17 +02:00
26153589ba new http client implementation SimpleHttp (avoid static HTTP_CLIENT)
This one will have proxy support.
2021-04-21 15:29:17 +02:00
17b3e4451f MaybeTlsStream: implement poll_write_vectored()
This is just a performance optimization.
2021-04-21 15:29:17 +02:00
a2072cc346 http: rename EitherStream to MaybeTlsStream
Also rename the enum variants, and add an additional variant called Proxied.

The enum is now more specialized, but we only use it for the HTTP client anyway.
2021-04-21 15:29:17 +02:00
fea23d0323 fix #3393: tools/xattr: allow xattr 'security.NTACL'
in some configurations, samba stores NTFS ACLs in this xattr[0], so
we should back it up (if we can)

although the 'security' namespace is special (e.g. in use by
selinux, etc.), this value is normally only used by samba and we
should be able to back it up.

to restore it, the user needs at least 'CAP_SYS_ADMIN' rights, otherwise
it cannot be set

0: https://www.samba.org/samba/docs/current/man-html/vfs_acl_xattr.8.html

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-21 14:49:46 +02:00
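
A sketch of what the name filter could look like; the actual predicate in tools/xattr may differ:

    use std::ffi::CStr;

    // xattrs in the "security." namespace are normally excluded from
    // backups; "security.NTACL" (samba's NTFS-ACL store) is the one
    // explicit exception.
    fn is_security_xattr_to_store(name: &CStr) -> bool {
        name.to_bytes() == b"security.NTACL"
    }

    fn is_valid_xattr_name(name: &CStr) -> bool {
        let bytes = name.to_bytes();
        bytes.starts_with(b"user.")
            || bytes.starts_with(b"trusted.")
            || is_security_xattr_to_store(name)
    }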
71e83e1b1f tape/changer/sg_pt_changer: read whole descriptor size for each entry
Some changers seem to append more data than we expect, but correctly
annotate that size in the subheader.

For each descriptor entry, read as much as the size given in the
subheader (or until the end of the reader); otherwise our position in
the reader is wrong for the next entry, and we will parse
incorrect data.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-21 14:07:41 +02:00
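
A sketch of the alignment-preserving read with an illustrative signature: consume exactly the size announced in the subheader (or whatever is left), not just the part our own struct knows about.

    use std::io::{Read, Result};

    // Read one descriptor entry of the size announced in the subheader;
    // a short read is tolerated for the last entry of the data.
    fn read_descriptor_entry<R: Read>(reader: &mut R, desc_len: usize) -> Result<Vec<u8>> {
        let mut data = vec![0u8; desc_len];
        let mut filled = 0;
        while filled < desc_len {
            let n = reader.read(&mut data[filled..])?;
            if n == 0 {
                break; // end of reader
            }
            filled += n;
        }
        data.truncate(filled);
        Ok(data)
    }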
28570d19a6 tape restore: avoid multiple stat calls for same chunk 2021-04-16 13:17:17 +02:00
1369bcdbba tape restore: verify if all chunks exist 2021-04-16 12:20:44 +02:00
5e4d81e957 tape restore: simplify log (list datastores on single line) 2021-04-16 11:35:05 +02:00
0f4721f305 tape restore: fix datastore locking 2021-04-16 09:09:05 +02:00
5547f90ba7 bump version to 1.1.2-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-15 13:26:59 +02:00
2e1b63fb25 backup verify: do not check every loop iteration for abort/shutdown
only check every 1024th iteration; since 1024 is a power of two, we
can just mask the 10 least-significant bits and check if the result
is zero, which is cheaper than a modulo.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-15 13:21:36 +02:00
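
The shape of that check, sketched with placeholder helpers standing in for the worker-task API:

    // 1024 is a power of two, so `i % 1024 == 0` reduces to masking the
    // 10 least-significant bits and testing for zero.
    fn verify_all(chunks: &[[u8; 32]]) -> Result<(), String> {
        for (i, digest) in chunks.iter().enumerate() {
            if i & 1023 == 0 {
                check_abort()?; // only every 1024th iteration
            }
            verify_chunk(digest)?;
        }
        Ok(())
    }

    fn check_abort() -> Result<(), String> { Ok(()) } // placeholder
    fn verify_chunk(_digest: &[u8; 32]) -> Result<(), String> { Ok(()) } // placeholder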
7b2d3a5fe9 backup verify: unify check if chunk can be skipped
This also re-checks the corrupt chunk list before actually loading a
chunk.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-15 13:21:07 +02:00
0216f56241 config: tfa: drop now unused schema::Updatable
was used in a macro expansion, now handled otherwise

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-15 12:35:11 +02:00
80acdd71fa tape: do not try to backup unfinished backups 2021-04-15 10:24:14 +02:00
26af61debc backup verify: re-check if we can skip a chunk in the actual verify loop
Fixes a non-negligible performance regression from commit
7f394c807b

While we skip known-verified chunks in the stat-and-inode-sort loop,
those are only the ones from previous indexes. If there's a repeated
chunk in one index, it would get re-verified more often than required.

So, add the check again explicitly to the read+verify loop.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-15 10:00:06 +02:00
e7f94010d3 cargo toml: update proxmox version
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-15 09:56:45 +02:00
a4e871f52c api2/access/user: remove password for @pbs users on removal
so that their password entry is not left in the shadow.json

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-15 08:33:20 +02:00
bc3072ef7a bump version to 1.1.1-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-14 14:50:41 +02:00
f4bb2510b9 docs: tape: replace changer overview screenshot
Replace previous screenshot with one that shows a more realistic amount
of drives.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2021-04-14 14:41:00 +02:00
2ab12cd0cb verify: add comment for inode sorting
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-14 14:39:24 +02:00
c894909e17 verify: partially rust fmt
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-14 14:39:24 +02:00
7f394c807b backup/verify: improve speed by sorting chunks by inode
before reading the chunks from disk in the order of the index file,
stat them first and sort them by inode number.

this can have a very positive impact on read speed on spinning disks,
even with the additional stat'ing of the chunks.

memory footprint should be tolerable, for 1_000_000 chunks
we need about ~16MiB of memory (Vec of 64bit position + 64bit inode)
(assuming 4MiB Chunks, such an index would reference 4TiB of data)

two small benchmarks (single spinner, ext4) here showed an improvement from
~430 seconds to ~330 seconds for a 32GiB fixed index
and from
~160 seconds to ~120 seconds for a 10GiB dynamic index

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-14 14:39:24 +02:00
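
A sketch of the stat-and-sort pass under simplified types (a plain path per chunk instead of the real datastore lookup):

    use std::os::unix::fs::MetadataExt;
    use std::path::PathBuf;

    // Pair every index position with its chunk file's inode and sort by
    // inode; reading in this order is roughly sequential on spinning disks.
    // Two u64 per chunk: about 16 MiB for 1_000_000 chunks.
    fn reading_order(chunk_paths: &[PathBuf]) -> std::io::Result<Vec<(u64, u64)>> {
        let mut list = Vec::with_capacity(chunk_paths.len());
        for (pos, path) in chunk_paths.iter().enumerate() {
            let inode = std::fs::metadata(path)?.ino();
            list.push((inode, pos as u64));
        }
        list.sort_unstable_by_key(|&(inode, _)| inode);
        Ok(list)
    }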
7afb98a912 docs: fix tape.cfg format description 2021-04-14 14:30:20 +02:00
3847008e1b docs: pmt - remove old linux driver options 2021-04-14 14:26:39 +02:00
f6ed2eff47 docs: move tape config/syntax to appendix 2021-04-14 14:25:10 +02:00
23eed6755a ui: tape/ChangerStatus: hide drive-slots without assigned drives
if a user has not configured a drive for a specific drive slot of the
changer, simply hide that slot

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-14 14:24:07 +02:00
384a2b4d4f docs: faq: fix encryption link
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-14 14:18:05 +02:00
910177a388 docs: pve-integration: mention gui integration
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-14 14:18:05 +02:00
54311a38c6 docs: update proxmox-tape status output 2021-04-14 14:03:45 +02:00
983edbc54a ui: tape/ChangerStatus: add Format button to drivegrid
so that the user can also format an already inserted tape directly

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-14 13:53:16 +02:00
10439718e2 docs: use definition list for tape Terminology
To avoid those strange line breaks in Field list labels.
2021-04-14 13:25:29 +02:00
ebddccef5f docs: gui: add short tape backup section
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-14 12:47:05 +02:00
9cfe0ff350 docs: include tape backup in TOC
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-14 12:47:05 +02:00
295bae14b7 docs: tape: add a bit of text to changers for better image flow
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-14 12:47:05 +02:00
53939bb438 docs: tape screenshots
Add tape screenshots throughout the tape docs with some references to the GUI

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2021-04-14 12:47:05 +02:00
329c2cbe66 docs: reorder maintenance & network after client usage/tools
The idea is that people first need to make actual backups before they
need to do maintenance tasks.
Network is already set up when installing with the ISO or on top of
Debian, so that is not a priority either.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-14 12:47:05 +02:00
55334cf45a docs: use "Backup Storage" heading
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-14 12:47:05 +02:00
a2e30cd51d ui: tape: rename erase to format
erase is a different action, so correctly call it 'format'

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-14 12:25:53 +02:00
4bf2ab1109 cleanup: remove debug println 2021-04-14 10:39:29 +02:00
1dd1c9eb5c api2/tape/restore: restore_chunk_archive: only ignore tape related errors
when we get an error from the tape, we possibly want to ignore it,
i.e. when the file was incomplete, but we still want to error
out if the error came from, e.g., the datastore, so we have to move
the error checking code to the 'next_chunk' call

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-14 10:38:26 +02:00
6dde015f8c bump version to 1.1.0-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-13 14:42:50 +02:00
5f3b2330c8 docs: typo fixes
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-13 14:42:50 +02:00
4ba5d3b3dd docs: mention client repository and rework client installation
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-13 14:42:50 +02:00
e7e3d7360a docs: get help: actually link customer portal
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-13 14:42:50 +02:00
fd8b00aed7 docs: client: add "Backup" to "Repository Locations" heading to avoid confusion
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-13 14:42:50 +02:00
2631e57d20 fix regression tests 2021-04-13 14:02:37 +02:00
90461b76fb TapeRead: add skip_data() 2021-04-13 13:32:45 +02:00
629103d60c d/rules: update binary path for proxmox-backup-file-restore
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-13 12:18:29 +02:00
dc232b8946 d/control: update to track thiserror build dependency
was in Cargo.toml since commit
64ad7b706148b289d4ca67b87630cdc60f464cc

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-13 12:16:42 +02:00
6fed819dc2 proxmox-backup-file-restore: fix various postinst bugs/bad-behavior
1. The exit was never called, as `test ... || echo "foo" || exit 1`
   can never reach the exit, since echo will not fail

2. The echo was meant to be redirected to stderr (FD #2) but it was
   actually redirected to a file named '2'

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-13 12:06:10 +02:00
646fc7f086 ui: tape/DriveStatus: show that no tape is loaded in grid title
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-13 11:48:45 +02:00
ecc5602c88 ui: tape/DriveStatus: remove buffer-mode
since this will almost always be set to '1', it has no real
value to show to the user

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-13 11:47:37 +02:00
6a15cce540 tape: SgTapeReader::read_block - disable reading beyond EOF 2021-04-13 11:46:30 +02:00
f281b8d3a9 tape: cleanup MediaCatalog on tape reuse 2021-04-13 11:46:30 +02:00
4465b76812 d/control: bump versioned dependency for proxmox-widget-toolkit
to ensure we have the moved-out file browser widget available

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-13 09:14:11 +02:00
61df02cda1 ui: file browser: adapt to abi changes
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-13 09:06:30 +02:00
3b0321365b use FileBrowser from proxmox-widget-toolkit
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-13 08:44:48 +02:00
0dfce17a43 api/datastore: allow pxar file download of entire archive
Treat filepaths like "/root.pxar.didx" without a trailing slash as
wanting to download the entire archive content instead of erroring. The
zip-creation code already works fine for this scenario.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-13 08:26:41 +02:00
a38dccf0e8 ui: index: drop enableTapeUI
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-12 15:55:58 +02:00
f05085ab22 ui: nav tree: code cleanup
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-12 15:55:58 +02:00
bc42bb3c6e ui: nav tree: make datastore-add button less special cased
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-12 15:55:58 +02:00
94b7f56e65 ui: nav tree: code cleanup and unification between datastore and tapes
datastore and tape entries are very similar, but differ in enough
points that a full unification is not really helpful; still, making
similar key parts the same is nice when reading the code

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-12 15:55:58 +02:00
0417e9af1b tools/async_io: do not error on Accept for StaticIncoming
in proxmox-backup-proxy, we log and discard any errors on 'accept',
so that we can continue to serve requests

in proxmox-backup-api, we just have the StaticIncoming that accepts,
which will forward any errors from the underlying TcpListener

this patch also logs and discards the errors, like in the proxy.
Otherwise it could happen that if the api-daemon has more files open
than the proxy, it will shut itself down because of a
'too many open files' error if there are many open connections

(the service should also restart on exit I think, but this is
a separate issue)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-12 15:43:13 +02:00
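
A sketch of such a tolerant accept loop; handle_connection is a placeholder for the real service entry point:

    use tokio::net::{TcpListener, TcpStream};

    // Log and skip accept() errors (e.g. 'too many open files') instead of
    // propagating them, so the daemon keeps serving existing connections.
    async fn accept_loop(listener: TcpListener) {
        loop {
            match listener.accept().await {
                Ok((stream, _peer)) => {
                    tokio::spawn(handle_connection(stream));
                }
                Err(err) => eprintln!("error accepting connection: {}", err),
            }
        }
    }

    async fn handle_connection(_stream: TcpStream) {} // placeholder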
ce5327badc tape: fix regression tests 2021-04-12 14:08:05 +02:00
368f4c5416 fix gathering io stats for zpools
if a datastore or root is not mounted directly on the pool dir
(e.g. the installer creates 2 sub-datasets ROOT/pbs-1), the info in
/proc/self/mountinfo returns not the pool, but the path to the
dataset, which has no iostats of its own in /proc/spl/kstat/zfs/;
only the pool itself has them

so instead of not gathering data at all, gather the info from the
underlying pool instead. if one has multiple datastores on the same
pool, those rrd stats will now be the same for all those datastores
(instead of empty), similar to 'normal' directories

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-12 13:35:38 +02:00
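
A sketch of the fallback: a dataset name like "rpool/ROOT/pbs-1" has no kstat entry of its own, so fall back to its first path component, the pool.

    // Map a dataset (e.g. "rpool/ROOT/pbs-1") to the pool ("rpool") whose
    // iostats under /proc/spl/kstat/zfs/ we can actually read.
    fn zfs_pool_from_dataset(dataset: &str) -> &str {
        dataset.split('/').next().unwrap_or(dataset)
    }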
318b310638 tape: improve EOT error handling 2021-04-12 13:27:34 +02:00
164ad7b706 sgutils2: use thiserror to derive Error 2021-04-12 13:27:34 +02:00
a5322f3c50 buildsys: fix restore package names
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-12 12:52:19 +02:00
fa29d7eb49 ui: improve code-readability s/tapestore/tapeStore/
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-12 12:34:26 +02:00
a21f9852fd enable tape backup by default
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-12 12:31:56 +02:00
79e2473c63 d/control: file restore: only recommend proxmox-backup-restore-image
should not be a hard dependency, as one can use the file-restore tool
for pxar archives without it too

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-12 07:56:31 +02:00
375b1f6150 d/control: rename proxmox-file-restore to proxmox-backup-file-restore
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-12 07:56:31 +02:00
109ccd300f cleanup: move tape SCSI code to src/tape/drive/lto/sg_tape/ 2021-04-09 11:34:45 +02:00
c287b28725 buildsys: dinstall: only install server/client debs
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-09 10:09:15 +02:00
c560cfddca tape: read_drive_status - ignore media changed sense info 2021-04-09 09:46:19 +02:00
44f6bb019c sgutils2: implement scsi_request_sense() 2021-04-09 09:46:19 +02:00
d6d42702d1 bump version to 1.0.14-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-08 18:59:07 +02:00
3fafd0e2a1 d/postinst: check for old tape.cfg
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-08 18:59:07 +02:00
59648eac3d avoid extra separate upload to pbs repo
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-08 18:45:30 +02:00
5b6b5bba68 upload file restore package only to PVE for now
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-08 18:45:30 +02:00
b13089cdf5 file-restore: add 'extract' command for VM file restore
The data on the restore daemon is either encoded into a pxar archive, to
provide the most accurate data for local restore, or encoded directly
into a zip file (or written out unprocessed for files), depending on the
'pxar' argument to the 'extract' API call.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-08 14:43:41 +02:00
1f03196c0b tools/zip: add zip_directory helper
Encodes an entire local directory into an AsyncWrite recursively.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-08 14:32:03 +02:00
edf0940649 pxar/extract: add sequential variant of extract_sub_dir
extract_sub_dir_seq, together with seq_files_extractor, allow extracting
files from a pxar Decoder, along with the existing option for an
Accessor. To facilitate code re-use, some helper functions are extracted
in the process.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-08 14:24:23 +02:00
801ec1dbf9 file-restore(-daemon): implement list API
Allows listing files and directories on a block device snapshot.
Hierarchy displayed is:

/archive.img.fidx/bucket/component/<path>
e.g.
/drive-scsi0.img.fidx/part/2/etc/passwd
(corresponding to /etc/passwd on the second partition of drive-scsi0)

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-08 14:24:14 +02:00
34ac5cd889 debian/client: add postinst hook to rebuild file-restore initramfs
This will be triggered on updating proxmox-file-restore (via configure,
necessary since the daemon binary might change) and
proxmox-backup-restore-image (via 'activate-noawait', necessary since
the base image might change).

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-08 14:20:05 +02:00
58421ec112 file-restore: add basic VM/block device support
Includes methods to start, stop and list QEMU file-restore VMs, as well
as CLI commands to do the latter two (start is implicit).

The implementation is abstracted behind the concept of a
"BlockRestoreDriver", so other methods can be implemented later (e.g.
mapping directly to loop devices on the host, using other hypervisors
than QEMU, etc...).

Starting VMs is currently unused but will be needed for further changes.

The design for the QEMU driver uses a locked 'map' file
(/run/proxmox-backup/$UID/restore-vm-map.json) containing a JSON
encoding of currently running VMs. VMs are addressed by a 'name', which
is a systemd-unit encoded combination of repository and snapshot string,
thus uniquely identifying it.

Note that currently you need to run proxmox-file-restore as root to use
this method of restoring.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-08 14:11:02 +02:00
a5bdc987dc add tools/cpio encoding module
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-08 14:10:45 +02:00
d32a8652bd file-restore-daemon: add disk module
Includes functionality for scanning and referring to partitions on
attached disks (i.e. snapshot images).

Fairly modular structure, so adding ZFS/LVM/etc... support in the future
should be easy.

The path is encoded as "/disk/bucket/component/path/to/file", e.g.
"/drive-scsi0/part/0/etc/passwd". See the comments for further
explanations on the design.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-08 14:03:54 +02:00
a26ebad5f9 file-restore-daemon: add watchdog module
Add a watchdog that will automatically shut down the VM after 10
minutes, if no API call is received.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-08 13:58:29 +02:00
dd9cef56fc file-restore-daemon: add binary with virtio-vsock API server
Implements the base of a small daemon to run within a file-restore VM.

The binary spawns an API server on a virtio-vsock socket, listening for
connections from the host. This happens mostly manually via the standard
Unix socket API, since tokio/hyper do not have support for vsock built
in. Once we have the accept'ed file descriptor, we can create a
UnixStream and use our tower service implementation for that.

The binary is deliberately not installed in the usual $PATH location,
since it shouldn't be executed on the host by a user anyway.

For now, only the API calls 'status' and 'stop' are implemented, to
demonstrate and test proxmox::api functionality.

Authorization is provided via a custom ApiAuth only checking a header
value against a static /ticket file.

Since the REST server implementation uses the log!() macro, we can
redirect its output to stdout by registering env_logger as the logging
target. env_logger is already in our dependency tree via zstd/bindgen.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-08 13:57:57 +02:00
26858dba84 server/rest: add ApiAuth trait to make user auth generic
This allows switching the base user identification/authentication method
in the rest server. Will initially be used for single file restore VMs,
where authentication is based on a ticket file, not the PBS user
backend (PAM/local).

To avoid putting generic types into the RestServer type for this, we
merge the two calls "extract_auth_data" and "check_auth" into a single
one, which can use whatever type it wants internally.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-08 13:57:57 +02:00
9fe3358ce6 file-restore: allow specifying output-format
Makes CLI use more comfortable by not just printing JSON to the
terminal.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-08 13:57:57 +02:00
76425d84b3 file-restore: add binary and basic commands
For now it only supports 'list' and 'extract' commands for 'pxar.didx'
files. This should be the foundation for a general file-restore
interface that is shared with block-level snapshots.

This is packaged as a separate .deb file, since for block-level restore
it will need to depend on pve-qemu-kvm, which we want to separate
from proxmox-backup-client.

[original code for proxmox-file-restore.rs]
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>

[code cleanups/clippy, use helpers::list_dir_content/ArchiveEntry, no
/block subdir for .fidx files, separate binary and package]
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-08 13:57:57 +02:00
42355b11a4 update d/control
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-08 13:57:57 +02:00
511e4f6987 ui: tape/DriveStatus: improve status grid a bit
by using format_boolean for compression/write protect, and
combining file/block position into one (saves a line)

and adding the missing alert-flags

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-08 13:56:46 +02:00
3f0e344bc1 ui: tape/ChangerStatus: hide selector for single drives in barcode-label
it is rather pointless to let the user select something where there
is no choice. We have to keep the window though, since the user may
want to choose a pool

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-08 13:56:46 +02:00
a316178768 ui: tape/ChangerStatus: shortcut Inventory for single drives
like 'load-media'

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-08 13:56:46 +02:00
dff8ea92aa ui: tape/ChangerStatus: shortcut 'load-media' for single drive
if a changer only has a single drive, there is no point in showing
a window with a DriveSelector; just do what the user wanted.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-08 13:56:46 +02:00
88e1f7997c ui: tape/ChangerStatus: rework EraseWindow
to make it more like a 'dangerous' remove window
also works in the singleDrive logic to hide/show the driveselector

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-08 13:56:46 +02:00
4c3eabeaf3 ui: tape/ChangerStatus: save assigned drives
so that we can shortcut later if we only have one

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-08 13:56:46 +02:00
4c7be5f59d ui: tape/ChangerStatus: add missing property
it will actually not fail, but we declare it nonetheless to indicate
that it exists

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-08 13:56:46 +02:00
6d4fbbc3ea ui: dashboard/DataStoreStatistics: add 'Available' column
for some storages, it is valuable information, e.g. if one has datastores
on separate datasets of the same zpool

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-08 13:27:22 +02:00
1a23132262 tape: add TapeDensity::Unknown 2021-04-08 12:23:54 +02:00
48c4193f7c ui: update tape DriveStatus for new driver 2021-04-08 12:04:14 +02:00
8204d9b095 tape: avoid unnecessary SCSI request in Drop 2021-04-08 11:26:08 +02:00
fad95a334a tape: clear encryption key after backup (for security reasons) 2021-04-08 10:37:49 +02:00
973e985d73 cleanup: remove unused linux tape driver code 2021-04-08 10:15:52 +02:00
e5a13382b2 ui: tape/TapeRestore: use correct value check for store & mapping
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-08 10:02:31 +02:00
81c0b90447 ui: tape/TapeRestore: fix restoring without mapping
we have to delete the 'mapping' variable in any case since it's not
a valid api parameter

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-08 10:02:17 +02:00
ee9fa953de docs: Mention our new user space tape driver, adopt device path names 2021-04-08 09:50:09 +02:00
09acf0a70d do not depend on mt-st and mtx
We now use our own driver, so those tools are no longer required.
2021-04-08 09:48:47 +02:00
15d1435789 tape: add vendor, product and revision to LtoDriveAndMediaStatus 2021-04-08 08:34:46 +02:00
80ea23e1b9 tape: pmt - implement options command 2021-04-08 08:34:45 +02:00
5d6379f8db tape: implement locate_file without LOCATE(10) 2021-04-08 08:34:45 +02:00
566b946f9b tape: pmt - re-implement lock/unlock command 2021-04-08 07:28:30 +02:00
7f7459677d tape: pmt - re-implement fsr/bsr 2021-04-08 07:28:30 +02:00
0892a512bc tape: correctly set/display drive option 2021-04-08 07:28:30 +02:00
b717871d2a sgutils2: add scsi_mode_sense helper 2021-04-08 07:28:30 +02:00
7b11a8098d tape: make sure there is a filemark at the end of the tape 2021-04-08 07:28:30 +02:00
8b2c6f5dbc tape: make fsf/bsf driver specific
Because the virtual tape driver behaves differently from LTO drives.
2021-04-08 07:28:30 +02:00
d26985a600 tape: fix LEOM handling 2021-04-08 07:28:30 +02:00
e29f456efc tape: implement format/erase 2021-04-08 07:28:30 +02:00
a79082a0dd tape: implement LTO userspace driver 2021-04-08 07:28:30 +02:00
1336ae8249 tape: introduce trait BlockWrite 2021-04-08 07:28:30 +02:00
0db5712493 tape: introduce trait BlockRead 2021-04-08 07:28:30 +02:00
c47609fedb server: rest: collapse nested if for less indentation
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-07 17:57:46 +02:00
b84e8aaee9 server: rest: switch from fastest to default deflate compression level
I made some comparisons with bombardier[0]; the ones listed here are
30s of looped requests with two concurrent clients:

[ static download of ext-all.js ]:
  lvl                              avg /   stdev  / max
 none        1.98 MiB  100 %    5.17ms /  1.30ms / 32.38ms
 fastest   813.14 KiB   42 %   20.53ms /  2.85ms / 58.71ms
 default   626.35 KiB   30 %   39.70ms /  3.98ms / 85.47ms

[ deterministic (pre-defined data), but real API call ]:
  lvl                              avg /   stdev  / max
 none      129.09 KiB  100 %    2.70ms / 471.58us / 26.93ms
 fastest    42.12 KiB   33 %    3.47ms / 606.46us / 32.42ms
 default    34.82 KiB   27 %    4.28ms / 737.99us / 33.75ms

The reduction is clearly better with default, but it is also slower,
though only when testing over an unconstrained network. For real-world
scenarios where compression actually matters, e.g., when using a
spotty train connection, we will be faster again with better
compression.

A GPRS-limited connection (Firefox developer console) requires the
following load times (until the DOMContentLoaded event triggers):
  lvl        t      x faster
 none      9m 18.6s   x 1.0
 fastest   3m 20.0s   x 2.8
 default   2m 30.0s   x 3.7

So in the worst case, using slightly more CPU time on the server has a
tremendous effect on the client load time.

Using a more realistic example and limiting to "Good 2G" gives:

 none      1m  1.8s   x 1.0
 fastest      22.6s   x 2.7
 default      16.6s   x 3.7

16s is somewhat OK, >1m just isn't...

So, use the default level to ensure we get bearable load times on
clients; and if we want to improve transmission size AND speed, then
we could always use an in-memory cache, as only a few MiB would be
required for the compressible static files we serve.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-07 17:57:42 +02:00
d84e4073af tools/zip: compress zips with deflate
by using our DeflateEncoder

for this to work, we have to create a wrapper reader that generates the
crc32 checksum while reading.

also we need to put the target writer in an Option, so that we can take
it out of self and move it into the DeflateEncoder while writing
compressed data

we can then drop the internal buffer, since that is managed by the
deflate encoder now

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-07 12:34:31 +02:00
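
A sketch of that wrapper reader, using the crc32fast crate already in the dependency list:

    use crc32fast::Hasher;
    use std::io::{Read, Result};

    // Computes the CRC-32 of the uncompressed bytes as they are pulled
    // through, so the ZIP entry's checksum is ready once reading finishes.
    struct Crc32Reader<R> {
        inner: R,
        hasher: Hasher,
    }

    impl<R: Read> Crc32Reader<R> {
        fn new(inner: R) -> Self {
            Self { inner, hasher: Hasher::new() }
        }

        fn checksum(self) -> u32 {
            self.hasher.finalize()
        }
    }

    impl<R: Read> Read for Crc32Reader<R> {
        fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
            let n = self.inner.read(buf)?;
            self.hasher.update(&buf[..n]);
            Ok(n)
        }
    }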
e8656da70d tools/zip: run rustfmt
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-07 12:34:31 +02:00
59477ad252 server/rest: compress static files
compress them on the fly
and refactor the size limit for chunking files

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-07 12:34:31 +02:00
2f29f1c765 server/rest: compress api calls
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-07 12:34:31 +02:00
4d84e869bf server/rest: add helper to extract compression headers
for now we only extract 'deflate'

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-07 12:34:31 +02:00
79d841014e tools/compression: add DeflateEncoder and helpers
implements a deflate encoder that can compress anything implementing
AsyncRead + Unpin into a file, via the helper 'compress'

if the inner type is a Stream, it implements Stream itself; this way
streaming data can be streamed compressed

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-07 12:34:31 +02:00
ea62611d8e tools: add compression module
only contains a basic enum for the different compression methods

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-04-07 12:34:31 +02:00
f3c867a034 docs: fix MathJax inclusion in build
Otherwise it does not get picked up on release builds...

Also, the mathjax path option affects HTML, not EPUB, so move it to the
correct section in conf.py

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-05 11:59:45 +02:00
aae5db916e docs: fix heading
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-05 11:59:18 +02:00
a417c8a93e bump version to 1.0.13-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-02 15:32:27 +02:00
79e58a903e pxar: handle missing GROUP_OBJ ACL entries
Previously we did not store GROUP_OBJ ACL entries for
directories; this means that these were lost, which may
potentially elevate group permissions if they were masked
before via ACLs, so we also show a warning.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-04-02 11:10:20 +02:00
9f40e09d0a pxar: fix directory ACL entry creation
Don't override `group_obj` with `None` when handling
`ACL_TYPE_DEFAULT` entries for directories.

Reproducer: /var/log/journal ends up without a `MASK` type
entry, making it invalid, as it has `USER` and `GROUP`
entries.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-04-02 10:22:04 +02:00
553e57f914 server/rest: drop now unused imports
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-01 11:53:13 +02:00
2200a38671 code cleanup: drop extra newlines at EOF
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-04-01 11:27:07 +02:00
ba39ab20fb server/rest: extract auth to seperate module
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-01 11:26:28 +02:00
ff8945fd2f proxmox_client_tools: move common key related functions to key_source.rs
Add a new module containing key-related functions and schemata from all
over; the moved code is left unchanged as far as possible.

Requires adapting some 'use' statements across proxmox-backup-client and
putting the XDG helpers quite cozily into proxmox_client_tools/mod.rs

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-01 11:09:28 +02:00
4876393562 vsock_client: support authorization header
Pass in an optional auth tag, which will be passed as an Authorization
header on every subsequent call.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-01 11:09:28 +02:00
971bc6f94b vsock_client: remove some &mut restrictions and rustfmt
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-01 11:09:28 +02:00
cab92acb3c vsock_client: remove wrong comment
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2021-04-01 11:09:28 +02:00
a1d90719e4 bump pxar dep to 0.10.1
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-03-31 14:00:20 +02:00
eeff085d9d server/rest: fix type ambiguity
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-29 12:02:30 +02:00
d43c407a00 server/rest: rust format
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-29 08:17:26 +02:00
6bc87d3952 ui: verification job: fix subject of edit window
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-28 16:57:00 +02:00
04c1c68f31 ui: verify job: fix subject of edit window
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-28 16:45:45 +02:00
94b17c804a ui: task descriptions: sort alphabetically
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-28 16:45:23 +02:00
94352256b7 ui: task descriptions: fix casing
Enforce title-case. Affects mostly the new tape related task
description.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-28 13:50:51 +02:00
b3bed7e41f docs: tape/pool: add backend/ui setting name for allocation policy
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-28 13:40:23 +02:00
a4672dd0b1 ui: tape/pool: set onlineHelp for edit/add window
To let users find the good explanation about allocation and retention
policies from the docs easier.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-28 13:29:02 +02:00
17bbcb57d7 ui: tape: retention/allocation are Policies, note so
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-28 13:28:36 +02:00
843146479a ui: gettext; s/blocksize/block size/
Blocksize is not a word in the English language

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-28 13:04:19 +02:00
cf1e117fc7 sgutils2: use enum for ScsiError
This avoids string allocation when we return SenseInfo.
2021-03-27 15:57:48 +01:00
03eac20b87 SgRaw: add do_in_command() 2021-03-27 15:38:08 +01:00
11f5d59396 tape: page-align BlockHeader so that we can use it with SG_IO 2021-03-27 15:36:35 +01:00
6f63c29306 Cargo.toml: fix: set version to 1.0.12 2021-03-26 14:14:12 +01:00
c0e365fd49 bump version to 1.0.12-1 2021-03-26 14:09:30 +01:00
93fb2e0d21 api2/types: add type_text to DATASTORE_MAP_FORMAT
This way we get a better rendering in the api-viewer.
before:
 [<string>, ... ]

after:
 [(<source>=)?<target>, ... ]

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-25 13:18:10 +01:00
c553407e98 tape: add --scan option for catalog restore 2021-03-25 13:08:34 +01:00
4830de408b tape: avoid writing catalogs for empty backup tasks 2021-03-25 12:50:40 +01:00
7f78528308 OnlineHelpInfo.js: new link for client-repository 2021-03-25 12:26:57 +01:00
2843ba9017 avoid compiler warning 2021-03-25 12:25:23 +01:00
e244b9d03d api2/types: expand DATASTORE_MAP_LIST_SCHEMA description
and give an example

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-25 12:18:14 +01:00
657c47db35 tape: ui: TapeRestore: make datastore mapping selectable
by adding a custom field (grid) where the user can select
a target datastore for each source datastore on tape

if we have not loaded the content of the media set yet,
we have to load it on window open to get the list of datastores
on the tape

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-25 12:17:46 +01:00
a32bb86df9 api subscription: drop old hack for api-macro issue
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-25 12:03:33 +01:00
654c56e05d docs: client benchmark: note that tls is only done if repo is set
and remove the misleading note about no network being involved in the
tls speedtest, as normally there is!

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-25 10:33:45 +01:00
589c4dad9e tape: add fsf/bsf to TapeDriver trait 2021-03-25 10:10:16 +01:00
0320deb0a9 proxmox-tape: fix clean api call
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-25 08:14:13 +01:00
4c4e5c2b1e api2/tape/restore: enable restore mapping of datastores
by changing the 'store' parameter of the restore api call to a
list of mappings (or a single default datastore)

for example giving:
a=b,c=d,e

would restore
datastore 'a' from tape to local datastore 'b'
datastore 'c' from tape to local datastore 'd'
all other datastores to 'e'

this way, only a single datastore can also be restored, by only
giving a single mapping, e.g. 'a=b'

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-25 07:46:12 +01:00
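
A standalone sketch of parsing that mapping syntax (not the actual API type):

    use std::collections::HashMap;

    // "a=b,c=d,e" -> explicit mappings {a: b, c: d} plus default target "e".
    fn parse_store_mapping(arg: &str) -> (HashMap<&str, &str>, Option<&str>) {
        let mut mapping = HashMap::new();
        let mut default = None;
        for part in arg.split(',') {
            match part.split_once('=') {
                Some((source, target)) => {
                    mapping.insert(source, target);
                }
                None => default = Some(part),
            }
        }
        (mapping, default)
    }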
924373d2df client/backup_writer: clarify backup and upload size
The text 'had to upload [KMG]iB' implies that this is the size we
actually had to send to the server, while in reality it is the
raw data size before compression.

Count the size of the compressed chunks and print it separately.
Split the average speed onto its own line so the lines do not get too long.

Rename 'uploaded' to 'size_dirty' and 'vsize_h' to 'size'

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-24 18:24:56 +01:00
3b60b5098f client/backup_writer: introduce UploadStats struct
instead of using a big anonymous tuple. This way the returned values
are properly named.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-24 18:24:56 +01:00
4abb3edd9f docs: fix horizontal scrolling issues on desktop and mobile
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-24 18:24:39 +01:00
932e69a837 docs: improve navigation coloring on mobile
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-24 18:24:20 +01:00
ef6d49670b client: backup writer: run rustfmt
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-24 17:12:05 +01:00
52ea00e9df docs: only apply toctree color override to sidebar one
else the TOC on the index page has some white text on a white
background

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-24 17:09:30 +01:00
870681013a tape: fix catalog restore
We need to rewind the tape if fast_catalog_restore() fails ...
2021-03-24 10:09:23 +01:00
c046739461 tape: fix MediaPool regression tests 2021-03-24 09:44:30 +01:00
8b1289f3e4 tape: skip catalog archives in restore 2021-03-24 09:33:39 +01:00
f1d76ecf6c fix #3359: fix blocking writes in async code during pxar create
in commit `asyncify pxar create_archive`, we changed from a
separate thread for creating a pxar to using async code, but the
StdChannelWriter used for both pxar and catalog can block, which
may block the tokio runtime on single (and probably dual) core
environments

this patch adds a wrapper struct for any writer that implements
'std::io::Write' and wraps the write calls with 'block_in_place'
so that if called in a tokio runtime, it knows that this code
potentially blocks

Fixes: 6afb60abf5 ("asyncify pxar create_archive")

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-24 09:00:07 +01:00
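
A sketch of that wrapper struct (the name is illustrative):

    use std::io::{Result, Write};
    use tokio::task::block_in_place;

    // Wraps any std::io::Write and funnels each call through
    // block_in_place(), telling the tokio runtime the write may block.
    struct BlockingWriterAdapter<W: Write>(W);

    impl<W: Write> Write for BlockingWriterAdapter<W> {
        fn write(&mut self, buf: &[u8]) -> Result<usize> {
            block_in_place(|| self.0.write(buf))
        }

        fn flush(&mut self) -> Result<()> {
            block_in_place(|| self.0.flush())
        }
    }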
074503f288 tape: implement fast catalog restore 2021-03-24 08:40:34 +01:00
c6f55139f8 tape: impl. MediaCatalog::parse_catalog_header
This is just an optimization, avoiding reading the catalog into memory.

We also expose create_temporary_database_file() now (will be
used for catalog restore).
2021-03-24 06:32:59 +01:00
20cc25d749 tape: add TapeDriver::move_to_last_file 2021-03-24 06:32:59 +01:00
30316192b3 tape: improve locking (lock media-sets)
- new helper: lock_media_set()

- MediaPool: lock media set

- Expose Inventory::new() to avoid double loading

- do not lock pool on restore (only lock media-set)

- change pool lock name to ".pool-{name}"
2021-03-24 06:32:59 +01:00
e93263be1e tape: implement MediaCatalog::destroy_unrelated_catalog() helper 2021-03-22 12:03:11 +01:00
2ab2ca9c24 tape: add MediaPool::lock_unassigned_media_pool() helper 2021-03-19 10:13:38 +01:00
54fcb7f5d8 api2/tape/backup: wait indefinitely for lock in scheduled backup jobs
so that a user can schedule multiple backup jobs onto a single
media pool without having to time them apart

this makes sense since we can back up multiple datastores onto
the same media-set but can only specify one datastore per backup job

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-19 09:04:32 +01:00
4abd4dbe38 api2/tape/backup: include a summary on notification e-mails
for now only contains the list of included snapshots (if any),
as well as the backup duration

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-19 09:03:52 +01:00
eac1beef3c tape: cleanup PoolWriter - factor out common code 2021-03-19 08:56:14 +01:00
166a48f903 tape: cleanup - split PoolWriter into several files 2021-03-19 08:19:13 +01:00
82775c4764 tape: make sure we only commit/write valid catalogs 2021-03-19 07:50:32 +01:00
88bc9635aa tape: store media_uuid in PoolWriterState
This is mainly a cleanup, avoiding access to the catalog_set to get the uuid.
2021-03-19 07:33:59 +01:00
1037f2bc2d tape: cleanup - rename CatalogBuilder to CatalogSet 2021-03-19 07:22:54 +01:00
f24cbee77d server/email_notifications: do not double html escape
the default escape handler is handlebars::html_escape, but these are
plain-text emails and we manually escape them for the html part, so
set the default escape handler to 'no_escape'

this avoids double html escaping of the characters '&"<>' in emails

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-19 07:02:49 +01:00
25b4d52dce server/email_notifications: do not panic on template registration
instead, print an error and continue; the rendering functions will error
out if one of the templates could not be registered

if we `.unwrap()` here, it can lead to problems if the templates are
not correct, i.e. we could panic while holding a lock, if something holds
a mutex while this is called for the first time

add a test to catch registration issues during package build

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-19 07:02:17 +01:00
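
A sketch of panic-free registration with handlebars; template names and sources here are made up:

    use handlebars::Handlebars;

    // Register all templates at startup; a broken template is logged and
    // the corresponding notification simply fails to render later, instead
    // of panicking (possibly while a lock is held).
    fn register_templates(hb: &mut Handlebars) {
        let templates = [("gc-ok", "Garbage collection on {{datastore}} finished")];
        for (name, source) in templates {
            if let Err(err) = hb.register_template_string(name, source) {
                eprintln!("could not register template '{}': {}", name, err);
            }
        }
    }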
2729d134bd tools/systemd/time: implement some Traits for TimeSpan
namely
* From<Duration> (to convert easily from duration to timespan)
* Display (for better formatting)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-19 07:00:55 +01:00
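
A sketch of the two impls on a reduced TimeSpan:

    use std::fmt;
    use std::time::Duration;

    struct TimeSpan {
        seconds: u64,
    }

    // From<Duration>: convert easily from a Duration to a TimeSpan.
    impl From<Duration> for TimeSpan {
        fn from(duration: Duration) -> Self {
            TimeSpan { seconds: duration.as_secs() }
        }
    }

    // Display: render as e.g. "1h 2m 3s" instead of a raw second count.
    impl fmt::Display for TimeSpan {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            let (h, rest) = (self.seconds / 3600, self.seconds % 3600);
            write!(f, "{}h {}m {}s", h, rest / 60, rest % 60)
        }
    }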
32b75d36a8 tape: backup media catalogs 2021-03-19 06:58:46 +01:00
c4430a937d bump version to 1.0.11-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-18 12:36:28 +01:00
237314ad0d tape: improve catalog consistency checks
Try to check if we read the correct catalog by verifying uuid, media_set_uuid
and seq_nr.

Note: this changes the catalog format again.
2021-03-18 08:43:55 +01:00
caf76ec592 tools/subscription: ignore ENOENT for apt auth config removal
deleting a nonexistent file is hardly an error worth mentioning

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-17 20:12:58 +01:00
0af8c26b74 ui: tape/BackupOverview: insert a datastore level
since we can now back up multiple datastores in the same media-set,
we show the datastores as the first level below that

the final tree structure looks like this:

tapepool A
- media set 1
 - datastore I
  - tape x
   - ct/100
    - ct/100/2020-01-01T00:00:00Z

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-17 13:37:49 +01:00
825dfe7e0d ui: tape/DriveStatus: fix updating pointer+click handler on info widget
we can only do this after it is rendered; the element does not exist
before

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-17 13:37:39 +01:00
30a0809553 ui: tape/DriveStatus: add erase button
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-17 13:37:17 +01:00
6ee3035523 tape: define magic number for catalog archives 2021-03-17 13:35:23 +01:00
b627ebbf40 tape: improve catalog parser 2021-03-17 11:29:23 +01:00
ef4bdf6b8b tape: proxmox-tape media content - add 'store' attribute 2021-03-17 11:17:54 +01:00
54722acada tape: store datastore name in tape archives and media catalog
So that we can store multiple datastores on a single media set.
Deduplication is now per datastore (not per media set).
2021-03-17 11:08:51 +01:00
0e2bf3aa1d SnapshotReader: add self.datastore_name() helper 2021-03-17 10:16:34 +01:00
365126efa9 tape: PoolWriter - remove unnecessary move_to_eom 2021-03-17 10:16:34 +01:00
03d4c9217d update OnlineHelpInfo.js 2021-03-17 10:16:34 +01:00
8498290848 docs: technically not everything is in rust/js
I mean the whole distro uses quite some C and the like as base, so
avoid being overly strict here.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-16 20:37:16 +01:00
654db565cb docs: features: mention that there are no client/data limits
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-16 20:37:16 +01:00
51f83548ed docs: drop uncommon spelled out GCM
It does not help users if that is spelled out, and its not a common
use of GCM, and especially in the AES 256 context its clear what is
meant. The link to Wikipedia stays, so interested people can still
read up on it and others get a better overview due to the text being
more concise.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-16 20:37:16 +01:00
5847a6bdb5 docs: fix linking, avoid over long text in main feature list
The main feature list should provide a short overview of the, well,
main features. While enterprise support *is* a main and important
feature, it's not the place here to describe things like personal
volume/ngo/... offers and the like.

Move parts of it to getting help, which lacked mentioning the
enterprise support too and is a good place to describe the customer
portal.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-16 20:37:16 +01:00
313e5e2047 docs: mention support subscription plans
and change enterprise repository section to present tense.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2021-03-16 19:24:23 +01:00
7914e62b10 tools/zip: only add zip64 field when necessary
if neither offset nor size exceeds 32bit, do not add the
zip64 extension field

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-16 09:13:39 +01:00
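
The decision sketched: the classic ZIP header fields are 32 bit, so zip64 is only required once a value no longer fits.

    const ZIP32_MAX: u64 = 0xFFFF_FFFF;

    // The zip64 extra field is only needed if a size or the local header
    // offset exceeds the 32-bit fields of the classic format.
    fn needs_zip64(size: u64, compressed_size: u64, offset: u64) -> bool {
        size > ZIP32_MAX || compressed_size > ZIP32_MAX || offset > ZIP32_MAX
    }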
84d3284609 ui: tape/DriveStatus: open task window on click on state
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-16 09:00:07 +01:00
70fab5b46e ui: tape: convert slot selection on transfer to combogrid
this is much handier than a number field, and the user can instantly
see which one is an import/export slot

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-16 08:57:48 +01:00
e36135031d ui: tape/Restore: let the user choose an owner
so that the tape backup can be restored as any user, given that
the currently logged-in user has the correct permission.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-16 08:55:42 +01:00
5a5ee0326e proxmox-tape: add missing notify-user to 'proxmox-tape restore'
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-16 08:54:38 +01:00
776dabfb2e tape: use MB/s for backup speed (to match drive speed specification) 2021-03-16 08:51:49 +01:00
5c4755ad08 tape: speedup backup by doing read/write in parallel 2021-03-16 08:51:49 +01:00
7c1666289d tools/zip: add missing start_disk field for zip64 extension
it is not optional, even though we give the size explicitly

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-03-15 12:36:40 +01:00
cded320e92 backup info: run rustfmt
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-03-14 19:18:35 +01:00
b31cdec225 update to pxar 0.10
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-03-12 10:48:09 +01:00
591b120d35 fix feature flag logic in pxar create
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2021-03-12 10:17:51 +01:00
e8913fea12 tape: write_chunk_archive - do not consume partially written chunk at EOT
So that it is re-written to the next tape.
2021-03-12 07:14:50 +01:00
179 changed files with 11044 additions and 4627 deletions

Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "1.0.10"
+version = "1.1.3"
 authors = [
     "Dietmar Maurer <dietmar@proxmox.com>",
     "Dominik Csapak <d.csapak@proxmox.com>",
@@ -29,7 +29,10 @@ bitflags = "1.2.1"
 bytes = "1.0"
 crc32fast = "1"
 endian_trait = { version = "0.6", features = ["arrays"] }
+env_logger = "0.7"
+flate2 = "1.0"
 anyhow = "1.0"
+thiserror = "1.0"
 futures = "0.3"
 h2 = { version = "0.3", features = [ "stream" ] }
 handlebars = "3.0"
@@ -48,11 +51,11 @@ percent-encoding = "2.1"
 pin-utils = "0.1.0"
 pin-project = "1.0"
 pathpatterns = "0.1.2"
-proxmox = { version = "0.11.0", features = [ "sortable-macro", "api-macro", "websocket" ] }
+proxmox = { version = "0.11.1", features = [ "sortable-macro", "api-macro", "websocket" ] }
 #proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
 proxmox-fuse = "0.1.1"
-pxar = { version = "0.9.0", features = [ "tokio-io" ] }
+pxar = { version = "0.10.1", features = [ "tokio-io" ] }
 #pxar = { path = "../pxar", features = [ "tokio-io" ] }
 regex = "1.2"
 rustyline = "7"
@@ -60,10 +63,10 @@ serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 siphasher = "0.3"
 syslog = "4.0"
-tokio = { version = "1.0", features = [ "fs", "io-util", "macros", "net", "parking_lot", "process", "rt", "rt-multi-thread", "signal", "time" ] }
+tokio = { version = "1.0", features = [ "fs", "io-util", "io-std", "macros", "net", "parking_lot", "process", "rt", "rt-multi-thread", "signal", "time" ] }
 tokio-openssl = "0.6.1"
 tokio-stream = "0.1.0"
-tokio-util = { version = "0.6", features = [ "codec" ] }
+tokio-util = { version = "0.6", features = [ "codec", "io" ] }
 tower-service = "0.3.0"
 udev = ">= 0.3, <0.5"
 url = "2.1"

Makefile

@@ -9,6 +9,7 @@ SUBDIRS := etc www docs
 # Binaries usable by users
 USR_BIN := \
 	proxmox-backup-client \
+	proxmox-file-restore \
 	pxar \
 	proxmox-tape \
 	pmtx \
@@ -25,6 +26,10 @@ SERVICE_BIN := \
 	proxmox-backup-proxy \
 	proxmox-daily-update

+# Single file restore daemon
+RESTORE_BIN := \
+	proxmox-restore-daemon
+
 ifeq ($(BUILD_MODE), release)
 CARGO_BUILD_ARGS += --release
 COMPILEDIR := target/release
@@ -39,7 +44,7 @@ endif
 CARGO ?= cargo

 COMPILED_BINS := \
-	$(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN))
+	$(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN) $(RESTORE_BIN))

 export DEB_VERSION DEB_VERSION_UPSTREAM

@@ -47,9 +52,12 @@ SERVER_DEB=${PACKAGE}-server_${DEB_VERSION}_${ARCH}.deb
 SERVER_DBG_DEB=${PACKAGE}-server-dbgsym_${DEB_VERSION}_${ARCH}.deb
 CLIENT_DEB=${PACKAGE}-client_${DEB_VERSION}_${ARCH}.deb
 CLIENT_DBG_DEB=${PACKAGE}-client-dbgsym_${DEB_VERSION}_${ARCH}.deb
+RESTORE_DEB=proxmox-backup-file-restore_${DEB_VERSION}_${ARCH}.deb
+RESTORE_DBG_DEB=proxmox-backup-file-restore-dbgsym_${DEB_VERSION}_${ARCH}.deb
 DOC_DEB=${PACKAGE}-docs_${DEB_VERSION}_all.deb

-DEBS=${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB}
+DEBS=${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB} \
+	${RESTORE_DEB} ${RESTORE_DBG_DEB}

 DSC = rust-${PACKAGE}_${DEB_VERSION}.dsc
@@ -117,8 +125,8 @@ clean:
 	find . -name '*~' -exec rm {} ';'

 .PHONY: dinstall
-dinstall: ${DEBS}
-	dpkg -i ${DEBS}
+dinstall: ${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB}
+	dpkg -i $^

 # make sure we build binaries before docs
 docs: cargo-build
@@ -144,6 +152,9 @@ install: $(COMPILED_BINS)
 	    install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(SBINDIR)/ ; \
 	    install -m644 zsh-completions/_$(i) $(DESTDIR)$(ZSH_COMPL_DEST)/ ;)
 	install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup
+	install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/file-restore
+	$(foreach i,$(RESTORE_BIN), \
+	    install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/file-restore/ ;)
 	# install sg-tape-cmd as setuid binary
 	install -m4755 -o root -g root $(COMPILEDIR)/sg-tape-cmd $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/sg-tape-cmd
 	$(foreach i,$(SERVICE_BIN), \
@@ -152,8 +163,10 @@ install: $(COMPILED_BINS)
 	$(MAKE) -C docs install

 .PHONY: upload
-upload: ${SERVER_DEB} ${CLIENT_DEB} ${DOC_DEB}
+upload: ${SERVER_DEB} ${CLIENT_DEB} ${RESTORE_DEB} ${DOC_DEB}
 	# check if working directory is clean
 	git diff --exit-code --stat && git diff --exit-code --stat --staged
-	tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
-	tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve,pmg" --dist buster
+	tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB} | \
+	    ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
+	tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg" --dist buster
+	tar cf - ${RESTORE_DEB} ${RESTORE_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist buster

debian/changelog

@ -1,3 +1,122 @@
rust-proxmox-backup (1.1.3-1) unstable; urgency=medium
* tape restore: improve datastore locking when GC runs at the same time
* tape restore: always do quick chunk verification
* tape: improve compatibillity with some changers
* tape: work-around missing format command on LTO-4 drives, fall-back to
slower rewind erease
* fix #3393: pxar: allow and safe the 'security.NTACL' extended attribute
* file-restore: support encrypted VM backups
-- Proxmox Support Team <support@proxmox.com> Thu, 22 Apr 2021 20:14:58 +0200
rust-proxmox-backup (1.1.2-1) unstable; urgency=medium
* backup verify: always re-check if we can skip a chunk in the actual verify
loop.
* tape: do not try to backup unfinished backups
-- Proxmox Support Team <support@proxmox.com> Thu, 15 Apr 2021 13:26:52 +0200
rust-proxmox-backup (1.1.1-1) unstable; urgency=medium
* docs: include tape in table of contents
* docs: tape: improve definition-list format and add screenshots
* docs: reorder maintenance and network chapters after client-usage/tools
chapters
* ui: tape changer status: add Format button to drive grid
* backup/verify: improve speed on disks with slow random-IO (spinners) by
iterating over chunks sorted by inode
-- Proxmox Support Team <support@proxmox.com> Wed, 14 Apr 2021 14:50:29 +0200
rust-proxmox-backup (1.1.0-1) unstable; urgency=medium
* enable tape backup as technology preview by default
* tape: read drive status: clear deferred error or media changed events.
* tape: improve end-of-tape (EOT) error handling
* tape: cleanup media catalog on tape reuse
* zfs: re-use underlying pool wide IO stats for datasets
* api daemon: only log errors from accepting new connections, to avoid opening
too many file descriptors
* api/datastore: allow downloading the entire archive as ZIP archive, not
only sub-paths
-- Proxmox Support Team <support@proxmox.com> Tue, 13 Apr 2021 14:42:18 +0200
rust-proxmox-backup (1.0.14-1) unstable; urgency=medium
* server: compress API call response and static files if client accepts that
* compress generated ZIP archives with deflate
* tape: implement LTO userspace driver
* docs: mention new user space tape driver, adapt device path names
* tape: always clear encryption key after backup (for security reasons)
* ui: improve changer status view
* add proxmox-file-restore package, providing a central file-restore binary
with preparations for restoring files also from block-level backups, using
QEMU for safe encapsulation.
-- Proxmox Support Team <support@proxmox.com> Thu, 08 Apr 2021 16:35:11 +0200
rust-proxmox-backup (1.0.13-1) unstable; urgency=medium
* pxar: improve handling ACL entries on create and restore
-- Proxmox Support Team <support@proxmox.com> Fri, 02 Apr 2021 15:32:01 +0200
rust-proxmox-backup (1.0.12-1) unstable; urgency=medium
* tape: write catalogs to tape (speedup catalog restore)
* tape: add --scan option for catalog restore
* tape: improve locking (lock media-sets)
* tape: ui: enable datastore mappings
* fix #3359: fix blocking writes in async code during pxar create
* api2/tape/backup: wait indefinitely for lock in scheduled backup jobs
* documentation improvements
-- Proxmox Support Team <support@proxmox.com> Fri, 26 Mar 2021 14:08:47 +0100
rust-proxmox-backup (1.0.11-1) unstable; urgency=medium
* fix feature flag logic in pxar create
* tools/zip: add missing start_disk field for zip64 extension to improve
compatibility with some strict archive tools
* tape: speedup backup by doing read/write in parallel
* tape: store datastore name in tape archives and media catalog
-- Proxmox Support Team <support@proxmox.com> Thu, 18 Mar 2021 12:36:01 +0100
rust-proxmox-backup (1.0.10-1) unstable; urgency=medium
* tape: improve MediaPool allocation by sorting tapes by creation time and

32
debian/control vendored
View File

@ -15,6 +15,8 @@ Build-Depends: debhelper (>= 11),
librust-crossbeam-channel-0.5+default-dev,
librust-endian-trait-0.6+arrays-dev,
librust-endian-trait-0.6+default-dev,
librust-env-logger-0.7+default-dev,
librust-flate2-1+default-dev,
librust-futures-0.3+default-dev,
librust-h2-0.3+default-dev,
librust-h2-0.3+stream-dev,
@ -36,13 +38,13 @@ Build-Depends: debhelper (>= 11),
librust-percent-encoding-2+default-dev (>= 2.1-~~),
librust-pin-project-1+default-dev,
librust-pin-utils-0.1+default-dev,
librust-proxmox-0.11+api-macro-dev,
librust-proxmox-0.11+default-dev,
librust-proxmox-0.11+sortable-macro-dev,
librust-proxmox-0.11+websocket-dev,
librust-proxmox-0.11+api-macro-dev (>= 0.11.1-~~),
librust-proxmox-0.11+default-dev (>= 0.11.1-~~),
librust-proxmox-0.11+sortable-macro-dev (>= 0.11.1-~~),
librust-proxmox-0.11+websocket-dev (>= 0.11.1-~~),
librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
librust-pxar-0.9+default-dev,
librust-pxar-0.9+tokio-io-dev,
librust-pxar-0.10+default-dev (>= 0.10.1-~~),
librust-pxar-0.10+tokio-io-dev (>= 0.10.1-~~),
librust-regex-1+default-dev (>= 1.2-~~),
librust-rustyline-7+default-dev,
librust-serde-1+default-dev,
@ -50,8 +52,10 @@ Build-Depends: debhelper (>= 11),
librust-serde-json-1+default-dev,
librust-siphasher-0.3+default-dev,
librust-syslog-4+default-dev,
librust-thiserror-1+default-dev,
librust-tokio-1+default-dev,
librust-tokio-1+fs-dev,
librust-tokio-1+io-std-dev,
librust-tokio-1+io-util-dev,
librust-tokio-1+macros-dev,
librust-tokio-1+net-dev,
@ -65,6 +69,7 @@ Build-Depends: debhelper (>= 11),
librust-tokio-stream-0.1+default-dev,
librust-tokio-util-0.6+codec-dev,
librust-tokio-util-0.6+default-dev,
librust-tokio-util-0.6+io-dev,
librust-tower-service-0.3+default-dev,
librust-udev-0.4+default-dev | librust-udev-0.3+default-dev,
librust-url-2+default-dev (>= 2.1-~~),
@ -109,14 +114,12 @@ Depends: fonts-font-awesome,
libsgutils2-2,
libzstd1 (>= 1.3.8),
lvm2,
mt-st,
mtx,
openssh-server,
pbs-i18n,
postfix | mail-transport-agent,
proxmox-backup-docs,
proxmox-mini-journalreader,
proxmox-widget-toolkit (>= 2.3-6),
proxmox-widget-toolkit (>= 2.5-1),
pve-xtermjs (>= 4.7.0-1),
sg3-utils,
smartmontools,
@ -146,3 +149,14 @@ Depends: libjs-extjs,
Architecture: all
Description: Proxmox Backup Documentation
This package contains the Proxmox Backup Documentation files.
Package: proxmox-backup-file-restore
Architecture: any
Depends: ${misc:Depends},
${shlibs:Depends},
Recommends: pve-qemu-kvm (>= 5.0.0-9),
proxmox-backup-restore-image,
Description: Proxmox Backup single file restore tools for pxar and block device backups
This package contains the Proxmox Backup single file restore client for
restoring individual files and folders from both host/container and VM/block
device backups. It includes a block device restore driver using QEMU.

15
debian/control.in vendored
View File

@ -6,14 +6,12 @@ Depends: fonts-font-awesome,
libsgutils2-2,
libzstd1 (>= 1.3.8),
lvm2,
mt-st,
mtx,
openssh-server,
pbs-i18n,
postfix | mail-transport-agent,
proxmox-backup-docs,
proxmox-mini-journalreader,
proxmox-widget-toolkit (>= 2.3-6),
proxmox-widget-toolkit (>= 2.5-1),
pve-xtermjs (>= 4.7.0-1),
sg3-utils,
smartmontools,
@ -43,3 +41,14 @@ Depends: libjs-extjs,
Architecture: all
Description: Proxmox Backup Documentation
This package contains the Proxmox Backup Documentation files.
Package: proxmox-backup-file-restore
Architecture: any
Depends: ${misc:Depends},
${shlibs:Depends},
Recommends: pve-qemu-kvm (>= 5.0.0-9),
proxmox-backup-restore-image,
Description: Proxmox Backup single file restore tools for pxar and block device backups
This package contains the Proxmox Backup single file restore client for
restoring individual files and folders from both host/container and VM/block
device backups. It includes a block device restore driver using QEMU.

10
debian/postinst vendored
View File

@ -48,6 +48,16 @@ case "$1" in
/etc/proxmox-backup/remote.cfg || true
fi
fi
if dpkg --compare-versions "$2" 'le' '1.0.14-1'; then
# FIXME: Remove with 2.0
if grep -s -q -P -e '^linux:' /etc/proxmox-backup/tape.cfg; then
echo "========="
echo "= NOTE: You have now unsupported 'linux' tape drives configured."
echo "= * Execute 'udevadm control --reload-rules && udevadm trigger' to update /dev"
echo "= * Edit '/etc/proxmox-backup/tape.cfg', remove 'linux' entries and re-add over CLI/GUI"
echo "========="
fi
fi
# FIXME: remove with 2.0
if [ -d "/var/lib/proxmox-backup/tape" ] &&
[ "$(stat --printf '%a' '/var/lib/proxmox-backup/tape')" != "750" ]; then

View File

@ -0,0 +1 @@
debian/proxmox-file-restore.bc proxmox-file-restore

8
debian/proxmox-backup-file-restore.bc vendored Normal file
View File

@ -0,0 +1,8 @@
# proxmox-file-restore bash completion
# see http://tiswww.case.edu/php/chet/bash/FAQ
# and __ltrim_colon_completions() in /usr/share/bash-completion/bash_completion
# this modifies a global var, but I found no better way
COMP_WORDBREAKS=${COMP_WORDBREAKS//:}
complete -C 'proxmox-file-restore bashcomplete' proxmox-file-restore
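To try the completion logic without installing the package, the file can be sourced directly in a running shell (a sketch; the path assumes a repository checkout):

.. code-block:: console

   $ source debian/proxmox-backup-file-restore.bc
   $ proxmox-file-restore <TAB>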

View File

@ -0,0 +1,4 @@
usr/bin/proxmox-file-restore
usr/share/man/man1/proxmox-file-restore.1
usr/share/zsh/vendor-completions/_proxmox-file-restore
usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore/proxmox-restore-daemon

64
debian/proxmox-backup-file-restore.postinst vendored Executable file
View File

@ -0,0 +1,64 @@
#!/bin/sh
set -e
update_initramfs() {
# regenerate initramfs for single file restore VM
INST_PATH="/usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore"
CACHE_PATH="/var/cache/proxmox-backup/file-restore-initramfs.img"
# clean up first, in case proxmox-file-restore was uninstalled, since we do
# not want an unusable image lying around
rm -f "$CACHE_PATH"
if [ ! -f "$INST_PATH/initramfs.img" ]; then
echo "proxmox-backup-restore-image is not installed correctly, skipping update" >&2
exit 0
fi
echo "Updating file-restore initramfs..."
# avoid leftover temp file
cleanup() {
rm -f "$CACHE_PATH.tmp"
}
trap cleanup EXIT
mkdir -p "/var/cache/proxmox-backup"
cp "$INST_PATH/initramfs.img" "$CACHE_PATH.tmp"
# cpio uses the passed-in path as the offset inside the archive as well, so we need
# to be in the same dir as the daemon binary to ensure it's placed in /
( cd "$INST_PATH"; \
printf "./proxmox-restore-daemon" \
| cpio -o --format=newc -A -F "$CACHE_PATH.tmp" )
mv -f "$CACHE_PATH.tmp" "$CACHE_PATH"
trap - EXIT
}
case "$1" in
configure)
# in case restore daemon was updated
update_initramfs
;;
triggered)
if [ "$2" = "proxmox-backup-restore-image-update" ]; then
# in case base-image was updated
update_initramfs
else
echo "postinst called with unknown trigger name: \`$2'" >&2
fi
;;
abort-upgrade|abort-remove|abort-deconfigure)
;;
*)
echo "postinst called with unknown argument \`$1'" >&2
exit 1
;;
esac
exit 0

View File

@ -0,0 +1 @@
interest-noawait proxmox-backup-restore-image-update

18
debian/proxmox-backup-server.udev vendored Normal file
View File

@ -0,0 +1,18 @@
# do not edit this file, it will be overwritten on update
# persistent storage links: /dev/tape/{by-id,by-path}
ACTION=="remove", GOTO="persistent_storage_tape_end"
ENV{UDEV_DISABLE_PERSISTENT_STORAGE_RULES_FLAG}=="1", GOTO="persistent_storage_tape_end"
# also see: /lib/udev/rules.d/60-persistent-storage-tape.rules
SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", IMPORT{program}="scsi_id --sg-version=3 --export --whitelisted -d $devnode", \
SYMLINK+="tape/by-id/scsi-$env{ID_SERIAL}-sg"
# iSCSI devices from the same host all have the same ID_SERIAL,
# but additionally a property named ID_SCSI_SERIAL.
SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", ENV{ID_SCSI_SERIAL}=="?*", \
SYMLINK+="tape/by-id/scsi-$env{ID_SCSI_SERIAL}-sg"
LABEL="persistent_storage_tape_end"

7
debian/rules vendored
View File

@ -52,8 +52,11 @@ override_dh_dwz:
override_dh_strip:
dh_strip
for exe in $$(find debian/proxmox-backup-client/usr \
debian/proxmox-backup-server/usr -executable -type f); do \
for exe in $$(find \
debian/proxmox-backup-client/usr \
debian/proxmox-backup-server/usr \
debian/proxmox-backup-file-restore \
-executable -type f); do \
debian/scripts/elf-strip-unused-dependencies.sh "$$exe" || true; \
done

View File

@ -5,6 +5,7 @@ GENERATED_SYNOPSIS := \
proxmox-backup-client/synopsis.rst \
proxmox-backup-client/catalog-shell-synopsis.rst \
proxmox-backup-manager/synopsis.rst \
proxmox-file-restore/synopsis.rst \
pxar/synopsis.rst \
pmtx/synopsis.rst \
pmt/synopsis.rst \
@ -25,7 +26,8 @@ MAN1_PAGES := \
proxmox-tape.1 \
proxmox-backup-proxy.1 \
proxmox-backup-client.1 \
proxmox-backup-manager.1
proxmox-backup-manager.1 \
proxmox-file-restore.1
MAN5_PAGES := \
media-pool.cfg.5 \
@ -179,6 +181,12 @@ proxmox-backup-manager.1: proxmox-backup-manager/man1.rst proxmox-backup-manage
proxmox-backup-proxy.1: proxmox-backup-proxy/man1.rst proxmox-backup-proxy/description.rst
rst2man $< >$@
proxmox-file-restore/synopsis.rst: ${COMPILEDIR}/proxmox-file-restore
${COMPILEDIR}/proxmox-file-restore printdoc > proxmox-file-restore/synopsis.rst
proxmox-file-restore.1: proxmox-file-restore/man1.rst proxmox-file-restore/description.rst proxmox-file-restore/synopsis.rst
rst2man $< >$@
.PHONY: onlinehelpinfo
onlinehelpinfo:
@echo "Generating OnlineHelpInfo.js..."

View File

@ -143,7 +143,7 @@ Ext.onReady(function() {
permhtml += "</div></div>";
} else {
//console.log(permission);
permhtml += "Unknown systax!";
permhtml += "Unknown syntax!";
}
return permhtml;

View File

@ -3,9 +3,10 @@ Backup Client Usage
The command line client is called :command:`proxmox-backup-client`.
.. _client_repository:
Repository Locations
--------------------
Backup Repository Locations
---------------------------
The client uses the following notation to specify a datastore repository
on the backup server.
@ -471,7 +472,7 @@ located in ``/etc``, you could do the following:
pxar:/ > restore target/ --pattern etc/**/*.conf
...
The above will scan trough all the directories below ``/etc`` and restore all
The above will scan through all the directories below ``/etc`` and restore all
files ending in ``.conf``.
.. todo:: Explain interactive restore in more detail
@ -691,8 +692,15 @@ Benchmarking
------------
The backup client also comes with a benchmarking tool. This tool measures
various metrics relating to compression and encryption speeds. You can run a
benchmark using the ``benchmark`` subcommand of ``proxmox-backup-client``:
various metrics relating to compression and encryption speeds. If a Proxmox
Backup repository (remote or local) is specified, the TLS upload speed will get
measured too.
You can run a benchmark using the ``benchmark`` subcommand of
``proxmox-backup-client``:
.. note:: The TLS speed test is only included if a :ref:`backup server
repository is specified <client_repository>`.
.. code-block:: console
@ -723,8 +731,7 @@ benchmark using the ``benchmark`` subcommand of ``proxmox-backup-client``:
.. note:: The percentages given in the output table correspond to a
comparison against a Ryzen 7 2700X. The TLS test connects to the
local host, so there is no network involved.
comparison against a Ryzen 7 2700X.
You can also pass the ``--output-format`` parameter to output stats in ``json``,
rather than the default table format.

View File

@ -6,6 +6,11 @@ Command Line Tools
.. include:: proxmox-backup-client/description.rst
``proxmox-file-restore``
~~~~~~~~~~~~~~~~~~~~~~~~~
.. include:: proxmox-file-restore/description.rst
``proxmox-backup-manager``
~~~~~~~~~~~~~~~~~~~~~~~~~~

View File

@ -26,6 +26,27 @@ Those commands are available when you start an interactive restore shell:
.. include:: proxmox-backup-manager/synopsis.rst
``proxmox-tape``
----------------
.. include:: proxmox-tape/synopsis.rst
``pmt``
-------
.. include:: pmt/options.rst
....
.. include:: pmt/synopsis.rst
``pmtx``
--------
.. include:: pmtx/synopsis.rst
``pxar``
--------

View File

@ -49,7 +49,7 @@ PygmentsBridge.latex_formatter = CustomLatexFormatter
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.graphviz", "sphinx.ext.todo", "proxmox-scanrefs"]
extensions = ["sphinx.ext.graphviz", 'sphinx.ext.mathjax', "sphinx.ext.todo", "proxmox-scanrefs"]
todo_link_only = True
@ -307,6 +307,9 @@ html_show_sourcelink = False
# Output file base name for HTML help builder.
htmlhelp_basename = 'ProxmoxBackupdoc'
# use local mathjax package, symlink comes from debian/proxmox-backup-docs.links
mathjax_path = "mathjax/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
# -- Options for LaTeX output ---------------------------------------------
latex_engine = 'xelatex'
@ -464,6 +467,3 @@ epub_exclude_files = ['search.html']
# If false, no index is generated.
#
# epub_use_index = True
# use local mathjax package, symlink comes from debian/proxmox-backup-docs.links
mathjax_path = "mathjax/MathJax.js?config=TeX-AMS-MML_HTMLorMML"

View File

@ -1,4 +1,4 @@
Each drive configuration section starts with a header ``linux: <name>``,
Each LTO drive configuration section starts with a header ``lto: <name>``,
followed by the drive configuration options.
Tape changer configurations starts with ``changer: <name>``,
@ -6,7 +6,7 @@ followed by the changer configuration options.
::
linux: hh8
lto: hh8
changer sl3
path /dev/tape/by-id/scsi-10WT065325-nst

View File

@ -37,8 +37,53 @@ Options
.. include:: config/datastore/config.rst
``media-pool.cfg``
~~~~~~~~~~~~~~~~~~
File Format
^^^^^^^^^^^
.. include:: config/media-pool/format.rst
Options
^^^^^^^
.. include:: config/media-pool/config.rst
``tape.cfg``
~~~~~~~~~~~~
File Format
^^^^^^^^^^^
.. include:: config/tape/format.rst
Options
^^^^^^^
.. include:: config/tape/config.rst
``tape-job.cfg``
~~~~~~~~~~~~~~~~
File Format
^^^^^^^^^^^
.. include:: config/tape-job/format.rst
Options
^^^^^^^
.. include:: config/tape-job/config.rst
``user.cfg``
~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~
File Format
^^^^^^^^^^^

View File

@ -57,6 +57,11 @@ div.sphinxsidebar h3 {
div.sphinxsidebar h1.logo-name {
display: none;
}
div.document, div.footer {
width: min(100%, 1320px);
}
@media screen and (max-width: 875px) {
div.sphinxsidebar p.logo {
display: initial;
@ -65,9 +70,19 @@ div.sphinxsidebar h1.logo-name {
display: block;
}
div.sphinxsidebar span {
color: #AAA;
color: #EEE;
}
ul li.toctree-l1 > a {
.sphinxsidebar ul li.toctree-l1 > a, div.sphinxsidebar a {
color: #FFF;
}
div.sphinxsidebar {
background-color: #555;
}
div.body {
min-width: 300px;
}
div.footer {
display: block;
margin: 15px auto 0px auto;
}
}

View File

@ -61,9 +61,7 @@ attacker gains access to the server or any point of the network, they will not
be able to read the data.
.. note:: Encryption is not enabled by default. To set up encryption, see the
`Encryption
<https://pbs.proxmox.com/docs/administration-guide.html#encryption>`_ section
of the Proxmox Backup Server Administration Guide.
:ref:`backup client encryption section <client_encryption>`.
Is the backup incremental/deduplicated?

View File

@ -112,6 +112,18 @@ The administration menu item also contains a disk management subsection:
* **Directory**: Create and view information on *ext4* and *xfs* disks
* **ZFS**: Create and view information on *ZFS* disks
Tape Backup
^^^^^^^^^^^
.. image:: images/screenshots/pbs-gui-tape-changer-overview.png
:align: right
:alt: Tape Backup: Tape changer overview
The `Tape Backup`_ section contains a top panel for managing tape media sets,
inventories, drives, changers and the tape backup jobs themselves.
It also contains a subsection per standalone drive and per changer, with a
status and management view for those devices.
Datastore
^^^^^^^^^

Binary files not shown: 10 new screenshot images added (12 KiB – 117 KiB each).

View File

@ -25,14 +25,15 @@ in the section entitled "GNU Free Documentation License".
terminology.rst
gui.rst
storage.rst
network-management.rst
user-management.rst
managing-remotes.rst
maintenance.rst
backup-client.rst
pve-integration.rst
pxar-tool.rst
tape-backup.rst
managing-remotes.rst
maintenance.rst
sysadmin.rst
network-management.rst
technical-overview.rst
faq.rst

View File

@ -113,9 +113,9 @@ Client Installation
Install `Proxmox Backup`_ Client on Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox ships as a set of Debian packages to be installed on
top of a standard Debian installation. After configuring the
:ref:`sysadmin_package_repositories`, you need to run:
Proxmox ships as a set of Debian packages to be installed on top of a standard
Debian installation. After configuring the :ref:`package_repositories_client_only_apt`,
you need to run:
.. code-block:: console
@ -123,12 +123,6 @@ top of a standard Debian installation. After configuring the
# apt-get install proxmox-backup-client
Installing from source
~~~~~~~~~~~~~~~~~~~~~~
.. note:: The client-only repository should be usable by most recent Debian and
Ubuntu derivatives.
.. todo:: Add section "Installing from source"
Installing statically linked binary
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. todo:: Add section "Installing statically linked binary"

View File

@ -65,10 +65,10 @@ Main Features
:Compression: The ultra-fast Zstandard_ compression is able to compress
several gigabytes of data per second.
:Encryption: Backups can be encrypted on the client-side, using AES-256 in
Galois/Counter Mode (GCM_). This authenticated encryption (AE_) mode
provides very high performance on modern hardware. In addition to client-side
encryption, all data is transferred via a secure TLS connection.
:Encryption: Backups can be encrypted on the client-side, using AES-256 GCM_.
This authenticated encryption (AE_) mode provides very high performance on
modern hardware. In addition to client-side encryption, all data is
transferred via a secure TLS connection.
:Web interface: Manage the Proxmox Backup Server with the integrated, web-based
user interface.
@ -76,8 +76,16 @@ Main Features
:Open Source: No secrets. Proxmox Backup Server is free and open-source
software. The source code is licensed under AGPL, v3.
:Support: Enterprise support will be available from `Proxmox`_ once the beta
phase is over.
:No Limits: Proxmox Backup Server has no artificial limits for backup storage or
backup-clients.
:Enterprise Support: Proxmox Server Solutions GmbH offers enterprise support in
the form of `Proxmox Backup Server Subscription Plans
<https://www.proxmox.com/en/proxmox-backup-server/pricing>`_. Users at every
subscription level get access to the Proxmox Backup :ref:`Enterprise
Repository <sysadmin_package_repos_enterprise>`. In addition, with a Basic,
Standard or Premium subscription, users have access to the :ref:`Proxmox
Customer Portal <get_help_enterprise_support>`.
Reasons for Data Backup?
@ -117,8 +125,8 @@ Proxmox Backup Server consists of multiple components:
* A client CLI tool (`proxmox-backup-client`) to access the server easily from
any `Linux amd64` environment
Aside from the web interface, everything is written in the Rust programming
language.
Aside from the web interface, most parts of Proxmox Backup Server are written in
the Rust programming language.
"The Rust programming language helps you write faster, more reliable software.
High-level ergonomics and low-level control are often at odds in programming
@ -134,6 +142,17 @@ language.
Getting Help
------------
.. _get_help_enterprise_support:
Enterprise Support
~~~~~~~~~~~~~~~~~~
Users with a `Proxmox Backup Server Basic, Standard or Premium Subscription Plan
<https://www.proxmox.com/en/proxmox-backup-server/pricing>`_ have access to the
`Proxmox Customer Portal <https://my.proxmox.com>`_. The customer portal
provides support with guaranteed response times from the Proxmox developers.
For more information or for volume discounts, please contact office@proxmox.com.
Community Support Forum
~~~~~~~~~~~~~~~~~~~~~~~

View File

@ -148,7 +148,7 @@ are checked again. The interface for creating verify jobs can be found under the
**Verify Jobs** tab of the datastore.
.. Note:: It is recommended that you reverify all backups at least monthly, even
if a previous verification was successful. This is becuase physical drives
if a previous verification was successful. This is because physical drives
are susceptible to damage over time, which can cause an old, working backup
to become corrupted in a process known as `bit rot/data degradation
<https://en.wikipedia.org/wiki/Data_degradation>`_. It is good practice to

View File

@ -29,6 +29,8 @@ update``.
In addition, you need a package repository from Proxmox to get Proxmox Backup
updates.
.. _package_repos_secure_apt:
SecureApt
~~~~~~~~~
@ -69,10 +71,12 @@ Here, the output should be:
f3f6c5a3a67baf38ad178e5ff1ee270c /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
.. _sysadmin_package_repos_enterprise:
`Proxmox Backup`_ Enterprise Repository
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This will be the default, stable, and recommended repository. It is available for
This is the stable, recommended repository. It is available for
all `Proxmox Backup`_ subscription users. It contains the most stable packages,
and is suitable for production use. The ``pbs-enterprise`` repository is
enabled by default:
@ -137,3 +141,40 @@ You can access this repository by adding the following line to
:caption: sources.list entry for ``pbstest``
deb http://download.proxmox.com/debian/pbs buster pbstest
.. _package_repositories_client_only:
Proxmox Backup Client-only Repository
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you want to :ref:`use the Proxmox Backup Client <client_creating_backups>`
on systems using a Linux distribution not based on Proxmox projects, you can
use the client-only repository.
Currently, there is only a client repository for APT-based systems.
.. _package_repositories_client_only_apt:
APT-based Proxmox Backup Client Repository
++++++++++++++++++++++++++++++++++++++++++
For modern Linux distributions that use `apt` as their package manager, as all
Debian and Ubuntu derivatives do, you may be able to use the APT-based repository.
This repository is tested with:
- Debian Buster
- Ubuntu 20.04 LTS
It may work with older versions, and should work with more recently released ones.
In order to configure this repository, you first need to :ref:`set up the Proxmox
release key <package_repos_secure_apt>`. After that, add the repository URL to
the APT sources list.
Edit the file ``/etc/apt/sources.list.d/pbs-client.list`` and add the following
snippet:
.. code-block:: sources.list
:caption: File: ``/etc/apt/sources.list.d/pbs-client.list``
deb http://download.proxmox.com/debian/pbs-client buster main
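Afterwards, refresh the package index and install the client (the install command matches the one in the installation chapter):

.. code-block:: console

   # apt-get update
   # apt-get install proxmox-backup-client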

View File

@ -13,39 +13,3 @@ parameter. It accepts the following values:
:``json``: JSON (single line).
:``json-pretty``: JSON (multiple lines, nicely formatted).
Device driver options can be specified as integer numbers (see
``/usr/include/linux/mtio.h``), or using symbolic names:
:``buffer-writes``: Enable buffered writes
:``async-writes``: Enable async writes
:``read-ahead``: Use read-ahead for fixed block size
:``debugging``: Enable debugging if compiled into the driver
:``two-fm``: Write two file marks when closing the file
:``fast-mteom``: Space directly to eod (and lose file number)
:``auto-lock``: Automatically lock/unlock drive door
:``def-writes``: Defaults are meant only for writes
:``can-bsr``: Indicates that the drive can space backwards
:``no-blklims``: Drive does not support read block limits
:``can-partitions``: Drive can handle partitioned tapes
:``scsi2locical``: Seek and tell use SCSI-2 logical block addresses
:``sysv``: Enable the System V semantics
:``nowait``: Do not wait for rewind, etc. to complete
:``sili``: Enables setting the SILI bit in SCSI commands when reading
in variable block mode to enhance performance when reading blocks
shorter than the byte count
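As a quick illustration of the ``--output-format`` parameter described at the top of this file (a sketch; ``mydrive`` is the example drive name used elsewhere in these docs):

.. code-block:: console

   # proxmox-tape status --drive mydrive --output-format json-pretty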

View File

@ -0,0 +1,3 @@
Command line tool for restoring files and directories from PBS archives. In contrast to
proxmox-backup-client, this supports both container/host and VM backups.

View File

@ -0,0 +1,28 @@
==========================
proxmox-file-restore
==========================
.. include:: ../epilog.rst
-----------------------------------------------------------------------
Command line tool for restoring files and directories from PBS archives
-----------------------------------------------------------------------
:Author: |AUTHOR|
:Version: Version |VERSION|
:Manual section: 1
Synopsis
==========
.. include:: synopsis.rst
Description
============
.. include:: description.rst
.. include:: ../pbs-copyright.rst

View File

@ -3,6 +3,26 @@
`Proxmox VE`_ Integration
-------------------------
A Proxmox Backup Server can be integrated into a Proxmox VE setup by adding the
former as a storage in a Proxmox VE standalone or cluster setup.
See also the `Proxmox VE Storage - Proxmox Backup Server
<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#storage_pbs>`_ section
of the Proxmox VE Administration Guide for Proxmox VE specific documentation.
Using the Proxmox VE Web-Interface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox VE has native API and web-interface integration of Proxmox Backup
Server since the `Proxmox VE 6.3 release
<https://pve.proxmox.com/wiki/Roadmap#Proxmox_VE_6.3>`_.
A Proxmox Backup Server can be added under ``Datacenter -> Storage``.
Using the Proxmox VE Command-Line
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You need to define a new storage with type 'pbs' on your `Proxmox VE`_
node. The following example uses ``store2`` as storage name, and
assumes the server address is ``localhost``, and you want to connect
@ -41,9 +61,9 @@ After that you should be able to see storage status with:
Name Type Status Total Used Available %
store2 pbs active 3905109820 1336687816 2568422004 34.23%
Having added the PBS datastore to `Proxmox VE`_, you can backup VMs and
containers in the same way you would for any other storage device within the
environment (see `PVE Admin Guide: Backup and Restore
Having added the Proxmox Backup Server datastore to `Proxmox VE`_, you can
backup VMs and containers in the same way you would for any other storage
device within the environment (see `Proxmox VE Admin Guide: Backup and Restore
<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_vzdump>`_).
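For reference, the storage definition described above can be created from the Proxmox VE shell roughly as follows (a sketch using the example names from this section; exact options may vary between Proxmox VE versions):

.. code-block:: console

   # pvesm add pbs store2 --server localhost --datastore store2
   # pvesm set store2 --username store2@pbs --password <secret>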

View File

@ -1,5 +1,5 @@
Storage
=======
Backup Storage
==============
.. _storage_disk_management:

View File

@ -4,12 +4,11 @@ Tape Backup
===========
.. CAUTION:: Tape Backup is a technical preview feature, not meant for
production use. To enable it in the GUI, you need to issue the
following command (as root user on the console):
production use.
.. code-block:: console
# touch /etc/proxmox-backup/tape.cfg
.. image:: images/screenshots/pbs-gui-tape-changer-overview.png
:align: right
:alt: Tape Backup: Tape changer overview
Proxmox tape backup provides an easy way to store datastore content
onto magnetic tapes. This increases data safety because you get:
@ -59,7 +58,7 @@ In general, LTO tapes offer the following advantages:
- Cold Media
- Movable (storable inside vault)
- Multiple vendors (for both media and drives)
- Build in AES-GCM Encryption engine
- Built in AES-GCM Encryption engine
Note that `Proxmox Backup Server` already stores compressed data, so using the
tape compression feature has no advantage.
@ -69,12 +68,16 @@ Supported Hardware
------------------
Proxmox Backup Server supports `Linear Tape-Open`_ generation 4 (LTO-4)
or later. In general, all SCSI-2 tape drives supported by the Linux
kernel should work, but features like hardware encryption need LTO-4
or later.
Tape changing is carried out using the Linux 'mtx' command line
tool, so any changer device supported by this tool should work.
Tape changing is carried out using the SCSI Medium Changer protocol,
so all modern tape libraries should work.
.. Note:: We use a custom user space tape driver written in Rust_. This
driver directly communicates with the tape drive using the SCSI
generic interface. This may have negative side effects when used with the old
Linux kernel tape driver, so you should not use that driver with
Proxmox tape backup.
Drive Performance
@ -84,7 +87,7 @@ Current LTO-8 tapes provide read/write speeds of up to 360 MB/s. This means,
that it still takes a minimum of 9 hours to completely write or
read a single tape (even at maximum speed).
The only way to speed up that data rate is to use more than one
The only way to speed that data rate up is to use more than one
drive. That way, you can run several backup jobs in parallel, or run
restore jobs while the other drives are used for backups.
@ -93,15 +96,16 @@ Also consider that you first need to read data from your datastore
rate. We measured a maximum rate of about 60MB/s to 100MB/s in practice,
so it takes 33 hours to read the 12TB needed to fill up an LTO-8 tape. If you want
to write to your tape at full speed, please make sure that the source
datastore is able to deliver that performance (e.g, by using SSDs).
datastore is able to deliver that performance (for example, by using SSDs).
Terminology
-----------
:Tape Labels: are used to uniquely identify a tape. You would normally apply a
sticky paper label to the front of the cartridge. We additionally store the
label text magnetically on the tape (first file on tape).
**Tape Labels:**
are used to uniquely identify a tape. You would normally apply a
sticky paper label to the front of the cartridge. We additionally
store the label text magnetically on the tape (first file on tape).
.. _Code 39: https://en.wikipedia.org/wiki/Code_39
@ -109,7 +113,8 @@ Terminology
.. _LTO Barcode Generator: lto-barcode/index.html
:Barcodes: are a special form of tape labels, which are electronically
**Barcodes:**
are a special form of tape labels, which are electronically
readable. Most LTO tape robots use an 8 character string encoded as
`Code 39`_, as defined in the `LTO Ultrium Cartridge Label
Specification`_.
@ -118,42 +123,49 @@ Terminology
or print them yourself. You can use our `LTO Barcode Generator`_
app, if you would like to print them yourself.
.. Note:: Physical labels and the associated adhesive should have an
.. Note:: Physical labels and the associated adhesive should have an
environmental performance to match or exceed the environmental
specifications of the cartridge to which it is applied.
:Media Pools: A media pool is a logical container for tapes. A backup
job targets one media pool, so a job only uses tapes from that
pool. The pool additionally defines how long a backup job can
append data to tapes (allocation policy) and how long you want to
keep the data (retention policy).
**Media Pools:**
A media pool is a logical container for tapes. A backup job targets
one media pool, so a job only uses tapes from that pool. The pool
additionally defines how long a backup job can append data to tapes
(allocation policy) and how long you want to keep the data
(retention policy).
:Media Set: A group of continuously written tapes (all from the same
media pool).
**Media Set:**
A group of continuously written tapes (all from the same media pool).
:Tape drive: The device used to read and write data to the tape. There
are standalone drives, but drives are usually shipped within tape libraries.
**Tape drive:**
The device used to read and write data to the tape. There are
standalone drives, but drives are usually shipped within tape
libraries.
:Tape changer: A device which can change the tapes inside a tape drive
(tape robot). They are usually part of a tape library.
**Tape changer:**
A device which can change the tapes inside a tape drive (tape
robot). They are usually part of a tape library.
.. _Tape Library: https://en.wikipedia.org/wiki/Tape_library
:`Tape library`_: A storage device that contains one or more tape drives,
a number of slots to hold tape cartridges, a barcode reader to
identify tape cartridges, and an automated method for loading tapes
(a robot).
`Tape library`_:
A storage device that contains one or more tape drives, a number of
slots to hold tape cartridges, a barcode reader to identify tape
cartridges, and an automated method for loading tapes (a robot).
This is also commonly known as an 'autoloader', 'tape robot' or 'tape jukebox'.
This is also commonly known as an 'autoloader', 'tape robot' or
'tape jukebox'.
:Inventory: The inventory stores the list of known tapes (with
additional status information).
**Inventory:**
The inventory stores the list of known tapes (with additional status
information).
:Catalog: A media catalog stores information about the media content.
**Catalog:**
A media catalog stores information about the media content.
Tape Quick Start
---------------
----------------
1. Configure your tape hardware (drives and changers)
@ -176,8 +188,15 @@ same configuration.
Tape changers
~~~~~~~~~~~~~
Tape changers (robots) are part of a `Tape Library`_. You can skip
this step if you are using a standalone drive.
.. image:: images/screenshots/pbs-gui-tape-changers.png
:align: right
:alt: Tape Backup: Tape Changers
Tape changers (robots) are part of a `Tape Library`_. They contain a number of
slots to hold tape cartridges, a barcode reader to identify tape cartridges and
an automated method for loading tapes.
You can skip this step if you are using a standalone drive.
Linux is able to auto detect these devices, and you can get a list
of available devices using:
@ -204,6 +223,13 @@ Where ``sl3`` is an arbitrary name you can choose.
``/dev/tape/by-id/``. Names like ``/dev/sg0`` may point to a
different device after reboot, and that is not what you want.
.. image:: images/screenshots/pbs-gui-tape-changers-add.png
:align: right
:alt: Tape Backup: Add a new tape changer
This operation can also be carried out from the GUI, by navigating to the
**Changers** tab of **Tape Backup** and clicking **Add**.
You can display the final configuration with:
.. code-block:: console
@ -217,7 +243,8 @@ You can display the final configuration with:
│ path │ /dev/tape/by-id/scsi-CC2C52 │
└──────┴─────────────────────────────┘
Or simply list all configured changer devices:
Or simply list all configured changer devices (as seen in the **Changers** tab
of the GUI):
.. code-block:: console
@ -228,7 +255,7 @@ Or simply list all configured changer devices:
│ sl3 │ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
└──────┴─────────────────────────────┴─────────┴──────────────┴────────────┘
The Vendor, Model and Serial number are auto detected, but only shown
The Vendor, Model and Serial number are auto-detected, but only shown
if the device is online.
To test your setup, please query the status of the changer device with:
@ -261,12 +288,12 @@ It's worth noting that some of the smaller tape libraries don't have
such slots. While they have something called a "Mail Slot", that slot
is just a way to grab the tape from the gripper. They are unable
to hold media while the robot does other things. They also do not
expose that "Mail Slot" over the SCSI interface, so you wont see them in
expose that "Mail Slot" over the SCSI interface, so you won't see them in
the status output.
As a workaround, you can mark some of the normal slots as export
slot. The software treats those slots like real ``import-export``
slots, and the media inside those slots is considered to be 'offline'
slots, and the media inside those slots are considered to be 'offline'
(not available for backup):
.. code-block:: console
@ -302,6 +329,10 @@ the status output:
Tape drives
~~~~~~~~~~~
.. image:: images/screenshots/pbs-gui-tape-drives.png
:align: right
:alt: Tape Backup: Drive list
Linux is able to auto detect tape drives, and you can get a list
of available tape drives using:
@ -311,18 +342,23 @@ of available tape drives using:
┌────────────────────────────────┬────────┬─────────────┬────────┐
│ path │ vendor │ model │ serial │
╞════════════════════════════════╪════════╪═════════════╪════════╡
│ /dev/tape/by-id/scsi-12345-nst │ IBM │ ULT3580-TD4 │ 12345 │
│ /dev/tape/by-id/scsi-12345-sg │ IBM │ ULT3580-TD4 │ 12345 │
└────────────────────────────────┴────────┴─────────────┴────────┘
.. image:: images/screenshots/pbs-gui-tape-drives-add.png
:align: right
:alt: Tape Backup: Add a tape drive
In order to use that drive with Proxmox, you need to create a
configuration entry:
configuration entry. This can be done through **Tape Backup -> Drives** in the
GUI or by using the command below:
.. code-block:: console
# proxmox-tape drive create mydrive --path /dev/tape/by-id/scsi-12345-nst
# proxmox-tape drive create mydrive --path /dev/tape/by-id/scsi-12345-sg
.. Note:: Please use the persistent device path names from inside
``/dev/tape/by-id/``. Names like ``/dev/nst0`` may point to a
``/dev/tape/by-id/``. Names like ``/dev/sg0`` may point to a
different device after reboot, and that is not what you want.
If you have a tape library, you also need to set the associated
@ -346,7 +382,7 @@ You can display the final configuration with:
╞═════════╪════════════════════════════════╡
│ name │ mydrive │
├─────────┼────────────────────────────────┤
│ path │ /dev/tape/by-id/scsi-12345-nst
│ path │ /dev/tape/by-id/scsi-12345-sg
├─────────┼────────────────────────────────┤
│ changer │ sl3 │
└─────────┴────────────────────────────────┘
@ -362,10 +398,10 @@ To list all configured drives use:
┌──────────┬────────────────────────────────┬─────────┬────────┬─────────────┬────────┐
│ name │ path │ changer │ vendor │ model │ serial │
╞══════════╪════════════════════════════════╪═════════╪════════╪═════════════╪════════╡
│ mydrive │ /dev/tape/by-id/scsi-12345-nst │ sl3 │ IBM │ ULT3580-TD4 │ 12345 │
│ mydrive │ /dev/tape/by-id/scsi-12345-sg │ sl3 │ IBM │ ULT3580-TD4 │ 12345 │
└──────────┴────────────────────────────────┴─────────┴────────┴─────────────┴────────┘
The Vendor, Model and Serial number are auto detected, but only shown
The Vendor, Model and Serial number are auto detected and only shown
if the device is online.
For testing, you can simply query the drive status with:
@ -373,13 +409,35 @@ For testing, you can simply query the drive status with:
.. code-block:: console
# proxmox-tape status --drive mydrive
┌────────────────┬──────────────────────────┐
│ Name           │ Value                    │
╞════════════════╪══════════════════════════╡
│ blocksize      │ 0                        │
├────────────────┼──────────────────────────┤
│ status         │ DRIVE_OPEN | IM_REP_EN   │
├────────────────┼──────────────────────────┤
│ density        │ LTO4                     │
├────────────────┼──────────────────────────┤
│ compression    │ 1                        │
├────────────────┼──────────────────────────┤
│ buffer-mode    │ 1                        │
├────────────────┼──────────────────────────┤
│ alert-flags    │ (empty)                  │
├────────────────┼──────────────────────────┤
│ file-number    │ 0                        │
├────────────────┼──────────────────────────┤
│ block-number   │ 0                        │
├────────────────┼──────────────────────────┤
│ manufactured   │ Fri Dec 13 01:00:00 2019 │
├────────────────┼──────────────────────────┤
│ bytes-written  │ 501.80 GiB               │
├────────────────┼──────────────────────────┤
│ bytes-read     │ 4.00 MiB                 │
├────────────────┼──────────────────────────┤
│ medium-passes  │ 20                       │
├────────────────┼──────────────────────────┤
│ medium-wearout │ 0.12%                    │
├────────────────┼──────────────────────────┤
│ volume-mounts  │ 2                        │
└────────────────┴──────────────────────────┘
.. NOTE:: Blocksize should always be 0 (variable block size
mode). This is the default anyway.
@ -390,8 +448,12 @@ For testing, you can simply query the drive status with:
Media Pools
~~~~~~~~~~~
.. image:: images/screenshots/pbs-gui-tape-pools.png
:align: right
:alt: Tape Backup: Media Pools
A media pool is a logical container for tapes. A backup job targets
one media pool, so a job only uses tapes from that pool.
a single media pool, so a job only uses tapes from that pool.
.. topic:: Media Set
@ -411,7 +473,7 @@ one media pool, so a job only uses tapes from that pool.
The pool additionally defines how long backup jobs can append data
to a media set. The following settings are possible:
- Try to use the current media set.
- Try to use the current media set (``continue``).
This setting produces one large media set. While this is very
space efficient (deduplication, no unused space), it can lead to
@ -433,7 +495,7 @@ one media pool, so a job only uses tapes from that pool.
.. NOTE:: Retention period starts with the existence of a newer
media set.
- Always create a new media set.
- Always create a new media set (``always``).
With this setting, each backup job creates a new media set. This
is less space efficient, because the media from the last set
@ -510,8 +572,12 @@ one media pool, so a job only uses tapes from that pool.
if the sources are from different namespaces with conflicting names
(for example, if the sources are from different Proxmox VE clusters).
.. image:: images/screenshots/pbs-gui-tape-pools-add.png
:align: right
:alt: Tape Backup: Add a media pool
The following command creates a new media pool:
To create a new media pool, add one from **Tape Backup -> Media Pools** in the
GUI, or enter the following command:
.. code-block:: console
@ -520,7 +586,7 @@ The following command creates a new media pool:
# proxmox-tape pool create daily --drive mydrive
Additional option can be set later, using the update command:
Additional options can be set later, using the update command:
.. code-block:: console
@ -543,6 +609,10 @@ To list all configured pools use:
Tape Backup Jobs
~~~~~~~~~~~~~~~~
.. image:: images/screenshots/pbs-gui-tape-backup-jobs.png
:align: right
:alt: Tape Backup: Tape Backup Jobs
To automate tape backup, you can configure tape backup jobs which
write datastore content to a media pool, based on a specific time schedule.
The required settings are:
@ -618,6 +688,14 @@ To remove a job, please use:
# proxmox-tape backup-job remove job2
.. image:: images/screenshots/pbs-gui-tape-backup-jobs-add.png
:align: right
:alt: Tape Backup: Add a backup job
This same functionality also exists in the GUI, under the **Backup Jobs** tab of
**Tape Backup**, where *Local Datastore* is the datastore you want to back up
and *Media Pool* is the pool to back up to.
Administration
--------------
@ -633,7 +711,7 @@ variable:
You can then omit the ``--drive`` parameter from the command. If the
drive has an associated changer device, you may also omit the changer
parameter from commands that needs a changer device, for example:
parameter from commands that need a changer device, for example:
.. code-block:: console
@ -707,7 +785,7 @@ can then label all unlabeled tapes with a single command:
Run Tape Backups
~~~~~~~~~~~~~~~~
To manually run a backup job use:
To manually run a backup job click *Run Now* in the GUI or use the command:
.. code-block:: console
@ -772,7 +850,14 @@ Restore Catalog
Encryption Key Management
~~~~~~~~~~~~~~~~~~~~~~~~~
Creating a new encryption key:
.. image:: images/screenshots/pbs-gui-tape-crypt-keys.png
:align: right
:alt: Tape Backup: Encryption Keys
Proxmox Backup Server also provides an interface for handling encryption keys on
the backup server. Encryption keys can be managed from the **Tape Backup ->
Encryption Keys** section of the GUI or through the ``proxmox-tape key`` command
line tool. To create a new encryption key from the command line:
.. code-block:: console
@ -883,78 +968,3 @@ This command does the following:
- run drive cleaning operation
- unload the cleaning tape (to slot 3)
Configuration Files
-------------------
``media-pool.cfg``
~~~~~~~~~~~~~~~~~~
File Format
^^^^^^^^^^^
.. include:: config/media-pool/format.rst
Options
^^^^^^^
.. include:: config/media-pool/config.rst
``tape.cfg``
~~~~~~~~~~~~
File Format
^^^^^^^^^^^
.. include:: config/tape/format.rst
Options
^^^^^^^
.. include:: config/tape/config.rst
``tape-job.cfg``
~~~~~~~~~~~~~~~~
File Format
^^^^^^^^^^^
.. include:: config/tape-job/format.rst
Options
^^^^^^^
.. include:: config/tape-job/config.rst
Command Syntax
--------------
``proxmox-tape``
----------------
.. include:: proxmox-tape/synopsis.rst
``pmt``
-------
.. include:: pmt/options.rst
....
.. include:: pmt/synopsis.rst
``pmtx``
--------
.. include:: pmtx/synopsis.rst

View File

@ -100,7 +100,7 @@ can be encrypted, and they are handled in a slightly different manner than
normal chunks.
The hashes of encrypted chunks are calculated not with the actual (encrypted)
chunk content, but with the plaintext content concatenated with the encryption
chunk content, but with the plain-text content concatenated with the encryption
key. This way, two chunks of the same data encrypted with different keys
generate two different checksums and no collisions occur for multiple
encryption keys.
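A minimal sketch of that hashing scheme (illustrative only; it uses the generic sha2 crate, not the project's actual hashing code):

.. code-block:: rust

   use sha2::{Digest, Sha256};

   /// Digest for an encrypted chunk: hash over plaintext || key, so the same
   /// plaintext encrypted under two different keys yields two different IDs.
   fn encrypted_chunk_id(plaintext: &[u8], key: &[u8]) -> [u8; 32] {
       let mut hasher = Sha256::new();
       hasher.update(plaintext);
       hasher.update(key);
       hasher.finalize().into()
   }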
@ -138,7 +138,7 @@ will see that the probability of a collision in that scenario is:
For context, in a lottery game of guessing 6 out of 45, the chance to correctly
guess all 6 numbers is only :math:`1.2277 * 10^{-7}`, that means the chance of
collission is about the same as winning 13 such lotto games *in a row*.
a collision is about the same as winning 13 such lotto games *in a row*.
In conclusion, it is extremely unlikely that such a collision would occur by
accident in a normal datastore.

View File

@ -12,7 +12,7 @@ pub mod version;
pub mod ping;
pub mod pull;
pub mod tape;
mod helpers;
pub mod helpers;
use proxmox::api::router::SubdirMap;
use proxmox::api::Router;

View File

@ -477,6 +477,17 @@ pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error>
user::save_config(&config)?;
let authenticator = crate::auth::lookup_authenticator(userid.realm())?;
match authenticator.remove_password(userid.name()) {
Ok(()) => {},
Err(err) => {
eprintln!(
"error removing password after deleting user {:?}: {}",
userid, err
);
}
}
match crate::config::tfa::read().and_then(|mut cfg| {
let _: bool = cfg.remove_user(&userid);
crate::config::tfa::write(&cfg)

View File

@ -1385,7 +1385,7 @@ pub fn pxar_file_download(
let mut split = components.splitn(2, |c| *c == b'/');
let pxar_name = std::str::from_utf8(split.next().unwrap())?;
let file_path = split.next().ok_or_else(|| format_err!("filepath looks strange '{}'", filepath))?;
let file_path = split.next().unwrap_or(b"/");
let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
for file in files {
if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {

View File

@ -27,7 +27,7 @@ use crate::{
SLOT_ARRAY_SCHEMA,
EXPORT_SLOT_LIST_SCHEMA,
ScsiTapeChanger,
LinuxTapeDrive,
LtoTapeDrive,
},
tape::{
linux_tape_changer_list,
@ -303,7 +303,7 @@ pub fn delete_changer(name: String, _param: Value) -> Result<(), Error> {
None => bail!("Delete changer '{}' failed - no such entry", name),
}
let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
let drive_list: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;
for drive in drive_list {
if let Some(changer) = drive.changer {
if changer == name {

View File

@ -19,12 +19,12 @@ use crate::{
DRIVE_NAME_SCHEMA,
CHANGER_NAME_SCHEMA,
CHANGER_DRIVENUM_SCHEMA,
LINUX_DRIVE_PATH_SCHEMA,
LinuxTapeDrive,
LTO_DRIVE_PATH_SCHEMA,
LtoTapeDrive,
ScsiTapeChanger,
},
tape::{
linux_tape_device_list,
lto_tape_device_list,
check_drive_path,
},
};
@ -37,7 +37,7 @@ use crate::{
schema: DRIVE_NAME_SCHEMA,
},
path: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
},
changer: {
schema: CHANGER_NAME_SCHEMA,
@ -60,13 +60,13 @@ pub fn create_drive(param: Value) -> Result<(), Error> {
let (mut config, _digest) = config::drive::config()?;
let item: LinuxTapeDrive = serde_json::from_value(param)?;
let item: LtoTapeDrive = serde_json::from_value(param)?;
let linux_drives = linux_tape_device_list();
let lto_drives = lto_tape_device_list();
check_drive_path(&linux_drives, &item.path)?;
check_drive_path(&lto_drives, &item.path)?;
let existing: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
let existing: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;
for drive in existing {
if drive.name == item.name {
@ -77,7 +77,7 @@ pub fn create_drive(param: Value) -> Result<(), Error> {
}
}
config.set_data(&item.name, "linux", &item)?;
config.set_data(&item.name, "lto", &item)?;
config::drive::save_config(&config)?;
@ -93,7 +93,7 @@ pub fn create_drive(param: Value) -> Result<(), Error> {
},
},
returns: {
type: LinuxTapeDrive,
type: LtoTapeDrive,
},
access: {
permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_AUDIT, false),
@ -104,11 +104,11 @@ pub fn get_config(
name: String,
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<LinuxTapeDrive, Error> {
) -> Result<LtoTapeDrive, Error> {
let (config, digest) = config::drive::config()?;
let data: LinuxTapeDrive = config.lookup("linux", &name)?;
let data: LtoTapeDrive = config.lookup("lto", &name)?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
@ -123,7 +123,7 @@ pub fn get_config(
description: "The list of configured drives (with config digest).",
type: Array,
items: {
type: LinuxTapeDrive,
type: LtoTapeDrive,
},
},
access: {
@ -135,13 +135,13 @@ pub fn get_config(
pub fn list_drives(
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<LinuxTapeDrive>, Error> {
) -> Result<Vec<LtoTapeDrive>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let (config, digest) = config::drive::config()?;
let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
let drive_list: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;
let drive_list = drive_list
.into_iter()
@ -176,7 +176,7 @@ pub enum DeletableProperty {
schema: DRIVE_NAME_SCHEMA,
},
path: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
changer: {
@ -225,7 +225,7 @@ pub fn update_drive(
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let mut data: LinuxTapeDrive = config.lookup("linux", &name)?;
let mut data: LtoTapeDrive = config.lookup("lto", &name)?;
if let Some(delete) = delete {
for delete_prop in delete {
@ -240,8 +240,8 @@ pub fn update_drive(
}
if let Some(path) = path {
let linux_drives = linux_tape_device_list();
check_drive_path(&linux_drives, &path)?;
let lto_drives = lto_tape_device_list();
check_drive_path(&lto_drives, &path)?;
data.path = path;
}
@ -261,7 +261,7 @@ pub fn update_drive(
}
}
config.set_data(&name, "linux", &data)?;
config.set_data(&name, "lto", &data)?;
config::drive::save_config(&config)?;
@ -290,8 +290,8 @@ pub fn delete_drive(name: String, _param: Value) -> Result<(), Error> {
match config.sections.get(&name) {
Some((section_type, _)) => {
if section_type != "linux" {
bail!("Entry '{}' exists, but is not a linux tape drive", name);
if section_type != "lto" {
bail!("Entry '{}' exists, but is not a lto tape drive", name);
}
config.sections.remove(&name);
},

View File

@ -48,7 +48,7 @@ pub fn list_dir_content<R: Read + Seek>(
let mut components = path.clone();
components.push(b'/');
components.extend(&direntry.name);
let mut entry = ArchiveEntry::new(&components, &direntry.attr);
let mut entry = ArchiveEntry::new(&components, Some(&direntry.attr));
if let DirEntryAttribute::File { size, mtime } = direntry.attr {
entry.size = size.into();
entry.mtime = mtime.into();

View File

@ -7,7 +7,7 @@ use proxmox::api::{api, RpcEnvironment, RpcEnvironmentType, Permission};
use proxmox::api::router::{Router, SubdirMap};
use crate::server::WorkerTask;
use crate::tools::{apt, http, subscription};
use crate::tools::{apt, http::SimpleHttp, subscription};
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::api2::types::{Authid, APTUpdateInfo, NODE_SCHEMA, UPID_SCHEMA};
@ -194,10 +194,12 @@ fn apt_get_changelog(
bail!("Package '{}' not found", name);
}
let mut client = SimpleHttp::new(None); // TODO: pass proxy_config
let changelog_url = &pkg_info[0].change_log_url;
// FIXME: use 'apt-get changelog' for proxmox packages as well, once repo supports it
if changelog_url.starts_with("http://download.proxmox.com/") {
let changelog = crate::tools::runtime::block_on(http::get_string(changelog_url, None))
let changelog = crate::tools::runtime::block_on(client.get_string(changelog_url, None))
.map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
Ok(json!(changelog))
@ -221,7 +223,7 @@ fn apt_get_changelog(
auth_header.insert("Authorization".to_owned(),
format!("Basic {}", base64::encode(format!("{}:{}", key, id))));
let changelog = crate::tools::runtime::block_on(http::get_string(changelog_url, Some(&auth_header)))
let changelog = crate::tools::runtime::block_on(client.get_string(changelog_url, Some(&auth_header)))
.map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
Ok(json!(changelog))

View File

@ -32,9 +32,6 @@ use crate::api2::types::{NODE_SCHEMA, SUBSCRIPTION_KEY_SCHEMA, Authid};
pub fn check_subscription(
force: bool,
) -> Result<(), Error> {
// FIXME: drop once proxmox-api-macro is bumped to >> 5.0.0-1
let _remove_me = API_METHOD_CHECK_SUBSCRIPTION_PARAM_DEFAULT_FORCE;
let info = match subscription::read_subscription() {
Err(err) => bail!("could not read subscription status: {}", err),
Ok(Some(info)) => info,

View File

@ -1,10 +1,11 @@
use std::path::Path;
use std::sync::Arc;
use std::sync::{Mutex, Arc};
use anyhow::{bail, format_err, Error};
use serde_json::Value;
use proxmox::{
try_block,
api::{
api,
RpcEnvironment,
@ -33,6 +34,7 @@ use crate::{
},
server::{
lookup_user_email,
TapeBackupJobSummary,
jobstate::{
Job,
JobState,
@ -176,8 +178,15 @@ pub fn do_tape_backup_job(
let (drive_config, _digest) = config::drive::config()?;
// early check/lock before starting worker
let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;
// for scheduled jobs we acquire the lock later in the worker
let drive_lock = if schedule.is_some() {
None
} else {
Some(lock_tape_device(&drive_config, &setup.drive)?)
};
let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
let email = lookup_user_email(notify_user);
let upid_str = WorkerTask::new_thread(
&worker_type,
@ -185,26 +194,40 @@ pub fn do_tape_backup_job(
auth_id.clone(),
false,
move |worker| {
let _drive_lock = drive_lock; // keep lock guard
set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
job.start(&worker.upid().to_string())?;
let mut drive_lock = drive_lock;
let (job_result, summary) = match try_block!({
if schedule.is_some() {
// for scheduled tape backup jobs, we wait indefinitely for the lock
task_log!(worker, "waiting for drive lock...");
loop {
if let Ok(lock) = lock_tape_device(&drive_config, &setup.drive) {
drive_lock = Some(lock);
break;
} // ignore errors
worker.check_abort()?;
}
}
set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
task_log!(worker,"Starting tape backup job '{}'", job_id);
if let Some(event_str) = schedule {
task_log!(worker,"task triggered by schedule '{}'", event_str);
}
let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
let email = lookup_user_email(notify_user);
let job_result = backup_worker(
backup_worker(
&worker,
datastore,
&pool_config,
&setup,
email.clone(),
);
)
}) {
Ok(summary) => (Ok(()), summary),
Err(err) => (Err(err), Default::default()),
};
let status = worker.create_state(&job_result);
@ -214,6 +237,7 @@ pub fn do_tape_backup_job(
Some(job.jobname()),
&setup,
&job_result,
summary,
) {
eprintln!("send tape backup notification failed: {}", err);
}
@ -340,13 +364,17 @@ pub fn backup(
move |worker| {
let _drive_lock = drive_lock; // keep lock guard
set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
let job_result = backup_worker(
let (job_result, summary) = match backup_worker(
&worker,
datastore,
&pool_config,
&setup,
email.clone(),
);
) {
Ok(summary) => (Ok(()), summary),
Err(err) => (Err(err), Default::default()),
};
if let Some(email) = email {
if let Err(err) = crate::server::send_tape_backup_status(
@ -354,6 +382,7 @@ pub fn backup(
None,
&setup,
&job_result,
summary,
) {
eprintln!("send tape backup notification failed: {}", err);
}
@ -374,16 +403,16 @@ fn backup_worker(
pool_config: &MediaPoolConfig,
setup: &TapeBackupJobSetup,
email: Option<String>,
) -> Result<(), Error> {
) -> Result<TapeBackupJobSummary, Error> {
let status_path = Path::new(TAPE_STATUS_DIR);
let _lock = MediaPool::lock(status_path, &pool_config.name)?;
let start = std::time::Instant::now();
let mut summary: TapeBackupJobSummary = Default::default();
task_log!(worker, "update media online status");
let changer_name = update_media_online_status(&setup.drive)?;
let pool = MediaPool::with_config(status_path, &pool_config, changer_name)?;
let pool = MediaPool::with_config(status_path, &pool_config, changer_name, false)?;
let mut pool_writer = PoolWriter::new(pool, &setup.drive, worker, email)?;
@ -402,26 +431,42 @@ fn backup_worker(
task_log!(worker, "latest-only: true (only considering latest snapshots)");
}
let datastore_name = datastore.name();
let mut errors = false;
let mut need_catalog = false; // avoid writing catalog for empty jobs
for (group_number, group) in group_list.into_iter().enumerate() {
progress.done_groups = group_number as u64;
progress.done_snapshots = 0;
progress.group_snapshots = 0;
let mut snapshot_list = group.list_backups(&datastore.base_path())?;
let snapshot_list = group.list_backups(&datastore.base_path())?;
// filter out unfinished backups
let mut snapshot_list = snapshot_list
.into_iter()
.filter(|item| item.is_finished())
.collect();
BackupInfo::sort_list(&mut snapshot_list, true); // oldest first
if latest_only {
progress.group_snapshots = 1;
if let Some(info) = snapshot_list.pop() {
if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
task_log!(worker, "skip snapshot {}", info.backup_dir);
continue;
}
need_catalog = true;
let snapshot_name = info.backup_dir.to_string();
if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
errors = true;
} else {
summary.snapshot_list.push(snapshot_name);
}
progress.done_snapshots = 1;
task_log!(
@ -433,12 +478,18 @@ fn backup_worker(
} else {
progress.group_snapshots = snapshot_list.len() as u64;
for (snapshot_number, info) in snapshot_list.into_iter().enumerate() {
if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
task_log!(worker, "skip snapshot {}", info.backup_dir);
continue;
}
need_catalog = true;
let snapshot_name = info.backup_dir.to_string();
if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
errors = true;
} else {
summary.snapshot_list.push(snapshot_name);
}
progress.done_snapshots = snapshot_number as u64 + 1;
task_log!(
@ -452,6 +503,22 @@ fn backup_worker(
pool_writer.commit()?;
if need_catalog {
task_log!(worker, "append media catalog");
let uuid = pool_writer.load_writable_media(worker)?;
let done = pool_writer.append_catalog_archive(worker)?;
if !done {
task_log!(worker, "catalog does not fit on tape, writing to next volume");
pool_writer.set_media_status_full(&uuid)?;
pool_writer.load_writable_media(worker)?;
let done = pool_writer.append_catalog_archive(worker)?;
if !done {
bail!("write_catalog_archive failed on second media");
}
}
}
if setup.export_media_set.unwrap_or(false) {
pool_writer.export_media_set(worker)?;
} else if setup.eject_media.unwrap_or(false) {
@ -462,7 +529,9 @@ fn backup_worker(
bail!("Tape backup finished with some errors. Please check the task log.");
}
Ok(())
summary.duration = start.elapsed();
Ok(summary)
}
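
For orientation, the shape of TapeBackupJobSummary can be inferred from its uses in this diff (Default::default() on error, snapshot_list.push(...) per written snapshot, duration = start.elapsed() at the end); the actual definition is the one imported from crate::server above, so treat this as an assumed sketch:

// Assumed shape, inferred from usage in this diff only.
#[derive(Default)]
pub struct TapeBackupJobSummary {
    /// Snapshots written during this run
    pub snapshot_list: Vec<String>,
    /// Wall-clock runtime of the job
    pub duration: std::time::Duration,
}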
// Try to update the media online status
@ -508,33 +577,48 @@ pub fn backup_snapshot(
}
};
let mut chunk_iter = snapshot_reader.chunk_iterator()?.peekable();
let snapshot_reader = Arc::new(Mutex::new(snapshot_reader));
let (reader_thread, chunk_iter) = pool_writer.spawn_chunk_reader_thread(
datastore.clone(),
snapshot_reader.clone(),
)?;
let mut chunk_iter = chunk_iter.peekable();
loop {
worker.check_abort()?;
// test if we have remaining chunks
if chunk_iter.peek().is_none() {
break;
match chunk_iter.peek() {
None => break,
Some(Ok(_)) => { /* Ok */ },
Some(Err(err)) => bail!("{}", err),
}
let uuid = pool_writer.load_writable_media(worker)?;
worker.check_abort()?;
let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &datastore, &mut chunk_iter)?;
let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &mut chunk_iter, datastore.name())?;
if leom {
pool_writer.set_media_status_full(&uuid)?;
}
}
if let Err(_) = reader_thread.join() {
bail!("chunk reader thread failed");
}
worker.check_abort()?;
let uuid = pool_writer.load_writable_media(worker)?;
worker.check_abort()?;
let snapshot_reader = snapshot_reader.lock().unwrap();
let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;
if !done {

View File

@ -20,7 +20,7 @@ use crate::{
Authid,
CHANGER_NAME_SCHEMA,
ChangerListEntry,
LinuxTapeDrive,
LtoTapeDrive,
MtxEntryKind,
MtxStatusEntry,
ScsiTapeChanger,
@ -88,7 +88,7 @@ pub async fn get_status(
inventory.update_online_status(&map)?;
let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
let drive_list: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;
let mut drive_map: HashMap<u64, String> = HashMap::new();
for drive in drive_list {

View File

@ -1,6 +1,7 @@
use std::panic::UnwindSafe;
use std::path::Path;
use std::sync::Arc;
use std::collections::HashMap;
use anyhow::{bail, format_err, Error};
use serde_json::Value;
@ -10,7 +11,6 @@ use proxmox::{
identity,
list_subdirs_api_method,
tools::Uuid,
sys::error::SysError,
api::{
api,
section_config::SectionConfigData,
@ -42,22 +42,29 @@ use crate::{
MEDIA_POOL_NAME_SCHEMA,
Authid,
DriveListEntry,
LinuxTapeDrive,
LtoTapeDrive,
MediaIdFlat,
LabelUuidMap,
MamAttribute,
LinuxDriveAndMediaStatus,
LtoDriveAndMediaStatus,
Lp17VolumeStatistics,
},
tape::restore::{
fast_catalog_restore,
restore_media,
},
tape::restore::restore_media,
},
server::WorkerTask,
tape::{
TAPE_STATUS_DIR,
MediaPool,
Inventory,
MediaCatalog,
MediaId,
linux_tape_device_list,
BlockReadError,
lock_media_set,
lock_media_pool,
lock_unassigned_media_pool,
lto_tape_device_list,
lookup_device_identification,
file_formats::{
MediaLabel,
@ -65,9 +72,8 @@ use crate::{
},
drive::{
TapeDriver,
LinuxTapeHandle,
Lp17VolumeStatistics,
open_linux_tape_device,
LtoTapeHandle,
open_lto_tape_device,
media_changer,
required_media_changer,
open_drive,
@ -316,8 +322,8 @@ pub fn unload(
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_WRITE, false),
},
)]
/// Erase media. Check for label-text if given (cancels if wrong media).
pub fn erase_media(
/// Format media. Check for label-text if given (cancels if wrong media).
pub fn format_media(
drive: String,
fast: Option<bool>,
label_text: Option<String>,
@ -326,7 +332,7 @@ pub fn erase_media(
let upid_str = run_drive_worker(
rpcenv,
drive.clone(),
"erase-media",
"format-media",
Some(drive.clone()),
move |worker, config| {
if let Some(ref label) = label_text {
@ -345,15 +351,15 @@ pub fn erase_media(
}
/* assume drive contains no or unrelated data */
task_log!(worker, "unable to read media label: {}", err);
task_log!(worker, "erase anyways");
handle.erase_media(fast.unwrap_or(true))?;
task_log!(worker, "format anyways");
handle.format_media(fast.unwrap_or(true))?;
}
Ok((None, _)) => {
if let Some(label) = label_text {
bail!("expected label '{}', found empty tape", label);
}
task_log!(worker, "found empty media - erase anyways");
handle.erase_media(fast.unwrap_or(true))?;
task_log!(worker, "found empty media - format anyways");
handle.format_media(fast.unwrap_or(true))?;
}
Ok((Some(media_id), _key_config)) => {
if let Some(label_text) = label_text {
@ -373,11 +379,20 @@ pub fn erase_media(
);
let status_path = Path::new(TAPE_STATUS_DIR);
let mut inventory = Inventory::load(status_path)?;
let mut inventory = Inventory::new(status_path);
if let Some(MediaSetLabel { ref pool, ref uuid, ..}) = media_id.media_set_label {
let _pool_lock = lock_media_pool(status_path, pool)?;
let _media_set_lock = lock_media_set(status_path, uuid, None)?;
MediaCatalog::destroy(status_path, &media_id.label.uuid)?;
inventory.remove_media(&media_id.label.uuid)?;
handle.erase_media(fast.unwrap_or(true))?;
} else {
let _lock = lock_unassigned_media_pool(status_path)?;
MediaCatalog::destroy(status_path, &media_id.label.uuid)?;
inventory.remove_media(&media_id.label.uuid)?;
};
handle.format_media(fast.unwrap_or(true))?;
}
}
@ -489,7 +504,7 @@ pub fn eject_media(
/// Write a new media label to the media in 'drive'. The media is
/// assigned to the specified 'pool', or else to the free media pool.
///
/// Note: The media need to be empty (you may want to erase it first).
/// Note: The media need to be empty (you may want to format it first).
pub fn label_media(
drive: String,
pool: Option<String>,
@ -514,16 +529,13 @@ pub fn label_media(
drive.rewind()?;
match drive.read_next_file() {
Ok(Some(_file)) => bail!("media is not empty (erase first)"),
Ok(None) => { /* EOF mark at BOT, assume tape is empty */ },
Ok(_reader) => bail!("media is not empty (format it first)"),
Err(BlockReadError::EndOfFile) => { /* EOF mark at BOT, assume tape is empty */ },
Err(BlockReadError::EndOfStream) => { /* tape is empty */ },
Err(err) => {
if err.is_errno(nix::errno::Errno::ENOSPC) || err.is_errno(nix::errno::Errno::EIO) {
/* assume tape is empty */
} else {
bail!("media read error - {}", err);
}
}
}
let ctime = proxmox::tools::time::epoch_i64();
let label = MediaLabel {
@ -548,29 +560,38 @@ fn write_media_label(
drive.label_tape(&label)?;
let mut media_set_label = None;
let status_path = Path::new(TAPE_STATUS_DIR);
if let Some(ref pool) = pool {
let media_id = if let Some(ref pool) = pool {
// assign media to pool by writing special media set label
worker.log(format!("Label media '{}' for pool '{}'", label.label_text, pool));
let set = MediaSetLabel::with_data(&pool, [0u8; 16].into(), 0, label.ctime, None);
drive.write_media_set_label(&set, None)?;
media_set_label = Some(set);
} else {
worker.log(format!("Label media '{}' (no pool assignment)", label.label_text));
}
let media_id = MediaId { label, media_set_label };
let status_path = Path::new(TAPE_STATUS_DIR);
let media_id = MediaId { label, media_set_label: Some(set) };
// Create the media catalog
MediaCatalog::overwrite(status_path, &media_id, false)?;
let mut inventory = Inventory::load(status_path)?;
let mut inventory = Inventory::new(status_path);
inventory.store(media_id.clone(), false)?;
media_id
} else {
worker.log(format!("Label media '{}' (no pool assignment)", label.label_text));
let media_id = MediaId { label, media_set_label: None };
// Create the media catalog
MediaCatalog::overwrite(status_path, &media_id, false)?;
let mut inventory = Inventory::new(status_path);
inventory.store(media_id.clone(), false)?;
media_id
};
drive.rewind()?;
match drive.read_label() {
@ -705,14 +726,24 @@ pub async fn read_label(
if let Err(err) = drive.set_encryption(encrypt_fingerprint) {
// try, but ignore errors. just log to stderr
eprintln!("uable to load encryption key: {}", err);
eprintln!("unable to load encryption key: {}", err);
}
}
if let Some(true) = inventorize {
let state_path = Path::new(TAPE_STATUS_DIR);
let mut inventory = Inventory::load(state_path)?;
let mut inventory = Inventory::new(state_path);
if let Some(MediaSetLabel { ref pool, ref uuid, ..}) = media_id.media_set_label {
let _pool_lock = lock_media_pool(state_path, pool)?;
let _lock = lock_media_set(state_path, uuid, None)?;
MediaCatalog::destroy_unrelated_catalog(state_path, &media_id)?;
inventory.store(media_id, false)?;
} else {
let _lock = lock_unassigned_media_pool(state_path)?;
MediaCatalog::destroy(state_path, &media_id.label.uuid)?;
inventory.store(media_id, false)?;
};
}
flat
@ -760,9 +791,9 @@ pub fn clean_drive(
changer.clean_drive()?;
if let Ok(drive_config) = config.lookup::<LinuxTapeDrive>("linux", &drive) {
if let Ok(drive_config) = config.lookup::<LtoTapeDrive>("lto", &drive) {
// Note: clean_drive unloads the cleaning media, so we cannot use drive_config.open
let mut handle = LinuxTapeHandle::new(open_linux_tape_device(&drive_config.path)?);
let mut handle = LtoTapeHandle::new(open_lto_tape_device(&drive_config.path)?)?;
// test for critical tape alert flags
if let Ok(alert_flags) = handle.tape_alert_flags() {
@ -947,7 +978,17 @@ pub fn update_inventory(
continue;
}
worker.log(format!("inventorize media '{}' with uuid '{}'", label_text, media_id.label.uuid));
if let Some(MediaSetLabel { ref pool, ref uuid, ..}) = media_id.media_set_label {
let _pool_lock = lock_media_pool(state_path, pool)?;
let _lock = lock_media_set(state_path, uuid, None)?;
MediaCatalog::destroy_unrelated_catalog(state_path, &media_id)?;
inventory.store(media_id, false)?;
} else {
let _lock = lock_unassigned_media_pool(state_path)?;
MediaCatalog::destroy(state_path, &media_id.label.uuid)?;
inventory.store(media_id, false)?;
};
}
}
changer.unload_media(None)?;
@ -1047,20 +1088,17 @@ fn barcode_label_media_worker(
drive.rewind()?;
match drive.read_next_file() {
Ok(Some(_file)) => {
worker.log(format!("media '{}' is not empty (erase first)", label_text));
Ok(_reader) => {
worker.log(format!("media '{}' is not empty (format it first)", label_text));
continue;
}
Ok(None) => { /* EOF mark at BOT, assume tape is empty */ },
Err(err) => {
if err.is_errno(nix::errno::Errno::ENOSPC) || err.is_errno(nix::errno::Errno::EIO) {
/* assume tape is empty */
} else {
worker.warn(format!("media '{}' read error (maybe not empty - erase first)", label_text));
Err(BlockReadError::EndOfFile) => { /* EOF mark at BOT, assume tape is empty */ },
Err(BlockReadError::EndOfStream) => { /* tape is empty */ },
Err(_err) => {
worker.warn(format!("media '{}' read error (maybe not empty - format it first)", label_text));
continue;
}
}
}
let ctime = proxmox::tools::time::epoch_i64();
let label = MediaLabel {
@ -1100,7 +1138,7 @@ pub async fn cartridge_memory(drive: String) -> Result<Vec<MamAttribute>, Error>
drive.clone(),
"reading cartridge memory".to_string(),
move |config| {
let drive_config: LinuxTapeDrive = config.lookup("linux", &drive)?;
let drive_config: LtoTapeDrive = config.lookup("lto", &drive)?;
let mut handle = drive_config.open()?;
handle.cartridge_memory()
@ -1130,7 +1168,7 @@ pub async fn volume_statistics(drive: String) -> Result<Lp17VolumeStatistics, Er
drive.clone(),
"reading volume statistics".to_string(),
move |config| {
let drive_config: LinuxTapeDrive = config.lookup("linux", &drive)?;
let drive_config: LtoTapeDrive = config.lookup("lto", &drive)?;
let mut handle = drive_config.open()?;
handle.volume_statistics()
@ -1148,24 +1186,24 @@ pub async fn volume_statistics(drive: String) -> Result<Lp17VolumeStatistics, Er
},
},
returns: {
type: LinuxDriveAndMediaStatus,
type: LtoDriveAndMediaStatus,
},
access: {
permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_AUDIT, false),
},
)]
/// Get drive/media status
pub async fn status(drive: String) -> Result<LinuxDriveAndMediaStatus, Error> {
pub async fn status(drive: String) -> Result<LtoDriveAndMediaStatus, Error> {
run_drive_blocking_task(
drive.clone(),
"reading drive status".to_string(),
move |config| {
let drive_config: LinuxTapeDrive = config.lookup("linux", &drive)?;
let drive_config: LtoTapeDrive = config.lookup("lto", &drive)?;
// Note: use open_linux_tape_device, because this also works if no medium loaded
let file = open_linux_tape_device(&drive_config.path)?;
// Note: use open_lto_tape_device, because this also works if no medium loaded
let file = open_lto_tape_device(&drive_config.path)?;
let mut handle = LinuxTapeHandle::new(file);
let mut handle = LtoTapeHandle::new(file)?;
handle.get_drive_and_media_status()
}
@ -1184,6 +1222,11 @@ pub async fn status(drive: String) -> Result<LinuxDriveAndMediaStatus, Error> {
type: bool,
optional: true,
},
scan: {
description: "Re-read the whole tape to reconstruct the catalog instead of restoring saved versions.",
type: bool,
optional: true,
},
verbose: {
description: "Verbose mode - log all found chunks.",
type: bool,
@ -1202,11 +1245,13 @@ pub async fn status(drive: String) -> Result<LinuxDriveAndMediaStatus, Error> {
pub fn catalog_media(
drive: String,
force: Option<bool>,
scan: Option<bool>,
verbose: Option<bool>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let verbose = verbose.unwrap_or(false);
let force = force.unwrap_or(false);
let scan = scan.unwrap_or(false);
let upid_str = run_drive_worker(
rpcenv,
@ -1237,19 +1282,22 @@ pub fn catalog_media(
let status_path = Path::new(TAPE_STATUS_DIR);
let mut inventory = Inventory::load(status_path)?;
inventory.store(media_id.clone(), false)?;
let mut inventory = Inventory::new(status_path);
let pool = match media_id.media_set_label {
let (_media_set_lock, media_set_uuid) = match media_id.media_set_label {
None => {
worker.log("media is empty");
let _lock = lock_unassigned_media_pool(status_path)?;
MediaCatalog::destroy(status_path, &media_id.label.uuid)?;
inventory.store(media_id.clone(), false)?;
return Ok(());
}
Some(ref set) => {
if set.uuid.as_ref() == [0u8;16] { // media is empty
worker.log("media is empty");
let _lock = lock_unassigned_media_pool(status_path)?;
MediaCatalog::destroy(status_path, &media_id.label.uuid)?;
inventory.store(media_id.clone(), false)?;
return Ok(());
}
let encrypt_fingerprint = set.encryption_key_fingerprint.clone()
@ -1257,17 +1305,38 @@ pub fn catalog_media(
drive.set_encryption(encrypt_fingerprint)?;
set.pool.clone()
let _pool_lock = lock_media_pool(status_path, &set.pool)?;
let media_set_lock = lock_media_set(status_path, &set.uuid, None)?;
MediaCatalog::destroy_unrelated_catalog(status_path, &media_id)?;
inventory.store(media_id.clone(), false)?;
(media_set_lock, &set.uuid)
}
};
let _lock = MediaPool::lock(status_path, &pool)?;
if MediaCatalog::exists(status_path, &media_id.label.uuid) && !force {
bail!("media catalog exists (please use --force to overwrite)");
}
restore_media(&worker, &mut drive, &media_id, None, verbose)?;
if !scan {
let media_set = inventory.compute_media_set_members(media_set_uuid)?;
if fast_catalog_restore(&worker, &mut drive, &media_set, &media_id.label.uuid)? {
return Ok(())
}
task_log!(worker, "no catalog found");
}
task_log!(worker, "scanning entire media to reconstruct catalog");
drive.rewind()?;
drive.read_label()?; // skip over labels - we already read them above
let mut checked_chunks = HashMap::new();
restore_media(&worker, &mut drive, &media_id, None, &mut checked_chunks, verbose)?;
Ok(())
},
@ -1308,9 +1377,9 @@ pub fn list_drives(
let (config, _) = config::drive::config()?;
let linux_drives = linux_tape_device_list();
let lto_drives = lto_tape_device_list();
let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
let drive_list: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;
let mut list = Vec::new();
@ -1324,7 +1393,7 @@ pub fn list_drives(
continue;
}
let info = lookup_device_identification(&linux_drives, &drive.path);
let info = lookup_device_identification(&lto_drives, &drive.path);
let state = get_tape_device_state(&config, &drive.name)?;
let entry = DriveListEntry { config: drive, info, state };
list.push(entry);
@ -1356,9 +1425,9 @@ pub const SUBDIRS: SubdirMap = &sorted!([
.post(&API_METHOD_EJECT_MEDIA)
),
(
"erase-media",
"format-media",
&Router::new()
.post(&API_METHOD_ERASE_MEDIA)
.post(&API_METHOD_FORMAT_MEDIA)
),
(
"export-media",

View File

@ -122,7 +122,7 @@ pub async fn list_media(
let config: MediaPoolConfig = config.lookup("pool", pool_name)?;
let changer_name = None; // assume standalone drive
let mut pool = MediaPool::with_config(status_path, &config, changer_name)?;
let mut pool = MediaPool::with_config(status_path, &config, changer_name, true)?;
let current_time = proxmox::tools::time::epoch_i64();
@ -432,9 +432,10 @@ pub fn list_content(
.generate_media_set_name(&set.uuid, template)
.unwrap_or_else(|_| set.uuid.to_string());
let catalog = MediaCatalog::open(status_path, &media_id.label.uuid, false, false)?;
let catalog = MediaCatalog::open(status_path, &media_id, false, false)?;
for snapshot in catalog.snapshot_index().keys() {
for (store, content) in catalog.content() {
for snapshot in content.snapshot_index.keys() {
let backup_dir: BackupDir = snapshot.parse()?;
if let Some(ref backup_type) = filter.backup_type {
@ -453,10 +454,12 @@ pub fn list_content(
media_set_ctime: set.ctime,
seq_nr: set.seq_nr,
snapshot: snapshot.to_owned(),
store: store.to_owned(),
backup_time: backup_dir.backup_time(),
});
}
}
}
Ok(list)
}

View File

@ -15,7 +15,7 @@ use proxmox::{
use crate::{
api2::types::TapeDeviceInfo,
tape::{
linux_tape_device_list,
lto_tape_device_list,
linux_tape_changer_list,
},
};
@ -41,7 +41,7 @@ pub mod restore;
/// Scan tape drives
pub fn scan_drives(_param: Value) -> Result<Vec<TapeDeviceInfo>, Error> {
let list = linux_tape_device_list();
let list = lto_tape_device_list();
Ok(list)
}

View File

@ -1,6 +1,9 @@
use std::path::Path;
use std::ffi::OsStr;
use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::io::{Seek, SeekFrom};
use std::sync::Arc;
use anyhow::{bail, format_err, Error};
use serde_json::Value;
@ -12,6 +15,7 @@ use proxmox::{
RpcEnvironmentType,
Router,
Permission,
schema::parse_property_string,
section_config::SectionConfigData,
},
tools::{
@ -26,10 +30,12 @@ use proxmox::{
use crate::{
task_log,
task_warn,
task::TaskState,
tools::compute_file_csum,
api2::types::{
DATASTORE_SCHEMA,
DATASTORE_MAP_ARRAY_SCHEMA,
DATASTORE_MAP_LIST_SCHEMA,
DRIVE_NAME_SCHEMA,
UPID_SCHEMA,
Authid,
@ -40,6 +46,7 @@ use crate::{
cached_user_info::CachedUserInfo,
acl::{
PRIV_DATASTORE_BACKUP,
PRIV_DATASTORE_MODIFY,
PRIV_TAPE_READ,
},
},
@ -63,18 +70,26 @@ use crate::{
tape::{
TAPE_STATUS_DIR,
TapeRead,
BlockReadError,
MediaId,
MediaSet,
MediaCatalog,
MediaPool,
Inventory,
lock_media_set,
file_formats::{
PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1,
PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0,
MediaContentHeader,
ChunkArchiveHeader,
ChunkArchiveDecoder,
SnapshotArchiveHeader,
CatalogArchiveHeader,
},
drive::{
TapeDriver,
@ -85,14 +100,75 @@ use crate::{
},
};
pub const ROUTER: Router = Router::new()
.post(&API_METHOD_RESTORE);
pub struct DataStoreMap {
map: HashMap<String, Arc<DataStore>>,
default: Option<Arc<DataStore>>,
}
impl TryFrom<String> for DataStoreMap {
type Error = Error;
fn try_from(value: String) -> Result<Self, Error> {
let value = parse_property_string(&value, &DATASTORE_MAP_ARRAY_SCHEMA)?;
let mut mapping: Vec<String> = value
.as_array()
.unwrap()
.iter()
.map(|v| v.as_str().unwrap().to_string())
.collect();
let mut map = HashMap::new();
let mut default = None;
while let Some(mut store) = mapping.pop() {
if let Some(index) = store.find('=') {
let mut target = store.split_off(index);
target.remove(0); // remove '='
let datastore = DataStore::lookup_datastore(&target)?;
map.insert(store, datastore);
} else if default.is_none() {
default = Some(DataStore::lookup_datastore(&store)?);
} else {
bail!("multiple default stores given");
}
}
Ok(Self { map, default })
}
}
impl DataStoreMap {
fn used_datastores(&self) -> HashSet<&str> {
let mut set = HashSet::new();
for store in self.map.values() {
set.insert(store.name());
}
if let Some(ref store) = self.default {
set.insert(store.name());
}
set
}
fn get_datastore(&self, source: &str) -> Option<&DataStore> {
if let Some(store) = self.map.get(source) {
return Some(&store);
}
if let Some(ref store) = self.default {
return Some(&store);
}
return None;
}
}
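
To illustrate the mapping syntax the TryFrom impl above accepts, here is a simplified, self-contained sketch that only performs the string splitting (the real code additionally resolves each target via DataStore::lookup_datastore()):

use std::collections::HashMap;
use anyhow::{bail, Error};

// Simplified sketch of the "source=target" parsing above, on plain strings.
fn parse_mapping(entries: &[&str]) -> Result<(HashMap<String, String>, Option<String>), Error> {
    let mut map = HashMap::new();
    let mut default = None;
    for entry in entries {
        match entry.split_once('=') {
            Some((source, target)) => {
                map.insert(source.to_string(), target.to_string());
            }
            None if default.is_none() => default = Some(entry.to_string()),
            None => bail!("multiple default stores given"),
        }
    }
    Ok((map, default))
}

// "a=b,e" splits into ["a=b", "e"]: source 'a' maps to target 'b',
// while 'e' becomes the default target for all other sources.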
pub const ROUTER: Router = Router::new().post(&API_METHOD_RESTORE);
#[api(
input: {
properties: {
store: {
schema: DATASTORE_SCHEMA,
schema: DATASTORE_MAP_LIST_SCHEMA,
},
drive: {
schema: DRIVE_NAME_SCHEMA,
@ -105,6 +181,10 @@ pub const ROUTER: Router = Router::new()
type: Userid,
optional: true,
},
owner: {
type: Authid,
optional: true,
},
},
},
returns: {
@ -123,27 +203,49 @@ pub fn restore(
drive: String,
media_set: String,
notify_user: Option<Userid>,
owner: Option<Authid>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let store_map = DataStoreMap::try_from(store)
.map_err(|err| format_err!("cannot parse store mapping: {}", err))?;
let used_datastores = store_map.used_datastores();
if used_datastores.len() == 0 {
bail!("no datastores given");
}
for store in used_datastores.iter() {
let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
if (privs & PRIV_DATASTORE_BACKUP) == 0 {
bail!("no permissions on /datastore/{}", store);
}
if let Some(ref owner) = owner {
let correct_owner = owner == &auth_id
|| (owner.is_token() && !auth_id.is_token() && owner.user() == auth_id.user());
// same permission as changing ownership after syncing
if !correct_owner && privs & PRIV_DATASTORE_MODIFY == 0 {
bail!("no permission to restore as '{}'", owner);
}
}
}
let privs = user_info.lookup_privs(&auth_id, &["tape", "drive", &drive]);
if (privs & PRIV_TAPE_READ) == 0 {
bail!("no permissions on /tape/drive/{}", drive);
}
let status_path = Path::new(TAPE_STATUS_DIR);
let inventory = Inventory::load(status_path)?;
let media_set_uuid = media_set.parse()?;
let status_path = Path::new(TAPE_STATUS_DIR);
let _lock = lock_media_set(status_path, &media_set_uuid, None)?;
let inventory = Inventory::load(status_path)?;
let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;
let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool]);
@ -151,8 +253,6 @@ pub fn restore(
bail!("no permissions on /tape/pool/{}", pool);
}
let datastore = DataStore::lookup_datastore(&store)?;
let (drive_config, _digest) = config::drive::config()?;
// early check/lock before starting worker
@ -160,9 +260,14 @@ pub fn restore(
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let taskid = used_datastores
.iter()
.map(|s| s.to_string())
.collect::<Vec<String>>()
.join(", ");
let upid_str = WorkerTask::new_thread(
"tape-restore",
Some(store.clone()),
Some(taskid),
auth_id.clone(),
to_stdout,
move |worker| {
@ -170,8 +275,6 @@ pub fn restore(
set_tape_device_state(&drive, &worker.upid().to_string())?;
let _lock = MediaPool::lock(status_path, &pool)?;
let members = inventory.compute_media_set_members(&media_set_uuid)?;
let media_list = members.media_list();
@ -202,7 +305,17 @@ pub fn restore(
task_log!(worker, "Encryption key fingerprint: {}", fingerprint);
}
task_log!(worker, "Pool: {}", pool);
task_log!(worker, "Datastore: {}", store);
task_log!(
worker,
"Datastore(s): {}",
store_map
.used_datastores()
.into_iter()
.map(String::from)
.collect::<Vec<String>>()
.join(", "),
);
task_log!(worker, "Drive: {}", drive);
task_log!(
worker,
@ -213,18 +326,33 @@ pub fn restore(
.join(";")
);
let mut datastore_locks = Vec::new();
for store_name in store_map.used_datastores() {
// explicit create shared lock to prevent GC on newly created chunks
if let Some(store) = store_map.get_datastore(store_name) {
let shared_store_lock = store.try_shared_chunk_store_lock()?;
datastore_locks.push(shared_store_lock);
}
}
let mut checked_chunks_map = HashMap::new();
for media_id in media_id_list.iter() {
request_and_restore_media(
&worker,
media_id,
&drive_config,
&drive,
&datastore,
&store_map,
&mut checked_chunks_map,
&auth_id,
&notify_user,
&owner,
)?;
}
drop(datastore_locks);
task_log!(worker, "Restore mediaset '{}' done", media_set);
if let Err(err) = set_tape_device_state(&drive, "") {
@ -249,11 +377,12 @@ pub fn request_and_restore_media(
media_id: &MediaId,
drive_config: &SectionConfigData,
drive_name: &str,
datastore: &DataStore,
store_map: &DataStoreMap,
checked_chunks_map: &mut HashMap<String, HashSet<[u8;32]>>,
authid: &Authid,
notify_user: &Option<Userid>,
owner: &Option<Authid>,
) -> Result<(), Error> {
let media_set_uuid = match media_id.media_set_label {
None => bail!("restore_media: no media set - internal error"),
Some(ref set) => &set.uuid,
@ -284,7 +413,16 @@ pub fn request_and_restore_media(
}
}
restore_media(worker, &mut drive, &info, Some((datastore, authid)), false)
let restore_owner = owner.as_ref().unwrap_or(authid);
restore_media(
worker,
&mut drive,
&info,
Some((&store_map, restore_owner)),
checked_chunks_map,
false,
)
}
/// Restore complete media content and catalog
@ -294,7 +432,8 @@ pub fn restore_media(
worker: &WorkerTask,
drive: &mut Box<dyn TapeDriver>,
media_id: &MediaId,
target: Option<(&DataStore, &Authid)>,
target: Option<(&DataStoreMap, &Authid)>,
checked_chunks_map: &mut HashMap<String, HashSet<[u8;32]>>,
verbose: bool,
) -> Result<(), Error> {
@ -303,15 +442,22 @@ pub fn restore_media(
loop {
let current_file_number = drive.current_file_number()?;
let reader = match drive.read_next_file()? {
None => {
let reader = match drive.read_next_file() {
Err(BlockReadError::EndOfFile) => {
task_log!(worker, "skip unexpected filemark at pos {}", current_file_number);
continue;
}
Err(BlockReadError::EndOfStream) => {
task_log!(worker, "detected EOT after {} files", current_file_number);
break;
}
Some(reader) => reader,
Err(BlockReadError::Error(err)) => {
return Err(err.into());
}
Ok(reader) => reader,
};
restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?;
restore_archive(worker, reader, current_file_number, target, &mut catalog, checked_chunks_map, verbose)?;
}
MediaCatalog::finish_temporary_database(status_path, &media_id.label.uuid, true)?;
@ -323,11 +469,11 @@ fn restore_archive<'a>(
worker: &WorkerTask,
mut reader: Box<dyn 'a + TapeRead>,
current_file_number: u64,
target: Option<(&DataStore, &Authid)>,
target: Option<(&DataStoreMap, &Authid)>,
catalog: &mut MediaCatalog,
checked_chunks_map: &mut HashMap<String, HashSet<[u8;32]>>,
verbose: bool,
) -> Result<(), Error> {
let header: MediaContentHeader = unsafe { reader.read_le_value()? };
if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
bail!("missing MediaContentHeader");
@ -340,28 +486,46 @@ fn restore_archive<'a>(
bail!("unexpected content magic (label)");
}
PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
let snapshot = reader.read_exact_allocated(header.size as usize)?;
let snapshot = std::str::from_utf8(&snapshot)
.map_err(|_| format_err!("found snapshot archive with non-utf8 characters in name"))?;
task_log!(worker, "Found snapshot archive: {} {}", current_file_number, snapshot);
bail!("unexpected snapshot archive version (v1.0)");
}
PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => {
let header_data = reader.read_exact_allocated(header.size as usize)?;
let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
.map_err(|err| format_err!("unable to parse snapshot archive header - {}", err))?;
let datastore_name = archive_header.store;
let snapshot = archive_header.snapshot;
let checked_chunks = checked_chunks_map.entry(datastore_name.clone()).or_insert(HashSet::new());
task_log!(worker, "File {}: snapshot archive {}:{}", current_file_number, datastore_name, snapshot);
let backup_dir: BackupDir = snapshot.parse()?;
if let Some((datastore, authid)) = target.as_ref() {
let (owner, _group_lock) = datastore.create_locked_backup_group(backup_dir.group(), authid)?;
if *authid != &owner { // only the owner is allowed to create additional snapshots
bail!("restore '{}' failed - owner check failed ({} != {})", snapshot, authid, owner);
if let Some((store_map, authid)) = target.as_ref() {
if let Some(datastore) = store_map.get_datastore(&datastore_name) {
let (owner, _group_lock) =
datastore.create_locked_backup_group(backup_dir.group(), authid)?;
if *authid != &owner {
// only the owner is allowed to create additional snapshots
bail!(
"restore '{}' failed - owner check failed ({} != {})",
snapshot,
authid,
owner
);
}
let (rel_path, is_new, _snap_lock) = datastore.create_locked_backup_dir(&backup_dir)?;
let (rel_path, is_new, _snap_lock) =
datastore.create_locked_backup_dir(&backup_dir)?;
let mut path = datastore.base_path();
path.push(rel_path);
if is_new {
task_log!(worker, "restore snapshot {}", backup_dir);
match restore_snapshot_archive(worker, reader, &path) {
match restore_snapshot_archive(worker, reader, &path, &datastore, checked_chunks) {
Err(err) => {
std::fs::remove_dir_all(&path)?;
bail!("restore snapshot {} failed - {}", backup_dir, err);
@ -371,27 +535,55 @@ fn restore_archive<'a>(
task_log!(worker, "skip incomplete snapshot {}", backup_dir);
}
Ok(true) => {
catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
catalog.register_snapshot(
Uuid::from(header.uuid),
current_file_number,
&datastore_name,
&snapshot,
)?;
catalog.commit_if_large()?;
}
}
return Ok(());
}
} else {
task_log!(worker, "skipping...");
}
}
reader.skip_to_end()?; // read all data
reader.skip_data()?; // read all data
if let Ok(false) = reader.is_incomplete() {
catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, &datastore_name, &snapshot)?;
catalog.commit_if_large()?;
}
}
PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {
bail!("unexpected chunk archive version (v1.0)");
}
PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1 => {
let header_data = reader.read_exact_allocated(header.size as usize)?;
task_log!(worker, "Found chunk archive: {}", current_file_number);
let datastore = target.as_ref().map(|t| t.0);
let archive_header: ChunkArchiveHeader = serde_json::from_slice(&header_data)
.map_err(|err| format_err!("unable to parse chunk archive header - {}", err))?;
if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
catalog.start_chunk_archive(Uuid::from(header.uuid), current_file_number)?;
let source_datastore = archive_header.store;
task_log!(worker, "File {}: chunk archive for datastore '{}'", current_file_number, source_datastore);
let datastore = target
.as_ref()
.and_then(|t| t.0.get_datastore(&source_datastore));
if datastore.is_some() || target.is_none() {
let checked_chunks = checked_chunks_map
.entry(datastore.map(|d| d.name()).unwrap_or("_unused_").to_string())
.or_insert(HashSet::new());
if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, checked_chunks, verbose)? {
catalog.start_chunk_archive(
Uuid::from(header.uuid),
current_file_number,
&source_datastore,
)?;
for digest in chunks.iter() {
catalog.register_chunk(&digest)?;
}
@ -399,6 +591,22 @@ fn restore_archive<'a>(
catalog.end_chunk_archive()?;
catalog.commit_if_large()?;
}
return Ok(());
} else if target.is_some() {
task_log!(worker, "skipping...");
}
reader.skip_data()?; // read all data
}
PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 => {
let header_data = reader.read_exact_allocated(header.size as usize)?;
let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
.map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?;
task_log!(worker, "File {}: skip catalog '{}'", current_file_number, archive_header.uuid);
reader.skip_data()?; // read all data
}
_ => bail!("unknown content magic {:?}", header.content_magic),
}
@ -412,6 +620,7 @@ fn restore_chunk_archive<'a>(
worker: &WorkerTask,
reader: Box<dyn 'a + TapeRead>,
datastore: Option<&DataStore>,
checked_chunks: &mut HashSet<[u8;32]>,
verbose: bool,
) -> Result<Option<Vec<[u8;32]>>, Error> {
@ -419,8 +628,28 @@ fn restore_chunk_archive<'a>(
let mut decoder = ChunkArchiveDecoder::new(reader);
let result: Result<_, Error> = proxmox::try_block!({
while let Some((digest, blob)) = decoder.next_chunk()? {
loop {
let (digest, blob) = match decoder.next_chunk() {
Ok(Some((digest, blob))) => (digest, blob),
Ok(None) => break,
Err(err) => {
let reader = decoder.reader();
// check if this stream is marked incomplete
if let Ok(true) = reader.is_incomplete() {
return Ok(Some(chunks));
}
// check if this is an aborted stream without end marker
if let Ok(false) = reader.has_end_marker() {
worker.log("missing stream end marker".to_string());
return Ok(None);
}
// else the archive is corrupt
return Err(err);
}
};
worker.check_abort()?;
@ -439,44 +668,26 @@ fn restore_chunk_archive<'a>(
} else if verbose {
task_log!(worker, "Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest));
}
checked_chunks.insert(digest.clone());
} else if verbose {
task_log!(worker, "Found chunk: {}", proxmox::tools::digest_to_hex(&digest));
}
chunks.push(digest);
}
Ok(())
});
match result {
Ok(()) => Ok(Some(chunks)),
Err(err) => {
let reader = decoder.reader();
// check if this stream is marked incomplete
if let Ok(true) = reader.is_incomplete() {
return Ok(Some(chunks));
}
// check if this is an aborted stream without end marker
if let Ok(false) = reader.has_end_marker() {
worker.log("missing stream end marker".to_string());
return Ok(None);
}
// else the archive is corrupt
Err(err)
}
}
Ok(Some(chunks))
}
fn restore_snapshot_archive<'a>(
worker: &WorkerTask,
reader: Box<dyn 'a + TapeRead>,
snapshot_path: &Path,
datastore: &DataStore,
checked_chunks: &mut HashSet<[u8;32]>,
) -> Result<bool, Error> {
let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
match try_restore_snapshot_archive(worker, &mut decoder, snapshot_path) {
match try_restore_snapshot_archive(worker, &mut decoder, snapshot_path, datastore, checked_chunks) {
Ok(()) => Ok(true),
Err(err) => {
let reader = decoder.input();
@ -501,6 +712,8 @@ fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
worker: &WorkerTask,
decoder: &mut pxar::decoder::sync::Decoder<R>,
snapshot_path: &Path,
datastore: &DataStore,
checked_chunks: &mut HashSet<[u8;32]>,
) -> Result<(), Error> {
let _root = match decoder.next() {
@ -591,11 +804,13 @@ fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
let index = DynamicIndexReader::open(&archive_path)?;
let (csum, size) = index.compute_csum();
manifest.verify_file(&item.filename, &csum, size)?;
datastore.fast_index_verification(&index, checked_chunks)?;
}
ArchiveType::FixedIndex => {
let index = FixedIndexReader::open(&archive_path)?;
let (csum, size) = index.compute_csum();
manifest.verify_file(&item.filename, &csum, size)?;
datastore.fast_index_verification(&index, checked_chunks)?;
}
ArchiveType::Blob => {
let mut tmpfile = std::fs::File::open(&archive_path)?;
@ -617,3 +832,144 @@ fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
Ok(())
}
/// Try to restore media catalogs (from catalog archives)
pub fn fast_catalog_restore(
worker: &WorkerTask,
drive: &mut Box<dyn TapeDriver>,
media_set: &MediaSet,
uuid: &Uuid, // current media Uuid
) -> Result<bool, Error> {
let status_path = Path::new(TAPE_STATUS_DIR);
let current_file_number = drive.current_file_number()?;
if current_file_number != 2 {
bail!("fast_catalog_restore: wrong media position - internal error");
}
let mut found_catalog = false;
let mut moved_to_eom = false;
loop {
let current_file_number = drive.current_file_number()?;
{ // limit reader scope
let mut reader = match drive.read_next_file() {
Err(BlockReadError::EndOfFile) => {
task_log!(worker, "skip unexpected filemark at pos {}", current_file_number);
continue;
}
Err(BlockReadError::EndOfStream) => {
task_log!(worker, "detected EOT after {} files", current_file_number);
break;
}
Err(BlockReadError::Error(err)) => {
return Err(err.into());
}
Ok(reader) => reader,
};
let header: MediaContentHeader = unsafe { reader.read_le_value()? };
if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
bail!("missing MediaContentHeader");
}
if header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 {
task_log!(worker, "found catalog at pos {}", current_file_number);
let header_data = reader.read_exact_allocated(header.size as usize)?;
let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
.map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?;
if &archive_header.media_set_uuid != media_set.uuid() {
task_log!(worker, "skipping unrelated catalog at pos {}", current_file_number);
reader.skip_data()?; // read all data
continue;
}
let catalog_uuid = &archive_header.uuid;
let wanted = media_set
.media_list()
.iter()
.find(|e| {
match e {
None => false,
Some(uuid) => uuid == catalog_uuid,
}
})
.is_some();
if !wanted {
task_log!(worker, "skip catalog because media '{}' not inventarized", catalog_uuid);
reader.skip_data()?; // read all data
continue;
}
if catalog_uuid == uuid {
// always restore and overwrite catalog
} else {
// only restore if catalog does not exist
if MediaCatalog::exists(status_path, catalog_uuid) {
task_log!(worker, "catalog for media '{}' already exists", catalog_uuid);
reader.skip_data()?; // read all data
continue;
}
}
let mut file = MediaCatalog::create_temporary_database_file(status_path, catalog_uuid)?;
std::io::copy(&mut reader, &mut file)?;
file.seek(SeekFrom::Start(0))?;
match MediaCatalog::parse_catalog_header(&mut file)? {
(true, Some(media_uuid), Some(media_set_uuid)) => {
if &media_uuid != catalog_uuid {
task_log!(worker, "catalog uuid missmatch at pos {}", current_file_number);
continue;
}
if media_set_uuid != archive_header.media_set_uuid {
task_log!(worker, "catalog media_set missmatch at pos {}", current_file_number);
continue;
}
MediaCatalog::finish_temporary_database(status_path, &media_uuid, true)?;
if catalog_uuid == uuid {
task_log!(worker, "successfully restored catalog");
found_catalog = true
} else {
task_log!(worker, "successfully restored related catalog {}", media_uuid);
}
}
_ => {
task_warn!(worker, "got incomplete catalog header - skip file");
continue;
}
}
continue;
}
}
if moved_to_eom {
break; // already done - stop
}
moved_to_eom = true;
task_log!(worker, "searching for catalog at EOT (moving to EOT)");
drive.move_to_last_file()?;
let new_file_number = drive.current_file_number()?;
if new_file_number < (current_file_number + 1) {
break; // no new content - stop
}
}
Ok(found_catalog)
}
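
The call pattern, as used by catalog_media() earlier in this diff: attempt the fast path first and fall back to scanning the whole tape only when no usable catalog archive is found (sketch of the surrounding logic):

// Sketch mirroring the call site in catalog_media() above.
let media_set = inventory.compute_media_set_members(media_set_uuid)?;
if fast_catalog_restore(&worker, &mut drive, &media_set, &media_id.label.uuid)? {
    return Ok(()); // catalog restored from tape, no full scan needed
}
task_log!(worker, "no catalog found");
// ...fall back to rewinding and reconstructing the catalog via restore_media()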

View File

@ -0,0 +1,15 @@
use serde::{Deserialize, Serialize};
use proxmox::api::api;
#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// General status information about a running VM file-restore daemon
pub struct RestoreDaemonStatus {
/// VM uptime in seconds
pub uptime: i64,
/// time left until auto-shutdown, keep in mind that this is useless when 'keep-timeout' is
/// not set, as then the status call will have reset the timer before returning the value
pub timeout: i64,
}

View File

@ -34,6 +34,9 @@ pub use userid::{PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA, PROXMOX_GRO
mod tape;
pub use tape::*;
mod file_restore;
pub use file_restore::*;
// File names: may not contain slashes, may not start with "."
pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
if name.starts_with('.') {
@ -99,6 +102,8 @@ const_regex!{
pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
pub UUID_REGEX = r"^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}$";
pub DATASTORE_MAP_REGEX = concat!(r"(?:", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!());
}
pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
@ -164,6 +169,9 @@ pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat =
pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);
pub const DATASTORE_MAP_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&DATASTORE_MAP_REGEX);
pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.")
.format(&PASSWORD_FORMAT)
.min_length(1)
@ -356,6 +364,25 @@ pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
.max_length(32)
.schema();
pub const DATASTORE_MAP_SCHEMA: Schema = StringSchema::new("Datastore mapping.")
.format(&DATASTORE_MAP_FORMAT)
.min_length(3)
.max_length(65)
.type_text("(<source>=)?<target>")
.schema();
pub const DATASTORE_MAP_ARRAY_SCHEMA: Schema = ArraySchema::new(
"Datastore mapping list.", &DATASTORE_MAP_SCHEMA)
.schema();
pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new(
"A list of Datastore mappings (or single datastore), comma separated. \
For example 'a=b,e' maps the source datastore 'a' to target 'b' and \
all other sources to the default 'e'. If no default is given, only the \
specified sources are mapped.")
.format(&ApiStringFormat::PropertyString(&DATASTORE_MAP_ARRAY_SCHEMA))
.schema();
pub const MEDIA_SET_UUID_SCHEMA: Schema =
StringSchema::new("MediaSet Uuid (We use the all-zero Uuid to reseve an empty media for a specific pool).")
.format(&UUID_FORMAT)
@ -1327,19 +1354,22 @@ pub struct ArchiveEntry {
}
impl ArchiveEntry {
pub fn new(filepath: &[u8], entry_type: &DirEntryAttribute) -> Self {
pub fn new(filepath: &[u8], entry_type: Option<&DirEntryAttribute>) -> Self {
Self {
filepath: base64::encode(filepath),
text: String::from_utf8_lossy(filepath.split(|x| *x == b'/').last().unwrap())
.to_string(),
entry_type: CatalogEntryType::from(entry_type).to_string(),
leaf: !matches!(entry_type, DirEntryAttribute::Directory { .. }),
entry_type: match entry_type {
Some(entry_type) => CatalogEntryType::from(entry_type).to_string(),
None => "v".to_owned(),
},
leaf: !matches!(entry_type, None | Some(DirEntryAttribute::Directory { .. })),
size: match entry_type {
DirEntryAttribute::File { size, .. } => Some(*size),
Some(DirEntryAttribute::File { size, .. }) => Some(*size),
_ => None
},
mtime: match entry_type {
DirEntryAttribute::File { mtime, .. } => Some(*mtime),
Some(DirEntryAttribute::File { mtime, .. }) => Some(*mtime),
_ => None
},
}
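
A usage sketch of the changed constructor; the "/part/2" path and the "f" rendering for files are assumptions for illustration, not taken from this diff:

// Virtual entry (partition/blockdevice shown by the file-restore UI):
let part = ArchiveEntry::new(b"/drive-scsi0.img.fidx/part/2", None); // hypothetical path
assert_eq!(part.entry_type, "v"); // encoded as "v"
assert!(!part.leaf);              // navigable, like a directory

// Regular catalog entry:
let attr = DirEntryAttribute::File { size: 11, mtime: 0 };
let file = ArchiveEntry::new(b"/etc/hostname", Some(&attr));
assert_eq!(file.entry_type, "f"); // assumed CatalogEntryType rendering
assert!(file.leaf);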

View File

@ -21,8 +21,8 @@ pub const DRIVE_NAME_SCHEMA: Schema = StringSchema::new("Drive Identifier.")
.max_length(32)
.schema();
pub const LINUX_DRIVE_PATH_SCHEMA: Schema = StringSchema::new(
"The path to a LINUX non-rewinding SCSI tape device (i.e. '/dev/nst0')")
pub const LTO_DRIVE_PATH_SCHEMA: Schema = StringSchema::new(
"The path to a LTO SCSI-generic tape device (i.e. '/dev/sg0')")
.schema();
pub const CHANGER_DRIVENUM_SCHEMA: Schema = IntegerSchema::new(
@ -57,7 +57,7 @@ pub struct VirtualTapeDrive {
schema: DRIVE_NAME_SCHEMA,
},
path: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
},
changer: {
schema: CHANGER_NAME_SCHEMA,
@ -71,8 +71,8 @@ pub struct VirtualTapeDrive {
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Linux SCSI tape driver
pub struct LinuxTapeDrive {
/// Lto SCSI tape driver
pub struct LtoTapeDrive {
pub name: String,
pub path: String,
#[serde(skip_serializing_if="Option::is_none")]
@ -84,7 +84,7 @@ pub struct LinuxTapeDrive {
#[api(
properties: {
config: {
type: LinuxTapeDrive,
type: LtoTapeDrive,
},
info: {
type: OptionalDeviceIdentification,
@ -96,7 +96,7 @@ pub struct LinuxTapeDrive {
/// Drive list entry
pub struct DriveListEntry {
#[serde(flatten)]
pub config: LinuxTapeDrive,
pub config: LtoTapeDrive,
#[serde(flatten)]
pub info: OptionalDeviceIdentification,
/// the state of the drive if locked
@ -119,6 +119,8 @@ pub struct MamAttribute {
#[api()]
#[derive(Serialize,Deserialize,Copy,Clone,Debug)]
pub enum TapeDensity {
/// Unknown (no media loaded)
Unknown,
/// LTO1
LTO1,
/// LTO2
@ -144,6 +146,7 @@ impl TryFrom<u8> for TapeDensity {
fn try_from(value: u8) -> Result<Self, Self::Error> {
let density = match value {
0x00 => TapeDensity::Unknown,
0x40 => TapeDensity::LTO1,
0x42 => TapeDensity::LTO2,
0x44 => TapeDensity::LTO3,
@ -169,29 +172,37 @@ impl TryFrom<u8> for TapeDensity {
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Drive/Media status for Linux SCSI drives.
/// Drive/Media status for Lto SCSI drives.
///
/// Media related data is optional - only set if there is a medium
/// loaded.
pub struct LinuxDriveAndMediaStatus {
pub struct LtoDriveAndMediaStatus {
/// Vendor
pub vendor: String,
/// Product
pub product: String,
/// Revision
pub revision: String,
/// Block size (0 is variable size)
pub blocksize: u32,
/// Compression enabled
pub compression: bool,
/// Drive buffer mode
pub buffer_mode: u8,
/// Tape density
pub density: TapeDensity,
/// Media is write protected
#[serde(skip_serializing_if="Option::is_none")]
pub density: Option<TapeDensity>,
/// Status flags
pub status: String,
/// Linux Driver Options
pub options: String,
pub write_protect: Option<bool>,
/// Tape Alert Flags
#[serde(skip_serializing_if="Option::is_none")]
pub alert_flags: Option<String>,
/// Current file number
#[serde(skip_serializing_if="Option::is_none")]
pub file_number: Option<u32>,
pub file_number: Option<u64>,
/// Current block number
#[serde(skip_serializing_if="Option::is_none")]
pub block_number: Option<u32>,
pub block_number: Option<u64>,
/// Medium Manufacture Date (epoch)
#[serde(skip_serializing_if="Option::is_none")]
pub manufactured: Option<i64>,
@ -212,3 +223,62 @@ pub struct LinuxDriveAndMediaStatus {
#[serde(skip_serializing_if="Option::is_none")]
pub medium_wearout: Option<f64>,
}
#[api()]
/// Volume statistics from SCSI log page 17h
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Lp17VolumeStatistics {
/// Volume mounts (thread count)
pub volume_mounts: u64,
/// Total data sets written
pub volume_datasets_written: u64,
/// Write retries
pub volume_recovered_write_data_errors: u64,
/// Total unrecovered write errors
pub volume_unrecovered_write_data_errors: u64,
/// Total suspended writes
pub volume_write_servo_errors: u64,
/// Total fatal suspended writes
pub volume_unrecovered_write_servo_errors: u64,
/// Total datasets read
pub volume_datasets_read: u64,
/// Total read retries
pub volume_recovered_read_errors: u64,
/// Total unrecovered read errors
pub volume_unrecovered_read_errors: u64,
/// Last mount unrecovered write errors
pub last_mount_unrecovered_write_errors: u64,
/// Last mount unrecovered read errors
pub last_mount_unrecovered_read_errors: u64,
/// Last mount bytes written
pub last_mount_bytes_written: u64,
/// Last mount bytes read
pub last_mount_bytes_read: u64,
/// Lifetime bytes written
pub lifetime_bytes_written: u64,
/// Lifetime bytes read
pub lifetime_bytes_read: u64,
/// Last load write compression ratio
pub last_load_write_compression_ratio: u64,
/// Last load read compression ratio
pub last_load_read_compression_ratio: u64,
/// Medium mount time
pub medium_mount_time: u64,
/// Medium ready time
pub medium_ready_time: u64,
/// Total native capacity
pub total_native_capacity: u64,
/// Total used native capacity
pub total_used_native_capacity: u64,
/// Write protect
pub write_protect: bool,
/// Volume is WORM
pub worm: bool,
/// Beginning of medium passes
pub beginning_of_medium_passes: u64,
/// Middle of medium passes
pub middle_of_tape_passes: u64,
/// Volume serial number
pub serial: String,
}

View File

@ -144,6 +144,8 @@ pub struct MediaContentEntry {
pub seq_nr: u64,
/// Media Pool
pub pool: String,
/// Datastore Name
pub store: String,
/// Backup snapshot
pub snapshot: String,
/// Snapshot creation time (epoch)

View File

@ -14,6 +14,7 @@ use crate::api2::types::{Userid, UsernameRef, RealmRef};
pub trait ProxmoxAuthenticator {
fn authenticate_user(&self, username: &UsernameRef, password: &str) -> Result<(), Error>;
fn store_password(&self, username: &UsernameRef, password: &str) -> Result<(), Error>;
fn remove_password(&self, username: &UsernameRef) -> Result<(), Error>;
}
pub struct PAM();
@ -60,6 +61,11 @@ impl ProxmoxAuthenticator for PAM {
Ok(())
}
// do not remove password for pam users
fn remove_password(&self, _username: &UsernameRef) -> Result<(), Error> {
Ok(())
}
}
pub struct PBS();
@ -132,6 +138,24 @@ impl ProxmoxAuthenticator for PBS {
Ok(())
}
fn remove_password(&self, username: &UsernameRef) -> Result<(), Error> {
let mut data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
if let Some(map) = data.as_object_mut() {
map.remove(username.as_str());
}
let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
let options = proxmox::tools::fs::CreateOptions::new()
.perm(mode)
.owner(nix::unistd::ROOT)
.group(nix::unistd::Gid::from_raw(0));
let data = serde_json::to_vec_pretty(&data)?;
proxmox::tools::fs::replace_file(SHADOW_CONFIG_FILENAME, &data, options)?;
Ok(())
}
}
/// Lookup the authenticator for the specified realm

View File

@ -3,17 +3,29 @@ use crate::tools;
use anyhow::{bail, format_err, Error};
use std::os::unix::io::RawFd;
use std::path::{PathBuf, Path};
use std::path::{Path, PathBuf};
use proxmox::const_regex;
use super::manifest::MANIFEST_BLOB_NAME;
macro_rules! BACKUP_ID_RE { () => (r"[A-Za-z0-9_][A-Za-z0-9._\-]*") }
macro_rules! BACKUP_TYPE_RE { () => (r"(?:host|vm|ct)") }
macro_rules! BACKUP_TIME_RE { () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z") }
macro_rules! BACKUP_ID_RE {
() => {
r"[A-Za-z0-9_][A-Za-z0-9._\-]*"
};
}
macro_rules! BACKUP_TYPE_RE {
() => {
r"(?:host|vm|ct)"
};
}
macro_rules! BACKUP_TIME_RE {
() => {
r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z"
};
}
const_regex!{
const_regex! {
BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";
BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");
@ -38,7 +50,6 @@ pub struct BackupGroup {
}
impl std::cmp::Ord for BackupGroup {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
let type_order = self.backup_type.cmp(&other.backup_type);
if type_order != std::cmp::Ordering::Equal {
@ -63,9 +74,11 @@ impl std::cmp::PartialOrd for BackupGroup {
}
impl BackupGroup {
pub fn new<T: Into<String>, U: Into<String>>(backup_type: T, backup_id: U) -> Self {
Self { backup_type: backup_type.into(), backup_id: backup_id.into() }
Self {
backup_type: backup_type.into(),
backup_id: backup_id.into(),
}
}
pub fn backup_type(&self) -> &str {
@ -77,7 +90,6 @@ impl BackupGroup {
}
pub fn group_path(&self) -> PathBuf {
let mut relative_path = PathBuf::new();
relative_path.push(&self.backup_type);
@ -88,46 +100,65 @@ impl BackupGroup {
}
pub fn list_backups(&self, base_path: &Path) -> Result<Vec<BackupInfo>, Error> {
let mut list = vec![];
let mut path = base_path.to_owned();
path.push(self.group_path());
tools::scandir(libc::AT_FDCWD, &path, &BACKUP_DATE_REGEX, |l2_fd, backup_time, file_type| {
if file_type != nix::dir::Type::Directory { return Ok(()); }
tools::scandir(
libc::AT_FDCWD,
&path,
&BACKUP_DATE_REGEX,
|l2_fd, backup_time, file_type| {
if file_type != nix::dir::Type::Directory {
return Ok(());
}
let backup_dir = BackupDir::with_rfc3339(&self.backup_type, &self.backup_id, backup_time)?;
let backup_dir =
BackupDir::with_rfc3339(&self.backup_type, &self.backup_id, backup_time)?;
let files = list_backup_files(l2_fd, backup_time)?;
list.push(BackupInfo { backup_dir, files });
Ok(())
})?;
},
)?;
Ok(list)
}
pub fn last_successful_backup(&self, base_path: &Path) -> Result<Option<i64>, Error> {
let mut last = None;
let mut path = base_path.to_owned();
path.push(self.group_path());
tools::scandir(libc::AT_FDCWD, &path, &BACKUP_DATE_REGEX, |l2_fd, backup_time, file_type| {
if file_type != nix::dir::Type::Directory { return Ok(()); }
tools::scandir(
libc::AT_FDCWD,
&path,
&BACKUP_DATE_REGEX,
|l2_fd, backup_time, file_type| {
if file_type != nix::dir::Type::Directory {
return Ok(());
}
let mut manifest_path = PathBuf::from(backup_time);
manifest_path.push(MANIFEST_BLOB_NAME);
use nix::fcntl::{openat, OFlag};
match openat(l2_fd, &manifest_path, OFlag::O_RDONLY, nix::sys::stat::Mode::empty()) {
match openat(
l2_fd,
&manifest_path,
OFlag::O_RDONLY,
nix::sys::stat::Mode::empty(),
) {
Ok(rawfd) => {
/* manifest exists --> assume backup was successful */
/* close else this leaks! */
nix::unistd::close(rawfd)?;
},
Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => { return Ok(()); }
}
Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => {
return Ok(());
}
Err(err) => {
bail!("last_successful_backup: unexpected error - {}", err);
}
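About the /* close else this leaks! */ comment above: openat returns a bare RawFd, which Rust never closes automatically, hence the explicit nix::unistd::close. An equivalent pattern (a sketch, not what the code does) wraps the descriptor so Drop closes it:
use std::os::unix::io::FromRawFd;
// SAFETY: we exclusively own rawfd and nothing else closes it
let _manifest = unsafe { std::fs::File::from_raw_fd(rawfd) };
// dropped at end of scope, closing the descriptor automatically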
@ -135,13 +166,16 @@ impl BackupGroup {
let timestamp = proxmox::tools::time::parse_rfc3339(backup_time)?;
if let Some(last_timestamp) = last {
if timestamp > last_timestamp { last = Some(timestamp); }
if timestamp > last_timestamp {
last = Some(timestamp);
}
} else {
last = Some(timestamp);
}
Ok(())
})?;
},
)?;
Ok(last)
}
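A hypothetical caller of last_successful_backup, assuming a BackupGroup in `group` and the datastore base path in `base_path` (epoch_to_rfc3339 is the matching proxmox time helper):
if let Some(epoch) = group.last_successful_backup(&base_path)? {
    println!(
        "last finished snapshot: {}",
        proxmox::tools::time::epoch_to_rfc3339(epoch)?
    );
}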
@ -162,7 +196,8 @@ impl std::str::FromStr for BackupGroup {
///
/// This parses strings like `vm/100`.
fn from_str(path: &str) -> Result<Self, Self::Err> {
let cap = GROUP_PATH_REGEX.captures(path)
let cap = GROUP_PATH_REGEX
.captures(path)
.ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
Ok(Self {
@ -182,11 +217,10 @@ pub struct BackupDir {
/// Backup timestamp
backup_time: i64,
// backup_time as rfc3339
backup_time_string: String
backup_time_string: String,
}
impl BackupDir {
pub fn new<T, U>(backup_type: T, backup_id: U, backup_time: i64) -> Result<Self, Error>
where
T: Into<String>,
@ -196,7 +230,11 @@ impl BackupDir {
BackupDir::with_group(group, backup_time)
}
pub fn with_rfc3339<T,U,V>(backup_type: T, backup_id: U, backup_time_string: V) -> Result<Self, Error>
pub fn with_rfc3339<T, U, V>(
backup_type: T,
backup_id: U,
backup_time_string: V,
) -> Result<Self, Error>
where
T: Into<String>,
U: Into<String>,
@ -205,12 +243,20 @@ impl BackupDir {
let backup_time_string = backup_time_string.into();
let backup_time = proxmox::tools::time::parse_rfc3339(&backup_time_string)?;
let group = BackupGroup::new(backup_type.into(), backup_id.into());
Ok(Self { group, backup_time, backup_time_string })
Ok(Self {
group,
backup_time,
backup_time_string,
})
}
pub fn with_group(group: BackupGroup, backup_time: i64) -> Result<Self, Error> {
let backup_time_string = Self::backup_time_to_string(backup_time)?;
Ok(Self { group, backup_time, backup_time_string })
Ok(Self {
group,
backup_time,
backup_time_string,
})
}
pub fn group(&self) -> &BackupGroup {
@ -226,7 +272,6 @@ impl BackupDir {
}
pub fn relative_path(&self) -> PathBuf {
let mut relative_path = self.group.group_path();
relative_path.push(self.backup_time_string.clone());
@ -247,7 +292,8 @@ impl std::str::FromStr for BackupDir {
///
/// This parses strings like `host/elsa/2020-06-15T05:18:33Z`.
fn from_str(path: &str) -> Result<Self, Self::Err> {
let cap = SNAPSHOT_PATH_REGEX.captures(path)
let cap = SNAPSHOT_PATH_REGEX
.captures(path)
.ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
BackupDir::with_rfc3339(
@ -276,7 +322,6 @@ pub struct BackupInfo {
}
impl BackupInfo {
pub fn new(base_path: &Path, backup_dir: BackupDir) -> Result<BackupInfo, Error> {
let mut path = base_path.to_owned();
path.push(backup_dir.relative_path());
@ -287,19 +332,24 @@ impl BackupInfo {
}
/// Finds the latest backup inside a backup group
pub fn last_backup(base_path: &Path, group: &BackupGroup, only_finished: bool)
-> Result<Option<BackupInfo>, Error>
{
pub fn last_backup(
base_path: &Path,
group: &BackupGroup,
only_finished: bool,
) -> Result<Option<BackupInfo>, Error> {
let backups = group.list_backups(base_path)?;
Ok(backups.into_iter()
Ok(backups
.into_iter()
.filter(|item| !only_finished || item.is_finished())
.max_by_key(|item| item.backup_dir.backup_time()))
}
pub fn sort_list(list: &mut Vec<BackupInfo>, ascending: bool) {
if ascending { // oldest first
if ascending {
// oldest first
list.sort_unstable_by(|a, b| a.backup_dir.backup_time.cmp(&b.backup_dir.backup_time));
} else { // newest first
} else {
// newest first
list.sort_unstable_by(|a, b| b.backup_dir.backup_time.cmp(&a.backup_dir.backup_time));
}
}
@ -316,31 +366,52 @@ impl BackupInfo {
pub fn list_backup_groups(base_path: &Path) -> Result<Vec<BackupGroup>, Error> {
let mut list = Vec::new();
tools::scandir(libc::AT_FDCWD, base_path, &BACKUP_TYPE_REGEX, |l0_fd, backup_type, file_type| {
if file_type != nix::dir::Type::Directory { return Ok(()); }
tools::scandir(l0_fd, backup_type, &BACKUP_ID_REGEX, |_, backup_id, file_type| {
if file_type != nix::dir::Type::Directory { return Ok(()); }
tools::scandir(
libc::AT_FDCWD,
base_path,
&BACKUP_TYPE_REGEX,
|l0_fd, backup_type, file_type| {
if file_type != nix::dir::Type::Directory {
return Ok(());
}
tools::scandir(
l0_fd,
backup_type,
&BACKUP_ID_REGEX,
|_, backup_id, file_type| {
if file_type != nix::dir::Type::Directory {
return Ok(());
}
list.push(BackupGroup::new(backup_type, backup_id));
Ok(())
})
})?;
},
)
},
)?;
Ok(list)
}
pub fn is_finished(&self) -> bool {
// backup is considered unfinished if there is no manifest
self.files.iter().any(|name| name == super::MANIFEST_BLOB_NAME)
self.files
.iter()
.any(|name| name == super::MANIFEST_BLOB_NAME)
}
}
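Putting the pieces together, a sketch of finding the newest finished snapshot of a group (names illustrative, error handling elided):
let group: BackupGroup = "vm/100".parse()?;
if let Some(info) = BackupInfo::last_backup(&base_path, &group, true)? {
    // only_finished == true filters through is_finished() above
    println!("{} ({} files)", info.backup_dir, info.files.len());
}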
fn list_backup_files<P: ?Sized + nix::NixPath>(dirfd: RawFd, path: &P) -> Result<Vec<String>, Error> {
fn list_backup_files<P: ?Sized + nix::NixPath>(
dirfd: RawFd,
path: &P,
) -> Result<Vec<String>, Error> {
let mut files = vec![];
tools::scandir(dirfd, path, &BACKUP_FILE_REGEX, |_, filename, file_type| {
if file_type != nix::dir::Type::File { return Ok(()); }
if file_type != nix::dir::Type::File {
return Ok(());
}
files.push(filename.to_owned());
Ok(())
})?;

View File

@ -153,6 +153,34 @@ impl DataStore {
Ok(out)
}
/// Fast index verification - only check if chunks exist
pub fn fast_index_verification(
&self,
index: &dyn IndexFile,
checked: &mut HashSet<[u8; 32]>,
) -> Result<(), Error> {
for pos in 0..index.index_count() {
let info = index.chunk_info(pos).unwrap();
if checked.contains(&info.digest) {
continue;
}
self.stat_chunk(&info.digest)
.map_err(|err| {
format_err!(
"fast_index_verification error, stat_chunk {} failed - {}",
proxmox::tools::digest_to_hex(&info.digest),
err,
)
})?;
checked.insert(info.digest);
}
Ok(())
}
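The &mut HashSet parameter lets callers share one checked set across many indexes, so chunks referenced by several snapshots are only stat()ed once; nothing is read or hashed. A hypothetical caller (the `indexes` variable is an assumption):
let mut checked: HashSet<[u8; 32]> = HashSet::new();
for index in &indexes {
    datastore.fast_index_verification(index.as_ref(), &mut checked)?;
}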
pub fn name(&self) -> &str {
self.chunk_store.name()
}
@ -686,6 +714,11 @@ impl DataStore {
}
pub fn stat_chunk(&self, digest: &[u8; 32]) -> Result<std::fs::Metadata, Error> {
let (chunk_path, _digest_str) = self.chunk_store.chunk_path(digest);
std::fs::metadata(chunk_path).map_err(Error::from)
}
pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
let (chunk_path, digest_str) = self.chunk_store.chunk_path(digest);
@ -781,4 +814,3 @@ impl DataStore {
self.verify_new
}
}

View File

@ -1,8 +1,8 @@
use std::collections::HashSet;
use std::sync::{Arc, Mutex};
use std::sync::atomic::{Ordering, AtomicUsize};
use std::time::Instant;
use nix::dir::Dir;
use std::collections::HashSet;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Instant;
use anyhow::{bail, format_err, Error};
@ -25,8 +25,8 @@ use crate::{
server::UPID,
task::TaskState,
task_log,
tools::ParallelHandler,
tools::fs::lock_dir_noblock_shared,
tools::ParallelHandler,
};
/// A VerifyWorker encapsulates a task worker, datastore and information about which chunks have
@ -34,8 +34,8 @@ use crate::{
pub struct VerifyWorker {
worker: Arc<dyn TaskState + Send + Sync>,
datastore: Arc<DataStore>,
verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
verified_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
}
impl VerifyWorker {
@ -45,15 +45,18 @@ impl VerifyWorker {
worker,
datastore,
// start with 16k chunks == up to 64G data
verified_chunks: Arc::new(Mutex::new(HashSet::with_capacity(16*1024))),
verified_chunks: Arc::new(Mutex::new(HashSet::with_capacity(16 * 1024))),
// start with 64 chunks since we assume there are few corrupt ones
corrupt_chunks: Arc::new(Mutex::new(HashSet::with_capacity(64))),
}
}
}
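(The 16 * 1024 pre-allocation matches the comment above: at the default 4 MiB chunk size, 16384 unique chunks cover 16384 * 4 MiB = 64 GiB of data before the set must grow.)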
fn verify_blob(datastore: Arc<DataStore>, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
fn verify_blob(
datastore: Arc<DataStore>,
backup_dir: &BackupDir,
info: &FileInfo,
) -> Result<(), Error> {
let blob = datastore.load_blob(backup_dir, &info.filename)?;
let raw_size = blob.raw_size();
@ -88,7 +91,11 @@ fn rename_corrupted_chunk(
let mut new_path = path.clone();
loop {
new_path.set_file_name(format!("{}.{}.bad", digest_str, counter));
if new_path.exists() && counter < 9 { counter += 1; } else { break; }
if new_path.exists() && counter < 9 {
counter += 1;
} else {
break;
}
}
match std::fs::rename(&path, &new_path) {
@ -109,7 +116,6 @@ fn verify_index_chunks(
index: Box<dyn IndexFile + Send>,
crypt_mode: CryptMode,
) -> Result<(), Error> {
let errors = Arc::new(AtomicUsize::new(0));
let start_time = Instant::now();
@ -124,8 +130,9 @@ fn verify_index_chunks(
let errors2 = Arc::clone(&errors);
let decoder_pool = ParallelHandler::new(
"verify chunk decoder", 4,
move |(chunk, digest, size): (DataBlob, [u8;32], u64)| {
"verify chunk decoder",
4,
move |(chunk, digest, size): (DataBlob, [u8; 32], u64)| {
let chunk_crypt_mode = match chunk.crypt_mode() {
Err(err) => {
corrupt_chunks2.lock().unwrap().insert(digest);
@ -159,23 +166,65 @@ fn verify_index_chunks(
}
);
for pos in 0..index.index_count() {
let skip_chunk = |digest: &[u8; 32]| -> bool {
if verify_worker.verified_chunks.lock().unwrap().contains(digest) {
true
} else if verify_worker.corrupt_chunks.lock().unwrap().contains(digest) {
let digest_str = proxmox::tools::digest_to_hex(digest);
task_log!(verify_worker.worker, "chunk {} was marked as corrupt", digest_str);
errors.fetch_add(1, Ordering::SeqCst);
true
} else {
false
}
};
let index_count = index.index_count();
let mut chunk_list = Vec::with_capacity(index_count);
use std::os::unix::fs::MetadataExt;
for pos in 0..index_count {
if pos & 1023 == 0 {
verify_worker.worker.check_abort()?;
crate::tools::fail_on_shutdown()?;
}
let info = index.chunk_info(pos).unwrap();
if skip_chunk(&info.digest) {
continue; // already verified or marked corrupt
}
match verify_worker.datastore.stat_chunk(&info.digest) {
Err(err) => {
verify_worker.corrupt_chunks.lock().unwrap().insert(info.digest);
task_log!(verify_worker.worker, "can't verify chunk, stat failed - {}", err);
errors.fetch_add(1, Ordering::SeqCst);
rename_corrupted_chunk(
verify_worker.datastore.clone(),
&info.digest,
&verify_worker.worker,
);
}
Ok(metadata) => {
chunk_list.push((pos, metadata.ino()));
}
}
}
// sorting by inode improves data locality (inode order roughly follows on-disk allocation order), which makes verification a lot faster on spinning disks
chunk_list.sort_unstable_by(|(_, ino_a), (_, ino_b)| ino_a.cmp(&ino_b));
for (pos, _) in chunk_list {
verify_worker.worker.check_abort()?;
crate::tools::fail_on_shutdown()?;
let info = index.chunk_info(pos).unwrap();
let size = info.size();
if verify_worker.verified_chunks.lock().unwrap().contains(&info.digest) {
continue; // already verified
}
if verify_worker.corrupt_chunks.lock().unwrap().contains(&info.digest) {
let digest_str = proxmox::tools::digest_to_hex(&info.digest);
task_log!(verify_worker.worker, "chunk {} was marked as corrupt", digest_str);
errors.fetch_add(1, Ordering::SeqCst);
continue;
// we must always recheck this here, since the parallel worker below alters it!
if skip_chunk(&info.digest) {
continue; // already verified or marked corrupt
}
match verify_worker.datastore.load_chunk(&info.digest) {
@ -183,10 +232,14 @@ fn verify_index_chunks(
verify_worker.corrupt_chunks.lock().unwrap().insert(info.digest);
task_log!(verify_worker.worker, "can't verify chunk, load failed - {}", err);
errors.fetch_add(1, Ordering::SeqCst);
rename_corrupted_chunk(verify_worker.datastore.clone(), &info.digest, &verify_worker.worker);
continue;
rename_corrupted_chunk(
verify_worker.datastore.clone(),
&info.digest,
&verify_worker.worker,
);
}
Ok(chunk) => {
let size = info.size();
read_bytes += chunk.raw_size();
decoder_pool.send((chunk, info.digest, size))?;
decoded_bytes += size;
@ -198,11 +251,11 @@ fn verify_index_chunks(
let elapsed = start_time.elapsed().as_secs_f64();
let read_bytes_mib = (read_bytes as f64)/(1024.0*1024.0);
let decoded_bytes_mib = (decoded_bytes as f64)/(1024.0*1024.0);
let read_bytes_mib = (read_bytes as f64) / (1024.0 * 1024.0);
let decoded_bytes_mib = (decoded_bytes as f64) / (1024.0 * 1024.0);
let read_speed = read_bytes_mib/elapsed;
let decode_speed = decoded_bytes_mib/elapsed;
let read_speed = read_bytes_mib / elapsed;
let decode_speed = decoded_bytes_mib / elapsed;
let error_count = errors.load(Ordering::SeqCst);
@ -229,7 +282,6 @@ fn verify_fixed_index(
backup_dir: &BackupDir,
info: &FileInfo,
) -> Result<(), Error> {
let mut path = backup_dir.relative_path();
path.push(&info.filename);
@ -244,11 +296,7 @@ fn verify_fixed_index(
bail!("wrong index checksum");
}
verify_index_chunks(
verify_worker,
Box::new(index),
info.chunk_crypt_mode(),
)
verify_index_chunks(verify_worker, Box::new(index), info.chunk_crypt_mode())
}
fn verify_dynamic_index(
@ -256,7 +304,6 @@ fn verify_dynamic_index(
backup_dir: &BackupDir,
info: &FileInfo,
) -> Result<(), Error> {
let mut path = backup_dir.relative_path();
path.push(&info.filename);
@ -271,11 +318,7 @@ fn verify_dynamic_index(
bail!("wrong index checksum");
}
verify_index_chunks(
verify_worker,
Box::new(index),
info.chunk_crypt_mode(),
)
verify_index_chunks(verify_worker, Box::new(index), info.chunk_crypt_mode())
}
/// Verify a single backup snapshot
@ -296,15 +339,12 @@ pub fn verify_backup_dir(
let snap_lock = lock_dir_noblock_shared(
&verify_worker.datastore.snapshot_path(&backup_dir),
"snapshot",
"locked by another operation");
"locked by another operation",
);
match snap_lock {
Ok(snap_lock) => verify_backup_dir_with_lock(
verify_worker,
backup_dir,
upid,
filter,
snap_lock
),
Ok(snap_lock) => {
verify_backup_dir_with_lock(verify_worker, backup_dir, upid, filter, snap_lock)
}
Err(err) => {
task_log!(
verify_worker.worker,
@ -361,19 +401,11 @@ pub fn verify_backup_dir_with_lock(
let result = proxmox::try_block!({
task_log!(verify_worker.worker, " check {}", info.filename);
match archive_type(&info.filename)? {
ArchiveType::FixedIndex =>
verify_fixed_index(
verify_worker,
&backup_dir,
info,
),
ArchiveType::DynamicIndex =>
verify_dynamic_index(
verify_worker,
&backup_dir,
info,
),
ArchiveType::Blob => verify_blob(verify_worker.datastore.clone(), &backup_dir, info),
ArchiveType::FixedIndex => verify_fixed_index(verify_worker, &backup_dir, info),
ArchiveType::DynamicIndex => verify_dynamic_index(verify_worker, &backup_dir, info),
ArchiveType::Blob => {
verify_blob(verify_worker.datastore.clone(), &backup_dir, info)
}
}
});
@ -392,7 +424,6 @@ pub fn verify_backup_dir_with_lock(
error_count += 1;
verify_result = VerifyState::Failed;
}
}
let verify_state = SnapshotVerifyState {
@ -400,9 +431,12 @@ pub fn verify_backup_dir_with_lock(
upid,
};
let verify_state = serde_json::to_value(verify_state)?;
verify_worker.datastore.update_manifest(&backup_dir, |manifest| {
verify_worker
.datastore
.update_manifest(&backup_dir, |manifest| {
manifest.unprotected["verify_state"] = verify_state;
}).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
})
.map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
Ok(error_count == 0)
}
@ -421,7 +455,6 @@ pub fn verify_backup_group(
upid: &UPID,
filter: Option<&dyn Fn(&BackupManifest) -> bool>,
) -> Result<Vec<String>, Error> {
let mut errors = Vec::new();
let mut list = match group.list_backups(&verify_worker.datastore.base_path()) {
Ok(list) => list,
@ -438,26 +471,23 @@ pub fn verify_backup_group(
};
let snapshot_count = list.len();
task_log!(verify_worker.worker, "verify group {}:{} ({} snapshots)", verify_worker.datastore.name(), group, snapshot_count);
task_log!(
verify_worker.worker,
"verify group {}:{} ({} snapshots)",
verify_worker.datastore.name(),
group,
snapshot_count
);
progress.group_snapshots = snapshot_count as u64;
BackupInfo::sort_list(&mut list, false); // newest first
for (pos, info) in list.into_iter().enumerate() {
if !verify_backup_dir(
verify_worker,
&info.backup_dir,
upid.clone(),
filter,
)? {
if !verify_backup_dir(verify_worker, &info.backup_dir, upid.clone(), filter)? {
errors.push(info.backup_dir.to_string());
}
progress.done_snapshots = pos as u64 + 1;
task_log!(
verify_worker.worker,
"percentage done: {}",
progress
);
task_log!(verify_worker.worker, "percentage done: {}", progress);
}
Ok(errors)
@ -521,11 +551,7 @@ pub fn verify_all_backups(
.filter(filter_by_owner)
.collect::<Vec<BackupGroup>>(),
Err(err) => {
task_log!(
worker,
"unable to list backups: {}",
err,
);
task_log!(worker, "unable to list backups: {}", err);
return Ok(errors);
}
};
@ -542,13 +568,8 @@ pub fn verify_all_backups(
progress.done_snapshots = 0;
progress.group_snapshots = 0;
let mut group_errors = verify_backup_group(
verify_worker,
&group,
&mut progress,
upid,
filter,
)?;
let mut group_errors =
verify_backup_group(verify_worker, &group, &mut progress, upid, filter)?;
errors.append(&mut group_errors);
}

View File

@ -1,18 +1,18 @@
/// Control magnetic tape drive operation
///
/// This is a Rust implementation, meant to replace the 'mt' command
/// line tool.
/// This is a Rust implementation, using the Proxmox userspace tape
/// driver. This is meant as a replacement for the 'mt' command line
/// tool.
///
/// Features:
///
/// - written in Rust
/// - use Proxmox userspace driver (using SG_IO)
/// - optional json output format
/// - support tape alert flags
/// - support volume statistics
/// - read cartridge memory
use std::collections::HashMap;
use anyhow::{bail, Error};
use serde_json::Value;
@ -36,6 +36,12 @@ pub const FILE_MARK_COUNT_SCHEMA: Schema =
.maximum(i32::MAX as isize)
.schema();
pub const FILE_MARK_POSITION_SCHEMA: Schema =
IntegerSchema::new("File mark position (0 is BOT).")
.minimum(0)
.maximum(i32::MAX as isize)
.schema();
pub const RECORD_COUNT_SCHEMA: Schema =
IntegerSchema::new("Record count.")
.minimum(1)
@ -43,7 +49,7 @@ pub const RECORD_COUNT_SCHEMA: Schema =
.schema();
pub const DRIVE_OPTION_SCHEMA: Schema = StringSchema::new(
"Linux Tape Driver Option, either numeric value or option name.")
"Lto Tape Driver Option, either numeric value or option name.")
.schema();
pub const DRIVE_OPTION_LIST_SCHEMA: Schema =
@ -57,103 +63,60 @@ use proxmox_backup::{
drive::complete_drive_name,
},
api2::types::{
LINUX_DRIVE_PATH_SCHEMA,
LTO_DRIVE_PATH_SCHEMA,
DRIVE_NAME_SCHEMA,
LinuxTapeDrive,
LtoTapeDrive,
},
tape::{
complete_drive_path,
linux_tape_device_list,
lto_tape_device_list,
drive::{
linux_mtio::{MTCmd, SetDrvBufferOptions},
TapeDriver,
LinuxTapeHandle,
open_linux_tape_device,
LtoTapeHandle,
open_lto_tape_device,
},
},
};
lazy_static::lazy_static!{
static ref DRIVE_OPTIONS: HashMap<String, SetDrvBufferOptions> = {
let mut map = HashMap::new();
for i in 0..31 {
let bit: i32 = 1 << i;
let flag = SetDrvBufferOptions::from_bits_truncate(bit);
if flag.bits() == 0 { continue; }
let name = format!("{:?}", flag)
.to_lowercase()
.replace("_", "-");
map.insert(name, flag);
}
map
};
}
fn parse_drive_options(options: Vec<String>) -> Result<SetDrvBufferOptions, Error> {
let mut value = SetDrvBufferOptions::empty();
for option in options.iter() {
if let Ok::<i32,_>(v) = option.parse() {
value |= SetDrvBufferOptions::from_bits_truncate(v);
} else if let Some(v) = DRIVE_OPTIONS.get(option) {
value |= *v;
} else {
let option = option.to_lowercase().replace("_", "-");
if let Some(v) = DRIVE_OPTIONS.get(&option) {
value |= *v;
} else {
bail!("unknown drive option {}", option);
}
}
}
Ok(value)
}
fn get_tape_handle(param: &Value) -> Result<LinuxTapeHandle, Error> {
fn get_tape_handle(param: &Value) -> Result<LtoTapeHandle, Error> {
if let Some(name) = param["drive"].as_str() {
let (config, _digest) = config::drive::config()?;
let drive: LinuxTapeDrive = config.lookup("linux", &name)?;
let drive: LtoTapeDrive = config.lookup("lto", &name)?;
eprintln!("using device {}", drive.path);
return Ok(LinuxTapeHandle::new(open_linux_tape_device(&drive.path)?))
return LtoTapeHandle::new(open_lto_tape_device(&drive.path)?);
}
if let Some(device) = param["device"].as_str() {
eprintln!("using device {}", device);
return Ok(LinuxTapeHandle::new(open_linux_tape_device(&device)?))
return LtoTapeHandle::new(open_lto_tape_device(&device)?);
}
if let Ok(name) = std::env::var("PROXMOX_TAPE_DRIVE") {
let (config, _digest) = config::drive::config()?;
let drive: LinuxTapeDrive = config.lookup("linux", &name)?;
let drive: LtoTapeDrive = config.lookup("lto", &name)?;
eprintln!("using device {}", drive.path);
return Ok(LinuxTapeHandle::new(open_linux_tape_device(&drive.path)?))
return LtoTapeHandle::new(open_lto_tape_device(&drive.path)?);
}
if let Ok(device) = std::env::var("TAPE") {
eprintln!("using device {}", device);
return Ok(LinuxTapeHandle::new(open_linux_tape_device(&device)?))
return LtoTapeHandle::new(open_lto_tape_device(&device)?);
}
let (config, _digest) = config::drive::config()?;
let mut drive_names = Vec::new();
for (name, (section_type, _)) in config.sections.iter() {
if section_type != "linux" { continue; }
if section_type != "lto" { continue; }
drive_names.push(name);
}
if drive_names.len() == 1 {
let name = drive_names[0];
let drive: LinuxTapeDrive = config.lookup("linux", &name)?;
let drive: LtoTapeDrive = config.lookup("lto", &name)?;
eprintln!("using device {}", drive.path);
return Ok(LinuxTapeHandle::new(open_linux_tape_device(&drive.path)?))
return LtoTapeHandle::new(open_lto_tape_device(&drive.path)?);
}
bail!("no drive/device specified");
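In short, get_tape_handle now resolves the drive in this order: the --drive parameter, the --device path, the PROXMOX_TAPE_DRIVE environment variable, the TAPE environment variable, and finally the single configured 'lto' drive if exactly one exists; anything else bails.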
@ -167,26 +130,22 @@ fn get_tape_handle(param: &Value) -> Result<LinuxTapeHandle, Error> {
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
count: {
schema: FILE_MARK_COUNT_SCHEMA,
schema: FILE_MARK_POSITION_SCHEMA,
},
},
},
)]
/// Position the tape at the beginning of the count file.
///
/// Positioning is done by first rewinding the tape and then spacing
/// forward over count file marks.
fn asf(count: i32, param: Value) -> Result<(), Error> {
/// Position the tape at the beginning of the count file (i.e. just
/// after file mark number `count`).
fn asf(count: u64, param: Value) -> Result<(), Error> {
let mut handle = get_tape_handle(&param)?;
handle.rewind()?;
handle.forward_space_count_files(count)?;
handle.locate_file(count)?;
Ok(())
}
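(Replacing the old rewind + forward-space sequence with locate_file lets the drive seek via its internal tape directory, which is typically far faster than physically spacing over each file mark.)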
@ -200,7 +159,7 @@ fn asf(count: i32, param: Value) -> Result<(), Error> {
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
count: {
@ -212,7 +171,7 @@ fn asf(count: i32, param: Value) -> Result<(), Error> {
/// Backward space count files (position before file mark).
///
/// The tape is positioned on the last block of the previous file.
fn bsf(count: i32, param: Value) -> Result<(), Error> {
fn bsf(count: usize, param: Value) -> Result<(), Error> {
let mut handle = get_tape_handle(&param)?;
@ -230,7 +189,7 @@ fn bsf(count: i32, param: Value) -> Result<(), Error> {
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
count: {
@ -243,11 +202,12 @@ fn bsf(count: i32, param: Value) -> Result<(), Error> {
///
/// This leaves the tape positioned at the first block of the file
/// that is count - 1 files before the current file.
fn bsfm(count: i32, param: Value) -> Result<(), Error> {
fn bsfm(count: usize, param: Value) -> Result<(), Error> {
let mut handle = get_tape_handle(&param)?;
handle.mtop(MTCmd::MTBSFM, count, "bsfm")?;
handle.backward_space_count_files(count)?;
handle.forward_space_count_files(1)?;
Ok(())
}
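Since the new driver only exposes space-by-count primitives, bsfm is emulated as 'count file marks backward, then one forward', leaving the tape just after the mark as mt semantics require; fsfm below uses the mirrored trick.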
@ -261,7 +221,7 @@ fn bsfm(count: i32, param: Value) -> Result<(), Error> {
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
count: {
@ -271,11 +231,11 @@ fn bsfm(count: i32, param: Value) -> Result<(), Error> {
},
)]
/// Backward space records.
fn bsr(count: i32, param: Value) -> Result<(), Error> {
fn bsr(count: usize, param: Value) -> Result<(), Error> {
let mut handle = get_tape_handle(&param)?;
handle.mtop(MTCmd::MTBSR, count, "backward space records")?;
handle.backward_space_count_records(count)?;
Ok(())
}
@ -289,7 +249,7 @@ fn bsr(count: i32, param: Value) -> Result<(), Error> {
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
"output-format": {
@ -340,7 +300,7 @@ fn cartridge_memory(param: Value) -> Result<(), Error> {
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
"output-format": {
@ -389,7 +349,7 @@ fn tape_alert_flags(param: Value) -> Result<(), Error> {
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
},
@ -413,7 +373,7 @@ fn eject(param: Value) -> Result<(), Error> {
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
},
@ -423,7 +383,7 @@ fn eject(param: Value) -> Result<(), Error> {
fn eod(param: Value) -> Result<(), Error> {
let mut handle = get_tape_handle(&param)?;
handle.move_to_eom()?;
handle.move_to_eom(false)?;
Ok(())
}
@ -437,7 +397,7 @@ fn eod(param: Value) -> Result<(), Error> {
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
fast: {
@ -449,7 +409,7 @@ fn eod(param: Value) -> Result<(), Error> {
},
},
)]
/// Erase media
/// Erase media (from current position)
fn erase(fast: Option<bool>, param: Value) -> Result<(), Error> {
let mut handle = get_tape_handle(&param)?;
@ -466,7 +426,36 @@ fn erase(fast: Option<bool>, param: Value) -> Result<(), Error> {
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
fast: {
description: "Use fast erase.",
type: bool,
optional: true,
default: true,
},
},
},
)]
/// Format media, single partition
fn format(fast: Option<bool>, param: Value) -> Result<(), Error> {
let mut handle = get_tape_handle(&param)?;
handle.format_media(fast.unwrap_or(true))?;
Ok(())
}
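(Unlike erase above, which acts from the current position, format_media re-initializes the whole medium as a single partition; fast defaults to true, presumably skipping a full overwrite.)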
#[api(
input: {
properties: {
drive: {
schema: DRIVE_NAME_SCHEMA,
optional: true,
},
device: {
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
count: {
@ -478,7 +467,7 @@ fn erase(fast: Option<bool>, param: Value) -> Result<(), Error> {
/// Forward space count files (position after file mark).
///
/// The tape is positioned on the first block of the next file.
fn fsf(count: i32, param: Value) -> Result<(), Error> {
fn fsf(count: usize, param: Value) -> Result<(), Error> {
let mut handle = get_tape_handle(&param)?;
@ -495,7 +484,7 @@ fn fsf(count: i32, param: Value) -> Result<(), Error> {
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
count: {
@ -508,11 +497,12 @@ fn fsf(count: i32, param: Value) -> Result<(), Error> {
///
/// This leaves the tape positioned at the last block of the file that
/// is count - 1 files past the current file.
fn fsfm(count: i32, param: Value) -> Result<(), Error> {
fn fsfm(count: usize, param: Value) -> Result<(), Error> {
let mut handle = get_tape_handle(&param)?;
handle.mtop(MTCmd::MTFSFM, count, "fsfm")?;
handle.forward_space_count_files(count)?;
handle.backward_space_count_files(1)?;
Ok(())
}
@ -526,7 +516,7 @@ fn fsfm(count: i32, param: Value) -> Result<(), Error> {
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
count: {
@ -536,11 +526,11 @@ fn fsfm(count: i32, param: Value) -> Result<(), Error> {
},
)]
/// Forward space records.
fn fsr(count: i32, param: Value) -> Result<(), Error> {
fn fsr(count: usize, param: Value) -> Result<(), Error> {
let mut handle = get_tape_handle(&param)?;
handle.mtop(MTCmd::MTFSR, count, "forward space records")?;
handle.forward_space_count_records(count)?;
Ok(())
}
@ -554,7 +544,7 @@ fn fsr(count: i32, param: Value) -> Result<(), Error> {
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
},
@ -564,7 +554,7 @@ fn fsr(count: i32, param: Value) -> Result<(), Error> {
fn load(param: Value) -> Result<(), Error> {
let mut handle = get_tape_handle(&param)?;
handle.mtload()?;
handle.load()?;
Ok(())
}
@ -578,7 +568,7 @@ fn load(param: Value) -> Result<(), Error> {
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
},
@ -589,7 +579,7 @@ fn lock(param: Value) -> Result<(), Error> {
let mut handle = get_tape_handle(&param)?;
handle.mtop(MTCmd::MTLOCK, 1, "lock tape drive door")?;
handle.lock()?;
Ok(())
}
@ -603,7 +593,7 @@ fn lock(param: Value) -> Result<(), Error> {
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
},
@ -634,7 +624,7 @@ fn scan(param: Value) -> Result<(), Error> {
let output_format = get_output_format(&param);
let list = linux_tape_device_list();
let list = lto_tape_device_list();
if output_format == "json-pretty" {
println!("{}", serde_json::to_string_pretty(&list)?);
@ -657,7 +647,6 @@ fn scan(param: Value) -> Result<(), Error> {
Ok(())
}
#[api(
input: {
properties: {
@ -666,36 +655,7 @@ fn scan(param: Value) -> Result<(), Error> {
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
optional: true,
},
size: {
description: "Block size in bytes.",
minimum: 0,
},
},
},
)]
/// Set the block size of the drive
fn setblk(size: i32, param: Value) -> Result<(), Error> {
let mut handle = get_tape_handle(&param)?;
handle.mtop(MTCmd::MTSETBLK, size, "set block size")?;
Ok(())
}
#[api(
input: {
properties: {
drive: {
schema: DRIVE_NAME_SCHEMA,
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
"output-format": {
@ -745,123 +705,7 @@ fn status(param: Value) -> Result<(), Error> {
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
optional: true,
},
options: {
schema: DRIVE_OPTION_LIST_SCHEMA,
optional: true,
},
defaults: {
description: "Set default options (buffer-writes async-writes read-ahead can-bsr).",
type: bool,
optional: true,
},
},
},
)]
/// Set device driver options (root only)
fn st_options(
options: Option<Vec<String>>,
defaults: Option<bool>,
param: Value) -> Result<(), Error> {
let handle = get_tape_handle(&param)?;
let options = match defaults {
Some(true) => {
if options.is_some() {
bail!("option --defaults conflicts with specified options");
}
let mut list = Vec::new();
list.push(String::from("buffer-writes"));
list.push(String::from("async-writes"));
list.push(String::from("read-ahead"));
list.push(String::from("can-bsr"));
list
}
Some(false) | None => {
options.unwrap_or_else(|| Vec::new())
}
};
let value = parse_drive_options(options)?;
handle.set_drive_buffer_options(value)?;
Ok(())
}
#[api(
input: {
properties: {
drive: {
schema: DRIVE_NAME_SCHEMA,
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
optional: true,
},
options: {
schema: DRIVE_OPTION_LIST_SCHEMA,
},
},
},
)]
/// Set selected device driver options bits (root only)
fn st_set_options(options: Vec<String>, param: Value) -> Result<(), Error> {
let handle = get_tape_handle(&param)?;
let value = parse_drive_options(options)?;
handle.drive_buffer_set_options(value)?;
Ok(())
}
#[api(
input: {
properties: {
drive: {
schema: DRIVE_NAME_SCHEMA,
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
optional: true,
},
options: {
schema: DRIVE_OPTION_LIST_SCHEMA,
},
},
},
)]
/// Clear selected device driver options bits (root only)
fn st_clear_options(options: Vec<String>, param: Value) -> Result<(), Error> {
let handle = get_tape_handle(&param)?;
let value = parse_drive_options(options)?;
handle.drive_buffer_clear_options(value)?;
Ok(())
}
#[api(
input: {
properties: {
drive: {
schema: DRIVE_NAME_SCHEMA,
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
},
@ -872,7 +716,7 @@ fn unlock(param: Value) -> Result<(), Error> {
let mut handle = get_tape_handle(&param)?;
handle.mtop(MTCmd::MTUNLOCK, 1, "unlock tape drive door")?;
handle.unlock()?;
Ok(())
}
@ -886,7 +730,7 @@ fn unlock(param: Value) -> Result<(), Error> {
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
"output-format": {
@ -935,7 +779,7 @@ fn volume_statistics(param: Value) -> Result<(), Error> {
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
count: {
@ -946,10 +790,68 @@ fn volume_statistics(param: Value) -> Result<(), Error> {
},
)]
/// Write count (default 1) EOF marks at current position.
fn weof(count: Option<i32>, param: Value) -> Result<(), Error> {
fn weof(count: Option<usize>, param: Value) -> Result<(), Error> {
let count = count.unwrap_or(1);
let mut handle = get_tape_handle(&param)?;
handle.mtop(MTCmd::MTWEOF, count.unwrap_or(1), "write EOF mark")?;
handle.write_filemarks(count)?;
Ok(())
}
#[api(
input: {
properties: {
drive: {
schema: DRIVE_NAME_SCHEMA,
optional: true,
},
device: {
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
compression: {
description: "Enable/disable compression.",
type: bool,
optional: true,
},
blocksize: {
description: "Set tape drive block_length (0 is variable length).",
type: u32,
minimum: 0,
maximum: 0x80_00_00,
optional: true,
},
buffer_mode: {
description: "Use drive buffer.",
type: bool,
optional: true,
},
defaults: {
description: "Set default options",
type: bool,
optional: true,
},
},
},
)]
/// Set various drive options
fn options(
compression: Option<bool>,
blocksize: Option<u32>,
buffer_mode: Option<bool>,
defaults: Option<bool>,
param: Value,
) -> Result<(), Error> {
let mut handle = get_tape_handle(&param)?;
if let Some(true) = defaults {
handle.set_default_options()?;
}
handle.set_drive_options(compression, blocksize, buffer_mode)?;
Ok(())
}
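A hypothetical use of the consolidated handler, resetting to defaults and then selecting variable block size with compression and drive buffering enabled:
let mut handle = get_tape_handle(&param)?;
handle.set_default_options()?;
// compression on, block_length 0 (variable), drive buffer on
handle.set_drive_options(Some(true), Some(0), Some(true))?;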
@ -967,7 +869,6 @@ fn main() -> Result<(), Error> {
CliCommand::new(method)
.completion_cb("drive", complete_drive_name)
.completion_cb("device", complete_drive_path)
.completion_cb("options", complete_option_name)
};
let cmd_def = CliCommandMap::new()
@ -980,18 +881,16 @@ fn main() -> Result<(), Error> {
.insert("eject", std_cmd(&API_METHOD_EJECT))
.insert("eod", std_cmd(&API_METHOD_EOD))
.insert("erase", std_cmd(&API_METHOD_ERASE))
.insert("format", std_cmd(&API_METHOD_FORMAT))
.insert("fsf", std_cmd(&API_METHOD_FSF).arg_param(&["count"]))
.insert("fsfm", std_cmd(&API_METHOD_FSFM).arg_param(&["count"]))
.insert("fsr", std_cmd(&API_METHOD_FSR).arg_param(&["count"]))
.insert("load", std_cmd(&API_METHOD_LOAD))
.insert("lock", std_cmd(&API_METHOD_LOCK))
.insert("options", std_cmd(&API_METHOD_OPTIONS))
.insert("rewind", std_cmd(&API_METHOD_REWIND))
.insert("scan", CliCommand::new(&API_METHOD_SCAN))
.insert("setblk", CliCommand::new(&API_METHOD_SETBLK).arg_param(&["size"]))
.insert("status", std_cmd(&API_METHOD_STATUS))
.insert("stoptions", std_cmd(&API_METHOD_ST_OPTIONS).arg_param(&["options"]))
.insert("stsetoptions", std_cmd(&API_METHOD_ST_SET_OPTIONS).arg_param(&["options"]))
.insert("stclearoptions", std_cmd(&API_METHOD_ST_CLEAR_OPTIONS).arg_param(&["options"]))
.insert("tape-alert-flags", std_cmd(&API_METHOD_TAPE_ALERT_FLAGS))
.insert("unlock", std_cmd(&API_METHOD_UNLOCK))
.insert("volume-statistics", std_cmd(&API_METHOD_VOLUME_STATISTICS))
@ -1005,11 +904,3 @@ fn main() -> Result<(), Error> {
Ok(())
}
// Completion helpers
pub fn complete_option_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
DRIVE_OPTIONS
.keys()
.map(String::from)
.collect()
}

View File

@ -33,7 +33,7 @@ use proxmox_backup::{
SCSI_CHANGER_PATH_SCHEMA,
CHANGER_NAME_SCHEMA,
ScsiTapeChanger,
LinuxTapeDrive,
LtoTapeDrive,
},
tape::{
linux_tape_changer_list,
@ -67,7 +67,7 @@ fn get_changer_handle(param: &Value) -> Result<File, Error> {
if let Ok(name) = std::env::var("PROXMOX_TAPE_DRIVE") {
let (config, _digest) = config::drive::config()?;
let drive: LinuxTapeDrive = config.lookup("linux", &name)?;
let drive: LtoTapeDrive = config.lookup("lto", &name)?;
if let Some(changer) = drive.changer {
let changer_config: ScsiTapeChanger = config.lookup("changer", &changer)?;
eprintln!("using device {}", changer_config.path);

View File

@ -6,8 +6,11 @@ use proxmox::api::RpcEnvironmentType;
//use proxmox_backup::tools;
//use proxmox_backup::api_schema::config::*;
use proxmox_backup::server::rest::*;
use proxmox_backup::server;
use proxmox_backup::server::{
self,
auth::default_api_auth,
rest::*,
};
use proxmox_backup::tools::daemon;
use proxmox_backup::auth_helpers::*;
use proxmox_backup::config;
@ -53,7 +56,11 @@ async fn run() -> Result<(), Error> {
let _ = csrf_secret(); // load with lazy_static
let mut config = server::ApiConfig::new(
buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PRIVILEGED)?;
buildcfg::JS_DIR,
&proxmox_backup::api2::ROUTER,
RpcEnvironmentType::PRIVILEGED,
default_api_auth(),
)?;
let mut commando_sock = server::CommandoSocket::new(server::our_ctrl_sock());

View File

@ -1,7 +1,5 @@
use std::collections::HashSet;
use std::convert::TryFrom;
use std::io::{self, Read, Write, Seek, SeekFrom};
use std::os::unix::io::{FromRawFd, RawFd};
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
@ -19,7 +17,7 @@ use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use proxmox::{
tools::{
time::{strftime_local, epoch_i64},
fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size},
fs::{file_get_json, replace_file, CreateOptions, image_size},
},
api::{
api,
@ -32,7 +30,11 @@ use proxmox::{
};
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
use proxmox_backup::tools;
use proxmox_backup::tools::{
self,
StdChannelWriter,
TokioWriterAdapter,
};
use proxmox_backup::api2::types::*;
use proxmox_backup::api2::version;
use proxmox_backup::client::*;
@ -67,8 +69,18 @@ use proxmox_backup::backup::{
mod proxmox_backup_client;
use proxmox_backup_client::*;
mod proxmox_client_tools;
use proxmox_client_tools::*;
pub mod proxmox_client_tools;
use proxmox_client_tools::{
complete_archive_name, complete_auth_id, complete_backup_group, complete_backup_snapshot,
complete_backup_source, complete_chunk_size, complete_group_or_snapshot,
complete_img_archive_name, complete_pxar_archive_name, complete_repository, connect,
extract_repository_from_value,
key_source::{
crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
KEYFILE_SCHEMA, MASTER_PUBKEY_FD_SCHEMA, MASTER_PUBKEY_FILE_SCHEMA,
},
CHUNK_SIZE_SCHEMA, REPO_URL_SCHEMA,
};
fn record_repository(repo: &BackupRepository) {
@ -162,7 +174,7 @@ async fn backup_directory<P: AsRef<Path>>(
dir_path: P,
archive_name: &str,
chunk_size: Option<usize>,
catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
catalog: Arc<Mutex<CatalogWriter<TokioWriterAdapter<StdChannelWriter>>>>,
pxar_create_options: proxmox_backup::pxar::PxarCreateOptions,
upload_options: UploadOptions,
) -> Result<BackupStats, Error> {
@ -460,7 +472,7 @@ async fn start_garbage_collection(param: Value) -> Result<Value, Error> {
}
struct CatalogUploadResult {
catalog_writer: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
catalog_writer: Arc<Mutex<CatalogWriter<TokioWriterAdapter<StdChannelWriter>>>>,
result: tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>,
}
@ -473,7 +485,7 @@ fn spawn_catalog_upload(
let catalog_chunk_size = 512*1024;
let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));
let catalog_writer = Arc::new(Mutex::new(CatalogWriter::new(crate::tools::StdChannelWriter::new(catalog_tx))?));
let catalog_writer = Arc::new(Mutex::new(CatalogWriter::new(TokioWriterAdapter::new(StdChannelWriter::new(catalog_tx)))?));
let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();
@ -499,437 +511,6 @@ fn spawn_catalog_upload(
Ok(CatalogUploadResult { catalog_writer, result: catalog_result_rx })
}
#[derive(Clone, Debug, Eq, PartialEq)]
enum KeySource {
DefaultKey,
Fd,
Path(String),
}
fn format_key_source(source: &KeySource, key_type: &str) -> String {
match source {
KeySource::DefaultKey => format!("Using default {} key..", key_type),
KeySource::Fd => format!("Using {} key from file descriptor..", key_type),
KeySource::Path(path) => format!("Using {} key from '{}'..", key_type, path),
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
struct KeyWithSource {
pub source: KeySource,
pub key: Vec<u8>,
}
impl KeyWithSource {
pub fn from_fd(key: Vec<u8>) -> Self {
Self {
source: KeySource::Fd,
key,
}
}
pub fn from_default(key: Vec<u8>) -> Self {
Self {
source: KeySource::DefaultKey,
key,
}
}
pub fn from_path(path: String, key: Vec<u8>) -> Self {
Self {
source: KeySource::Path(path),
key,
}
}
}
#[derive(Debug, Eq, PartialEq)]
struct CryptoParams {
mode: CryptMode,
enc_key: Option<KeyWithSource>,
// FIXME switch to openssl::rsa::rsa<openssl::pkey::Public> once that is Eq?
master_pubkey: Option<KeyWithSource>,
}
fn crypto_parameters(param: &Value) -> Result<CryptoParams, Error> {
let keyfile = match param.get("keyfile") {
Some(Value::String(keyfile)) => Some(keyfile),
Some(_) => bail!("bad --keyfile parameter type"),
None => None,
};
let key_fd = match param.get("keyfd") {
Some(Value::Number(key_fd)) => Some(
RawFd::try_from(key_fd
.as_i64()
.ok_or_else(|| format_err!("bad key fd: {:?}", key_fd))?
)
.map_err(|err| format_err!("bad key fd: {:?}: {}", key_fd, err))?
),
Some(_) => bail!("bad --keyfd parameter type"),
None => None,
};
let master_pubkey_file = match param.get("master-pubkey-file") {
Some(Value::String(keyfile)) => Some(keyfile),
Some(_) => bail!("bad --master-pubkey-file parameter type"),
None => None,
};
let master_pubkey_fd = match param.get("master-pubkey-fd") {
Some(Value::Number(key_fd)) => Some(
RawFd::try_from(key_fd
.as_i64()
.ok_or_else(|| format_err!("bad master public key fd: {:?}", key_fd))?
)
.map_err(|err| format_err!("bad public master key fd: {:?}: {}", key_fd, err))?
),
Some(_) => bail!("bad --master-pubkey-fd parameter type"),
None => None,
};
let mode: Option<CryptMode> = match param.get("crypt-mode") {
Some(mode) => Some(serde_json::from_value(mode.clone())?),
None => None,
};
let key = match (keyfile, key_fd) {
(None, None) => None,
(Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
(Some(keyfile), None) => Some(KeyWithSource::from_path(
keyfile.clone(),
file_get_contents(keyfile)?,
)),
(None, Some(fd)) => {
let input = unsafe { std::fs::File::from_raw_fd(fd) };
let mut data = Vec::new();
let _len: usize = { input }.read_to_end(&mut data).map_err(|err| {
format_err!("error reading encryption key from fd {}: {}", fd, err)
})?;
Some(KeyWithSource::from_fd(data))
}
};
let master_pubkey = match (master_pubkey_file, master_pubkey_fd) {
(None, None) => None,
(Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
(Some(keyfile), None) => Some(KeyWithSource::from_path(
keyfile.clone(),
file_get_contents(keyfile)?,
)),
(None, Some(fd)) => {
let input = unsafe { std::fs::File::from_raw_fd(fd) };
let mut data = Vec::new();
let _len: usize = { input }
.read_to_end(&mut data)
.map_err(|err| format_err!("error reading master key from fd {}: {}", fd, err))?;
Some(KeyWithSource::from_fd(data))
}
};
let res = match mode {
// no crypt mode, enable encryption if keys are available
None => match (key, master_pubkey) {
// only default keys if available
(None, None) => match key::read_optional_default_encryption_key()? {
None => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },
enc_key => {
let master_pubkey = key::read_optional_default_master_pubkey()?;
CryptoParams {
mode: CryptMode::Encrypt,
enc_key,
master_pubkey,
}
},
},
// explicit master key, default enc key needed
(None, master_pubkey) => match key::read_optional_default_encryption_key()? {
None => bail!("--master-pubkey-file/--master-pubkey-fd specified, but no key available"),
enc_key => {
CryptoParams {
mode: CryptMode::Encrypt,
enc_key,
master_pubkey,
}
},
},
// explicit keyfile, maybe default master key
(enc_key, None) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey: key::read_optional_default_master_pubkey()? },
// explicit keyfile and master key
(enc_key, master_pubkey) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey },
},
// explicitly disabled encryption
Some(CryptMode::None) => match (key, master_pubkey) {
// no keys => OK, no encryption
(None, None) => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },
// --keyfile and --crypt-mode=none
(Some(_), _) => bail!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive"),
// --master-pubkey-file and --crypt-mode=none
(_, Some(_)) => bail!("--master-pubkey-file/--master-pubkey-fd and --crypt-mode=none are mutually exclusive"),
},
// explicitly enabled encryption
Some(mode) => match (key, master_pubkey) {
// no key, maybe master key
(None, master_pubkey) => match key::read_optional_default_encryption_key()? {
None => bail!("--crypt-mode without --keyfile and no default key file available"),
enc_key => {
eprintln!("Encrypting with default encryption key!");
let master_pubkey = match master_pubkey {
None => key::read_optional_default_master_pubkey()?,
master_pubkey => master_pubkey,
};
CryptoParams {
mode,
enc_key,
master_pubkey,
}
},
},
// --keyfile and --crypt-mode other than none
(enc_key, master_pubkey) => {
let master_pubkey = match master_pubkey {
None => key::read_optional_default_master_pubkey()?,
master_pubkey => master_pubkey,
};
CryptoParams { mode, enc_key, master_pubkey }
},
},
};
Ok(res)
}
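To summarize the decision table above: with no --crypt-mode, encryption is enabled exactly when some key (explicit or default) is available; --crypt-mode=none rejects any explicit key or master pubkey; encrypt/sign-only require a key, falling back to the default one; and an explicit master pubkey additionally requires an encryption key to be usable.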
#[test]
// WARNING: there must only be one test for crypto_parameters as the default key handling is not
// safe w.r.t. concurrency
fn test_crypto_parameters_handling() -> Result<(), Error> {
let some_key = vec![1;1];
let default_key = vec![2;1];
let some_master_key = vec![3;1];
let default_master_key = vec![4;1];
let keypath = "./target/testout/keyfile.test";
let master_keypath = "./target/testout/masterkeyfile.test";
let invalid_keypath = "./target/testout/invalid_keyfile.test";
let no_key_res = CryptoParams {
enc_key: None,
master_pubkey: None,
mode: CryptMode::None,
};
let some_key_res = CryptoParams {
enc_key: Some(KeyWithSource::from_path(
keypath.to_string(),
some_key.clone(),
)),
master_pubkey: None,
mode: CryptMode::Encrypt,
};
let some_key_some_master_res = CryptoParams {
enc_key: Some(KeyWithSource::from_path(
keypath.to_string(),
some_key.clone(),
)),
master_pubkey: Some(KeyWithSource::from_path(
master_keypath.to_string(),
some_master_key.clone(),
)),
mode: CryptMode::Encrypt,
};
let some_key_default_master_res = CryptoParams {
enc_key: Some(KeyWithSource::from_path(
keypath.to_string(),
some_key.clone(),
)),
master_pubkey: Some(KeyWithSource::from_default(default_master_key.clone())),
mode: CryptMode::Encrypt,
};
let some_key_sign_res = CryptoParams {
enc_key: Some(KeyWithSource::from_path(
keypath.to_string(),
some_key.clone(),
)),
master_pubkey: None,
mode: CryptMode::SignOnly,
};
let default_key_res = CryptoParams {
enc_key: Some(KeyWithSource::from_default(default_key.clone())),
master_pubkey: None,
mode: CryptMode::Encrypt,
};
let default_key_sign_res = CryptoParams {
enc_key: Some(KeyWithSource::from_default(default_key.clone())),
master_pubkey: None,
mode: CryptMode::SignOnly,
};
replace_file(&keypath, &some_key, CreateOptions::default())?;
replace_file(&master_keypath, &some_master_key, CreateOptions::default())?;
// no params, no default key == no key
let res = crypto_parameters(&json!({}));
assert_eq!(res.unwrap(), no_key_res);
// keyfile param == key from keyfile
let res = crypto_parameters(&json!({"keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// crypt mode none == no key
let res = crypto_parameters(&json!({"crypt-mode": "none"}));
assert_eq!(res.unwrap(), no_key_res);
// crypt mode encrypt/sign-only, no keyfile, no default key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
// crypt mode none with explicit key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
// crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_sign_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// invalid keyfile parameter always errors
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
// now set a default key
unsafe { key::set_test_encryption_key(Ok(Some(default_key.clone()))); }
// and repeat
// no params but default key == default key
let res = crypto_parameters(&json!({}));
assert_eq!(res.unwrap(), default_key_res);
// keyfile param == key from keyfile
let res = crypto_parameters(&json!({"keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// crypt mode none == no key
let res = crypto_parameters(&json!({"crypt-mode": "none"}));
assert_eq!(res.unwrap(), no_key_res);
// crypt mode encrypt/sign-only, no keyfile, default key == default key with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "sign-only"}));
assert_eq!(res.unwrap(), default_key_sign_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt"}));
assert_eq!(res.unwrap(), default_key_res);
// crypt mode none with explicit key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
// crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_sign_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// invalid keyfile parameter always errors
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
// now make default key retrieval error
unsafe { key::set_test_encryption_key(Err(format_err!("test error"))); }
// and repeat
// no params, default key retrieval errors == Error
assert!(crypto_parameters(&json!({})).is_err());
// keyfile param == key from keyfile
let res = crypto_parameters(&json!({"keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// crypt mode none == no key
let res = crypto_parameters(&json!({"crypt-mode": "none"}));
assert_eq!(res.unwrap(), no_key_res);
// crypt mode encrypt/sign-only, no keyfile, default key error == Error
assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
// crypt mode none with explicit key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
// crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_sign_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// invalid keyfile parameter always errors
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
// now remove default key again
unsafe { key::set_test_encryption_key(Ok(None)); }
// set a default master key
unsafe { key::set_test_default_master_pubkey(Ok(Some(default_master_key.clone()))); }
// and use an explicit master key
assert!(crypto_parameters(&json!({"master-pubkey-file": master_keypath})).is_err());
// just a default == no key
let res = crypto_parameters(&json!({}));
assert_eq!(res.unwrap(), no_key_res);
// keyfile param == key from keyfile
let res = crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": master_keypath}));
assert_eq!(res.unwrap(), some_key_some_master_res);
// same with fallback to default master key
let res = crypto_parameters(&json!({"keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_default_master_res);
// crypt mode none == error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "master-pubkey-file": master_keypath})).is_err());
// with just default master key == no key
let res = crypto_parameters(&json!({"crypt-mode": "none"}));
assert_eq!(res.unwrap(), no_key_res);
// crypt mode encrypt without enc key == error
assert!(crypto_parameters(&json!({"crypt-mode": "encrypt", "master-pubkey-file": master_keypath})).is_err());
assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
// crypt mode none with explicit key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath, "master-pubkey-file": master_keypath})).is_err());
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
// crypt mode encrypt with keyfile == key from keyfile with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath, "master-pubkey-file": master_keypath}));
assert_eq!(res.unwrap(), some_key_some_master_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_default_master_res);
// invalid master keyfile parameter always errors when a key is passed, even with a valid
// default master key
assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath})).is_err());
assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "none"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "encrypt"})).is_err());
Ok(())
}
#[api(
input: {
properties: {
@ -1160,7 +741,7 @@ async fn create_backup(
);
let (key, created, fingerprint) =
decrypt_key(&key_with_source.key, &key::get_encryption_key_password)?;
decrypt_key(&key_with_source.key, &get_encryption_key_password)?;
println!("Encryption key fingerprint: {}", fingerprint);
let crypt_config = CryptConfig::new(key)?;
@ -1510,7 +1091,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
None => None,
Some(ref key) => {
let (key, _, _) =
decrypt_key(&key.key, &key::get_encryption_key_password).map_err(|err| {
decrypt_key(&key.key, &get_encryption_key_password).map_err(|err| {
eprintln!("{}", format_key_source(&key.source, "encryption"));
err
})?;

View File

@ -14,6 +14,7 @@ use proxmox::api::RpcEnvironmentType;
use proxmox_backup::{
backup::DataStore,
server::{
auth::default_api_auth,
WorkerTask,
ApiConfig,
rest::*,
@ -40,6 +41,7 @@ use proxmox_backup::tools::{
disks::{
DiskManage,
zfs_pool_stats,
get_pool_from_dataset,
},
logrotate::LogRotate,
socket::{
@ -84,12 +86,11 @@ async fn run() -> Result<(), Error> {
let _ = csrf_secret(); // load with lazy_static
let mut config = ApiConfig::new(
buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;
// Enable experimental tape UI if tape.cfg exists
if Path::new("/etc/proxmox-backup/tape.cfg").exists() {
config.enable_tape_ui = true;
}
buildcfg::JS_DIR,
&proxmox_backup::api2::ROUTER,
RpcEnvironmentType::PUBLIC,
default_api_auth(),
)?;
config.add_alias("novnc", "/usr/share/novnc-pve");
config.add_alias("extjs", "/usr/share/javascript/extjs");
@ -865,8 +866,9 @@ fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &st
let mut device_stat = None;
match fs_type.as_str() {
"zfs" => {
if let Some(pool) = source {
match zfs_pool_stats(&pool) {
if let Some(source) = source {
let pool = get_pool_from_dataset(&source).unwrap_or(&source);
match zfs_pool_stats(pool) {
Ok(stat) => device_stat = stat,
Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
}

View File

@ -0,0 +1,477 @@
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;
use std::sync::Arc;
use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};
use proxmox::api::{
api,
cli::{
default_table_format_options, format_and_print_result_full, get_output_format,
run_cli_command, CliCommand, CliCommandMap, CliEnvironment, ColumnConfig, OUTPUT_FORMAT,
},
};
use pxar::accessor::aio::Accessor;
use pxar::decoder::aio::Decoder;
use proxmox_backup::api2::{helpers, types::ArchiveEntry};
use proxmox_backup::backup::{
decrypt_key, BackupDir, BufferedDynamicReader, CatalogReader, CryptConfig, CryptMode,
DirEntryAttribute, IndexFile, LocalDynamicReadAt, CATALOG_NAME,
};
use proxmox_backup::client::{BackupReader, RemoteChunkReader};
use proxmox_backup::pxar::{create_zip, extract_sub_dir, extract_sub_dir_seq};
use proxmox_backup::tools;
// use "pub" so rust doesn't complain about "unused" functions in the module
pub mod proxmox_client_tools;
use proxmox_client_tools::{
complete_group_or_snapshot, complete_repository, connect, extract_repository_from_value,
key_source::{
crypto_parameters_keep_fd, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
KEYFILE_SCHEMA,
},
REPO_URL_SCHEMA,
};
mod proxmox_file_restore;
use proxmox_file_restore::*;
enum ExtractPath {
ListArchives,
Pxar(String, Vec<u8>),
VM(String, Vec<u8>),
}
fn parse_path(path: String, base64: bool) -> Result<ExtractPath, Error> {
let mut bytes = if base64 {
base64::decode(path)?
} else {
path.into_bytes()
};
if bytes == b"/" {
return Ok(ExtractPath::ListArchives);
}
while !bytes.is_empty() && bytes[0] == b'/' {
bytes.remove(0);
}
let (file, path) = {
let slash_pos = bytes.iter().position(|c| *c == b'/').unwrap_or(bytes.len());
let path = bytes.split_off(slash_pos);
let file = String::from_utf8(bytes)?;
(file, path)
};
if file.ends_with(".pxar.didx") {
Ok(ExtractPath::Pxar(file, path))
} else if file.ends_with(".img.fidx") {
Ok(ExtractPath::VM(file, path))
} else {
bail!("'{}' is not supported for file-restore", file);
}
}
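For orientation, the mapping this parser implements looks like the following (archive names are hypothetical; base64 decoding elided):
// parse_path("/".into(), false)                          => ExtractPath::ListArchives
// parse_path("/root.pxar.didx".into(), false)            => ExtractPath::Pxar("root.pxar.didx", b"")
// parse_path("/root.pxar.didx/etc/hosts".into(), false)  => ExtractPath::Pxar("root.pxar.didx", b"/etc/hosts")
// parse_path("/drive-scsi0.img.fidx/part".into(), false) => ExtractPath::VM("drive-scsi0.img.fidx", b"/part")
// parse_path("/notes.txt".into(), false)                 => Err(...) ("not supported for file-restore")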
fn keyfile_path(param: &Value) -> Option<String> {
if let Some(Value::String(keyfile)) = param.get("keyfile") {
return Some(keyfile.to_owned());
}
if let Some(Value::Number(keyfd)) = param.get("keyfd") {
return Some(format!("/dev/fd/{}", keyfd));
}
None
}
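Both sources normalize to a path string, with file descriptors addressed via /dev/fd. A minimal sketch, assuming serde_json's json! macro:
// keyfile_path(&json!({"keyfile": "/path/key.json"})) == Some("/path/key.json".to_string())
// keyfile_path(&json!({"keyfd": 5}))                  == Some("/dev/fd/5".to_string())
// keyfile_path(&json!({}))                            == None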
#[api(
input: {
properties: {
repository: {
schema: REPO_URL_SCHEMA,
optional: true,
},
snapshot: {
type: String,
description: "Group/Snapshot path.",
},
"path": {
description: "Path to restore. Directories will be restored as .zip files.",
type: String,
},
"base64": {
type: Boolean,
description: "If set, 'path' will be interpreted as base64 encoded.",
optional: true,
default: false,
},
keyfile: {
schema: KEYFILE_SCHEMA,
optional: true,
},
"keyfd": {
schema: KEYFD_SCHEMA,
optional: true,
},
"crypt-mode": {
type: CryptMode,
optional: true,
},
"driver": {
type: BlockDriverType,
optional: true,
},
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
},
returns: {
description: "A list of elements under the given path",
type: Array,
items: {
type: ArchiveEntry,
}
}
)]
/// List a directory from a backup snapshot.
async fn list(
snapshot: String,
path: String,
base64: bool,
param: Value,
) -> Result<(), Error> {
let repo = extract_repository_from_value(&param)?;
let snapshot: BackupDir = snapshot.parse()?;
let path = parse_path(path, base64)?;
let keyfile = keyfile_path(&param);
let crypto = crypto_parameters_keep_fd(&param)?;
let crypt_config = match crypto.enc_key {
None => None,
Some(ref key) => {
let (key, _, _) =
decrypt_key(&key.key, &get_encryption_key_password).map_err(|err| {
eprintln!("{}", format_key_source(&key.source, "encryption"));
err
})?;
Some(Arc::new(CryptConfig::new(key)?))
}
};
let client = connect(&repo)?;
let client = BackupReader::start(
client,
crypt_config.clone(),
repo.store(),
&snapshot.group().backup_type(),
&snapshot.group().backup_id(),
snapshot.backup_time(),
true,
)
.await?;
let (manifest, _) = client.download_manifest().await?;
manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
let result = match path {
ExtractPath::ListArchives => {
let mut entries = vec![];
for file in manifest.files() {
if !file.filename.ends_with(".pxar.didx") && !file.filename.ends_with(".img.fidx") {
continue;
}
let path = format!("/{}", file.filename);
let attr = if file.filename.ends_with(".pxar.didx") {
// a pxar file is a file archive, so its root is also a directory root
Some(&DirEntryAttribute::Directory { start: 0 })
} else {
None
};
entries.push(ArchiveEntry::new(path.as_bytes(), attr));
}
Ok(entries)
}
ExtractPath::Pxar(file, mut path) => {
let index = client
.download_dynamic_index(&manifest, CATALOG_NAME)
.await?;
let most_used = index.find_most_used_chunks(8);
let file_info = manifest.lookup_file_info(&CATALOG_NAME)?;
let chunk_reader = RemoteChunkReader::new(
client.clone(),
crypt_config,
file_info.chunk_crypt_mode(),
most_used,
);
let reader = BufferedDynamicReader::new(index, chunk_reader);
let mut catalog_reader = CatalogReader::new(reader);
let mut fullpath = file.into_bytes();
fullpath.append(&mut path);
helpers::list_dir_content(&mut catalog_reader, &fullpath)
}
ExtractPath::VM(file, path) => {
let details = SnapRestoreDetails {
manifest,
repo,
snapshot,
keyfile,
};
let driver: Option<BlockDriverType> = match param.get("driver") {
Some(drv) => Some(serde_json::from_value(drv.clone())?),
None => None,
};
data_list(driver, details, file, path).await
}
}?;
let options = default_table_format_options()
.sortby("type", false)
.sortby("text", false)
.column(ColumnConfig::new("type"))
.column(ColumnConfig::new("text").header("name"))
.column(ColumnConfig::new("mtime").header("last modified"))
.column(ColumnConfig::new("size"));
let output_format = get_output_format(&param);
format_and_print_result_full(
&mut json!(result),
&API_METHOD_LIST.returns,
&output_format,
&options,
);
Ok(())
}
#[api(
input: {
properties: {
repository: {
schema: REPO_URL_SCHEMA,
optional: true,
},
snapshot: {
type: String,
description: "Group/Snapshot path.",
},
"path": {
description: "Path to restore. Directories will be restored as .zip files if extracted to stdout.",
type: String,
},
"base64": {
type: Boolean,
description: "If set, 'path' will be interpreted as base64 encoded.",
optional: true,
default: false,
},
target: {
type: String,
optional: true,
description: "Target directory path. Use '-' to write to standard output.",
},
keyfile: {
schema: KEYFILE_SCHEMA,
optional: true,
},
"keyfd": {
schema: KEYFD_SCHEMA,
optional: true,
},
"crypt-mode": {
type: CryptMode,
optional: true,
},
verbose: {
type: Boolean,
description: "Print verbose information",
optional: true,
default: false,
},
"driver": {
type: BlockDriverType,
optional: true,
},
}
}
)]
/// Restore files from a backup snapshot.
async fn extract(
snapshot: String,
path: String,
base64: bool,
target: Option<String>,
verbose: bool,
param: Value,
) -> Result<(), Error> {
let repo = extract_repository_from_value(&param)?;
let snapshot: BackupDir = snapshot.parse()?;
let orig_path = path;
let path = parse_path(orig_path.clone(), base64)?;
let target = match target {
Some(target) if target == "-" => None,
Some(target) => Some(PathBuf::from(target)),
None => Some(std::env::current_dir()?),
};
let keyfile = keyfile_path(&param);
let crypto = crypto_parameters_keep_fd(&param)?;
let crypt_config = match crypto.enc_key {
None => None,
Some(ref key) => {
let (key, _, _) =
decrypt_key(&key.key, &get_encryption_key_password).map_err(|err| {
eprintln!("{}", format_key_source(&key.source, "encryption"));
err
})?;
Some(Arc::new(CryptConfig::new(key)?))
}
};
let client = connect(&repo)?;
let client = BackupReader::start(
client,
crypt_config.clone(),
repo.store(),
&snapshot.group().backup_type(),
&snapshot.group().backup_id(),
snapshot.backup_time(),
true,
)
.await?;
let (manifest, _) = client.download_manifest().await?;
match path {
ExtractPath::Pxar(archive_name, path) => {
let file_info = manifest.lookup_file_info(&archive_name)?;
let index = client
.download_dynamic_index(&manifest, &archive_name)
.await?;
let most_used = index.find_most_used_chunks(8);
let chunk_reader = RemoteChunkReader::new(
client.clone(),
crypt_config,
file_info.chunk_crypt_mode(),
most_used,
);
let reader = BufferedDynamicReader::new(index, chunk_reader);
let archive_size = reader.archive_size();
let reader = LocalDynamicReadAt::new(reader);
let decoder = Accessor::new(reader, archive_size).await?;
extract_to_target(decoder, &path, target, verbose).await?;
}
ExtractPath::VM(file, path) => {
let details = SnapRestoreDetails {
manifest,
repo,
snapshot,
keyfile,
};
let driver: Option<BlockDriverType> = match param.get("driver") {
Some(drv) => Some(serde_json::from_value(drv.clone())?),
None => None,
};
if let Some(mut target) = target {
let reader = data_extract(driver, details, file, path.clone(), true).await?;
let decoder = Decoder::from_tokio(reader).await?;
extract_sub_dir_seq(&target, decoder, verbose).await?;
// we extracted a .pxarexclude-cli file auto-generated by the VM when encoding the
// archive; this file is of no use to the user, so try to remove it
target.push(".pxarexclude-cli");
std::fs::remove_file(target).map_err(|e| {
format_err!("unable to remove temporary .pxarexclude-cli file - {}", e)
})?;
} else {
let mut reader = data_extract(driver, details, file, path.clone(), false).await?;
tokio::io::copy(&mut reader, &mut tokio::io::stdout()).await?;
}
}
_ => {
bail!("cannot extract '{}'", orig_path);
}
}
Ok(())
}
async fn extract_to_target<T>(
decoder: Accessor<T>,
path: &[u8],
target: Option<PathBuf>,
verbose: bool,
) -> Result<(), Error>
where
T: pxar::accessor::ReadAt + Clone + Send + Sync + Unpin + 'static,
{
let path = if path.is_empty() { b"/" } else { path };
let root = decoder.open_root().await?;
let file = root
.lookup(OsStr::from_bytes(path))
.await?
.ok_or_else(|| format_err!("error opening '{:?}'", path))?;
if let Some(target) = target {
extract_sub_dir(target, decoder, OsStr::from_bytes(path), verbose).await?;
} else {
match file.kind() {
pxar::EntryKind::File { .. } => {
tokio::io::copy(&mut file.contents().await?, &mut tokio::io::stdout()).await?;
}
_ => {
create_zip(
tokio::io::stdout(),
decoder,
OsStr::from_bytes(path),
verbose,
)
.await?;
}
}
}
Ok(())
}
fn main() {
let list_cmd_def = CliCommand::new(&API_METHOD_LIST)
.arg_param(&["snapshot", "path"])
.completion_cb("repository", complete_repository)
.completion_cb("snapshot", complete_group_or_snapshot);
let restore_cmd_def = CliCommand::new(&API_METHOD_EXTRACT)
.arg_param(&["snapshot", "path", "target"])
.completion_cb("repository", complete_repository)
.completion_cb("snapshot", complete_group_or_snapshot)
.completion_cb("target", tools::complete_file_name);
let status_cmd_def = CliCommand::new(&API_METHOD_STATUS);
let stop_cmd_def = CliCommand::new(&API_METHOD_STOP)
.arg_param(&["name"])
.completion_cb("name", complete_block_driver_ids);
let cmd_def = CliCommandMap::new()
.insert("list", list_cmd_def)
.insert("extract", restore_cmd_def)
.insert("status", status_cmd_def)
.insert("stop", stop_cmd_def);
let rpcenv = CliEnvironment::new();
run_cli_command(
cmd_def,
rpcenv,
Some(|future| proxmox_backup::tools::runtime::main(future)),
);
}

View File

@ -0,0 +1,124 @@
//! Daemon binary to run inside a micro-VM for secure single file restore of disk images
use anyhow::{bail, format_err, Error};
use log::error;
use lazy_static::lazy_static;
use std::os::unix::{
io::{FromRawFd, RawFd},
net,
};
use std::path::Path;
use std::sync::{Arc, Mutex};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use proxmox::api::RpcEnvironmentType;
use proxmox_backup::client::DEFAULT_VSOCK_PORT;
use proxmox_backup::server::{rest::*, ApiConfig};
mod proxmox_restore_daemon;
use proxmox_restore_daemon::*;
/// Maximum number of pending requests. If saturated, virtio-vsock returns ETIMEDOUT immediately.
/// We should never have more than a few requests in queue, so use a low number.
pub const MAX_PENDING: usize = 32;
/// Will be present in base initramfs
pub const VM_DETECT_FILE: &str = "/restore-vm-marker";
lazy_static! {
/// The current disk state. Used for accessing data on the attached snapshots.
pub static ref DISK_STATE: Arc<Mutex<DiskState>> = {
Arc::new(Mutex::new(DiskState::scan().unwrap()))
};
}
/// This is expected to be run by 'proxmox-file-restore' within a mini-VM
fn main() -> Result<(), Error> {
if !Path::new(VM_DETECT_FILE).exists() {
bail!(concat!(
"This binary is not supposed to be run manually. ",
"Please use 'proxmox-file-restore' instead."
));
}
// we don't have a real syslog (and no persistence), so use env_logger to print to a log file (via
// stdout to a serial terminal attached by QEMU)
env_logger::from_env(env_logger::Env::default().default_filter_or("info"))
.write_style(env_logger::WriteStyle::Never)
.init();
// scan all attached disks now, before starting the API
// this will panic and stop the VM if anything goes wrong
{
let _disk_state = DISK_STATE.lock().unwrap();
}
proxmox_backup::tools::runtime::main(run())
}
async fn run() -> Result<(), Error> {
watchdog_init();
let auth_config = Arc::new(
auth::ticket_auth().map_err(|err| format_err!("reading ticket file failed: {}", err))?,
);
let config = ApiConfig::new("", &ROUTER, RpcEnvironmentType::PUBLIC, auth_config)?;
let rest_server = RestServer::new(config);
let vsock_fd = get_vsock_fd()?;
let connections = accept_vsock_connections(vsock_fd);
let receiver_stream = ReceiverStream::new(connections);
let acceptor = hyper::server::accept::from_stream(receiver_stream);
hyper::Server::builder(acceptor).serve(rest_server).await?;
bail!("hyper server exited");
}
fn accept_vsock_connections(
vsock_fd: RawFd,
) -> mpsc::Receiver<Result<tokio::net::UnixStream, Error>> {
use nix::sys::socket::*;
let (sender, receiver) = mpsc::channel(MAX_PENDING);
tokio::spawn(async move {
loop {
let stream: Result<tokio::net::UnixStream, Error> = tokio::task::block_in_place(|| {
// we need to accept manually, as UnixListener aborts if socket type != AF_UNIX ...
let client_fd = accept(vsock_fd)?;
let stream = unsafe { net::UnixStream::from_raw_fd(client_fd) };
stream.set_nonblocking(true)?;
tokio::net::UnixStream::from_std(stream).map_err(|err| err.into())
});
match stream {
Ok(stream) => {
if sender.send(Ok(stream)).await.is_err() {
error!("connection accept channel was closed");
}
}
Err(err) => {
error!("error accepting vsock connetion: {}", err);
}
}
}
});
receiver
}
fn get_vsock_fd() -> Result<RawFd, Error> {
use nix::sys::socket::*;
let sock_fd = socket(
AddressFamily::Vsock,
SockType::Stream,
SockFlag::empty(),
None,
)?;
let sock_addr = VsockAddr::new(libc::VMADDR_CID_ANY, DEFAULT_VSOCK_PORT as u32);
bind(sock_fd, &SockAddr::Vsock(sock_addr))?;
listen(sock_fd, MAX_PENDING)?;
Ok(sock_fd)
}

View File

@ -27,10 +27,13 @@ use proxmox_backup::{
api2::{
self,
types::{
Authid,
DATASTORE_SCHEMA,
DATASTORE_MAP_LIST_SCHEMA,
DRIVE_NAME_SCHEMA,
MEDIA_LABEL_SCHEMA,
MEDIA_POOL_NAME_SCHEMA,
Userid,
},
},
config::{
@ -40,6 +43,7 @@ use proxmox_backup::{
media_pool::complete_pool_name,
},
tape::{
BlockReadError,
drive::{
open_drive,
lock_tape_device,
@ -112,8 +116,8 @@ pub fn extract_drive_name(
},
},
)]
/// Erase media
async fn erase_media(mut param: Value) -> Result<(), Error> {
/// Format media
async fn format_media(mut param: Value) -> Result<(), Error> {
let output_format = get_output_format(&param);
@ -123,7 +127,7 @@ async fn erase_media(mut param: Value) -> Result<(), Error> {
let mut client = connect_to_localhost()?;
let path = format!("api2/json/tape/drive/{}/erase-media", drive);
let path = format!("api2/json/tape/drive/{}/format-media", drive);
let result = client.post(&path, Some(param)).await?;
view_task_result(&mut client, result, &output_format).await?;
@ -548,7 +552,7 @@ fn move_to_eom(mut param: Value) -> Result<(), Error> {
let mut drive = open_drive(&config, &drive)?;
drive.move_to_eom()?;
drive.move_to_eom(false)?;
Ok(())
}
@ -584,12 +588,19 @@ fn debug_scan(mut param: Value) -> Result<(), Error> {
loop {
let file_number = drive.current_file_number()?;
match drive.read_next_file()? {
None => {
println!("EOD");
match drive.read_next_file() {
Err(BlockReadError::EndOfFile) => {
println!("filemark number {}", file_number);
continue;
},
Some(mut reader) => {
}
Err(BlockReadError::EndOfStream) => {
println!("got EOT");
return Ok(());
}
Err(BlockReadError::Error(err)) => {
return Err(err.into());
}
Ok(mut reader) => {
println!("got file number {}", file_number);
let header: Result<MediaContentHeader, _> = unsafe { reader.read_le_value() };
@ -611,8 +622,15 @@ fn debug_scan(mut param: Value) -> Result<(), Error> {
println!("unable to read content header - {}", err);
}
}
let bytes = reader.skip_to_end()?;
let bytes = reader.skip_data()?;
println!("skipped {}", HumanByte::from(bytes));
if let Ok(true) = reader.has_end_marker() {
if reader.is_incomplete()? {
println!("WARNING: file is incomplete");
}
} else {
println!("WARNING: file without end marker");
}
}
}
}
@ -738,8 +756,9 @@ async fn status(mut param: Value) -> Result<(), Error> {
let options = default_table_format_options()
.column(ColumnConfig::new("blocksize"))
.column(ColumnConfig::new("density"))
.column(ColumnConfig::new("status"))
.column(ColumnConfig::new("options"))
.column(ColumnConfig::new("compression"))
.column(ColumnConfig::new("buffer-mode"))
.column(ColumnConfig::new("write-protect"))
.column(ColumnConfig::new("alert-flags"))
.column(ColumnConfig::new("file-number"))
.column(ColumnConfig::new("block-number"))
@ -781,8 +800,8 @@ async fn clean_drive(mut param: Value) -> Result<(), Error> {
let mut client = connect_to_localhost()?;
let path = format!("api2/json/tape/drive/{}/clean-drive", drive);
let result = client.post(&path, Some(param)).await?;
let path = format!("api2/json/tape/drive/{}/clean", drive);
let result = client.put(&path, Some(param)).await?;
view_task_result(&mut client, result, &output_format).await?;
@ -853,7 +872,7 @@ async fn backup(mut param: Value) -> Result<(), Error> {
input: {
properties: {
store: {
schema: DATASTORE_SCHEMA,
schema: DATASTORE_MAP_LIST_SCHEMA,
},
drive: {
schema: DRIVE_NAME_SCHEMA,
@ -863,6 +882,14 @@ async fn backup(mut param: Value) -> Result<(), Error> {
description: "Media set UUID.",
type: String,
},
"notify-user": {
type: Userid,
optional: true,
},
owner: {
type: Authid,
optional: true,
},
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
@ -900,6 +927,11 @@ async fn restore(mut param: Value) -> Result<(), Error> {
type: bool,
optional: true,
},
scan: {
description: "Re-read the whole tape to reconstruct the catalog instead of restoring saved versions.",
type: bool,
optional: true,
},
verbose: {
description: "Verbose mode - log all found chunks.",
type: bool,
@ -976,8 +1008,8 @@ fn main() {
.completion_cb("drive", complete_drive_name)
)
.insert(
"erase",
CliCommand::new(&API_METHOD_ERASE_MEDIA)
"format",
CliCommand::new(&API_METHOD_FORMAT_MEDIA)
.completion_cb("drive", complete_drive_name)
)
.insert(

View File

@ -34,6 +34,8 @@ use crate::{
connect,
};
use crate::proxmox_client_tools::key_source::get_encryption_key_password;
#[api()]
#[derive(Copy, Clone, Serialize)]
/// Speed test result
@ -152,7 +154,7 @@ pub async fn benchmark(
let crypt_config = match keyfile {
None => None,
Some(path) => {
let (key, _, _) = load_and_decrypt_key(&path, &crate::key::get_encryption_key_password)?;
let (key, _, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
let crypt_config = CryptConfig::new(key)?;
Some(Arc::new(crypt_config))
}

View File

@ -17,7 +17,6 @@ use crate::{
extract_repository_from_value,
format_key_source,
record_repository,
key::get_encryption_key_password,
decrypt_key,
api_datastore_latest_snapshot,
complete_repository,
@ -38,6 +37,8 @@ use crate::{
Shell,
};
use crate::proxmox_client_tools::key_source::get_encryption_key_password;
#[api(
input: {
properties: {

View File

@ -20,114 +20,10 @@ use proxmox_backup::{
tools::paperkey::{generate_paper_key, PaperkeyFormat},
};
use crate::KeyWithSource;
pub const DEFAULT_ENCRYPTION_KEY_FILE_NAME: &str = "encryption-key.json";
pub const DEFAULT_MASTER_PUBKEY_FILE_NAME: &str = "master-public.pem";
pub fn find_default_master_pubkey() -> Result<Option<PathBuf>, Error> {
super::find_xdg_file(
DEFAULT_MASTER_PUBKEY_FILE_NAME,
"default master public key file",
)
}
pub fn place_default_master_pubkey() -> Result<PathBuf, Error> {
super::place_xdg_file(
DEFAULT_MASTER_PUBKEY_FILE_NAME,
"default master public key file",
)
}
pub fn find_default_encryption_key() -> Result<Option<PathBuf>, Error> {
super::find_xdg_file(
DEFAULT_ENCRYPTION_KEY_FILE_NAME,
"default encryption key file",
)
}
pub fn place_default_encryption_key() -> Result<PathBuf, Error> {
super::place_xdg_file(
DEFAULT_ENCRYPTION_KEY_FILE_NAME,
"default encryption key file",
)
}
#[cfg(not(test))]
pub(crate) fn read_optional_default_encryption_key() -> Result<Option<KeyWithSource>, Error> {
find_default_encryption_key()?
.map(|path| file_get_contents(path).map(KeyWithSource::from_default))
.transpose()
}
#[cfg(not(test))]
pub(crate) fn read_optional_default_master_pubkey() -> Result<Option<KeyWithSource>, Error> {
find_default_master_pubkey()?
.map(|path| file_get_contents(path).map(KeyWithSource::from_default))
.transpose()
}
#[cfg(test)]
static mut TEST_DEFAULT_ENCRYPTION_KEY: Result<Option<Vec<u8>>, Error> = Ok(None);
#[cfg(test)]
pub(crate) fn read_optional_default_encryption_key() -> Result<Option<KeyWithSource>, Error> {
// not safe when multiple concurrent test cases end up here!
unsafe {
match &TEST_DEFAULT_ENCRYPTION_KEY {
Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))),
Ok(None) => Ok(None),
Err(_) => bail!("test error"),
}
}
}
#[cfg(test)]
// not safe when multiple concurrent test cases end up here!
pub(crate) unsafe fn set_test_encryption_key(value: Result<Option<Vec<u8>>, Error>) {
TEST_DEFAULT_ENCRYPTION_KEY = value;
}
#[cfg(test)]
static mut TEST_DEFAULT_MASTER_PUBKEY: Result<Option<Vec<u8>>, Error> = Ok(None);
#[cfg(test)]
pub(crate) fn read_optional_default_master_pubkey() -> Result<Option<KeyWithSource>, Error> {
// not safe when multiple concurrent test cases end up here!
unsafe {
match &TEST_DEFAULT_MASTER_PUBKEY {
Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))),
Ok(None) => Ok(None),
Err(_) => bail!("test error"),
}
}
}
#[cfg(test)]
// not safe when multiple concurrent test cases end up here!
pub(crate) unsafe fn set_test_default_master_pubkey(value: Result<Option<Vec<u8>>, Error>) {
TEST_DEFAULT_MASTER_PUBKEY = value;
}
pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
// fixme: implement other input methods
use std::env::VarError::*;
match std::env::var("PBS_ENCRYPTION_PASSWORD") {
Ok(p) => return Ok(p.as_bytes().to_vec()),
Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
Err(NotPresent) => {
// Try another method
}
}
// If we're on a TTY, query the user for a password
if tty::stdin_isatty() {
return Ok(tty::read_password("Encryption Key Password: ")?);
}
bail!("no password input mechanism available");
}
use crate::proxmox_client_tools::key_source::{
find_default_encryption_key, find_default_master_pubkey, get_encryption_key_password,
place_default_encryption_key, place_default_master_pubkey,
};
#[api(
input: {

View File

@ -1,5 +1,3 @@
use anyhow::{Context, Error};
mod benchmark;
pub use benchmark::*;
mod mount;
@ -13,29 +11,3 @@ pub use snapshot::*;
pub mod key;
pub fn base_directories() -> Result<xdg::BaseDirectories, Error> {
xdg::BaseDirectories::with_prefix("proxmox-backup").map_err(Error::from)
}
/// Convenience helper for better error messages:
pub fn find_xdg_file(
file_name: impl AsRef<std::path::Path>,
description: &'static str,
) -> Result<Option<std::path::PathBuf>, Error> {
let file_name = file_name.as_ref();
base_directories()
.map(|base| base.find_config_file(file_name))
.with_context(|| format!("error searching for {}", description))
}
pub fn place_xdg_file(
file_name: impl AsRef<std::path::Path>,
description: &'static str,
) -> Result<std::path::PathBuf, Error> {
let file_name = file_name.as_ref();
base_directories()
.and_then(|base| {
base.place_config_file(file_name).map_err(Error::from)
})
.with_context(|| format!("failed to place {} in xdg home", description))
}

View File

@ -43,6 +43,8 @@ use crate::{
BufferedDynamicReadAt,
};
use crate::proxmox_client_tools::key_source::get_encryption_key_password;
#[sortable]
const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&mount),
@ -182,7 +184,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
None => None,
Some(path) => {
println!("Encryption key file: '{:?}'", path);
let (key, _, fingerprint) = load_and_decrypt_key(&path, &crate::key::get_encryption_key_password)?;
let (key, _, fingerprint) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
println!("Encryption key fingerprint: '{}'", fingerprint);
Some(Arc::new(CryptConfig::new(key)?))
}

View File

@ -35,6 +35,8 @@ use crate::{
record_repository,
};
use crate::proxmox_client_tools::key_source::get_encryption_key_password;
#[api(
input: {
properties: {
@ -239,7 +241,7 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
let crypt_config = match crypto.enc_key {
None => None,
Some(key) => {
let (key, _created, _) = decrypt_key(&key.key, &crate::key::get_encryption_key_password)?;
let (key, _created, _) = decrypt_key(&key.key, &get_encryption_key_password)?;
let crypt_config = CryptConfig::new(key)?;
Some(Arc::new(crypt_config))
}

View File

@ -0,0 +1,585 @@
use std::convert::TryFrom;
use std::path::PathBuf;
use std::os::unix::io::{FromRawFd, RawFd};
use std::io::Read;
use anyhow::{bail, format_err, Error};
use serde_json::Value;
use proxmox::api::schema::*;
use proxmox::sys::linux::tty;
use proxmox::tools::fs::file_get_contents;
use proxmox_backup::backup::CryptMode;
pub const DEFAULT_ENCRYPTION_KEY_FILE_NAME: &str = "encryption-key.json";
pub const DEFAULT_MASTER_PUBKEY_FILE_NAME: &str = "master-public.pem";
pub const KEYFILE_SCHEMA: Schema =
StringSchema::new("Path to encryption key. All data will be encrypted using this key.")
.schema();
pub const KEYFD_SCHEMA: Schema =
IntegerSchema::new("Pass an encryption key via an already opened file descriptor.")
.minimum(0)
.schema();
pub const MASTER_PUBKEY_FILE_SCHEMA: Schema = StringSchema::new(
"Path to master public key. The encryption key used for a backup will be encrypted using this key and appended to the backup.")
.schema();
pub const MASTER_PUBKEY_FD_SCHEMA: Schema =
IntegerSchema::new("Pass a master public key via an already opened file descriptor.")
.minimum(0)
.schema();
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum KeySource {
DefaultKey,
Fd,
Path(String),
}
pub fn format_key_source(source: &KeySource, key_type: &str) -> String {
match source {
KeySource::DefaultKey => format!("Using default {} key..", key_type),
KeySource::Fd => format!("Using {} key from file descriptor..", key_type),
KeySource::Path(path) => format!("Using {} key from '{}'..", key_type, path),
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct KeyWithSource {
pub source: KeySource,
pub key: Vec<u8>,
}
impl KeyWithSource {
pub fn from_fd(key: Vec<u8>) -> Self {
Self {
source: KeySource::Fd,
key,
}
}
pub fn from_default(key: Vec<u8>) -> Self {
Self {
source: KeySource::DefaultKey,
key,
}
}
pub fn from_path(path: String, key: Vec<u8>) -> Self {
Self {
source: KeySource::Path(path),
key,
}
}
}
#[derive(Debug, Eq, PartialEq)]
pub struct CryptoParams {
pub mode: CryptMode,
pub enc_key: Option<KeyWithSource>,
// FIXME switch to openssl::rsa::Rsa<openssl::pkey::Public> once that is Eq?
pub master_pubkey: Option<KeyWithSource>,
}
pub fn crypto_parameters(param: &Value) -> Result<CryptoParams, Error> {
do_crypto_parameters(param, false)
}
pub fn crypto_parameters_keep_fd(param: &Value) -> Result<CryptoParams, Error> {
do_crypto_parameters(param, true)
}
fn do_crypto_parameters(param: &Value, keep_keyfd_open: bool) -> Result<CryptoParams, Error> {
let keyfile = match param.get("keyfile") {
Some(Value::String(keyfile)) => Some(keyfile),
Some(_) => bail!("bad --keyfile parameter type"),
None => None,
};
let key_fd = match param.get("keyfd") {
Some(Value::Number(key_fd)) => Some(
RawFd::try_from(key_fd
.as_i64()
.ok_or_else(|| format_err!("bad key fd: {:?}", key_fd))?
)
.map_err(|err| format_err!("bad key fd: {:?}: {}", key_fd, err))?
),
Some(_) => bail!("bad --keyfd parameter type"),
None => None,
};
let master_pubkey_file = match param.get("master-pubkey-file") {
Some(Value::String(keyfile)) => Some(keyfile),
Some(_) => bail!("bad --master-pubkey-file parameter type"),
None => None,
};
let master_pubkey_fd = match param.get("master-pubkey-fd") {
Some(Value::Number(key_fd)) => Some(
RawFd::try_from(key_fd
.as_i64()
.ok_or_else(|| format_err!("bad master public key fd: {:?}", key_fd))?
)
.map_err(|err| format_err!("bad public master key fd: {:?}: {}", key_fd, err))?
),
Some(_) => bail!("bad --master-pubkey-fd parameter type"),
None => None,
};
let mode: Option<CryptMode> = match param.get("crypt-mode") {
Some(mode) => Some(serde_json::from_value(mode.clone())?),
None => None,
};
let key = match (keyfile, key_fd) {
(None, None) => None,
(Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
(Some(keyfile), None) => Some(KeyWithSource::from_path(
keyfile.clone(),
file_get_contents(keyfile)?,
)),
(None, Some(fd)) => {
let mut input = unsafe { std::fs::File::from_raw_fd(fd) };
let mut data = Vec::new();
let _len: usize = input.read_to_end(&mut data).map_err(|err| {
format_err!("error reading encryption key from fd {}: {}", fd, err)
})?;
if keep_keyfd_open {
// don't close fd if requested, and try to reset seek position
std::mem::forget(input);
unsafe { libc::lseek(fd, 0, libc::SEEK_SET); }
}
Some(KeyWithSource::from_fd(data))
}
};
let master_pubkey = match (master_pubkey_file, master_pubkey_fd) {
(None, None) => None,
(Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
(Some(keyfile), None) => Some(KeyWithSource::from_path(
keyfile.clone(),
file_get_contents(keyfile)?,
)),
(None, Some(fd)) => {
let input = unsafe { std::fs::File::from_raw_fd(fd) };
let mut data = Vec::new();
let _len: usize = { input }
.read_to_end(&mut data)
.map_err(|err| format_err!("error reading master key from fd {}: {}", fd, err))?;
Some(KeyWithSource::from_fd(data))
}
};
let res = match mode {
// no crypt mode, enable encryption if keys are available
None => match (key, master_pubkey) {
// only default keys if available
(None, None) => match read_optional_default_encryption_key()? {
None => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },
enc_key => {
let master_pubkey = read_optional_default_master_pubkey()?;
CryptoParams {
mode: CryptMode::Encrypt,
enc_key,
master_pubkey,
}
},
},
// explicit master key, default enc key needed
(None, master_pubkey) => match read_optional_default_encryption_key()? {
None => bail!("--master-pubkey-file/--master-pubkey-fd specified, but no key available"),
enc_key => {
CryptoParams {
mode: CryptMode::Encrypt,
enc_key,
master_pubkey,
}
},
},
// explicit keyfile, maybe default master key
(enc_key, None) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey: read_optional_default_master_pubkey()? },
// explicit keyfile and master key
(enc_key, master_pubkey) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey },
},
// explicitly disabled encryption
Some(CryptMode::None) => match (key, master_pubkey) {
// no keys => OK, no encryption
(None, None) => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },
// --keyfile and --crypt-mode=none
(Some(_), _) => bail!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive"),
// --master-pubkey-file and --crypt-mode=none
(_, Some(_)) => bail!("--master-pubkey-file/--master-pubkey-fd and --crypt-mode=none are mutually exclusive"),
},
// explicitly enabled encryption
Some(mode) => match (key, master_pubkey) {
// no key, maybe master key
(None, master_pubkey) => match read_optional_default_encryption_key()? {
None => bail!("--crypt-mode without --keyfile and no default key file available"),
enc_key => {
eprintln!("Encrypting with default encryption key!");
let master_pubkey = match master_pubkey {
None => read_optional_default_master_pubkey()?,
master_pubkey => master_pubkey,
};
CryptoParams {
mode,
enc_key,
master_pubkey,
}
},
},
// --keyfile and --crypt-mode other than none
(enc_key, master_pubkey) => {
let master_pubkey = match master_pubkey {
None => read_optional_default_master_pubkey()?,
master_pubkey => master_pubkey,
};
CryptoParams { mode, enc_key, master_pubkey }
},
},
};
Ok(res)
}
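Summarized, the precedence implemented above is (a sketch; the test at the bottom of this file is the authoritative matrix):
// crypt-mode unset:
//   explicit or default encryption key available => CryptMode::Encrypt (master key: explicit, else default)
//   only a master key given                      => error, unless a default encryption key exists
//   no keys at all                               => CryptMode::None
// crypt-mode=none:
//   any explicit key or master key given         => error
//   otherwise                                    => CryptMode::None (default keys are ignored)
// crypt-mode=encrypt or sign-only:
//   explicit or default encryption key available => the requested mode
//   no encryption key available                  => error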
pub fn find_default_master_pubkey() -> Result<Option<PathBuf>, Error> {
super::find_xdg_file(
DEFAULT_MASTER_PUBKEY_FILE_NAME,
"default master public key file",
)
}
pub fn place_default_master_pubkey() -> Result<PathBuf, Error> {
super::place_xdg_file(
DEFAULT_MASTER_PUBKEY_FILE_NAME,
"default master public key file",
)
}
pub fn find_default_encryption_key() -> Result<Option<PathBuf>, Error> {
super::find_xdg_file(
DEFAULT_ENCRYPTION_KEY_FILE_NAME,
"default encryption key file",
)
}
pub fn place_default_encryption_key() -> Result<PathBuf, Error> {
super::place_xdg_file(
DEFAULT_ENCRYPTION_KEY_FILE_NAME,
"default encryption key file",
)
}
#[cfg(not(test))]
pub(crate) fn read_optional_default_encryption_key() -> Result<Option<KeyWithSource>, Error> {
find_default_encryption_key()?
.map(|path| file_get_contents(path).map(KeyWithSource::from_default))
.transpose()
}
#[cfg(not(test))]
pub(crate) fn read_optional_default_master_pubkey() -> Result<Option<KeyWithSource>, Error> {
find_default_master_pubkey()?
.map(|path| file_get_contents(path).map(KeyWithSource::from_default))
.transpose()
}
#[cfg(test)]
static mut TEST_DEFAULT_ENCRYPTION_KEY: Result<Option<Vec<u8>>, Error> = Ok(None);
#[cfg(test)]
pub(crate) fn read_optional_default_encryption_key() -> Result<Option<KeyWithSource>, Error> {
// not safe when multiple concurrent test cases end up here!
unsafe {
match &TEST_DEFAULT_ENCRYPTION_KEY {
Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))),
Ok(None) => Ok(None),
Err(_) => bail!("test error"),
}
}
}
#[cfg(test)]
// not safe when multiple concurrent test cases end up here!
pub(crate) unsafe fn set_test_encryption_key(value: Result<Option<Vec<u8>>, Error>) {
TEST_DEFAULT_ENCRYPTION_KEY = value;
}
#[cfg(test)]
static mut TEST_DEFAULT_MASTER_PUBKEY: Result<Option<Vec<u8>>, Error> = Ok(None);
#[cfg(test)]
pub(crate) fn read_optional_default_master_pubkey() -> Result<Option<KeyWithSource>, Error> {
// not safe when multiple concurrent test cases end up here!
unsafe {
match &TEST_DEFAULT_MASTER_PUBKEY {
Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))),
Ok(None) => Ok(None),
Err(_) => bail!("test error"),
}
}
}
#[cfg(test)]
// not safe when multiple concurrent test cases end up here!
pub(crate) unsafe fn set_test_default_master_pubkey(value: Result<Option<Vec<u8>>, Error>) {
TEST_DEFAULT_MASTER_PUBKEY = value;
}
pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
// fixme: implement other input methods
use std::env::VarError::*;
match std::env::var("PBS_ENCRYPTION_PASSWORD") {
Ok(p) => return Ok(p.as_bytes().to_vec()),
Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
Err(NotPresent) => {
// Try another method
}
}
// If we're on a TTY, query the user for a password
if tty::stdin_isatty() {
return Ok(tty::read_password("Encryption Key Password: ")?);
}
bail!("no password input mechanism available");
}
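A minimal sketch of the non-interactive path (environment variable name as used above, value hypothetical):
// sketch: supply the password via the environment instead of a TTY prompt
std::env::set_var("PBS_ENCRYPTION_PASSWORD", "hunter2");
assert_eq!(get_encryption_key_password()?, b"hunter2".to_vec());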
#[test]
// WARNING: there must only be one test for crypto_parameters as the default key handling is not
// safe w.r.t. concurrency
fn test_crypto_parameters_handling() -> Result<(), Error> {
use serde_json::json;
use proxmox::tools::fs::{replace_file, CreateOptions};
let some_key = vec![1;1];
let default_key = vec![2;1];
let some_master_key = vec![3;1];
let default_master_key = vec![4;1];
let keypath = "./target/testout/keyfile.test";
let master_keypath = "./target/testout/masterkeyfile.test";
let invalid_keypath = "./target/testout/invalid_keyfile.test";
let no_key_res = CryptoParams {
enc_key: None,
master_pubkey: None,
mode: CryptMode::None,
};
let some_key_res = CryptoParams {
enc_key: Some(KeyWithSource::from_path(
keypath.to_string(),
some_key.clone(),
)),
master_pubkey: None,
mode: CryptMode::Encrypt,
};
let some_key_some_master_res = CryptoParams {
enc_key: Some(KeyWithSource::from_path(
keypath.to_string(),
some_key.clone(),
)),
master_pubkey: Some(KeyWithSource::from_path(
master_keypath.to_string(),
some_master_key.clone(),
)),
mode: CryptMode::Encrypt,
};
let some_key_default_master_res = CryptoParams {
enc_key: Some(KeyWithSource::from_path(
keypath.to_string(),
some_key.clone(),
)),
master_pubkey: Some(KeyWithSource::from_default(default_master_key.clone())),
mode: CryptMode::Encrypt,
};
let some_key_sign_res = CryptoParams {
enc_key: Some(KeyWithSource::from_path(
keypath.to_string(),
some_key.clone(),
)),
master_pubkey: None,
mode: CryptMode::SignOnly,
};
let default_key_res = CryptoParams {
enc_key: Some(KeyWithSource::from_default(default_key.clone())),
master_pubkey: None,
mode: CryptMode::Encrypt,
};
let default_key_sign_res = CryptoParams {
enc_key: Some(KeyWithSource::from_default(default_key.clone())),
master_pubkey: None,
mode: CryptMode::SignOnly,
};
replace_file(&keypath, &some_key, CreateOptions::default())?;
replace_file(&master_keypath, &some_master_key, CreateOptions::default())?;
// no params, no default key == no key
let res = crypto_parameters(&json!({}));
assert_eq!(res.unwrap(), no_key_res);
// keyfile param == key from keyfile
let res = crypto_parameters(&json!({"keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// crypt mode none == no key
let res = crypto_parameters(&json!({"crypt-mode": "none"}));
assert_eq!(res.unwrap(), no_key_res);
// crypt mode encrypt/sign-only, no keyfile, no default key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
// crypt mode none with explicit key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
// crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_sign_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// invalid keyfile parameter always errors
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
// now set a default key
unsafe { set_test_encryption_key(Ok(Some(default_key.clone()))); }
// and repeat
// no params but default key == default key
let res = crypto_parameters(&json!({}));
assert_eq!(res.unwrap(), default_key_res);
// keyfile param == key from keyfile
let res = crypto_parameters(&json!({"keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// crypt mode none == no key
let res = crypto_parameters(&json!({"crypt-mode": "none"}));
assert_eq!(res.unwrap(), no_key_res);
// crypt mode encrypt/sign-only, no keyfile, default key == default key with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "sign-only"}));
assert_eq!(res.unwrap(), default_key_sign_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt"}));
assert_eq!(res.unwrap(), default_key_res);
// crypt mode none with explicit key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
// crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_sign_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// invalid keyfile parameter always errors
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
// now make default key retrieval error
unsafe { set_test_encryption_key(Err(format_err!("test error"))); }
// and repeat
// no params, default key retrieval errors == Error
assert!(crypto_parameters(&json!({})).is_err());
// keyfile param == key from keyfile
let res = crypto_parameters(&json!({"keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// crypt mode none == no key
let res = crypto_parameters(&json!({"crypt-mode": "none"}));
assert_eq!(res.unwrap(), no_key_res);
// crypt mode encrypt/sign-only, no keyfile, default key error == Error
assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
// crypt mode none with explicit key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
// crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_sign_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// invalid keyfile parameter always errors
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
// now remove default key again
unsafe { set_test_encryption_key(Ok(None)); }
// set a default master key
unsafe { set_test_default_master_pubkey(Ok(Some(default_master_key.clone()))); }
// and use an explicit master key
assert!(crypto_parameters(&json!({"master-pubkey-file": master_keypath})).is_err());
// just a default == no key
let res = crypto_parameters(&json!({}));
assert_eq!(res.unwrap(), no_key_res);
// keyfile param == key from keyfile
let res = crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": master_keypath}));
assert_eq!(res.unwrap(), some_key_some_master_res);
// same with fallback to default master key
let res = crypto_parameters(&json!({"keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_default_master_res);
// crypt mode none == error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "master-pubkey-file": master_keypath})).is_err());
// with just default master key == no key
let res = crypto_parameters(&json!({"crypt-mode": "none"}));
assert_eq!(res.unwrap(), no_key_res);
// crypt mode encrypt without enc key == error
assert!(crypto_parameters(&json!({"crypt-mode": "encrypt", "master-pubkey-file": master_keypath})).is_err());
assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
// crypt mode none with explicit key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath, "master-pubkey-file": master_keypath})).is_err());
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
// crypt mode encrypt with keyfile == key from keyfile with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath, "master-pubkey-file": master_keypath}));
assert_eq!(res.unwrap(), some_key_some_master_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_default_master_res);
// invalid master keyfile parameter always errors when a key is passed, even with a valid
// default master key
assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath})).is_err());
assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "none"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "encrypt"})).is_err());
Ok(())
}

View File

@ -1,8 +1,7 @@
//! Shared tools useful for common CLI clients.
use std::collections::HashMap;
use anyhow::{bail, format_err, Error};
use anyhow::{bail, format_err, Context, Error};
use serde_json::{json, Value};
use xdg::BaseDirectories;
@ -14,9 +13,12 @@ use proxmox::{
use proxmox_backup::api2::access::user::UserWithTokens;
use proxmox_backup::api2::types::*;
use proxmox_backup::backup::BackupDir;
use proxmox_backup::buildcfg;
use proxmox_backup::client::*;
use proxmox_backup::tools;
pub mod key_source;
const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
@ -25,24 +27,6 @@ pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
.max_length(256)
.schema();
pub const KEYFILE_SCHEMA: Schema =
StringSchema::new("Path to encryption key. All data will be encrypted using this key.")
.schema();
pub const KEYFD_SCHEMA: Schema =
IntegerSchema::new("Pass an encryption key via an already opened file descriptor.")
.minimum(0)
.schema();
pub const MASTER_PUBKEY_FILE_SCHEMA: Schema = StringSchema::new(
"Path to master public key. The encryption key used for a backup will be encrypted using this key and appended to the backup.")
.schema();
pub const MASTER_PUBKEY_FD_SCHEMA: Schema =
IntegerSchema::new("Pass a master public key via an already opened file descriptor.")
.minimum(0)
.schema();
pub const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
.minimum(64)
.maximum(4096)
@ -364,3 +348,40 @@ pub fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec
result
}
pub fn base_directories() -> Result<xdg::BaseDirectories, Error> {
xdg::BaseDirectories::with_prefix("proxmox-backup").map_err(Error::from)
}
/// Convenience helper for better error messages:
pub fn find_xdg_file(
file_name: impl AsRef<std::path::Path>,
description: &'static str,
) -> Result<Option<std::path::PathBuf>, Error> {
let file_name = file_name.as_ref();
base_directories()
.map(|base| base.find_config_file(file_name))
.with_context(|| format!("error searching for {}", description))
}
pub fn place_xdg_file(
file_name: impl AsRef<std::path::Path>,
description: &'static str,
) -> Result<std::path::PathBuf, Error> {
let file_name = file_name.as_ref();
base_directories()
.and_then(|base| base.place_config_file(file_name).map_err(Error::from))
.with_context(|| format!("failed to place {} in xdg home", description))
}
/// Returns a runtime dir owned by the current user.
/// Note that XDG_RUNTIME_DIR is not always available, especially for non-login users like
/// "www-data", so we use a custom one in /run/proxmox-backup/<uid> instead.
pub fn get_user_run_dir() -> Result<std::path::PathBuf, Error> {
let uid = nix::unistd::Uid::current();
let mut path: std::path::PathBuf = buildcfg::PROXMOX_BACKUP_RUN_DIR.into();
path.push(uid.to_string());
tools::create_run_dir()?;
std::fs::create_dir_all(&path)?;
Ok(path)
}

View File

@ -0,0 +1,207 @@
//! Abstraction layer over different methods of accessing a block backup
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::collections::HashMap;
use std::future::Future;
use std::hash::BuildHasher;
use std::pin::Pin;
use proxmox_backup::backup::{BackupDir, BackupManifest};
use proxmox_backup::api2::types::ArchiveEntry;
use proxmox_backup::client::BackupRepository;
use proxmox::api::{api, cli::*};
use super::block_driver_qemu::QemuBlockDriver;
/// Contains details about a snapshot that is to be accessed by block file restore
pub struct SnapRestoreDetails {
pub repo: BackupRepository,
pub snapshot: BackupDir,
pub manifest: BackupManifest,
pub keyfile: Option<String>,
}
/// Return value of a BlockRestoreDriver.status() call; 'id' must be valid for .stop(id)
pub struct DriverStatus {
pub id: String,
pub data: Value,
}
pub type Async<R> = Pin<Box<dyn Future<Output = R> + Send>>;
/// An abstract implementation for retrieving data out of a block file backup
pub trait BlockRestoreDriver {
/// List ArchiveEntry items for the given image file and path
fn data_list(
&self,
details: SnapRestoreDetails,
img_file: String,
path: Vec<u8>,
) -> Async<Result<Vec<ArchiveEntry>, Error>>;
/// pxar=true:
/// Attempt to create a pxar archive of the given file path and return a reader instance for it
/// pxar=false:
/// Attempt to read the file or folder at the given path and return the file content or a zip
/// file as a stream
fn data_extract(
&self,
details: SnapRestoreDetails,
img_file: String,
path: Vec<u8>,
pxar: bool,
) -> Async<Result<Box<dyn tokio::io::AsyncRead + Unpin + Send>, Error>>;
/// Return status of all running/mapped images; the result value is (id, extra data), where id must
/// match the ones returned from list()
fn status(&self) -> Async<Result<Vec<DriverStatus>, Error>>;
/// Stop/Close a running restore method
fn stop(&self, id: String) -> Async<Result<(), Error>>;
/// Returned ids must be prefixed with the driver type so that they cannot collide between drivers;
/// the returned values must be passable to stop()
fn list(&self) -> Vec<String>;
}
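For illustration only, a hypothetical no-op driver shows the shapes the trait expects (NullDriver is not part of the codebase; tokio::io::empty() stands in for real image data):
struct NullDriver;
impl BlockRestoreDriver for NullDriver {
    fn data_list(
        &self,
        _details: SnapRestoreDetails,
        _img_file: String,
        _path: Vec<u8>,
    ) -> Async<Result<Vec<ArchiveEntry>, Error>> {
        // nothing is mapped, so there is nothing to list
        Box::pin(async { Ok(Vec::new()) })
    }
    fn data_extract(
        &self,
        _details: SnapRestoreDetails,
        _img_file: String,
        _path: Vec<u8>,
        _pxar: bool,
    ) -> Async<Result<Box<dyn tokio::io::AsyncRead + Unpin + Send>, Error>> {
        // hand back an empty reader instead of file/zip/pxar content
        Box::pin(async {
            Ok(Box::new(tokio::io::empty()) as Box<dyn tokio::io::AsyncRead + Unpin + Send>)
        })
    }
    fn status(&self) -> Async<Result<Vec<DriverStatus>, Error>> {
        Box::pin(async { Ok(Vec::new()) })
    }
    fn stop(&self, id: String) -> Async<Result<(), Error>> {
        Box::pin(async move { bail!("no mapping with id '{}' found", id) })
    }
    fn list(&self) -> Vec<String> {
        Vec::new()
    }
}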
#[api()]
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, Copy)]
pub enum BlockDriverType {
/// Uses a small QEMU/KVM virtual machine to map images securely. Requires PVE-patched QEMU.
Qemu,
}
impl BlockDriverType {
fn resolve(&self) -> impl BlockRestoreDriver {
match self {
BlockDriverType::Qemu => QemuBlockDriver {},
}
}
}
const DEFAULT_DRIVER: BlockDriverType = BlockDriverType::Qemu;
const ALL_DRIVERS: &[BlockDriverType] = &[BlockDriverType::Qemu];
pub async fn data_list(
driver: Option<BlockDriverType>,
details: SnapRestoreDetails,
img_file: String,
path: Vec<u8>,
) -> Result<Vec<ArchiveEntry>, Error> {
let driver = driver.unwrap_or(DEFAULT_DRIVER).resolve();
driver.data_list(details, img_file, path).await
}
pub async fn data_extract(
driver: Option<BlockDriverType>,
details: SnapRestoreDetails,
img_file: String,
path: Vec<u8>,
pxar: bool,
) -> Result<Box<dyn tokio::io::AsyncRead + Send + Unpin>, Error> {
let driver = driver.unwrap_or(DEFAULT_DRIVER).resolve();
driver.data_extract(details, img_file, path, pxar).await
}
#[api(
input: {
properties: {
"driver": {
type: BlockDriverType,
optional: true,
},
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
},
},
)]
/// Retrieve status information about currently running/mapped restore images
pub async fn status(driver: Option<BlockDriverType>, param: Value) -> Result<(), Error> {
let output_format = get_output_format(&param);
let text = output_format == "text";
let mut ret = json!({});
for dt in ALL_DRIVERS {
if driver.is_some() && &driver.unwrap() != dt {
continue;
}
let drv_name = format!("{:?}", dt);
let drv = dt.resolve();
match drv.status().await {
Ok(data) if data.is_empty() => {
if text {
println!("{}: no mappings", drv_name);
} else {
ret[drv_name] = json!({});
}
}
Ok(data) => {
if text {
println!("{}:", &drv_name);
}
ret[&drv_name]["ids"] = json!({});
for status in data {
if text {
println!("{} \t({})", status.id, status.data);
} else {
ret[&drv_name]["ids"][status.id] = status.data;
}
}
}
Err(err) => {
if text {
eprintln!("error getting status from driver '{}' - {}", drv_name, err);
} else {
ret[drv_name] = json!({ "error": format!("{}", err) });
}
}
}
}
if !text {
format_and_print_result(&ret, &output_format);
}
Ok(())
}
#[api(
input: {
properties: {
"name": {
type: String,
description: "The name of the VM to stop.",
},
},
},
)]
/// Immediately stop/unmap a given image. Not typically necessary, as VMs will stop themselves
/// after a timer anyway.
pub async fn stop(name: String) -> Result<(), Error> {
for drv in ALL_DRIVERS.iter().map(BlockDriverType::resolve) {
if drv.list().contains(&name) {
return drv.stop(name).await;
}
}
bail!("no mapping with name '{}' found", name);
}
/// Autocompletion handler for block mappings
pub fn complete_block_driver_ids<S: BuildHasher>(
_arg: &str,
_param: &HashMap<String, String, S>,
) -> Vec<String> {
ALL_DRIVERS
.iter()
.map(BlockDriverType::resolve)
.map(|d| d.list())
.flatten()
.collect()
}

View File

@ -0,0 +1,330 @@
//! Block file access via a small QEMU restore VM using the PBS block driver in QEMU
use anyhow::{bail, Error};
use futures::FutureExt;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::collections::HashMap;
use std::fs::{File, OpenOptions};
use std::io::{prelude::*, SeekFrom};
use proxmox::tools::fs::lock_file;
use proxmox_backup::api2::types::ArchiveEntry;
use proxmox_backup::backup::BackupDir;
use proxmox_backup::client::*;
use proxmox_backup::tools;
use super::block_driver::*;
use crate::proxmox_client_tools::get_user_run_dir;
const RESTORE_VM_MAP: &str = "restore-vm-map.json";
pub struct QemuBlockDriver {}
#[derive(Clone, Hash, Serialize, Deserialize)]
struct VMState {
pid: i32,
cid: i32,
ticket: String,
}
struct VMStateMap {
map: HashMap<String, VMState>,
file: File,
}
impl VMStateMap {
fn open_file_raw(write: bool) -> Result<File, Error> {
use std::os::unix::fs::OpenOptionsExt;
let mut path = get_user_run_dir()?;
path.push(RESTORE_VM_MAP);
OpenOptions::new()
.read(true)
.write(write)
.create(write)
.mode(0o600)
.open(path)
.map_err(Error::from)
}
/// Acquire a lock on the state map and retrieve a deserialized version
fn load() -> Result<Self, Error> {
let mut file = Self::open_file_raw(true)?;
lock_file(&mut file, true, Some(std::time::Duration::from_secs(5)))?;
let map = serde_json::from_reader(&file).unwrap_or_default();
Ok(Self { map, file })
}
/// Load a read-only copy of the current VM map. Only use this for informational purposes, like
/// shell auto-completion; for anything requiring consistency, use load()!
fn load_read_only() -> Result<HashMap<String, VMState>, Error> {
let file = Self::open_file_raw(false)?;
Ok(serde_json::from_reader(&file).unwrap_or_default())
}
/// Write back a potentially modified state map, consuming the held lock
fn write(mut self) -> Result<(), Error> {
self.file.seek(SeekFrom::Start(0))?;
self.file.set_len(0)?;
serde_json::to_writer(self.file, &self.map)?;
// drop ourselves including file lock
Ok(())
}
/// Return the map, but drop the lock immediately
fn read_only(self) -> HashMap<String, VMState> {
self.map
}
}
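The intended usage is a short lock-modify-write cycle; a minimal sketch (the retain predicate is hypothetical):
let mut state = VMStateMap::load()?;  // opens the map file and takes the lock
state.map.retain(|_, vm| vm.pid > 0); // hypothetical modification
state.write()?;                       // persists the map and drops the lock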
fn make_name(repo: &BackupRepository, snap: &BackupDir) -> String {
let full = format!("qemu_{}/{}", repo, snap);
tools::systemd::escape_unit(&full, false)
}
/// Remove non-responsive VMs from the given map; returns 'true' if the map was modified
async fn cleanup_map(map: &mut HashMap<String, VMState>) -> bool {
let mut to_remove = Vec::new();
for (name, state) in map.iter() {
let client = VsockClient::new(state.cid, DEFAULT_VSOCK_PORT, Some(state.ticket.clone()));
let res = client
.get("api2/json/status", Some(json!({"keep-timeout": true})))
.await;
if res.is_err() {
// VM is not reachable, remove from map and inform user
to_remove.push(name.clone());
eprintln!(
"VM '{}' (pid: {}, cid: {}) was not reachable, removing from map",
name, state.pid, state.cid
);
}
}
for tr in &to_remove {
map.remove(tr);
}
!to_remove.is_empty()
}
fn new_ticket() -> String {
proxmox::tools::Uuid::generate().to_string()
}
async fn ensure_running(details: &SnapRestoreDetails) -> Result<VsockClient, Error> {
let name = make_name(&details.repo, &details.snapshot);
let mut state = VMStateMap::load()?;
cleanup_map(&mut state.map).await;
let new_cid;
let vms = match state.map.get(&name) {
Some(vm) => {
let client = VsockClient::new(vm.cid, DEFAULT_VSOCK_PORT, Some(vm.ticket.clone()));
let res = client.get("api2/json/status", None).await;
match res {
Ok(_) => {
// VM is running and we just reset its timeout, nothing to do
return Ok(client);
}
Err(err) => {
eprintln!("stale VM detected, restarting ({})", err);
// VM is dead, restart
let vms = start_vm(vm.cid, details).await?;
new_cid = vms.cid;
state.map.insert(name, vms.clone());
vms
}
}
}
None => {
let mut cid = state
.map
.iter()
.map(|v| v.1.cid)
.max()
.unwrap_or(0)
.wrapping_add(1);
// offset the CID by the user ID to avoid unnecessary retries
let running_uid = nix::unistd::Uid::current();
cid = cid.wrapping_add(running_uid.as_raw() as i32);
// some low CIDs have special meaning, start at 10 to avoid them
cid = cid.max(10);
let vms = start_vm(cid, details).await?;
new_cid = vms.cid;
state.map.insert(name, vms.clone());
vms
}
};
state.write()?;
Ok(VsockClient::new(
new_cid,
DEFAULT_VSOCK_PORT,
Some(vms.ticket.clone()),
))
}
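// A minimal, self-contained sketch of the CID selection heuristic used in
// ensure_running() above (the helper name `pick_guest_cid` is an assumption,
// not part of this changeset): take the highest CID currently in the map plus
// one, offset by the caller's UID to reduce collisions between users, and
// clamp to at least 10 since the lowest CIDs carry special vsock meanings.
fn pick_guest_cid(used_cids: &[i32], uid: u32) -> i32 {
    let next = used_cids.iter().copied().max().unwrap_or(0).wrapping_add(1);
    // offsetting by the UID means two users rarely race for the same CID
    next.wrapping_add(uid as i32).max(10)
}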
async fn start_vm(cid_request: i32, details: &SnapRestoreDetails) -> Result<VMState, Error> {
let ticket = new_ticket();
let files = details
.manifest
.files()
.iter()
.map(|file| file.filename.clone())
.filter(|name| name.ends_with(".img.fidx"));
let (pid, cid) =
super::qemu_helper::start_vm((cid_request.abs() & 0xFFFF) as u16, details, files, &ticket)
.await?;
Ok(VMState { pid, cid, ticket })
}
impl BlockRestoreDriver for QemuBlockDriver {
fn data_list(
&self,
details: SnapRestoreDetails,
img_file: String,
mut path: Vec<u8>,
) -> Async<Result<Vec<ArchiveEntry>, Error>> {
async move {
let client = ensure_running(&details).await?;
if !path.is_empty() && path[0] != b'/' {
path.insert(0, b'/');
}
let path = base64::encode(img_file.bytes().chain(path).collect::<Vec<u8>>());
let mut result = client
.get("api2/json/list", Some(json!({ "path": path })))
.await?;
serde_json::from_value(result["data"].take()).map_err(|err| err.into())
}
.boxed()
}
fn data_extract(
&self,
details: SnapRestoreDetails,
img_file: String,
mut path: Vec<u8>,
pxar: bool,
) -> Async<Result<Box<dyn tokio::io::AsyncRead + Unpin + Send>, Error>> {
async move {
let client = ensure_running(&details).await?;
if !path.is_empty() && path[0] != b'/' {
path.insert(0, b'/');
}
let path = base64::encode(img_file.bytes().chain(path).collect::<Vec<u8>>());
let (mut tx, rx) = tokio::io::duplex(1024 * 4096);
tokio::spawn(async move {
if let Err(err) = client
.download(
"api2/json/extract",
Some(json!({ "path": path, "pxar": pxar })),
&mut tx,
)
.await
{
eprintln!("reading file extraction stream failed - {}", err);
}
});
Ok(Box::new(rx) as Box<dyn tokio::io::AsyncRead + Unpin + Send>)
}
.boxed()
}
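    // data_extract() above uses a tokio duplex pipe to turn a push-style
    // download into an AsyncRead for the caller. A minimal standalone sketch
    // of the same pattern (names and payload illustrative only):
    fn byte_stream() -> Box<dyn tokio::io::AsyncRead + Unpin + Send> {
        let (mut tx, rx) = tokio::io::duplex(64 * 1024);
        tokio::spawn(async move {
            use tokio::io::AsyncWriteExt;
            // a writer task fills one end; the returned half yields the bytes
            if let Err(err) = tx.write_all(b"example payload").await {
                eprintln!("writer task failed - {}", err);
            }
        });
        Box::new(rx)
    }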
fn status(&self) -> Async<Result<Vec<DriverStatus>, Error>> {
async move {
let mut state_map = VMStateMap::load()?;
let modified = cleanup_map(&mut state_map.map).await;
let map = if modified {
let m = state_map.map.clone();
state_map.write()?;
m
} else {
state_map.read_only()
};
let mut result = Vec::new();
for (n, s) in map.iter() {
let client = VsockClient::new(s.cid, DEFAULT_VSOCK_PORT, Some(s.ticket.clone()));
let resp = client
.get("api2/json/status", Some(json!({"keep-timeout": true})))
.await;
let name = tools::systemd::unescape_unit(n)
.unwrap_or_else(|_| "<invalid name>".to_owned());
let mut extra = json!({"pid": s.pid, "cid": s.cid});
match resp {
Ok(status) => match status["data"].as_object() {
Some(map) => {
for (k, v) in map.iter() {
extra[k] = v.clone();
}
}
None => {
let err = format!(
"invalid JSON received from /status call: {}",
status
);
extra["error"] = json!(err);
}
},
Err(err) => {
let err = format!("error during /status API call: {}", err);
extra["error"] = json!(err);
}
}
result.push(DriverStatus {
id: name,
data: extra,
});
}
Ok(result)
}
.boxed()
}
fn stop(&self, id: String) -> Async<Result<(), Error>> {
async move {
let name = tools::systemd::escape_unit(&id, false);
let mut map = VMStateMap::load()?;
let map_mod = cleanup_map(&mut map.map).await;
match map.map.get(&name) {
Some(state) => {
let client =
VsockClient::new(state.cid, DEFAULT_VSOCK_PORT, Some(state.ticket.clone()));
// ignore errors, this either fails because:
// * the VM is unreachable/dead, in which case we don't want it in the map
// * the call was successful and the connection reset when the VM stopped
let _ = client.get("api2/json/stop", None).await;
map.map.remove(&name);
map.write()?;
}
None => {
if map_mod {
map.write()?;
}
bail!("VM with name '{}' not found", name);
}
}
Ok(())
}
.boxed()
}
fn list(&self) -> Vec<String> {
match VMStateMap::load_read_only() {
Ok(state) => state
.iter()
.filter_map(|(name, _)| tools::systemd::unescape_unit(&name).ok())
.collect(),
Err(_) => Vec::new(),
}
}
}
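// Both driver methods above address files inside the VM as
// base64("<archive-name><absolute path>"). A hedged sketch of that encoding
// convention, factored into a free function (the name `encode_vm_path` is
// illustrative, not part of this changeset):
fn encode_vm_path(img_file: &str, path: &[u8]) -> String {
    let mut full = img_file.as_bytes().to_vec();
    // mirror data_list()/data_extract(): ensure the path part starts with '/'
    if !path.is_empty() && path[0] != b'/' {
        full.push(b'/');
    }
    full.extend_from_slice(path);
    base64::encode(full)
}
// e.g. encode_vm_path("drive-scsi0.img.fidx", b"part/1/etc/hostname")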

View File

@ -0,0 +1,6 @@
//! Block device drivers and tools for single file restore
pub mod block_driver;
pub use block_driver::*;
mod qemu_helper;
mod block_driver_qemu;

View File

@ -0,0 +1,276 @@
//! Helper to start a QEMU VM for single file restore.
use std::fs::{File, OpenOptions};
use std::io::prelude::*;
use std::os::unix::io::{AsRawFd, FromRawFd};
use std::path::PathBuf;
use std::time::Duration;
use anyhow::{bail, format_err, Error};
use tokio::time;
use nix::sys::signal::{kill, Signal};
use nix::unistd::Pid;
use proxmox::tools::{
fd::Fd,
fs::{create_path, file_read_string, make_tmp_file, CreateOptions},
};
use proxmox_backup::backup::backup_user;
use proxmox_backup::client::{VsockClient, DEFAULT_VSOCK_PORT};
use proxmox_backup::{buildcfg, tools};
use super::SnapRestoreDetails;
const PBS_VM_NAME: &str = "pbs-restore-vm";
const MAX_CID_TRIES: u64 = 32;
fn create_restore_log_dir() -> Result<String, Error> {
let logpath = format!("{}/file-restore", buildcfg::PROXMOX_BACKUP_LOG_DIR);
proxmox::try_block!({
let backup_user = backup_user()?;
let opts = CreateOptions::new()
.owner(backup_user.uid)
.group(backup_user.gid);
let opts_root = CreateOptions::new()
.owner(nix::unistd::ROOT)
.group(nix::unistd::Gid::from_raw(0));
create_path(buildcfg::PROXMOX_BACKUP_LOG_DIR, None, Some(opts))?;
create_path(&logpath, None, Some(opts_root))?;
Ok(())
})
.map_err(|err: Error| format_err!("unable to create file-restore log dir - {}", err))?;
Ok(logpath)
}
fn validate_img_existence() -> Result<(), Error> {
let kernel = PathBuf::from(buildcfg::PROXMOX_BACKUP_KERNEL_FN);
let initramfs = PathBuf::from(buildcfg::PROXMOX_BACKUP_INITRAMFS_FN);
if !kernel.exists() || !initramfs.exists() {
bail!("cannot run file-restore VM: package 'proxmox-file-restore' is not (correctly) installed");
}
Ok(())
}
fn try_kill_vm(pid: i32) -> Result<(), Error> {
let pid = Pid::from_raw(pid);
if let Ok(()) = kill(pid, None) {
// process is running (and we could kill it), check if it is actually ours
// (if it errors assume we raced with the process's death and ignore it)
if let Ok(cmdline) = file_read_string(format!("/proc/{}/cmdline", pid)) {
if cmdline.split('\0').any(|a| a == PBS_VM_NAME) {
// yes, it's ours, kill it brutally with SIGKILL, no reason to take
// any chances - in this state it's most likely broken anyway
if let Err(err) = kill(pid, Signal::SIGKILL) {
bail!(
"reaping broken VM (pid {}) with SIGKILL failed: {}",
pid,
err
);
}
}
}
}
Ok(())
}
async fn create_temp_initramfs(ticket: &str) -> Result<(Fd, String), Error> {
use std::ffi::CString;
use tokio::fs::File;
let (tmp_fd, tmp_path) =
make_tmp_file("/tmp/file-restore-qemu.initramfs.tmp", CreateOptions::new())?;
nix::unistd::unlink(&tmp_path)?;
tools::fd_change_cloexec(tmp_fd.0, false)?;
let mut f = File::from_std(unsafe { std::fs::File::from_raw_fd(tmp_fd.0) });
let mut base = File::open(buildcfg::PROXMOX_BACKUP_INITRAMFS_FN).await?;
tokio::io::copy(&mut base, &mut f).await?;
let name = CString::new("ticket").unwrap();
tools::cpio::append_file(
&mut f,
ticket.as_bytes(),
&name,
0,
(libc::S_IFREG | 0o400) as u16,
0,
0,
0,
ticket.len() as u32,
)
.await?;
tools::cpio::append_trailer(&mut f).await?;
// forget the tokio file, we close the file descriptor via the returned Fd
std::mem::forget(f);
let path = format!("/dev/fd/{}", &tmp_fd.0);
Ok((tmp_fd, path))
}
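// create_temp_initramfs() relies on a recurring pattern in this file: create
// a temporary file, unlink it right away so nothing lingers on disk, clear
// FD_CLOEXEC so the descriptor survives exec, and hand it to QEMU as
// "/dev/fd/<n>". A minimal sketch of that pattern using only std and libc
// (the helper name is an assumption, not part of this changeset):
fn anonymous_dev_fd_path(tmp: &str) -> Result<(std::fs::File, String), std::io::Error> {
    use std::os::unix::io::AsRawFd;
    let file = std::fs::OpenOptions::new()
        .read(true)
        .write(true)
        .create_new(true)
        .open(tmp)?;
    std::fs::remove_file(tmp)?; // unlinked, but still reachable via the FD
    let fd = file.as_raw_fd();
    // clear FD_CLOEXEC so a spawned child (e.g. QEMU) inherits the FD
    unsafe {
        let flags = libc::fcntl(fd, libc::F_GETFD);
        libc::fcntl(fd, libc::F_SETFD, flags & !libc::FD_CLOEXEC);
    }
    Ok((file, format!("/dev/fd/{}", fd)))
}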
pub async fn start_vm(
// u16 so we can do wrapping_add without going too high
mut cid: u16,
details: &SnapRestoreDetails,
files: impl Iterator<Item = String>,
ticket: &str,
) -> Result<(i32, i32), Error> {
validate_img_existence()?;
if std::env::var("PBS_PASSWORD").is_err() {
bail!("environment variable PBS_PASSWORD has to be set for QEMU VM restore");
}
let pid;
let (pid_fd, pid_path) = make_tmp_file("/tmp/file-restore-qemu.pid.tmp", CreateOptions::new())?;
nix::unistd::unlink(&pid_path)?;
tools::fd_change_cloexec(pid_fd.0, false)?;
let (_ramfs_pid, ramfs_path) = create_temp_initramfs(ticket).await?;
let logpath = create_restore_log_dir()?;
let logfile = &format!("{}/qemu.log", logpath);
let mut logrotate = tools::logrotate::LogRotate::new(logfile, false)
.ok_or_else(|| format_err!("could not get QEMU log file names"))?;
if let Err(err) = logrotate.do_rotate(CreateOptions::default(), Some(16)) {
eprintln!("warning: logrotate for QEMU log file failed - {}", err);
}
let mut logfd = OpenOptions::new()
.append(true)
.create_new(true)
.open(logfile)?;
tools::fd_change_cloexec(logfd.as_raw_fd(), false)?;
// preface log file with start timestamp so one can see how long QEMU took to start
writeln!(logfd, "[{}] PBS file restore VM log", {
let now = proxmox::tools::time::epoch_i64();
proxmox::tools::time::epoch_to_rfc3339(now)?
},)?;
let base_args = [
"-chardev",
&format!(
"file,id=log,path=/dev/null,logfile=/dev/fd/{},logappend=on",
logfd.as_raw_fd()
),
"-serial",
"chardev:log",
"-vnc",
"none",
"-enable-kvm",
"-m",
"512",
"-kernel",
buildcfg::PROXMOX_BACKUP_KERNEL_FN,
"-initrd",
&ramfs_path,
"-append",
"quiet",
"-daemonize",
"-pidfile",
&format!("/dev/fd/{}", pid_fd.as_raw_fd()),
"-name",
PBS_VM_NAME,
];
// Generate drive arguments for all fidx files in backup snapshot
let mut drives = Vec::new();
let mut id = 0;
for file in files {
if !file.ends_with(".img.fidx") {
continue;
}
drives.push("-drive".to_owned());
let keyfile = if let Some(ref keyfile) = details.keyfile {
format!(",,keyfile={}", keyfile)
} else {
"".to_owned()
};
drives.push(format!(
"file=pbs:repository={},,snapshot={},,archive={}{},read-only=on,if=none,id=drive{}",
details.repo, details.snapshot, file, keyfile, id
));
drives.push("-device".to_owned());
// the drive serial is used by the VM to map .fidx files to /dev paths
drives.push(format!("virtio-blk-pci,drive=drive{},serial={}", id, file));
id += 1;
}
// Try starting QEMU in a loop to retry if we fail because of a bad 'cid' value
let mut attempts = 0;
loop {
let mut qemu_cmd = std::process::Command::new("qemu-system-x86_64");
qemu_cmd.args(base_args.iter());
qemu_cmd.args(&drives);
qemu_cmd.arg("-device");
qemu_cmd.arg(format!(
"vhost-vsock-pci,guest-cid={},disable-legacy=on",
cid
));
qemu_cmd.stdout(std::process::Stdio::null());
qemu_cmd.stderr(std::process::Stdio::piped());
let res = tokio::task::block_in_place(|| qemu_cmd.spawn()?.wait_with_output())?;
if res.status.success() {
// at this point QEMU is already daemonized and running, so if anything fails we
// technically leave behind a zombie-VM... this shouldn't matter, as it will stop
// itself soon enough (timer), and the following operations are unlikely to fail
let mut pid_file = unsafe { File::from_raw_fd(pid_fd.as_raw_fd()) };
std::mem::forget(pid_fd); // FD ownership is now in pid_fd/File
let mut pidstr = String::new();
pid_file.read_to_string(&mut pidstr)?;
pid = pidstr.trim_end().parse().map_err(|err| {
format_err!("cannot parse PID returned by QEMU ('{}'): {}", &pidstr, err)
})?;
break;
} else {
let out = String::from_utf8_lossy(&res.stderr);
if out.contains("unable to set guest cid: Address already in use") {
attempts += 1;
if attempts >= MAX_CID_TRIES {
bail!("CID '{}' in use, but max attempts reached, aborting", cid);
}
// CID in use, try next higher one
eprintln!("CID '{}' in use by other VM, attempting next one", cid);
// skip special-meaning low values
cid = cid.wrapping_add(1).max(10);
} else {
eprint!("{}", out);
bail!("Starting VM failed. See output above for more information.");
}
}
}
// QEMU has started successfully, now wait for virtio socket to become ready
let pid_t = Pid::from_raw(pid);
for _ in 0..60 {
let client = VsockClient::new(cid as i32, DEFAULT_VSOCK_PORT, Some(ticket.to_owned()));
if let Ok(Ok(_)) =
time::timeout(Duration::from_secs(2), client.get("api2/json/status", None)).await
{
return Ok((pid, cid as i32));
}
if kill(pid_t, None).is_err() {
// QEMU exited
bail!("VM exited before connection could be established");
}
time::sleep(Duration::from_millis(200)).await;
}
// start failed
if let Err(err) = try_kill_vm(pid) {
eprintln!("killing failed VM failed: {}", err);
}
bail!("starting VM timed out");
}

View File

@ -0,0 +1,370 @@
//! File-restore API running inside the restore VM
use anyhow::{bail, Error};
use futures::FutureExt;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use log::error;
use pathpatterns::{MatchEntry, MatchPattern, MatchType, Pattern};
use serde_json::Value;
use std::ffi::OsStr;
use std::fs;
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
use proxmox::api::{
api, schema::*, ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router, RpcEnvironment,
SubdirMap,
};
use proxmox::{identity, list_subdirs_api_method, sortable};
use proxmox_backup::api2::types::*;
use proxmox_backup::backup::DirEntryAttribute;
use proxmox_backup::pxar::{create_archive, Flags, PxarCreateOptions, ENCODER_MAX_ENTRIES};
use proxmox_backup::tools::{self, fs::read_subdir, zip::zip_directory};
use pxar::encoder::aio::TokioWriter;
use super::{disk::ResolveResult, watchdog_remaining, watchdog_ping};
// NOTE: All API endpoints must have Permission::Superuser, as the configs for authentication do
// not exist within the restore VM. Safety is guaranteed by checking a ticket via a custom ApiAuth.
const SUBDIRS: SubdirMap = &[
("extract", &Router::new().get(&API_METHOD_EXTRACT)),
("list", &Router::new().get(&API_METHOD_LIST)),
("status", &Router::new().get(&API_METHOD_STATUS)),
("stop", &Router::new().get(&API_METHOD_STOP)),
];
pub const ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SUBDIRS))
.subdirs(SUBDIRS);
fn read_uptime() -> Result<f32, Error> {
let uptime = fs::read_to_string("/proc/uptime")?;
// unwrap the Option; if /proc/uptime is empty we have bigger problems
Ok(uptime.split_ascii_whitespace().next().unwrap().parse()?)
}
#[api(
input: {
properties: {
"keep-timeout": {
type: bool,
description: "If true, do not reset the watchdog timer on this API call.",
default: false,
optional: true,
},
},
},
access: {
description: "Permissions are handled outside restore VM. This call can be made without a ticket, but keep-timeout is always assumed 'true' then.",
permission: &Permission::World,
},
returns: {
type: RestoreDaemonStatus,
}
)]
/// General status information
fn status(rpcenv: &mut dyn RpcEnvironment, keep_timeout: bool) -> Result<RestoreDaemonStatus, Error> {
if !keep_timeout && rpcenv.get_auth_id().is_some() {
watchdog_ping();
}
Ok(RestoreDaemonStatus {
uptime: read_uptime()? as i64,
timeout: watchdog_remaining(),
})
}
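// A hedged sketch of the expected host-side call (compare cleanup_map() in
// the block driver): a GET on this endpoint with keep-timeout=true probes
// liveness without resetting the watchdog. The function name is illustrative.
async fn probe_restore_vm(cid: i32, ticket: String) -> Result<Value, Error> {
    use proxmox_backup::client::{VsockClient, DEFAULT_VSOCK_PORT};
    let client = VsockClient::new(cid, DEFAULT_VSOCK_PORT, Some(ticket));
    client
        .get("api2/json/status", Some(serde_json::json!({"keep-timeout": true})))
        .await
}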
#[api(
access: {
description: "Permissions are handled outside restore VM.",
permission: &Permission::Superuser,
},
)]
/// Stop the restore VM immediately, this will never return if successful
fn stop() {
use nix::sys::reboot;
println!("/stop called, shutting down");
let err = reboot::reboot(reboot::RebootMode::RB_POWER_OFF).unwrap_err();
println!("'reboot' syscall failed: {}", err);
std::process::exit(1);
}
fn get_dir_entry(path: &Path) -> Result<DirEntryAttribute, Error> {
use nix::sys::stat;
let stat = stat::stat(path)?;
Ok(match stat.st_mode & libc::S_IFMT {
libc::S_IFREG => DirEntryAttribute::File {
size: stat.st_size as u64,
mtime: stat.st_mtime,
},
libc::S_IFDIR => DirEntryAttribute::Directory { start: 0 },
_ => bail!("unsupported file type: {}", stat.st_mode),
})
}
#[api(
input: {
properties: {
"path": {
type: String,
description: "base64-encoded path to list files and directories under",
},
},
},
access: {
description: "Permissions are handled outside restore VM.",
permission: &Permission::Superuser,
},
)]
/// List file details for the given file, or the files and directories under the given path if it
/// points to a directory.
fn list(
path: String,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
watchdog_ping();
let mut res = Vec::new();
let param_path = base64::decode(path)?;
let mut path = param_path.clone();
if let Some(b'/') = path.last() {
path.pop();
}
let path_str = OsStr::from_bytes(&path[..]);
let param_path_buf = Path::new(path_str);
let mut disk_state = crate::DISK_STATE.lock().unwrap();
let query_result = disk_state.resolve(&param_path_buf)?;
match query_result {
ResolveResult::Path(vm_path) => {
let root_entry = get_dir_entry(&vm_path)?;
match root_entry {
DirEntryAttribute::File { .. } => {
// list on file, return details
res.push(ArchiveEntry::new(&param_path, Some(&root_entry)));
}
DirEntryAttribute::Directory { .. } => {
// list on directory, return all contained files/dirs
for f in read_subdir(libc::AT_FDCWD, &vm_path)? {
if let Ok(f) = f {
let name = f.file_name().to_bytes();
let path = &Path::new(OsStr::from_bytes(name));
if path.components().count() == 1 {
// ignore '.' and '..'
match path.components().next().unwrap() {
std::path::Component::CurDir
| std::path::Component::ParentDir => continue,
_ => {}
}
}
let mut full_vm_path = PathBuf::new();
full_vm_path.push(&vm_path);
full_vm_path.push(path);
let mut full_path = PathBuf::new();
full_path.push(param_path_buf);
full_path.push(path);
let entry = get_dir_entry(&full_vm_path);
if let Ok(entry) = entry {
res.push(ArchiveEntry::new(
full_path.as_os_str().as_bytes(),
Some(&entry),
));
}
}
}
}
_ => unreachable!(),
}
}
ResolveResult::BucketTypes(types) => {
for t in types {
let mut t_path = path.clone();
t_path.push(b'/');
t_path.extend(t.as_bytes());
res.push(ArchiveEntry::new(
&t_path[..],
None,
));
}
}
ResolveResult::BucketComponents(comps) => {
for c in comps {
let mut c_path = path.clone();
c_path.push(b'/');
c_path.extend(c.as_bytes());
res.push(ArchiveEntry::new(
&c_path[..],
// this marks the beginning of a filesystem, i.e. '/', so this is a Directory
Some(&DirEntryAttribute::Directory { start: 0 }),
));
}
}
}
Ok(res)
}
#[sortable]
pub const API_METHOD_EXTRACT: ApiMethod = ApiMethod::new(
&ApiHandler::AsyncHttp(&extract),
&ObjectSchema::new(
"Extract a file or directory from the VM as a pxar archive.",
&sorted!([
(
"path",
false,
&StringSchema::new("base64-encoded path to list files and directories under")
.schema()
),
(
"pxar",
true,
&BooleanSchema::new(concat!(
"if true, return a pxar archive, otherwise either the ",
"file content or the directory as a zip file"
))
.default(true)
.schema()
)
]),
),
)
.access(None, &Permission::Superuser);
fn extract(
_parts: Parts,
_req_body: Body,
param: Value,
_info: &ApiMethod,
_rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
watchdog_ping();
async move {
let path = tools::required_string_param(&param, "path")?;
let mut path = base64::decode(path)?;
if let Some(b'/') = path.last() {
path.pop();
}
let path = Path::new(OsStr::from_bytes(&path[..]));
let pxar = param["pxar"].as_bool().unwrap_or(true);
let query_result = {
let mut disk_state = crate::DISK_STATE.lock().unwrap();
disk_state.resolve(&path)?
};
let vm_path = match query_result {
ResolveResult::Path(vm_path) => vm_path,
_ => bail!("invalid path, cannot restore meta-directory: {:?}", path),
};
// check here so we can return a real error message; failing in the async task will stop
// the transfer, but not return a useful message
if !vm_path.exists() {
bail!("file or directory {:?} does not exist", path);
}
let (mut writer, reader) = tokio::io::duplex(1024 * 64);
if pxar {
tokio::spawn(async move {
let result = async move {
// pxar always expects a directory as its root, so to accommodate files as
// well we encode the parent dir with a filter only matching the target instead
let mut patterns = vec![MatchEntry::new(
MatchPattern::Pattern(Pattern::path(b"*").unwrap()),
MatchType::Exclude,
)];
let name = match vm_path.file_name() {
Some(name) => name,
None => bail!("no file name found for path: {:?}", vm_path),
};
if vm_path.is_dir() {
let mut pat = name.as_bytes().to_vec();
patterns.push(MatchEntry::new(
MatchPattern::Pattern(Pattern::path(pat.clone())?),
MatchType::Include,
));
pat.extend(b"/**/*".iter());
patterns.push(MatchEntry::new(
MatchPattern::Pattern(Pattern::path(pat)?),
MatchType::Include,
));
} else {
patterns.push(MatchEntry::new(
MatchPattern::Literal(name.as_bytes().to_vec()),
MatchType::Include,
));
}
let dir_path = vm_path.parent().unwrap_or_else(|| Path::new("/"));
let dir = nix::dir::Dir::open(
dir_path,
nix::fcntl::OFlag::O_NOFOLLOW,
nix::sys::stat::Mode::empty(),
)?;
let options = PxarCreateOptions {
entries_max: ENCODER_MAX_ENTRIES,
device_set: None,
patterns,
verbose: false,
skip_lost_and_found: false,
};
let pxar_writer = TokioWriter::new(writer);
create_archive(dir, pxar_writer, Flags::DEFAULT, |_| Ok(()), None, options)
.await
}
.await;
if let Err(err) = result {
error!("pxar streaming task failed - {}", err);
}
});
} else {
tokio::spawn(async move {
let result = async move {
if vm_path.is_dir() {
zip_directory(&mut writer, &vm_path).await?;
Ok(())
} else if vm_path.is_file() {
let mut file = tokio::fs::OpenOptions::new()
.read(true)
.open(vm_path)
.await?;
tokio::io::copy(&mut file, &mut writer).await?;
Ok(())
} else {
bail!("invalid entry type for path: {:?}", vm_path);
}
}
.await;
if let Err(err) = result {
error!("file or dir streaming task failed - {}", err);
}
});
}
let stream = tokio_util::io::ReaderStream::new(reader);
let body = Body::wrap_stream(stream);
Ok(Response::builder()
.status(StatusCode::OK)
.header(header::CONTENT_TYPE, "application/octet-stream")
.body(body)
.unwrap())
}
.boxed()
}

View File

@ -0,0 +1,45 @@
//! Authentication via a static ticket file
use anyhow::{bail, format_err, Error};
use std::fs::File;
use std::io::prelude::*;
use proxmox_backup::api2::types::Authid;
use proxmox_backup::config::cached_user_info::CachedUserInfo;
use proxmox_backup::server::auth::{ApiAuth, AuthError};
const TICKET_FILE: &str = "/ticket";
pub struct StaticAuth {
ticket: String,
}
impl ApiAuth for StaticAuth {
fn check_auth(
&self,
headers: &http::HeaderMap,
_method: &hyper::Method,
_user_info: &CachedUserInfo,
) -> Result<Authid, AuthError> {
match headers.get(hyper::header::AUTHORIZATION) {
Some(header) if header.to_str().unwrap_or("") == &self.ticket => {
Ok(Authid::root_auth_id().to_owned())
}
_ => {
return Err(AuthError::Generic(format_err!(
"invalid file restore ticket provided"
)));
}
}
}
}
pub fn ticket_auth() -> Result<StaticAuth, Error> {
let mut ticket_file = File::open(TICKET_FILE)?;
let mut ticket = String::new();
let len = ticket_file.read_to_string(&mut ticket)?;
if len == 0 {
bail!("invalid ticket: cannot be empty");
}
Ok(StaticAuth { ticket })
}
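// The ticket is expected verbatim as the Authorization header value (no
// "Bearer"-style scheme prefix), matching the comparison in check_auth()
// above. A minimal request-building sketch with hyper; the function name is
// illustrative only:
fn authorized_request(ticket: &str) -> http::Result<hyper::Request<hyper::Body>> {
    hyper::Request::builder()
        .method("GET")
        .uri("/api2/json/status")
        .header(hyper::header::AUTHORIZATION, ticket)
        .body(hyper::Body::empty())
}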

View File

@ -0,0 +1,329 @@
//! Low-level disk (image) access functions for file restore VMs.
use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
use log::{info, warn};
use std::collections::HashMap;
use std::fs::{create_dir_all, File};
use std::io::{BufRead, BufReader};
use std::path::{Component, Path, PathBuf};
use proxmox::const_regex;
use proxmox::tools::fs;
use proxmox_backup::api2::types::BLOCKDEVICE_NAME_REGEX;
const_regex! {
VIRTIO_PART_REGEX = r"^vd[a-z]+(\d+)$";
}
lazy_static! {
static ref FS_OPT_MAP: HashMap<&'static str, &'static str> = {
let mut m = HashMap::new();
// otherwise ext complains about mounting read-only
m.insert("ext2", "noload");
m.insert("ext3", "noload");
m.insert("ext4", "noload");
// ufs2 has been the default since FreeBSD 5.0 (released in 2003), so let's assume that
// whatever the user is trying to restore is not using anything older...
m.insert("ufs", "ufstype=ufs2");
m
};
}
pub enum ResolveResult {
Path(PathBuf),
BucketTypes(Vec<&'static str>),
BucketComponents(Vec<String>),
}
struct PartitionBucketData {
dev_node: String,
number: i32,
mountpoint: Option<PathBuf>,
}
/// A "Bucket" represents a mapping found on a disk, e.g. a partition, a zfs dataset or an LV. A
/// uniquely identifying path to a file then consists of four components:
/// "/disk/bucket/component/path"
/// where
/// disk: fidx file name
/// bucket: bucket type
/// component: identifier of the specific bucket
/// path: relative path of the file on the filesystem indicated by the other parts, may contain
/// more subdirectories
/// e.g.: "/drive-scsi0/part/0/etc/passwd"
enum Bucket {
Partition(PartitionBucketData),
}
impl Bucket {
fn filter_mut<'a, A: AsRef<str>, B: AsRef<str>>(
haystack: &'a mut Vec<Bucket>,
ty: A,
comp: B,
) -> Option<&'a mut Bucket> {
let ty = ty.as_ref();
let comp = comp.as_ref();
haystack.iter_mut().find(|b| match b {
Bucket::Partition(data) => ty == "part" && comp.parse::<i32>().unwrap() == data.number,
})
}
fn type_string(&self) -> &'static str {
match self {
Bucket::Partition(_) => "part",
}
}
fn component_string(&self) -> String {
match self {
Bucket::Partition(data) => data.number.to_string(),
}
}
}
/// Functions related to the local filesystem. This mostly exists so we can use 'supported_fs' in
/// try_mount while a Bucket is still mutably borrowed from DiskState.
struct Filesystems {
supported_fs: Vec<String>,
}
impl Filesystems {
fn scan() -> Result<Self, Error> {
// detect kernel supported filesystems
let mut supported_fs = Vec::new();
for f in BufReader::new(File::open("/proc/filesystems")?)
.lines()
.filter_map(Result::ok)
{
// ZFS is treated specially, don't attempt to do a regular mount with it
let f = f.trim();
if !f.starts_with("nodev") && f != "zfs" {
supported_fs.push(f.to_owned());
}
}
Ok(Self { supported_fs })
}
fn ensure_mounted(&self, bucket: &mut Bucket) -> Result<PathBuf, Error> {
match bucket {
Bucket::Partition(data) => {
// regular data partition à la "/dev/vdxN"
if let Some(mp) = &data.mountpoint {
return Ok(mp.clone());
}
let mp = format!("/mnt{}/", data.dev_node);
self.try_mount(&data.dev_node, &mp)?;
let mp = PathBuf::from(mp);
data.mountpoint = Some(mp.clone());
Ok(mp)
}
}
}
fn try_mount(&self, source: &str, target: &str) -> Result<(), Error> {
use nix::mount::*;
create_dir_all(target)?;
// try all supported fs until one works - this is the way Busybox's 'mount' does it too:
// https://git.busybox.net/busybox/tree/util-linux/mount.c?id=808d93c0eca49e0b22056e23d965f0d967433fbb#n2152
// note that ZFS is intentionally left out (see scan())
let flags =
MsFlags::MS_RDONLY | MsFlags::MS_NOEXEC | MsFlags::MS_NOSUID | MsFlags::MS_NODEV;
for fs in &self.supported_fs {
let fs: &str = fs.as_ref();
let opts = FS_OPT_MAP.get(fs).copied();
match mount(Some(source), target, Some(fs), flags, opts) {
Ok(()) => {
info!("mounting '{}' succeeded, fstype: '{}'", source, fs);
return Ok(());
}
Err(err) => {
warn!("mount error on '{}' ({}) - {}", source, fs, err);
}
}
}
bail!("all mounts failed or no supported file system")
}
}
pub struct DiskState {
filesystems: Filesystems,
disk_map: HashMap<String, Vec<Bucket>>,
}
impl DiskState {
/// Scan all disks for supported buckets.
pub fn scan() -> Result<Self, Error> {
// create mapping for virtio drives and .fidx files (via serial description)
// note: disks::DiskManager relies on udev, which we don't have
let mut disk_map = HashMap::new();
for entry in proxmox_backup::tools::fs::scan_subdir(
libc::AT_FDCWD,
"/sys/block",
&BLOCKDEVICE_NAME_REGEX,
)?
.filter_map(Result::ok)
{
let name = unsafe { entry.file_name_utf8_unchecked() };
if !name.starts_with("vd") {
continue;
}
let sys_path: &str = &format!("/sys/block/{}", name);
let serial = fs::file_read_string(&format!("{}/serial", sys_path));
let fidx = match serial {
Ok(serial) => serial,
Err(err) => {
warn!("disk '{}': could not read serial file - {}", name, err);
continue;
}
};
let mut parts = Vec::new();
for entry in proxmox_backup::tools::fs::scan_subdir(
libc::AT_FDCWD,
sys_path,
&VIRTIO_PART_REGEX,
)?
.filter_map(Result::ok)
{
let part_name = unsafe { entry.file_name_utf8_unchecked() };
let devnode = format!("/dev/{}", part_name);
let part_path = format!("/sys/block/{}/{}", name, part_name);
// create partition device node for further use
let dev_num_str = fs::file_read_firstline(&format!("{}/dev", part_path))?;
let (major, minor) = dev_num_str.split_at(dev_num_str.find(':').unwrap());
Self::mknod_blk(&devnode, major.parse()?, minor[1..].trim_end().parse()?)?;
let number = fs::file_read_firstline(&format!("{}/partition", part_path))?
.trim()
.parse::<i32>()?;
info!(
"drive '{}' ('{}'): found partition '{}' ({})",
name, fidx, devnode, number
);
let bucket = Bucket::Partition(PartitionBucketData {
dev_node: devnode,
mountpoint: None,
number,
});
parts.push(bucket);
}
disk_map.insert(fidx.to_owned(), parts);
}
Ok(Self {
filesystems: Filesystems::scan()?,
disk_map,
})
}
/// Given a path like "/drive-scsi0.img.fidx/part/0/etc/passwd", this will mount the first
/// partition of 'drive-scsi0' on-demand (i.e. if not already mounted) and return a path
/// pointing to the requested file locally, e.g. "/mnt/vda1/etc/passwd", which can be used to
/// read the file. Given a partial path, i.e. only "/drive-scsi0.img.fidx" or
/// "/drive-scsi0.img.fidx/part", it will return a list of available bucket types or bucket
/// components respectively
pub fn resolve(&mut self, path: &Path) -> Result<ResolveResult, Error> {
let mut cmp = path.components().peekable();
match cmp.peek() {
Some(Component::RootDir) | Some(Component::CurDir) => {
cmp.next();
}
None => bail!("empty path cannot be resolved to file location"),
_ => {}
}
let req_fidx = match cmp.next() {
Some(Component::Normal(x)) => x.to_string_lossy(),
_ => bail!("no or invalid image in path"),
};
let buckets = match self.disk_map.get_mut(req_fidx.as_ref()) {
Some(x) => x,
None => bail!("given image '{}' not found", req_fidx),
};
let bucket_type = match cmp.next() {
Some(Component::Normal(x)) => x.to_string_lossy(),
Some(c) => bail!("invalid bucket in path: {:?}", c),
None => {
// list bucket types available
let mut types = buckets
.iter()
.map(|b| b.type_string())
.collect::<Vec<&'static str>>();
// dedup requires duplicates to be consecutive, which is the case - see scan()
types.dedup();
return Ok(ResolveResult::BucketTypes(types));
}
};
let component = match cmp.next() {
Some(Component::Normal(x)) => x.to_string_lossy(),
Some(c) => bail!("invalid bucket component in path: {:?}", c),
None => {
// list bucket components available
let comps = buckets
.iter()
.filter(|b| b.type_string() == bucket_type)
.map(Bucket::component_string)
.collect();
return Ok(ResolveResult::BucketComponents(comps));
}
};
let mut bucket = match Bucket::filter_mut(buckets, &bucket_type, &component) {
Some(bucket) => bucket,
None => bail!(
"bucket/component path not found: {}/{}/{}",
req_fidx,
bucket_type,
component
),
};
// bucket found, check mount
let mountpoint = self
.filesystems
.ensure_mounted(&mut bucket)
.map_err(|err| {
format_err!(
"mounting '{}/{}/{}' failed: {}",
req_fidx,
bucket_type,
component,
err
)
})?;
let mut local_path = PathBuf::new();
local_path.push(mountpoint);
for rem in cmp {
local_path.push(rem);
}
Ok(ResolveResult::Path(local_path))
}
fn mknod_blk(path: &str, maj: u64, min: u64) -> Result<(), Error> {
use nix::sys::stat;
let dev = stat::makedev(maj, min);
stat::mknod(path, stat::SFlag::S_IFBLK, stat::Mode::S_IRWXU, dev)?;
Ok(())
}
}
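// A hedged walk-through of how resolve() peels off path components, with an
// illustrative archive name (partition numbers depend on the guest disk):
//   "/drive-scsi0.img.fidx"            -> BucketTypes(["part"])
//   "/drive-scsi0.img.fidx/part"       -> BucketComponents(["1", "2", ...])
//   "/drive-scsi0.img.fidx/part/1/etc" -> Path("/mnt/dev/vda1/etc"), after an
//                                         on-demand read-only mount
// A minimal caller sketch (the function name is an assumption):
fn describe(result: ResolveResult) -> String {
    match result {
        ResolveResult::Path(p) => format!("local path: {:?}", p),
        ResolveResult::BucketTypes(types) => format!("bucket types: {:?}", types),
        ResolveResult::BucketComponents(comps) => format!("components: {:?}", comps),
    }
}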

View File

@ -0,0 +1,11 @@
//! File restore VM related functionality
mod api;
pub use api::*;
pub mod auth;
mod watchdog;
pub use watchdog::*;
mod disk;
pub use disk::*;

View File

@ -0,0 +1,41 @@
//! Tokio-based watchdog that shuts down the VM if not pinged for TIMEOUT
use std::sync::atomic::{AtomicI64, Ordering};
use proxmox::tools::time::epoch_i64;
const TIMEOUT: i64 = 600; // seconds
static TRIGGERED: AtomicI64 = AtomicI64::new(0);
fn handle_expired() -> ! {
use nix::sys::reboot;
println!("watchdog expired, shutting down");
let err = reboot::reboot(reboot::RebootMode::RB_POWER_OFF).unwrap_err();
println!("'reboot' syscall failed: {}", err);
std::process::exit(1);
}
async fn watchdog_loop() {
use tokio::time::{sleep, Duration};
loop {
let remaining = watchdog_remaining();
if remaining <= 0 {
handle_expired();
}
sleep(Duration::from_secs(remaining as u64)).await;
}
}
/// Initialize watchdog
pub fn watchdog_init() {
watchdog_ping();
tokio::spawn(watchdog_loop());
}
/// Trigger watchdog keepalive
pub fn watchdog_ping() {
TRIGGERED.fetch_max(epoch_i64(), Ordering::AcqRel);
}
/// Returns the remaining time before watchdog expiry in seconds
pub fn watchdog_remaining() -> i64 {
TIMEOUT - (epoch_i64() - TRIGGERED.load(Ordering::Acquire))
}
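// A minimal wiring sketch (the function name is an assumption): the restore
// daemon initializes once at startup, pings on every authenticated request,
// and reports the remaining time via /status. fetch_max() in watchdog_ping()
// ensures concurrent pings never move the deadline backwards.
async fn example_wiring() {
    watchdog_init(); // arms the timer and spawns the expiry loop
    watchdog_ping(); // every authenticated API handler refreshes the deadline
    println!("watchdog expires in {}s", watchdog_remaining());
}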

View File

@ -21,7 +21,7 @@ use proxmox_backup::{
config::drive::{
complete_drive_name,
complete_changer_name,
complete_linux_drive_name,
complete_lto_drive_name,
},
};
@ -33,13 +33,13 @@ pub fn drive_commands() -> CommandLineInterface {
.insert("config",
CliCommand::new(&API_METHOD_GET_CONFIG)
.arg_param(&["name"])
.completion_cb("name", complete_linux_drive_name)
.completion_cb("name", complete_lto_drive_name)
)
.insert(
"remove",
CliCommand::new(&api2::config::drive::API_METHOD_DELETE_DRIVE)
.arg_param(&["name"])
.completion_cb("name", complete_linux_drive_name)
.completion_cb("name", complete_lto_drive_name)
)
.insert(
"create",
@ -53,7 +53,7 @@ pub fn drive_commands() -> CommandLineInterface {
"update",
CliCommand::new(&api2::config::drive::API_METHOD_UPDATE_DRIVE)
.arg_param(&["name"])
.completion_cb("name", complete_linux_drive_name)
.completion_cb("name", complete_lto_drive_name)
.completion_cb("path", complete_drive_path)
.completion_cb("changer", complete_changer_name)
)

View File

@ -177,12 +177,14 @@ fn list_content(
let options = default_table_format_options()
.sortby("media-set-uuid", false)
.sortby("seq-nr", false)
.sortby("store", false)
.sortby("snapshot", false)
.sortby("backup-time", false)
.column(ColumnConfig::new("label-text"))
.column(ColumnConfig::new("pool"))
.column(ColumnConfig::new("media-set-name"))
.column(ColumnConfig::new("seq-nr"))
.column(ColumnConfig::new("store"))
.column(ColumnConfig::new("snapshot"))
.column(ColumnConfig::new("media-set-uuid"))
;

View File

@ -130,22 +130,22 @@ fn extract_archive(
) -> Result<(), Error> {
let mut feature_flags = Flags::DEFAULT;
if no_xattrs {
feature_flags ^= Flags::WITH_XATTRS;
feature_flags.remove(Flags::WITH_XATTRS);
}
if no_fcaps {
feature_flags ^= Flags::WITH_FCAPS;
feature_flags.remove(Flags::WITH_FCAPS);
}
if no_acls {
feature_flags ^= Flags::WITH_ACL;
feature_flags.remove(Flags::WITH_ACL);
}
if no_device_nodes {
feature_flags ^= Flags::WITH_DEVICE_NODES;
feature_flags.remove(Flags::WITH_DEVICE_NODES);
}
if no_fifos {
feature_flags ^= Flags::WITH_FIFOS;
feature_flags.remove(Flags::WITH_FIFOS);
}
if no_sockets {
feature_flags ^= Flags::WITH_SOCKETS;
feature_flags.remove(Flags::WITH_SOCKETS);
}
let pattern = pattern.unwrap_or_else(Vec::new);
@ -353,22 +353,22 @@ async fn create_archive(
let writer = std::io::BufWriter::with_capacity(1024 * 1024, file);
let mut feature_flags = Flags::DEFAULT;
if no_xattrs {
feature_flags ^= Flags::WITH_XATTRS;
feature_flags.remove(Flags::WITH_XATTRS);
}
if no_fcaps {
feature_flags ^= Flags::WITH_FCAPS;
feature_flags.remove(Flags::WITH_FCAPS);
}
if no_acls {
feature_flags ^= Flags::WITH_ACL;
feature_flags.remove(Flags::WITH_ACL);
}
if no_device_nodes {
feature_flags ^= Flags::WITH_DEVICE_NODES;
feature_flags.remove(Flags::WITH_DEVICE_NODES);
}
if no_fifos {
feature_flags ^= Flags::WITH_FIFOS;
feature_flags.remove(Flags::WITH_FIFOS);
}
if no_sockets {
feature_flags ^= Flags::WITH_SOCKETS;
feature_flags.remove(Flags::WITH_SOCKETS);
}
let writer = pxar::encoder::sync::StandardWriter::new(writer);

View File

@ -1,7 +1,5 @@
/// Tape command implemented using scsi-generic raw commands
///
/// SCSI-generic commands need root privileges, so this binary needs
/// to be setuid root.
/// Helper to run tape commands as root. Currently only required
/// to read and set the encryption key.
///
/// This command can use STDIN as tape device handle.
@ -24,41 +22,41 @@ use proxmox_backup::{
config,
backup::Fingerprint,
api2::types::{
LINUX_DRIVE_PATH_SCHEMA,
LTO_DRIVE_PATH_SCHEMA,
DRIVE_NAME_SCHEMA,
TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
MEDIA_SET_UUID_SCHEMA,
LinuxTapeDrive,
LtoTapeDrive,
},
tape::{
drive::{
TapeDriver,
LinuxTapeHandle,
open_linux_tape_device,
check_tape_is_linux_tape_device,
LtoTapeHandle,
open_lto_tape_device,
check_tape_is_lto_tape_device,
},
},
};
fn get_tape_handle(param: &Value) -> Result<LinuxTapeHandle, Error> {
fn get_tape_handle(param: &Value) -> Result<LtoTapeHandle, Error> {
let handle = if let Some(name) = param["drive"].as_str() {
let (config, _digest) = config::drive::config()?;
let drive: LinuxTapeDrive = config.lookup("linux", &name)?;
let drive: LtoTapeDrive = config.lookup("lto", &name)?;
eprintln!("using device {}", drive.path);
drive.open()?
} else if let Some(device) = param["device"].as_str() {
eprintln!("using device {}", device);
LinuxTapeHandle::new(open_linux_tape_device(&device)?)
LtoTapeHandle::new(open_lto_tape_device(&device)?)?
} else if let Some(true) = param["stdin"].as_bool() {
eprintln!("using stdin");
let fd = std::io::stdin().as_raw_fd();
let file = unsafe { File::from_raw_fd(fd) };
check_tape_is_linux_tape_device(&file)?;
LinuxTapeHandle::new(file)
check_tape_is_lto_tape_device(&file)?;
LtoTapeHandle::new(file)?
} else if let Ok(name) = std::env::var("PROXMOX_TAPE_DRIVE") {
let (config, _digest) = config::drive::config()?;
let drive: LinuxTapeDrive = config.lookup("linux", &name)?;
let drive: LtoTapeDrive = config.lookup("lto", &name)?;
eprintln!("using device {}", drive.path);
drive.open()?
} else {
@ -66,13 +64,13 @@ fn get_tape_handle(param: &Value) -> Result<LinuxTapeHandle, Error> {
let mut drive_names = Vec::new();
for (name, (section_type, _)) in config.sections.iter() {
if section_type != "linux" { continue; }
if section_type != "lto" { continue; }
drive_names.push(name);
}
if drive_names.len() == 1 {
let name = drive_names[0];
let drive: LinuxTapeDrive = config.lookup("linux", &name)?;
let drive: LtoTapeDrive = config.lookup("lto", &name)?;
eprintln!("using device {}", drive.path);
drive.open()?
} else {
@ -83,111 +81,6 @@ fn get_tape_handle(param: &Value) -> Result<LinuxTapeHandle, Error> {
Ok(handle)
}
#[api(
input: {
properties: {
drive: {
schema: DRIVE_NAME_SCHEMA,
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
optional: true,
},
stdin: {
description: "Use standard input as device handle.",
type: bool,
optional: true,
},
},
},
)]
/// Tape/Media Status
fn status(
param: Value,
) -> Result<(), Error> {
let result = proxmox::try_block!({
let mut handle = get_tape_handle(&param)?;
handle.get_drive_and_media_status()
}).map_err(|err: Error| err.to_string());
println!("{}", serde_json::to_string_pretty(&result)?);
Ok(())
}
#[api(
input: {
properties: {
drive: {
schema: DRIVE_NAME_SCHEMA,
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
optional: true,
},
stdin: {
description: "Use standard input as device handle.",
type: bool,
optional: true,
},
},
},
)]
/// Read Cartridge Memory (Medium auxiliary memory attributes)
fn cartridge_memory(
param: Value,
) -> Result<(), Error> {
let result = proxmox::try_block!({
let mut handle = get_tape_handle(&param)?;
handle.cartridge_memory()
}).map_err(|err| err.to_string());
println!("{}", serde_json::to_string_pretty(&result)?);
Ok(())
}
#[api(
input: {
properties: {
drive: {
schema: DRIVE_NAME_SCHEMA,
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
optional: true,
},
stdin: {
description: "Use standard input as device handle.",
type: bool,
optional: true,
},
},
},
)]
/// Read Tape Alert Flags
fn tape_alert_flags(
param: Value,
) -> Result<(), Error> {
let result = proxmox::try_block!({
let mut handle = get_tape_handle(&param)?;
let flags = handle.tape_alert_flags()?;
Ok(flags.bits())
}).map_err(|err: Error| err.to_string());
println!("{}", serde_json::to_string_pretty(&result)?);
Ok(())
}
#[api(
input: {
properties: {
@ -204,7 +97,7 @@ fn tape_alert_flags(
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
schema: LTO_DRIVE_PATH_SCHEMA,
optional: true,
},
stdin: {
@ -245,40 +138,6 @@ fn set_encryption(
Ok(())
}
#[api(
input: {
properties: {
drive: {
schema: DRIVE_NAME_SCHEMA,
optional: true,
},
device: {
schema: LINUX_DRIVE_PATH_SCHEMA,
optional: true,
},
stdin: {
description: "Use standard input as device handle.",
type: bool,
optional: true,
},
},
},
)]
/// Read volume statistics
fn volume_statistics(
param: Value,
) -> Result<(), Error> {
let result = proxmox::try_block!({
let mut handle = get_tape_handle(&param)?;
handle.volume_statistics()
}).map_err(|err: Error| err.to_string());
println!("{}", serde_json::to_string_pretty(&result)?);
Ok(())
}
fn main() -> Result<(), Error> {
// check if we are user root or backup
@ -300,22 +159,6 @@ fn main() -> Result<(), Error> {
}
let cmd_def = CliCommandMap::new()
.insert(
"status",
CliCommand::new(&API_METHOD_STATUS)
)
.insert(
"cartridge-memory",
CliCommand::new(&API_METHOD_CARTRIDGE_MEMORY)
)
.insert(
"tape-alert-flags",
CliCommand::new(&API_METHOD_TAPE_ALERT_FLAGS)
)
.insert(
"volume-statistics",
CliCommand::new(&API_METHOD_VOLUME_STATISTICS)
)
.insert(
"encryption",
CliCommand::new(&API_METHOD_SET_ENCRYPTION)

View File

@ -10,6 +10,14 @@ macro_rules! PROXMOX_BACKUP_RUN_DIR_M { () => ("/run/proxmox-backup") }
#[macro_export]
macro_rules! PROXMOX_BACKUP_LOG_DIR_M { () => ("/var/log/proxmox-backup") }
#[macro_export]
macro_rules! PROXMOX_BACKUP_CACHE_DIR_M { () => ("/var/cache/proxmox-backup") }
#[macro_export]
macro_rules! PROXMOX_BACKUP_FILE_RESTORE_BIN_DIR_M {
() => ("/usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore")
}
/// namespaced directory for in-memory (tmpfs) run state
pub const PROXMOX_BACKUP_RUN_DIR: &str = PROXMOX_BACKUP_RUN_DIR_M!();
@ -30,6 +38,15 @@ pub const PROXMOX_BACKUP_PROXY_PID_FN: &str = concat!(PROXMOX_BACKUP_RUN_DIR_M!(
/// the PID filename for the privileged api daemon
pub const PROXMOX_BACKUP_API_PID_FN: &str = concat!(PROXMOX_BACKUP_RUN_DIR_M!(), "/api.pid");
/// filename of the cached initramfs to use for booting single file restore VMs; this file is
/// automatically created by APT hooks
pub const PROXMOX_BACKUP_INITRAMFS_FN: &str =
concat!(PROXMOX_BACKUP_CACHE_DIR_M!(), "/file-restore-initramfs.img");
/// filename of the kernel to use for booting single file restore VMs
pub const PROXMOX_BACKUP_KERNEL_FN: &str =
concat!(PROXMOX_BACKUP_FILE_RESTORE_BIN_DIR_M!(), "/bzImage");
/// Prepend configuration directory to a file name
///
/// This is a simple way to get the full path for configuration files.

View File

@ -1,12 +1,12 @@
use std::collections::HashSet;
use std::os::unix::fs::OpenOptionsExt;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use anyhow::{bail, format_err, Error};
use futures::*;
use futures::stream::Stream;
use futures::future::AbortHandle;
use futures::stream::Stream;
use futures::*;
use serde_json::{json, Value};
use tokio::io::AsyncReadExt;
use tokio::sync::{mpsc, oneshot};
@ -14,11 +14,11 @@ use tokio_stream::wrappers::ReceiverStream;
use proxmox::tools::digest_to_hex;
use super::merge_known_chunks::{MergedChunkInfo, MergeKnownChunks};
use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo};
use crate::backup::*;
use crate::tools::format::HumanByte;
use super::{HttpClient, H2Client};
use super::{H2Client, HttpClient};
pub struct BackupWriter {
h2: H2Client,
@ -28,7 +28,6 @@ pub struct BackupWriter {
}
impl Drop for BackupWriter {
fn drop(&mut self) {
self.abort.abort();
}
@ -48,13 +47,32 @@ pub struct UploadOptions {
pub fixed_size: Option<u64>,
}
struct UploadStats {
chunk_count: usize,
chunk_reused: usize,
size: usize,
size_reused: usize,
size_compressed: usize,
duration: std::time::Duration,
csum: [u8; 32],
}
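// How the struct above feeds the summary printed by upload_stream() further
// down: "dirty" bytes are the ones actually uploaded, and the average speed
// is dirty bytes over wall-clock time. A minimal sketch, assuming a non-zero
// duration (the helper name is illustrative, not part of this changeset):
fn average_speed_bytes_per_sec(stats: &UploadStats) -> usize {
    let size_dirty = stats.size - stats.size_reused;
    (size_dirty * 1_000_000) / (stats.duration.as_micros() as usize)
}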
type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>;
type UploadResultReceiver = oneshot::Receiver<Result<(), Error>>;
impl BackupWriter {
fn new(h2: H2Client, abort: AbortHandle, crypt_config: Option<Arc<CryptConfig>>, verbose: bool) -> Arc<Self> {
Arc::new(Self { h2, abort, crypt_config, verbose })
fn new(
h2: H2Client,
abort: AbortHandle,
crypt_config: Option<Arc<CryptConfig>>,
verbose: bool,
) -> Arc<Self> {
Arc::new(Self {
h2,
abort,
crypt_config,
verbose,
})
}
// FIXME: extract into (flattened) parameter struct?
@ -67,9 +85,8 @@ impl BackupWriter {
backup_id: &str,
backup_time: i64,
debug: bool,
benchmark: bool
benchmark: bool,
) -> Result<Arc<BackupWriter>, Error> {
let param = json!({
"backup-type": backup_type,
"backup-id": backup_id,
@ -80,34 +97,30 @@ impl BackupWriter {
});
let req = HttpClient::request_builder(
client.server(), client.port(), "GET", "/api2/json/backup", Some(param)).unwrap();
client.server(),
client.port(),
"GET",
"/api2/json/backup",
Some(param),
)
.unwrap();
let (h2, abort) = client.start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!())).await?;
let (h2, abort) = client
.start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
.await?;
Ok(BackupWriter::new(h2, abort, crypt_config, debug))
}
pub async fn get(
&self,
path: &str,
param: Option<Value>,
) -> Result<Value, Error> {
pub async fn get(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
self.h2.get(path, param).await
}
pub async fn put(
&self,
path: &str,
param: Option<Value>,
) -> Result<Value, Error> {
pub async fn put(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
self.h2.put(path, param).await
}
pub async fn post(
&self,
path: &str,
param: Option<Value>,
) -> Result<Value, Error> {
pub async fn post(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
self.h2.post(path, param).await
}
@ -118,7 +131,9 @@ impl BackupWriter {
content_type: &str,
data: Vec<u8>,
) -> Result<Value, Error> {
self.h2.upload("POST", path, param, content_type, data).await
self.h2
.upload("POST", path, param, content_type, data)
.await
}
pub async fn send_upload_request(
@ -129,9 +144,13 @@ impl BackupWriter {
content_type: &str,
data: Vec<u8>,
) -> Result<h2::client::ResponseFuture, Error> {
let request = H2Client::request_builder("localhost", method, path, param, Some(content_type)).unwrap();
let response_future = self.h2.send_request(request, Some(bytes::Bytes::from(data.clone()))).await?;
let request =
H2Client::request_builder("localhost", method, path, param, Some(content_type))
.unwrap();
let response_future = self
.h2
.send_request(request, Some(bytes::Bytes::from(data.clone())))
.await?;
Ok(response_future)
}
@ -171,7 +190,16 @@ impl BackupWriter {
let csum = openssl::sha::sha256(&raw_data);
let param = json!({"encoded-size": raw_data.len(), "file-name": file_name });
let size = raw_data.len() as u64;
let _value = self.h2.upload("POST", "blob", Some(param), "application/octet-stream", raw_data).await?;
let _value = self
.h2
.upload(
"POST",
"blob",
Some(param),
"application/octet-stream",
raw_data,
)
.await?;
Ok(BackupStats { size, csum })
}
@ -184,7 +212,9 @@ impl BackupWriter {
let blob = match (options.encrypt, &self.crypt_config) {
(false, _) => DataBlob::encode(&data, None, options.compress)?,
(true, None) => bail!("requested encryption without a crypt config"),
(true, Some(crypt_config)) => DataBlob::encode(&data, Some(crypt_config), options.compress)?,
(true, Some(crypt_config)) => {
DataBlob::encode(&data, Some(crypt_config), options.compress)?
}
};
let raw_data = blob.into_inner();
@ -192,7 +222,16 @@ impl BackupWriter {
let csum = openssl::sha::sha256(&raw_data);
let param = json!({"encoded-size": size, "file-name": file_name });
let _value = self.h2.upload("POST", "blob", Some(param), "application/octet-stream", raw_data).await?;
let _value = self
.h2
.upload(
"POST",
"blob",
Some(param),
"application/octet-stream",
raw_data,
)
.await?;
Ok(BackupStats { size, csum })
}
@ -202,7 +241,6 @@ impl BackupWriter {
file_name: &str,
options: UploadOptions,
) -> Result<BackupStats, Error> {
let src_path = src_path.as_ref();
let mut file = tokio::fs::File::open(src_path)
@ -215,7 +253,8 @@ impl BackupWriter {
.await
.map_err(|err| format_err!("unable to read file {:?} - {}", src_path, err))?;
self.upload_blob_from_data(contents, file_name, options).await
self.upload_blob_from_data(contents, file_name, options)
.await
}
pub async fn upload_stream(
@ -245,72 +284,118 @@ impl BackupWriter {
// try, but ignore errors
match archive_type(archive_name) {
Ok(ArchiveType::FixedIndex) => {
let _ = self.download_previous_fixed_index(archive_name, &manifest, known_chunks.clone()).await;
let _ = self
.download_previous_fixed_index(
archive_name,
&manifest,
known_chunks.clone(),
)
.await;
}
Ok(ArchiveType::DynamicIndex) => {
let _ = self.download_previous_dynamic_index(archive_name, &manifest, known_chunks.clone()).await;
let _ = self
.download_previous_dynamic_index(
archive_name,
&manifest,
known_chunks.clone(),
)
.await;
}
_ => { /* do nothing */ }
}
}
let wid = self.h2.post(&index_path, Some(param)).await?.as_u64().unwrap();
let wid = self
.h2
.post(&index_path, Some(param))
.await?
.as_u64()
.unwrap();
let (chunk_count, chunk_reused, size, size_reused, duration, csum) =
Self::upload_chunk_info_stream(
let upload_stats = Self::upload_chunk_info_stream(
self.h2.clone(),
wid,
stream,
&prefix,
known_chunks.clone(),
if options.encrypt { self.crypt_config.clone() } else { None },
if options.encrypt {
self.crypt_config.clone()
} else {
None
},
options.compress,
self.verbose,
)
.await?;
let uploaded = size - size_reused;
let vsize_h: HumanByte = size.into();
let size_dirty = upload_stats.size - upload_stats.size_reused;
let size: HumanByte = upload_stats.size.into();
let archive = if self.verbose {
archive_name.to_string()
} else {
crate::tools::format::strip_server_file_extension(archive_name)
};
if archive_name != CATALOG_NAME {
let speed: HumanByte = ((uploaded * 1_000_000) / (duration.as_micros() as usize)).into();
let uploaded: HumanByte = uploaded.into();
println!("{}: had to upload {} of {} in {:.2}s, average speed {}/s).", archive, uploaded, vsize_h, duration.as_secs_f64(), speed);
let speed: HumanByte =
((size_dirty * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into();
let size_dirty: HumanByte = size_dirty.into();
let size_compressed: HumanByte = upload_stats.size_compressed.into();
println!(
"{}: had to backup {} of {} (compressed {}) in {:.2}s",
archive,
size_dirty,
size,
size_compressed,
upload_stats.duration.as_secs_f64()
);
println!("{}: average backup speed: {}/s", archive, speed);
} else {
println!("Uploaded backup catalog ({})", vsize_h);
println!("Uploaded backup catalog ({})", size);
}
if size_reused > 0 && size > 1024*1024 {
let reused_percent = size_reused as f64 * 100. / size as f64;
let reused: HumanByte = size_reused.into();
println!("{}: backup was done incrementally, reused {} ({:.1}%)", archive, reused, reused_percent);
if upload_stats.size_reused > 0 && upload_stats.size > 1024 * 1024 {
let reused_percent = upload_stats.size_reused as f64 * 100. / upload_stats.size as f64;
let reused: HumanByte = upload_stats.size_reused.into();
println!(
"{}: backup was done incrementally, reused {} ({:.1}%)",
archive, reused, reused_percent
);
}
if self.verbose && chunk_count > 0 {
println!("{}: Reused {} from {} chunks.", archive, chunk_reused, chunk_count);
println!("{}: Average chunk size was {}.", archive, HumanByte::from(size/chunk_count));
println!("{}: Average time per request: {} microseconds.", archive, (duration.as_micros())/(chunk_count as u128));
if self.verbose && upload_stats.chunk_count > 0 {
println!(
"{}: Reused {} from {} chunks.",
archive, upload_stats.chunk_reused, upload_stats.chunk_count
);
println!(
"{}: Average chunk size was {}.",
archive,
HumanByte::from(upload_stats.size / upload_stats.chunk_count)
);
println!(
"{}: Average time per request: {} microseconds.",
archive,
(upload_stats.duration.as_micros()) / (upload_stats.chunk_count as u128)
);
}
let param = json!({
"wid": wid ,
"chunk-count": chunk_count,
"size": size,
"csum": proxmox::tools::digest_to_hex(&csum),
"chunk-count": upload_stats.chunk_count,
"size": upload_stats.size,
"csum": proxmox::tools::digest_to_hex(&upload_stats.csum),
});
let _value = self.h2.post(&close_path, Some(param)).await?;
Ok(BackupStats {
size: size as u64,
csum,
size: upload_stats.size as u64,
csum: upload_stats.csum,
})
}
fn response_queue(verbose: bool) -> (
fn response_queue(
verbose: bool,
) -> (
mpsc::Sender<h2::client::ResponseFuture>,
oneshot::Receiver<Result<(), Error>>
oneshot::Receiver<Result<(), Error>>,
) {
let (verify_queue_tx, verify_queue_rx) = mpsc::channel(100);
let (verify_result_tx, verify_result_rx) = oneshot::channel();
@ -336,12 +421,16 @@ impl BackupWriter {
response
.map_err(Error::from)
.and_then(H2Client::h2api_response)
.map_ok(move |result| if verbose { println!("RESPONSE: {:?}", result) })
.map_ok(move |result| {
if verbose {
println!("RESPONSE: {:?}", result)
}
})
.map_err(|err| format_err!("pipelined request failed: {}", err))
})
.map(|result| {
let _ignore_closed_channel = verify_result_tx.send(result);
})
}),
);
(verify_queue_tx, verify_result_rx)
@ -418,9 +507,8 @@ impl BackupWriter {
&self,
archive_name: &str,
manifest: &BackupManifest,
known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
) -> Result<FixedIndexReader, Error> {
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
@ -428,10 +516,13 @@ impl BackupWriter {
.open("/tmp")?;
let param = json!({ "archive-name": archive_name });
self.h2.download("previous", Some(param), &mut tmpfile).await?;
self.h2
.download("previous", Some(param), &mut tmpfile)
.await?;
let index = FixedIndexReader::new(tmpfile)
.map_err(|err| format_err!("unable to read fixed index '{}' - {}", archive_name, err))?;
let index = FixedIndexReader::new(tmpfile).map_err(|err| {
format_err!("unable to read fixed index '{}' - {}", archive_name, err)
})?;
// Note: do not use values stored in the index (not trusted) - instead, compute them again
let (csum, size) = index.compute_csum();
manifest.verify_file(archive_name, &csum, size)?;
@ -443,7 +534,11 @@ impl BackupWriter {
}
if self.verbose {
println!("{}: known chunks list length is {}", archive_name, index.index_count());
println!(
"{}: known chunks list length is {}",
archive_name,
index.index_count()
);
}
Ok(index)
@ -453,9 +548,8 @@ impl BackupWriter {
&self,
archive_name: &str,
manifest: &BackupManifest,
known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
) -> Result<DynamicIndexReader, Error> {
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
@ -463,10 +557,13 @@ impl BackupWriter {
.open("/tmp")?;
let param = json!({ "archive-name": archive_name });
self.h2.download("previous", Some(param), &mut tmpfile).await?;
self.h2
.download("previous", Some(param), &mut tmpfile)
.await?;
let index = DynamicIndexReader::new(tmpfile)
.map_err(|err| format_err!("unable to read dynmamic index '{}' - {}", archive_name, err))?;
let index = DynamicIndexReader::new(tmpfile).map_err(|err| {
format_err!("unable to read dynmamic index '{}' - {}", archive_name, err)
})?;
// Note: do not use values stored in the index (not trusted) - instead, compute them again
let (csum, size) = index.compute_csum();
manifest.verify_file(archive_name, &csum, size)?;
@ -478,7 +575,11 @@ impl BackupWriter {
}
if self.verbose {
println!("{}: known chunks list length is {}", archive_name, index.index_count());
println!(
"{}: known chunks list length is {}",
archive_name,
index.index_count()
);
}
Ok(index)
@ -487,23 +588,29 @@ impl BackupWriter {
/// Retrieve backup time of last backup
pub async fn previous_backup_time(&self) -> Result<Option<i64>, Error> {
let data = self.h2.get("previous_backup_time", None).await?;
serde_json::from_value(data)
.map_err(|err| format_err!("Failed to parse backup time value returned by server - {}", err))
serde_json::from_value(data).map_err(|err| {
format_err!(
"Failed to parse backup time value returned by server - {}",
err
)
})
}
/// Download backup manifest (index.json) of last backup
pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {
let mut raw_data = Vec::with_capacity(64 * 1024);
let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
self.h2.download("previous", Some(param), &mut raw_data).await?;
self.h2
.download("previous", Some(param), &mut raw_data)
.await?;
let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
// no expected digest available
let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref), None)?;
let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
let manifest =
BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
Ok(manifest)
}
@ -517,12 +624,11 @@ impl BackupWriter {
wid: u64,
stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
prefix: &str,
known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
crypt_config: Option<Arc<CryptConfig>>,
compress: bool,
verbose: bool,
) -> impl Future<Output = Result<(usize, usize, usize, usize, std::time::Duration, [u8; 32]), Error>> {
) -> impl Future<Output = Result<UploadStats, Error>> {
let total_chunks = Arc::new(AtomicUsize::new(0));
let total_chunks2 = total_chunks.clone();
let known_chunk_count = Arc::new(AtomicUsize::new(0));
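
The wide six-element result tuple gives way to a named UploadStats struct. Its likely shape, inferred from the construction site near the end of this function (field names from this diff, types from the old tuple, not copied from the source):

use std::time::Duration;

// Inferred shape of the new return type; fields as constructed
// further down in this diff.
struct UploadStats {
    chunk_count: usize,
    chunk_reused: usize,
    size: usize,
    size_reused: usize,
    size_compressed: usize,
    duration: Duration,
    csum: [u8; 32],
}
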
@@ -530,6 +636,8 @@ impl BackupWriter {
let stream_len = Arc::new(AtomicUsize::new(0));
let stream_len2 = stream_len.clone();
let compressed_stream_len = Arc::new(AtomicU64::new(0));
let compressed_stream_len2 = compressed_stream_len.clone();
let reused_len = Arc::new(AtomicUsize::new(0));
let reused_len2 = reused_len.clone();
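
The new compressed_stream_len counter follows the same two-handle pattern as the counters around it: one Arc clone is moved into the stream closures, the other is read once the stream completes. A self-contained sketch of the pattern:

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

fn main() {
    // One handle moves into the worker closure, the other stays
    // behind for the final read-out.
    let counter = Arc::new(AtomicU64::new(0));
    let counter2 = counter.clone();
    let record = move |n: u64| {
        counter2.fetch_add(n, Ordering::SeqCst);
    };
    record(42);
    assert_eq!(counter.load(Ordering::SeqCst), 42);
}
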
@@ -547,14 +655,12 @@ impl BackupWriter {
stream
.and_then(move |data| {
let chunk_len = data.len();
total_chunks.fetch_add(1, Ordering::SeqCst);
let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;
let mut chunk_builder = DataChunkBuilder::new(data.as_ref())
.compress(compress);
let mut chunk_builder = DataChunkBuilder::new(data.as_ref()).compress(compress);
if let Some(ref crypt_config) = crypt_config {
chunk_builder = chunk_builder.crypt_config(crypt_config);
@@ -568,7 +674,9 @@ impl BackupWriter {
let chunk_end = offset + chunk_len as u64;
if !is_fixed_chunk_size { csum.update(&chunk_end.to_le_bytes()); }
if !is_fixed_chunk_size {
csum.update(&chunk_end.to_le_bytes());
}
csum.update(digest);
let chunk_is_known = known_chunks.contains(digest);
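
The reformatted branch above encodes a detail of the index checksum: for dynamic (variable-size) indexes each chunk's end offset is hashed in little-endian form before its digest, while fixed indexes hash digests only, since offsets follow from the chunk size. A sketch, assuming csum is an openssl SHA-256 state as used elsewhere in this codebase:

use openssl::sha::Sha256;

// Sketch of the per-chunk checksum update shown above.
fn update_index_csum(
    csum: &mut Sha256,
    chunk_end: u64,
    digest: &[u8; 32],
    fixed_chunk_size: bool,
) {
    if !fixed_chunk_size {
        // dynamic index: the end offset is part of the checksum
        csum.update(&chunk_end.to_le_bytes());
    }
    csum.update(digest);
}
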
@@ -577,16 +685,17 @@ impl BackupWriter {
reused_len.fetch_add(chunk_len, Ordering::SeqCst);
future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
} else {
let compressed_stream_len2 = compressed_stream_len.clone();
known_chunks.insert(*digest);
future::ready(chunk_builder
.build()
.map(move |(chunk, digest)| MergedChunkInfo::New(ChunkInfo {
future::ready(chunk_builder.build().map(move |(chunk, digest)| {
compressed_stream_len2.fetch_add(chunk.raw_size(), Ordering::SeqCst);
MergedChunkInfo::New(ChunkInfo {
chunk,
digest,
chunk_len: chunk_len as u64,
offset,
})
}))
)
}
})
.merge_known_chunks()
@@ -614,20 +723,28 @@ impl BackupWriter {
});
let ct = "application/octet-stream";
let request = H2Client::request_builder("localhost", "POST", &upload_chunk_path, Some(param), Some(ct)).unwrap();
let request = H2Client::request_builder(
"localhost",
"POST",
&upload_chunk_path,
Some(param),
Some(ct),
)
.unwrap();
let upload_data = Some(bytes::Bytes::from(chunk_data));
let new_info = MergedChunkInfo::Known(vec![(offset, digest)]);
future::Either::Left(h2
.send_request(request, upload_data)
.and_then(move |response| async move {
future::Either::Left(h2.send_request(request, upload_data).and_then(
move |response| async move {
upload_queue
.send((new_info, Some(response)))
.await
.map_err(|err| format_err!("failed to send to upload queue: {}", err))
.map_err(|err| {
format_err!("failed to send to upload queue: {}", err)
})
)
},
))
} else {
future::Either::Right(async move {
upload_queue
@@ -637,31 +754,37 @@ impl BackupWriter {
})
}
})
.then(move |result| async move {
upload_result.await?.and(result)
}.boxed())
.then(move |result| async move { upload_result.await?.and(result) }.boxed())
.and_then(move |_| {
let duration = start_time.elapsed();
let total_chunks = total_chunks2.load(Ordering::SeqCst);
let known_chunk_count = known_chunk_count2.load(Ordering::SeqCst);
let stream_len = stream_len2.load(Ordering::SeqCst);
let reused_len = reused_len2.load(Ordering::SeqCst);
let chunk_count = total_chunks2.load(Ordering::SeqCst);
let chunk_reused = known_chunk_count2.load(Ordering::SeqCst);
let size = stream_len2.load(Ordering::SeqCst);
let size_reused = reused_len2.load(Ordering::SeqCst);
let size_compressed = compressed_stream_len2.load(Ordering::SeqCst) as usize;
let mut guard = index_csum_2.lock().unwrap();
let csum = guard.take().unwrap().finish();
futures::future::ok((total_chunks, known_chunk_count, stream_len, reused_len, duration, csum))
futures::future::ok(UploadStats {
chunk_count,
chunk_reused,
size,
size_reused,
size_compressed,
duration,
csum,
})
})
}
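
The future::Either::Left/Right pair used in the upload branch above is the usual way to return one concrete future type from two differently-typed branches. A standalone illustration of the pattern (hypothetical code, not from this diff):

use futures::future::{self, Either};

// Both branches must resolve to the same Output type; Either erases
// the difference between the two concrete future types.
async fn maybe_upload(needs_upload: bool) -> Result<(), String> {
    let fut = if needs_upload {
        Either::Left(async {
            // ... send the request and await the response ...
            Ok(())
        })
    } else {
        Either::Right(future::ready(Ok(())))
    };
    fut.await
}
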
/// Upload speed test - prints result to stderr
pub async fn upload_speedtest(&self, verbose: bool) -> Result<f64, Error> {
let mut data = vec![];
// generate pseudo random byte sequence
for i in 0..1024*1024 {
for i in 0..1024 * 1024 {
for j in 0..4 {
let byte = ((i >> (j<<3))&0xff) as u8;
let byte = ((i >> (j << 3)) & 0xff) as u8;
data.push(byte);
}
}
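
The nested loop serializes the counter as four little-endian bytes per iteration; an equivalent and arguably clearer formulation:

// Equivalent to the nested loop above: append each u32 counter value
// as four little-endian bytes, yielding 4 MiB of deterministic data.
fn make_test_data() -> Vec<u8> {
    let mut data = Vec::with_capacity(4 * 1024 * 1024);
    for i in 0u32..1024 * 1024 {
        data.extend_from_slice(&i.to_le_bytes());
    }
    data
}
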
@@ -680,9 +803,15 @@ impl BackupWriter {
break;
}
if verbose { eprintln!("send test data ({} bytes)", data.len()); }
let request = H2Client::request_builder("localhost", "POST", "speedtest", None, None).unwrap();
let request_future = self.h2.send_request(request, Some(bytes::Bytes::from(data.clone()))).await?;
if verbose {
eprintln!("send test data ({} bytes)", data.len());
}
let request =
H2Client::request_builder("localhost", "POST", "speedtest", None, None).unwrap();
let request_future = self
.h2
.send_request(request, Some(bytes::Bytes::from(data.clone())))
.await?;
upload_queue.send(request_future).await?;
}
@@ -691,9 +820,16 @@ impl BackupWriter {
let _ = upload_result.await?;
eprintln!("Uploaded {} chunks in {} seconds.", repeat, start_time.elapsed().as_secs());
let speed = ((item_len*(repeat as usize)) as f64)/start_time.elapsed().as_secs_f64();
eprintln!("Time per request: {} microseconds.", (start_time.elapsed().as_micros())/(repeat as u128));
eprintln!(
"Uploaded {} chunks in {} seconds.",
repeat,
start_time.elapsed().as_secs()
);
let speed = ((item_len * (repeat as usize)) as f64) / start_time.elapsed().as_secs_f64();
eprintln!(
"Time per request: {} microseconds.",
(start_time.elapsed().as_micros()) / (repeat as u128)
);
Ok(speed)
}


@@ -13,6 +13,10 @@ use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
use crate::backup::CatalogWriter;
use crate::tools::{
StdChannelWriter,
TokioWriterAdapter,
};
/// Stream implementation to encode and upload .pxar archives.
///
@@ -45,10 +49,10 @@ impl PxarBackupStream {
let error = Arc::new(Mutex::new(None));
let error2 = Arc::clone(&error);
let handler = async move {
let writer = std::io::BufWriter::with_capacity(
let writer = TokioWriterAdapter::new(std::io::BufWriter::with_capacity(
buffer_size,
crate::tools::StdChannelWriter::new(tx),
);
StdChannelWriter::new(tx),
));
let verbose = options.verbose;
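
StdChannelWriter and the new TokioWriterAdapter are now imported explicitly; the adapter wraps the blocking BufWriter so the synchronous pxar encoder can run without stalling the async executor, while the channel writer forwards buffers to the async side. A minimal sketch of the channel-writer idea, assuming the common pattern (the actual proxmox types may differ):

use std::io::{self, Write};
use std::sync::mpsc::SyncSender;

// Sketch: a std::io::Write that hands each buffer to a channel,
// letting a blocking producer feed an async consumer on the other end.
struct ChannelWriter(SyncSender<Vec<u8>>);

impl Write for ChannelWriter {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.0
            .send(buf.to_vec())
            .map_err(|_| io::Error::new(io::ErrorKind::BrokenPipe, "receiver dropped"))?;
        Ok(buf.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
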

Some files were not shown because too many files have changed in this diff.