Compare commits
170 Commits
7397f4a390
8317873c06
deef63699e
c6e07769e9
423df9b1f4
c879e5af11
63d9aca96f
c3b1da9e41
46388e6aef
484d439a7c
ab6615134c
b1149ebb36
1bfdae7933
4f09d31085
58d73ddb1d
6b809ff59b
afe08d2755
a7bc5d4eaf
97cd0a2a6d
49a92084a9
9bdeecaee4
843880f008
a6ed5e1273
74f94d0678
946c3e8a81
7b212c1f79
3b2046d263
1ffe030123
5255e641fa
c86b6f40d7
5a718dce17
1b32750644
5aa103c3c3
fd3f690104
24b638bd9f
9624c5eecb
503dd339a8
36ea5df444
dce9dd6f70
88e28e15e4
399e48a1ed
7ae571e7cb
4264c5023b
82b7adf90b
71c4a3138f
52991f239f
3435f5491b
aafe8609e5
a8d69fcf05
1e68497c03
74fc844787
4cda7603c4
11e1e27a42
4ea831bfa1
c1d7d708d4
3fa2b983c1
a1e9c05738
934deeff2d
c162df60c8
98161fddb5
be614c625f
87c4cb7419
93bb51fe7e
713b66b6ed
77bd2a469c
97af919530
c91602316b
a13573c24a
02543a5c7f
42b68f72e6
664d8a2765
e6263c2662
ae197dda23
4c116bafb8
df30017ff8
3f3ae19d63
72dc68323c
593f917742
639419b049
c5ac2b9ddd
81f293513e
8b5f72b176
f23f75433f
6d6b4e72d3
e434258592
3dc1a2d5b6
5d95558bae
882c082369
9a38fa29c2
14f6c9cb8b
2d55beeca0
9238cdf50d
5d30f03826
14263ef989
e7cb4dc50d
27d864210a
f667f49dab
866c556faf
90d515c97d
4dbe129284
747c3bc087
c23e257c5a
16a18dadba
5f76ac37b5
d74edc3d89
2f57a433b1
df7f04364b
98c259b4c1
799b3d88bc
db22e6b270
16f0afbfb5
d3d566f7bd
c96b0de48f
2ce159343b
9e496ff6f1
8819d1f2f5
0f9218079a
1cafbdc70d
a3eb7b2cea
d9b8e2c795
4bd2a9e42d
cef03f4149
eeb19aeb2d
6c96ec418d
5e4b32706c
30c3c5d66c
e51be33807
70030b43d0
724de093dd
ff86ef00a7
912b3f5bc9
a4acb6ef84
d7ee07d838
53705acece
c8fff67d88
9fa55e09a7
e443902583
32dc4c4604
f39a900722
1fc82c41f2
d2b0c78e23
adfdc36936
d8594d87f1
f66f537da9
d44185c4a1
d53fbe2474
95bda2f25d
c9756b40d1
8cd29fb24a
505c5f0f76
2aaae9705e
8aa67ee758
3865e27e96
f6c6e09a8a
71282dd988
80db161e05
be10cdb122
7fde1a71ca
a83674ad48
02f82148cf
39f18b30b6
69d970a658
6d55603dcc
3e395378bc
bccdc5fa04
0bf7ba6c92
e6b599aa6c
d757021f4c
ee15af6bb8
3da9b7e0dd
Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.8.9"
+version = "0.8.14"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
@@ -39,11 +39,11 @@ pam-sys = "0.5"
 percent-encoding = "2.1"
 pin-utils = "0.1.0"
 pathpatterns = "0.1.2"
-proxmox = { version = "0.2.1", features = [ "sortable-macro", "api-macro", "websocket" ] }
+proxmox = { version = "0.3.4", features = [ "sortable-macro", "api-macro", "websocket" ] }
 #proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
 proxmox-fuse = "0.1.0"
-pxar = { version = "0.2.1", features = [ "tokio-io", "futures-io" ] }
+pxar = { version = "0.6.0", features = [ "tokio-io", "futures-io" ] }
 #pxar = { path = "../pxar", features = [ "tokio-io", "futures-io" ] }
 regex = "1.2"
 rustyline = "6"
Makefile (2 lines changed)
@@ -69,10 +69,12 @@ doc:
 .PHONY: build
 build:
 	rm -rf build
+	rm -f debian/control
 	debcargo package --config debian/debcargo.toml --changelog-ready --no-overlay-write-back --directory build proxmox-backup $(shell dpkg-parsechangelog -l debian/changelog -SVersion | sed -e 's/-.*//')
 	sed -e '1,/^$$/ ! d' build/debian/control > build/debian/control.src
 	cat build/debian/control.src build/debian/control.in > build/debian/control
 	rm build/debian/control.in build/debian/control.src
+	cp build/debian/control debian/control
 	rm build/Cargo.lock
 	find build/debian -name "*.hint" -delete
 	$(foreach i,$(SUBDIRS), \
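The ``sed -e '1,/^$$/ ! d'`` step (the doubled ``$$`` is just Make escaping for a literal ``$``) keeps only the first paragraph of the generated control file, i.e. the Source stanza up to and including the first blank line; ``control.in`` then supplies the binary package stanzas. A quick sketch of the effect, with a made-up two-stanza input:

  $ printf 'Source: rust-proxmox-backup\nSection: admin\n\nPackage: proxmox-backup-server\n' > control.sample
  $ sed -e '1,/^$/ ! d' control.sample
  Source: rust-proxmox-backup
  Section: admin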
debian/changelog (124 lines changed, vendored)
@@ -1,3 +1,127 @@
+rust-proxmox-backup (0.8.14-1) unstable; urgency=medium
+
+  * verify speed up: use separate IO thread, use datastore-wide cache (instead
+    of per group)
+
+  * ui: datastore content: improve encrypted column
+
+  * ui: datastore content: show more granular verify state, especially for
+    backup group rows
+
+  * verify: log progress in percent
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 02 Sep 2020 09:36:47 +0200
+
+rust-proxmox-backup (0.8.13-1) unstable; urgency=medium
+
+  * improve and add to documentation
+
+  * save last verify result in snapshot manifest and show it in the GUI
+
+  * gc: use human readable units for summary in task log
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 27 Aug 2020 16:12:07 +0200
+
+rust-proxmox-backup (0.8.12-1) unstable; urgency=medium
+
+  * verify: speedup - only verify chunks once
+
+  * verify: sort backup groups
+
+  * bump pxar dep to 0.4.0
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 25 Aug 2020 08:55:52 +0200
+
+rust-proxmox-backup (0.8.11-1) unstable; urgency=medium
+
+  * improve sync jobs, allow to stop them and better logging
+
+  * fix #2926: make network interfaces parser more flexible
+
+  * fix #2904: zpool status: parse also those vdevs without READ/ẀRITE/...
+    statistics
+
+  * api2/node/services: turn service api calls into workers
+
+  * docs: add sections describing ACL related commands and describing
+    benchmarking
+
+  * docs: general grammar, wording and typo improvements
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 19 Aug 2020 19:20:03 +0200
+
+rust-proxmox-backup (0.8.10-1) unstable; urgency=medium
+
+  * ui: acl: add improved permission selector
+
+  * services: make reload safer and default to it in gui
+
+  * ui: rework DataStore content Panel
+
+  * ui: add search box to DataStore content
+
+  * ui: DataStoreContent: keep selection and expansion on reload
+
+  * upload_chunk: allow upload of empty blobs
+
+  * fix #2856: also check whole device for device mapper
+
+  * ui: fix error when reloading DataStoreContent
+
+  * ui: fix in-progress snapshots always showing as "Encrypted"
+
+  * update to pxar 0.3 to support negative timestamps
+
+  * fix #2873: if --pattern is used, default to not extracting
+
+  * finish_backup: test/verify manifest at server side
+
+  * finish_backup: add chunk_upload_stats to manifest
+
+  * src/api2/admin/datastore.rs: add API to get/set Notes for backus
+
+  * list_snapshots: Returns new "comment" property (first line from notes)
+
+  * pxar: create: attempt to use O_NOATIME
+
+  * systemd/time: fix weekday wrapping on month
+
+  * pxar: better error handling on extract
+
+  * pxar/extract: fixup path stack for errors
+
+  * datastore: allow browsing signed pxar files
+
+  * GC: use time pre phase1 to calculate min_atime in phase2
+
+  * gui: user: fix #2898 add dialog to set password
+
+  * fix #2909: handle missing chunks gracefully in garbage collection
+
+  * finish_backup: mark backup as finished only after checks have passed
+
+  * fix: master-key: upload RSA encoded key with backup
+
+  * admin-guide: add section explaining master keys
+
+  * backup: only allow finished backups as base snapshot
+
+  * datastore api: only decode unencrypted indices
+
+  * datastore api: verify blob/index csum from manifest
+
+  * sync, blobs and chunk readers: add more checks and verification
+
+  * verify: add more checks, don't fail on first error
+
+  * mark signed manifests as such
+
+  * backup/prune/forget: improve locking
+
+  * backup: ensure base snapshots are still available after backup
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 11 Aug 2020 15:37:29 +0200
+
 rust-proxmox-backup (0.8.9-1) unstable; urgency=medium
 
   * improve termprocy (console) behavior on updating proxmox-backup-server and
debian/control (132 lines added, vendored, new file)
@@ -0,0 +1,132 @@
+Source: rust-proxmox-backup
+Section: admin
+Priority: optional
+Build-Depends: debhelper (>= 11),
+ dh-cargo (>= 18),
+ cargo:native,
+ rustc:native,
+ libstd-rust-dev,
+ librust-anyhow-1+default-dev,
+ librust-apt-pkg-native-0.3+default-dev (>= 0.3.1-~~),
+ librust-base64-0.12+default-dev,
+ librust-bitflags-1+default-dev (>= 1.2.1-~~),
+ librust-bytes-0.5+default-dev,
+ librust-chrono-0.4+default-dev,
+ librust-crc32fast-1+default-dev,
+ librust-endian-trait-0.6+arrays-dev,
+ librust-endian-trait-0.6+default-dev,
+ librust-futures-0.3+default-dev,
+ librust-h2-0.2+default-dev,
+ librust-h2-0.2+stream-dev,
+ librust-handlebars-3+default-dev,
+ librust-http-0.2+default-dev,
+ librust-hyper-0.13+default-dev,
+ librust-lazy-static-1+default-dev (>= 1.4-~~),
+ librust-libc-0.2+default-dev,
+ librust-log-0.4+default-dev,
+ librust-nix-0.16+default-dev,
+ librust-nom-5+default-dev (>= 5.1-~~),
+ librust-num-traits-0.2+default-dev,
+ librust-once-cell-1+default-dev (>= 1.3.1-~~),
+ librust-openssl-0.10+default-dev,
+ librust-pam-0.7+default-dev,
+ librust-pam-sys-0.5+default-dev,
+ librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
+ librust-percent-encoding-2+default-dev (>= 2.1-~~),
+ librust-pin-utils-0.1+default-dev,
+ librust-proxmox-0.3+api-macro-dev (>= 0.3.4-~~),
+ librust-proxmox-0.3+default-dev (>= 0.3.4-~~),
+ librust-proxmox-0.3+sortable-macro-dev (>= 0.3.4-~~),
+ librust-proxmox-0.3+websocket-dev (>= 0.3.4-~~),
+ librust-proxmox-fuse-0.1+default-dev,
+ librust-pxar-0.6+default-dev,
+ librust-pxar-0.6+futures-io-dev,
+ librust-pxar-0.6+tokio-io-dev,
+ librust-regex-1+default-dev (>= 1.2-~~),
+ librust-rustyline-6+default-dev,
+ librust-serde-1+default-dev,
+ librust-serde-1+derive-dev,
+ librust-serde-json-1+default-dev,
+ librust-siphasher-0.3+default-dev,
+ librust-syslog-4+default-dev,
+ librust-tokio-0.2+blocking-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+default-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+dns-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+fs-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+io-util-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+macros-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+process-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+rt-threaded-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+signal-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+stream-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+tcp-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+time-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+uds-dev (>= 0.2.9-~~),
+ librust-tokio-openssl-0.4+default-dev,
+ librust-tokio-util-0.3+codec-dev,
+ librust-tokio-util-0.3+default-dev,
+ librust-tower-service-0.3+default-dev,
+ librust-udev-0.4+default-dev | librust-udev-0.3+default-dev,
+ librust-url-2+default-dev (>= 2.1-~~),
+ librust-walkdir-2+default-dev,
+ librust-xdg-2+default-dev (>= 2.2-~~),
+ librust-zstd-0.4+bindgen-dev,
+ librust-zstd-0.4+default-dev,
+ libacl1-dev,
+ libfuse3-dev,
+ libsystemd-dev,
+ uuid-dev,
+ debhelper (>= 12~),
+ bash-completion,
+ python3-docutils,
+ python3-pygments,
+ rsync,
+ fonts-dejavu-core <!nodoc>,
+ fonts-lato <!nodoc>,
+ fonts-open-sans <!nodoc>,
+ graphviz <!nodoc>,
+ latexmk <!nodoc>,
+ python3-sphinx <!nodoc>,
+ texlive-fonts-extra <!nodoc>,
+ texlive-fonts-recommended <!nodoc>,
+ texlive-xetex <!nodoc>,
+ xindy <!nodoc>
+Maintainer: Proxmox Support Team <support@proxmox.com>
+Standards-Version: 4.4.1
+Vcs-Git:
+Vcs-Browser:
+Homepage: https://www.proxmox.com
+
+Package: proxmox-backup-server
+Architecture: any
+Depends: fonts-font-awesome,
+ libjs-extjs (>= 6.0.1),
+ libzstd1 (>= 1.3.8),
+ lvm2,
+ proxmox-backup-docs,
+ proxmox-mini-journalreader,
+ proxmox-widget-toolkit (>= 2.2-4),
+ pve-xtermjs (>= 4.7.0-1),
+ smartmontools,
+ ${misc:Depends},
+ ${shlibs:Depends},
+Recommends: zfsutils-linux,
+Description: Proxmox Backup Server daemon with tools and GUI
+ This package contains the Proxmox Backup Server daemons and related
+ tools. This includes a web-based graphical user interface.
+
+Package: proxmox-backup-client
+Architecture: any
+Depends: ${misc:Depends}, ${shlibs:Depends}
+Description: Proxmox Backup Client tools
+ This package contains the Proxmox Backup client, which provides a
+ simple command line tool to create and restore backups.
+
+Package: proxmox-backup-docs
+Build-Profiles: <!nodoc>
+Section: doc
+Depends: libjs-extjs,
+ ${misc:Depends},
+Architecture: all
+Description: Proxmox Backup Documentation
+ This package contains the Proxmox Backup Documentation files.
debian/postinst (6 lines changed, vendored)
@@ -14,6 +14,12 @@ case "$1" in
         _dh_action=start
       fi
       deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
+
+      # FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
+      if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
+        echo "Fixing up termproxy user id in task log..."
+        flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active
+      fi
     ;;
 
   abort-upgrade|abort-remove|abort-deconfigure)
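The ``flock``-guarded ``sed`` rewrites old task-log entries whose termproxy user was recorded without a realm, appending ``@pam``. A sketch of the substitution on a single sample line (the surrounding task-entry fields are invented):

  $ echo ':termproxy::root: running' | sed 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /'
  :termproxy::root@pam: running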
@@ -146,6 +146,74 @@ when setting up the backup server.
 filesystem configuration from being supported for a datastore. For example,
 ``ext3`` as a whole or ``ext4`` with the ``dir_nlink`` feature manually disabled.
 
+Disk Management
+~~~~~~~~~~~~~~~
+
+Proxmox Backup Server comes with a set of disk utilities, which are
+accessed using the ``disk`` subcommand. This subcommand allows you to initialize
+disks, create various filesystems, and get information about the disks.
+
+To view the disks connected to the system, use the ``list`` subcommand of
+``disk``:
+
+.. code-block:: console
+
+  # proxmox-backup-manager disk list
+  ┌──────┬────────┬─────┬───────────┬─────────────┬───────────────┬─────────┬────────┐
+  │ name │ used   │ gpt │ disk-type │ size        │ model         │ wearout │ status │
+  ╞══════╪════════╪═════╪═══════════╪═════════════╪═══════════════╪═════════╪════════╡
+  │ sda  │ lvm    │ 1   │ hdd       │ 34359738368 │ QEMU_HARDDISK │ -       │ passed │
+  ├──────┼────────┼─────┼───────────┼─────────────┼───────────────┼─────────┼────────┤
+  │ sdb  │ unused │ 1   │ hdd       │ 68719476736 │ QEMU_HARDDISK │ -       │ passed │
+  ├──────┼────────┼─────┼───────────┼─────────────┼───────────────┼─────────┼────────┤
+  │ sdc  │ unused │ 1   │ hdd       │ 68719476736 │ QEMU_HARDDISK │ -       │ passed │
+  └──────┴────────┴─────┴───────────┴─────────────┴───────────────┴─────────┴────────┘
+
+To initialize a disk with a new GPT, use the ``initialize`` subcommand:
+
+.. code-block:: console
+
+  # proxmox-backup-manager disk initialize sdX
+
+You can create an ``ext4`` or ``xfs`` filesystem on a disk, using ``fs
+create``. The following command creates an ``ext4`` filesystem and passes the
+``--add-datastore`` parameter, in order to automatically create a datastore on
+the disk (in this case ``sdd``). This will create a datastore at the location
+``/mnt/datastore/store1``:
+
+.. code-block:: console
+
+  # proxmox-backup-manager disk fs create store1 --disk sdd --filesystem ext4 --add-datastore true
+  create datastore 'store1' on disk sdd
+  Percentage done: 1
+  ...
+  Percentage done: 99
+  TASK OK
+
+You can also create a ``zpool`` with various raid levels. The command below
+creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and mounts it
+on the root directory (default):
+
+.. code-block:: console
+
+  # proxmox-backup-manager disk zpool create zpool1 --devices sdb,sdc --raidlevel mirror
+  create Mirror zpool 'zpool1' on devices 'sdb,sdc'
+  # "zpool" "create" "-o" "ashift=12" "zpool1" "mirror" "sdb" "sdc"
+
+  TASK OK
+
+.. note::
+  You can also pass the ``--add-datastore`` parameter here, to automatically
+  create a datastore from the disk.
+
+You can use ``disk fs list`` and ``disk zpool list`` to keep track of your
+filesystems and zpools respectively.
+
+If a disk supports S.M.A.R.T. capability, and you have this enabled, you can
+display S.M.A.R.T. attributes using the command:
+
+.. code-block:: console
+
+  # proxmox-backup-manager disk smart-attributes sdX
+
 Datastore Configuration
 ~~~~~~~~~~~~~~~~~~~~~~~
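A minimal sketch of the two tracking commands mentioned above (invocation assumed to need no further arguments):

  # proxmox-backup-manager disk fs list
  # proxmox-backup-manager disk zpool list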
@@ -344,10 +412,10 @@ following roles exist:
   Disable Access - nothing is allowed.
 
 **Admin**
-  The Administrator can do anything.
+  Can do anything.
 
 **Audit**
-  An Auditor can view things, but is not allowed to change settings.
+  Can view things, but is not allowed to change settings.
 
 **DatastoreAdmin**
   Can do anything on datastores.
@@ -356,10 +424,10 @@ following roles exist:
   Can view datastore settings and list content. But
   is not allowed to read the actual data.
 
-**DataStoreReader**
+**DatastoreReader**
   Can Inspect datastore content and can do restores.
 
-**DataStoreBackup**
+**DatastoreBackup**
   Can backup and restore owned backups.
 
 **DatastorePowerUser**
@@ -374,6 +442,101 @@ following roles exist:
 **RemoteSyncOperator**
   Is allowed to read data from a remote.
 
+You can use the ``acl`` subcommand to manage and monitor user permissions. For
+example, the command below will add the user ``john@pbs`` as a
+**DatastoreAdmin** for the data store ``store1``, located at ``/backup/disk1/store1``:
+
+.. code-block:: console
+
+  # proxmox-backup-manager acl update /datastore/store1 DatastoreAdmin --userid john@pbs
+
+You can monitor the roles of each user using the following command:
+
+.. code-block:: console
+
+  # proxmox-backup-manager acl list
+  ┌──────────┬──────────────────┬───────────┬────────────────┐
+  │ ugid     │ path             │ propagate │ roleid         │
+  ╞══════════╪══════════════════╪═══════════╪════════════════╡
+  │ john@pbs │ /datastore/disk1 │ 1         │ DatastoreAdmin │
+  └──────────┴──────────────────┴───────────┴────────────────┘
+
+A single user can be assigned multiple permission sets for different data stores.
+
+.. Note::
+  Naming convention is important here. For data stores on the host,
+  you must use the convention ``/datastore/{storename}``. For example, to set
+  permissions for a data store mounted at ``/mnt/backup/disk4/store2``, you would use
+  ``/datastore/store2`` for the path. For remote stores, use the convention
+  ``/remote/{remote}/{storename}``, where ``{remote}`` signifies the name of the
+  remote (see `Remote` below) and ``{storename}`` is the name of the data store on
+  the remote.
+
+Network Management
+~~~~~~~~~~~~~~~~~~
+
+Proxmox Backup Server provides an interface for network configuration, through the
+``network`` subcommand. This allows you to carry out some basic network
+management tasks such as adding, configuring and removing network interfaces.
+
+To get a list of available interfaces, use the following command:
+
+.. code-block:: console
+
+  # proxmox-backup-manager network list
+  ┌───────┬────────┬───────────┬────────┬─────────┬───────────────────┬──────────────┬──────────────┐
+  │ name  │ type   │ autostart │ method │ method6 │ address           │ gateway      │ ports/slaves │
+  ╞═══════╪════════╪═══════════╪════════╪═════════╪═══════════════════╪══════════════╪══════════════╡
+  │ bond0 │ bond   │ 1         │ manual │         │                   │              │ ens18 ens19  │
+  ├───────┼────────┼───────────┼────────┼─────────┼───────────────────┼──────────────┼──────────────┤
+  │ ens18 │ eth    │ 1         │ manual │         │                   │              │              │
+  ├───────┼────────┼───────────┼────────┼─────────┼───────────────────┼──────────────┼──────────────┤
+  │ ens19 │ eth    │ 1         │ manual │         │                   │              │              │
+  ├───────┼────────┼───────────┼────────┼─────────┼───────────────────┼──────────────┼──────────────┤
+  │ vmbr0 │ bridge │ 1         │ static │         │ x.x.x.x/x         │ x.x.x.x      │ bond0        │
+  └───────┴────────┴───────────┴────────┴─────────┴───────────────────┴──────────────┴──────────────┘
+
+To add a new network interface, use the ``create`` subcommand with the relevant
+parameters. The following command shows a template for creating a new bridge:
+
+.. code-block:: console
+
+  # proxmox-backup-manager network create vmbr1 --autostart true --cidr x.x.x.x/x --gateway x.x.x.x --bridge_ports iface_name --type bridge
+
+You can make changes to the configuration of a network interface with the
+``update`` subcommand:
+
+.. code-block:: console
+
+  # proxmox-backup-manager network update vmbr1 --cidr y.y.y.y/y
+
+You can also remove a network interface:
+
+.. code-block:: console
+
+  # proxmox-backup-manager network remove vmbr1
+
+To view the changes made to the network configuration file, before committing
+them, use the command:
+
+.. code-block:: console
+
+  # proxmox-backup-manager network changes
+
+If you would like to cancel all changes at this point, you can do this using:
+
+.. code-block:: console
+
+  # proxmox-backup-manager network revert
+
+If you are happy with the changes and would like to write them into the
+configuration file, the command is:
+
+.. code-block:: console
+
+  # proxmox-backup-manager network reload
+
+You can also configure DNS settings using the ``dns`` subcommand of
+``proxmox-backup-manager``.
 
 :term:`Remote`
 ~~~~~~~~~~~~~~
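Following the naming convention from the note above, granting a role on a remote store would look along these lines (remote, store, and user names hypothetical):

  # proxmox-backup-manager acl update /remote/pbs2/store2 DatastoreReader --userid john@pbs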
@@ -432,6 +595,14 @@ provide it with a :term:`schedule` to run regularly. The
   └────────────┴───────┴────────┴──────────────┴───────────┴─────────┘
   # proxmox-backup-manager sync-job remove pbs2-local
 
+Garbage Collection
+~~~~~~~~~~~~~~~~~~
+
+You can monitor and run :ref:`garbage collection <garbage-collection>` on the
+Proxmox Backup Server using the ``garbage-collection`` subcommand of
+``proxmox-backup-manager``. You can use the ``start`` subcommand to manually start garbage
+collection on an entire data store and the ``status`` subcommand to see
+attributes relating to the :ref:`garbage collection <garbage-collection>`.
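A minimal sketch of those two subcommands (datastore name assumed):

  # proxmox-backup-manager garbage-collection start store1
  # proxmox-backup-manager garbage-collection status store1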
 
 Backup Client usage
 -------------------
@@ -543,7 +714,9 @@ This will prompt you for a password and then uploads a file archive named
 
 The ``--repository`` option can get quite long and is used by all
 commands. You can avoid having to enter this value by setting the
-environment variable ``PBS_REPOSITORY``.
+environment variable ``PBS_REPOSITORY``. Note that if you would like this to remain set
+over multiple sessions, you should instead add the below line to your
+``.bashrc`` file.
 
 .. code-block:: console
 
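Assuming a repository of the ``host:datastore`` form used elsewhere in the guide, that ``.bashrc`` line would look like:

  export PBS_REPOSITORY=backup-server:store1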
@@ -578,7 +751,7 @@ Excluding files/folders from a backup
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Sometimes it is desired to exclude certain files or folders from a backup archive.
-To tell the Proxmox backup client when and how to ignore files and directories,
+To tell the Proxmox Backup client when and how to ignore files and directories,
 place a text file called ``.pxarexclude`` in the filesystem hierarchy.
 Whenever the backup client encounters such a file in a directory, it interprets
 each line as glob match patterns for files and directories that are to be excluded
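A small ``.pxarexclude`` sketch (patterns invented; the ``!`` negation follows the pattern rules described in the pxar section further down):

  $ cat /path/to/source/.pxarexclude
  **/*.log
  !/var/log/important.log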
@@ -660,7 +833,7 @@ Restoring this backup will result in:
   . .. file2
 
 Encryption
-^^^^^^^^^^
+~~~~~~~~~~
 
 Proxmox Backup supports client-side encryption with AES-256 in GCM_
 mode. To set this up, you first need to create an encryption key:
@@ -677,6 +850,8 @@ extra protection, you can also create it without a password:
 
   # proxmox-backup-client key create /path/to/my-backup.key --kdf none
 
+Having created this key, it is now possible to create an encrypted backup, by
+passing the ``--keyfile`` parameter, with the path to the key file.
 
 .. code-block:: console
 
@@ -685,12 +860,97 @@ extra protection, you can also create it without a password:
   Encryption Key Password: **************
   ...
 
 .. Note:: If you do not specify the name of the backup key, the key will be
   created in the default location
   ``~/.config/proxmox-backup/encryption-key.json``. ``proxmox-backup-client``
   will also search this location by default, in case the ``--keyfile``
   parameter is not specified.
 
 You can avoid entering the passwords by setting the environment
 variables ``PBS_PASSWORD`` and ``PBS_ENCRYPTION_PASSWORD``.
 
-.. todo:: Explain master-key
+Using a master key to store and recover encryption keys
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You can also use ``proxmox-backup-client key`` to create an RSA public/private
+key pair, which can be used to store an encrypted version of the symmetric
+backup encryption key alongside each backup and recover it later.
+
+To set up a master key:
+
+1. Create an encryption key for the backup:
+
+   .. code-block:: console
+
+     # proxmox-backup-client key create
+     creating default key at: "~/.config/proxmox-backup/encryption-key.json"
+     Encryption Key Password: **********
+     ...
+
+   The resulting file will be saved to ``~/.config/proxmox-backup/encryption-key.json``.
+
+2. Create an RSA public/private key pair:
+
+   .. code-block:: console
+
+     # proxmox-backup-client key create-master-key
+     Master Key Password: *********
+     ...
+
+   This will create two files in your current directory, ``master-public.pem``
+   and ``master-private.pem``.
+
+3. Import the newly created ``master-public.pem`` public certificate, so that
+   ``proxmox-backup-client`` can find and use it upon backup.
+
+   .. code-block:: console
+
+     # proxmox-backup-client key import-master-pubkey /path/to/master-public.pem
+     Imported public master key to "~/.config/proxmox-backup/master-public.pem"
+
+4. With all these files in place, run a backup job:
+
+   .. code-block:: console
+
+     # proxmox-backup-client backup etc.pxar:/etc
+
+   The key will be stored in your backup, under the name ``rsa-encrypted.key``.
+
+   .. Note:: The ``--keyfile`` parameter can be excluded, if the encryption key
+     is in the default path. If you specified another path upon creation, you
+     must pass the ``--keyfile`` parameter.
+
+5. To test that everything worked, you can restore the key from the backup:
+
+   .. code-block:: console
+
+     # proxmox-backup-client restore /path/to/backup/ rsa-encrypted.key /path/to/target
+
+   .. Note:: You should not need an encryption key to extract this file. However, if
+     a key exists at the default location
+     (``~/.config/proxmox-backup/encryption-key.json``) the program will prompt
+     you for an encryption key password. Simply moving ``encryption-key.json``
+     out of this directory will fix this issue.
+
+6. Then, use the previously generated master key to decrypt the file:
+
+   .. code-block:: console
+
+     # openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out /path/to/target
+     Enter pass phrase for ./master-private.pem: *********
+
+7. The target file will now contain the encryption key information in plain
+   text. The success of this can be confirmed by passing the resulting ``json``
+   file, with the ``--keyfile`` parameter, when decrypting files from the backup.
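As a sketch of that final check, reusing the snapshot and paths from the surrounding examples, a restore decrypted with the recovered key might look like:

  # proxmox-backup-client restore host/elsa/2019-12-03T09:35:01Z root.pxar /target/path/ --keyfile /path/to/target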
+.. warning:: Without their key, backed up files will be inaccessible. Thus, you should
+  keep keys ordered and in a place that is separate from the contents being
+  backed up. It can happen, for example, that you back up an entire system, using
+  a key on that system. If the system then becomes inaccessable for any reason
+  and needs to be restored, this will not be possible as the encryption key will be
+  lost along with the broken system. In preparation for the worst case scenario,
+  you should consider keeping a paper copy of this key locked away in
+  a safe place.
 
 Restoring Data
 ~~~~~~~~~~~~~~
@@ -733,7 +993,7 @@ backup.
 
   # proxmox-backup-client restore host/elsa/2019-12-03T09:35:01Z root.pxar /target/path/
 
-To get the contents of any archive, you can restore the ``ìndex.json`` file in the
+To get the contents of any archive, you can restore the ``index.json`` file in the
 repository to the target path '-'. This will dump the contents to the standard output.
 
 .. code-block:: console
@@ -777,7 +1037,7 @@ For example:
 
 .. code-block:: console
 
-  pxar:/ > find etc/ **/*.txt --select
+  pxar:/ > find etc/**/*.txt --select
   "/etc/X11/rgb.txt"
   pxar:/ > list-selected
   etc/**/*.txt
@@ -815,8 +1075,8 @@ file archive as a read-only filesystem to a mountpoint on your host.
 
 .. code-block:: console
 
-  # proxmox-backup-client mount host/backup-client/2020-01-29T11:29:22Z root.pxar /mnt
-  # ls /mnt
+  # proxmox-backup-client mount host/backup-client/2020-01-29T11:29:22Z root.pxar /mnt/mountpoint
+  # ls /mnt/mountpoint
   bin dev home lib32 libx32 media opt root sbin sys usr
   boot etc lib lib64 lost+found mnt proc run srv tmp var
 
@@ -831,7 +1091,7 @@ To unmount the filesystem use the ``umount`` command on the mountpoint:
 
 .. code-block:: console
 
-  # umount /mnt
+  # umount /mnt/mountpoint
 
 Login and Logout
 ~~~~~~~~~~~~~~~~
@@ -874,8 +1134,8 @@ command:
   snapshot. They will be inaccessible and unrecoverable.
 
 
-The manual removal is sometimes required, but normally the prune
-command is used to systematically delete older backups. Prune lets
+Although manual removal is sometimes required, the ``prune``
+command is normally used to systematically delete older backups. Prune lets
 you specify which backup snapshots you want to keep. The
 following retention options are available:
 
@@ -995,6 +1255,42 @@ unused data blocks are removed.
 
 .. todo:: howto run garbage-collection at regular intervalls (cron)
 
+Benchmarking
+~~~~~~~~~~~~
+
+The backup client also comes with a benchmarking tool. This tool measures
+various metrics relating to compression and encryption speeds. You can run a
+benchmark using the ``benchmark`` subcommand of ``proxmox-backup-client``:
+
+.. code-block:: console
+
+  # proxmox-backup-client benchmark
+  Uploaded 656 chunks in 5 seconds.
+  Time per request: 7659 microseconds.
+  TLS speed: 547.60 MB/s
+  SHA256 speed: 585.76 MB/s
+  Compression speed: 1923.96 MB/s
+  Decompress speed: 7885.24 MB/s
+  AES256/GCM speed: 3974.03 MB/s
+  ┌───────────────────────────────────┬─────────────────────┐
+  │ Name                              │ Value               │
+  ╞═══════════════════════════════════╪═════════════════════╡
+  │ TLS (maximal backup upload speed) │ 547.60 MB/s (93%)   │
+  ├───────────────────────────────────┼─────────────────────┤
+  │ SHA256 checksum computation speed │ 585.76 MB/s (28%)   │
+  ├───────────────────────────────────┼─────────────────────┤
+  │ ZStd level 1 compression speed    │ 1923.96 MB/s (89%)  │
+  ├───────────────────────────────────┼─────────────────────┤
+  │ ZStd level 1 decompression speed  │ 7885.24 MB/s (98%)  │
+  ├───────────────────────────────────┼─────────────────────┤
+  │ AES256 GCM encryption speed       │ 3974.03 MB/s (104%) │
+  └───────────────────────────────────┴─────────────────────┘
+
+.. note:: The percentages given in the output table correspond to a
+  comparison against a Ryzen 7 2700X. The TLS test connects to the
+  local host, so there is no network involved.
+
+You can also pass the ``--output-format`` parameter to output stats in ``json``,
+rather than the default table format.
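For machine-readable output, the parameter mentioned above would be used along these lines (argument form assumed):

  # proxmox-backup-client benchmark --output-format json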
 
 .. _pve-integration:
 
@@ -13,7 +13,8 @@
 .. _Proxmox: https://www.proxmox.com
 .. _Proxmox Community Forum: https://forum.proxmox.com
 .. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
-.. _Proxmox Backup: https://pbs.proxmox.com/wiki/index.php/Main_Page // FIXME
+// FIXME
+.. _Proxmox Backup: https://pbs.proxmox.com/wiki/index.php/Main_Page
 .. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
 .. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
 .. _Rust: https://www.rust-lang.org/
@@ -19,9 +19,9 @@ for various management tasks such as disk management.
 The disk image (ISO file) provided by Proxmox includes a complete Debian system
 ("buster" for version 1.x) as well as all necessary packages for the `Proxmox Backup`_ server.
 
-The installer will guide you through the setup process and allows
+The installer will guide you through the setup process and allow
 you to partition the local disk(s), apply basic system configurations
-(e.g. timezone, language, network), and installs all required packages.
+(e.g. timezone, language, network), and install all required packages.
 The provided ISO will get you started in just a few minutes, and is the
 recommended method for new and existing users.
@@ -36,11 +36,11 @@ It includes the following:
 
 * The `Proxmox Backup`_ server installer, which partitions the local
   disk(s) with ext4, ext3, xfs or ZFS, and installs the operating
-  system.
+  system
 
 * Complete operating system (Debian Linux, 64-bit)
 
-* Our Linux kernel with ZFS support.
+* Our Linux kernel with ZFS support
 
 * Complete tool-set to administer backups and all necessary resources
||||
@ -54,7 +54,7 @@ Install `Proxmox Backup`_ server on Debian
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Proxmox ships as a set of Debian packages which can be installed on top of a
|
||||
standard Debian installation. After configuring the
|
||||
standard Debian installation. After configuring the
|
||||
:ref:`sysadmin_package_repositories`, you need to run:
|
||||
|
||||
.. code-block:: console
|
||||
@@ -76,12 +76,11 @@ does, please use the following:
 This will install all required packages, the Proxmox kernel with ZFS_
 support, and a set of common and useful packages.
 
-Installing `Proxmox Backup`_ on top of an existing Debian_ installation looks easy, but
-it presumes that the base system and local storage has been set up correctly.
-
-In general this is not trivial, especially when LVM_ or ZFS_ is used.
-
-The network configuration is completely up to you as well.
+.. caution:: Installing `Proxmox Backup`_ on top of an existing Debian_
+  installation looks easy, but it assumes that the base system and local
+  storage have been set up correctly. In general this is not trivial, especially
+  when LVM_ or ZFS_ is used. The network configuration is completely up to you
+  as well.
 
 .. note:: You can access the webinterface of the Proxmox Backup Server with
   your web browser, using HTTPS on port 8007. For example at
@@ -103,9 +102,9 @@ After configuring the
 server to store backups. Should the hypervisor server fail, you can
 still access the backups.
 
-.. note:: You can access the webinterface of the Proxmox Backup Server with
-  your web browser, using HTTPS on port 8007. For example at
-  ``https://<ip-or-dns-name>:8007``
+.. note::
+  You can access the webinterface of the Proxmox Backup Server with your web
+  browser, using HTTPS on port 8007. For example at ``https://<ip-or-dns-name>:8007``
 
 Client installation
 -------------------
@@ -15,15 +15,15 @@ encryption (AE_). Using :term:`Rust` as the implementation language guarantees high
 performance, low resource usage, and a safe, high-quality codebase.
 
 It features strong client-side encryption. Thus, it's possible to
-backup data to not fully trusted targets.
+backup data to targets that are not fully trusted.
 
 
 Architecture
 ------------
 
 Proxmox Backup Server uses a `client-server model`_. The server stores the
-backup data and provides an API to create backups and restore data. With the
-API it's also possible to manage disks and other server side resources.
+backup data and provides an API to create and manage data stores. With the
+API, it's also possible to manage disks and other server-side resources.
 
 The backup client uses this API to access the backed up data. With the command
 line tool ``proxmox-backup-client`` you can create backups and restore data.
@@ -32,7 +32,7 @@ For QEMU_ with `Proxmox Virtual Environment`_ we deliver an integrated client.
 A single backup is allowed to contain several archives. For example, when you
 backup a :term:`virtual machine`, each disk is stored as a separate archive
 inside that backup. The VM configuration itself is stored as an extra file.
-This way, it is easy to access and restore only important parts of the backup
+This way, it's easy to access and restore only important parts of the backup,
 without the need to scan the whole backup.
@@ -44,29 +44,29 @@ Main Features
   :term:`container`\ s.
 
 :Performance: The whole software stack is written in :term:`Rust`,
-  to provide high speed and memory efficiency.
+  in order to provide high speed and memory efficiency.
 
 :Deduplication: Periodic backups produce large amounts of duplicate
-  data. The deduplication layer avoids redundancy and minimizes the used
-  storage space.
+  data. The deduplication layer avoids redundancy and minimizes the storage
+  space used.
 
 :Incremental backups: Changes between backups are typically low. Reading and
-  sending only the delta reduces storage and network impact of backups.
+  sending only the delta reduces the storage and network impact of backups.
 
-:Data Integrity: The built-in `SHA-256`_ checksum algorithm assures the
-  accuracy and consistency of your backups.
+:Data Integrity: The built-in `SHA-256`_ checksum algorithm ensures accuracy and
+  consistency in your backups.
 
 :Remote Sync: It is possible to efficiently synchronize data to remote
   sites. Only deltas containing new data are transferred.
 
-:Compression: The ultra fast Zstandard_ compression is able to compress
+:Compression: The ultra-fast Zstandard_ compression is able to compress
   several gigabytes of data per second.
 
-:Encryption: Backups can be encrypted on the client-side using AES-256 in
+:Encryption: Backups can be encrypted on the client-side, using AES-256 in
   Galois/Counter Mode (GCM_) mode. This authenticated encryption (AE_) mode
   provides very high performance on modern hardware.
 
-:Web interface: Manage the Proxmox Backup Server with the integrated web-based
+:Web interface: Manage the Proxmox Backup Server with the integrated, web-based
   user interface.
 
 :Open Source: No secrets. Proxmox Backup Server is free and open-source
@@ -80,11 +80,11 @@ Reasons for Data Backup?
 ------------------------
 
 The main purpose of a backup is to protect against data loss. Data loss can be
-caused by faulty hardware but also by human error.
+caused by both faulty hardware and human error.
 
 A common mistake is to accidentally delete a file or folder which is still
-required. Virtualization can even amplify this problem; it easily happens that
-a whole virtual machine is deleted by just pressing a single button.
+required. Virtualization can even amplify this problem, as deleting a whole
+virtual machine can be as easy as pressing a single button.
 
 For administrators, backups can serve as a useful toolkit for temporarily
 storing data. For example, it is common practice to perform full backups before
@@ -104,16 +104,16 @@ Software Stack
 
 Proxmox Backup Server consists of multiple components:
 
-* server-daemon providing, among others, a RESTfull API, super-fast
+* A server-daemon providing, among other things, a RESTfull API, super-fast
   asynchronous tasks, lightweight usage statistic collection, scheduling
   events, strict separation of privileged and unprivileged execution
-  environments, ...
-* JavaScript management webinterface
-* management CLI tool for the server (`proxmox-backup-manager`)
-* client CLI tool (`proxmox-backup-client`) to access the server easily from
-  any `Linux amd64` environment.
+  environments
+* A JavaScript management web interface
+* A management CLI tool for the server (`proxmox-backup-manager`)
+* A client CLI tool (`proxmox-backup-client`) to access the server easily from
+  any `Linux amd64` environment
 
-Everything outside of the web interface is written in the Rust programming
+Aside from the web interface, everything is written in the Rust programming
 language.
 
 "The Rust programming language helps you write faster, more reliable software.
@@ -143,6 +143,7 @@ Mailing Lists
 
 Proxmox Backup Server is fully open-source and contributions are welcome! Here
 is the primary communication channel for developers:
+
 :Mailing list for developers: `PBS Development List`_
 
 Bug Tracker
@@ -3,8 +3,8 @@
 Debian Package Repositories
 ---------------------------
 
-All Debian based systems use APT_ as package management tool. The list of
-repositories is defined in ``/etc/apt/sources.list`` and ``.list`` files found
+All Debian based systems use APT_ as a package management tool. The lists of
+repositories are defined in ``/etc/apt/sources.list`` and the ``.list`` files found
 in the ``/etc/apt/sources.d/`` directory. Updates can be installed directly
 with the ``apt`` command line tool, or via the GUI.
@@ -26,11 +26,10 @@ update``.
 
 .. FIXME for 7.0: change security update suite to bullseye-security
 
-In addition, you need a package repositories from Proxmox to get the backup
-server updates.
+In addition, you need a package repository from Proxmox to get Proxmox Backup updates.
 
-During the Proxmox Backup beta phase only one repository (pbstest) will be
-available. Once released, a Enterprise repository for production use and a
+During the Proxmox Backup beta phase, only one repository (pbstest) will be
+available. Once released, an Enterprise repository for production use and a
 no-subscription repository will be provided.
 
 SecureApt
@@ -39,8 +38,8 @@ SecureApt
 The `Release` files in the repositories are signed with GnuPG. APT is using
 these signatures to verify that all packages are from a trusted source.
 
-If you install Proxmox Backup Server from an official ISO image, the key for
-verification is already installed.
+If you install Proxmox Backup Server from an official ISO image, the
+verification key is already installed.
 
 If you install Proxmox Backup Server on top of Debian, download and install the
 key with the following commands:
@@ -136,17 +135,17 @@ During the public beta, there is a repository called ``pbstest``. This one
 contains the latest packages and is heavily used by developers to test new
 features.
 
-.. .. warning:: the ``pbstest`` repository should (as the name implies) 
+.. .. warning:: the ``pbstest`` repository should (as the name implies)
   only be used to test new features or bug fixes.
 
-You can configure this using ``/etc/apt/sources.list`` by adding the following
-line:
+You can access this repository by adding the following line to
+``/etc/apt/sources.list``:
 
 .. code-block:: sources.list
   :caption: sources.list entry for ``pbstest``
 
   deb http://download.proxmox.com/debian/pbs buster pbstest
 
-If you installed Proxmox Backup Server from the official beta ISO you should
+If you installed Proxmox Backup Server from the official beta ISO, you should
 have this repository already configured in
 ``/etc/apt/sources.list.d/pbstest-beta.list``
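One way to append that entry from a shell (the exact commands are assumed, not taken from the docs):

  # echo 'deb http://download.proxmox.com/debian/pbs buster pbstest' >> /etc/apt/sources.list
  # apt update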
@@ -9,7 +9,7 @@ which caters to a similar use-case.
 The ``.pxar`` format is adapted to fulfill the specific needs of the Proxmox
 Backup Server, for example, efficient storage of hardlinks.
 The format is designed to reduce storage space needed on the server by achieving
-a high level of de-duplication.
+a high level of deduplication.
 
 Creating an Archive
 ^^^^^^^^^^^^^^^^^^^
@@ -18,7 +18,7 @@ Run the following command to create an archive of a folder named ``source``:
 
 .. code-block:: console
 
-  # pxar create archive.pxar source
+  # pxar create archive.pxar /path/to/source
 
 This will create a new archive called ``archive.pxar`` with the contents of the
 ``source`` folder.
@@ -29,45 +29,44 @@ This will create a new archive called ``archive.pxar`` with the contents of the
 
 By default, ``pxar`` will skip certain mountpoints and will not follow device
 boundaries. This design decision is based on the primary use case of creating
-archives for backups. It is sensible to not back up the contents of certain
+archives for backups. It makes sense to not back up the contents of certain
 temporary or system specific files.
 To alter this behavior and follow device boundaries, use the
 ``--all-file-systems`` flag.
 
 It is possible to exclude certain files and/or folders from the archive by
-passing glob match patterns as additional parameters. Whenever a file is matched
-by one of the patterns, you will get a warning stating that this file is skipped
-and therefore not included in the archive.
+passing the ``--exclude`` parameter with ``gitignore``\-style match patterns.
 
 For example, you can exclude all files ending in ``.txt`` from the archive
 by running:
 
 .. code-block:: console
 
-  # pxar create archive.pxar source '**/*.txt'
+  # pxar create archive.pxar /path/to/source --exclude '**/*.txt'
 
 Be aware that the shell itself will try to expand all of the glob patterns before
 invoking ``pxar``.
 In order to avoid this, all globs have to be quoted correctly.
 
-It is possible to pass a list of match patterns to fulfill more complex
-file exclusion/inclusion behavior, although it is recommended to use the
+It is possible to pass the ``--exclude`` parameter multiple times, in order to
+match more than one pattern. This allows you to use more complex
+file exclusion/inclusion behavior. However, it is recommended to use
 ``.pxarexclude`` files instead for such cases.
 
 For example you might want to exclude all ``.txt`` files except for a specific
 one from the archive. This is achieved via the negated match pattern, prefixed
 by ``!``.
-All the glob pattern are relative to the ``source`` directory.
+All the glob patterns are relative to the ``source`` directory.
 
 .. code-block:: console
 
-  # pxar create archive.pxar source '**/*.txt' '!/folder/file.txt'
+  # pxar create archive.pxar /path/to/source --exclude '**/*.txt' --exclude '!/folder/file.txt'
 
-.. NOTE:: The order of the glob match patterns matters as later ones win over
+.. NOTE:: The order of the glob match patterns matters as later ones override
   previous ones. Permutations of the same patterns lead to different results.
 
 ``pxar`` will store the list of glob match patterns passed as parameters via the
-command line in a file called ``.pxarexclude-cli`` and stores it at the root of
+command line, in a file called ``.pxarexclude-cli`` at the root of
 the archive.
 If a file with this name is already present in the source folder during archive
 creation, this file is not included in the archive and the file containing the
@@ -86,23 +85,23 @@ The behavior is the same as described in :ref:`creating-backups`.
 Extracting an Archive
 ^^^^^^^^^^^^^^^^^^^^^
 
-An existing archive ``archive.pxar`` is extracted to a ``target`` directory
+An existing archive, ``archive.pxar``, is extracted to a ``target`` directory
 with the following command:
 
 .. code-block:: console
 
-  # pxar extract archive.pxar --target target
+  # pxar extract archive.pxar /path/to/target
 
 If no target is provided, the content of the archive is extracted to the current
 working directory.
 
-In order to restore only parts of an archive, single files and/or folders,
+In order to restore only parts of an archive, single files, and/or folders,
 it is possible to pass the corresponding glob match patterns as additional
-parameters or use the patterns stored in a file:
+parameters or to use the patterns stored in a file:
 
 .. code-block:: console
 
-  # pxar extract etc.pxar '**/*.conf' --target /restore/target/etc
+  # pxar extract etc.pxar /restore/target/etc --pattern '**/*.conf'
 
 The above example restores all ``.conf`` files encountered in any of the
 sub-folders in the archive ``etc.pxar`` to the target ``/restore/target/etc``.
@@ -4,6 +4,7 @@ use anyhow::{Error};
 
 use chrono::{DateTime, Utc};
 
+use proxmox_backup::api2::types::Userid;
 use proxmox_backup::client::{HttpClient, HttpClientOptions, BackupReader};
 
 pub struct DummyWriter {
@@ -27,7 +28,7 @@ async fn run() -> Result<(), Error> {
 
     let host = "localhost";
 
-    let username = "root@pam";
+    let username = Userid::root_userid();
 
     let options = HttpClientOptions::new()
         .interactive(true)
@@ -1,5 +1,6 @@
 use anyhow::{Error};
 
+use proxmox_backup::api2::types::Userid;
 use proxmox_backup::client::*;
 
 async fn upload_speed() -> Result<f64, Error> {
@@ -7,7 +8,7 @@ async fn upload_speed() -> Result<f64, Error> {
     let host = "localhost";
     let datastore = "store2";
 
-    let username = "root@pam";
+    let username = Userid::root_userid();
 
     let options = HttpClientOptions::new()
         .interactive(true)
@@ -2,13 +2,12 @@ use anyhow::{bail, format_err, Error};
 
 use serde_json::{json, Value};
 
-use proxmox::api::{api, RpcEnvironment, Permission, UserInformation};
+use proxmox::api::{api, RpcEnvironment, Permission};
 use proxmox::api::router::{Router, SubdirMap};
-use proxmox::{sortable, identity};
 use proxmox::{http_err, list_subdirs_api_method};
 
 use crate::tools;
-use crate::tools::ticket::*;
+use crate::tools::ticket::{self, Empty, Ticket};
 use crate::auth_helpers::*;
 use crate::api2::types::*;
 
@@ -23,7 +22,7 @@ pub mod role;
 /// returns Ok(true) if a ticket has to be created
 /// and Ok(false) if not
 fn authenticate_user(
-    username: &str,
+    userid: &Userid,
     password: &str,
     path: Option<String>,
     privs: Option<String>,
@@ -31,31 +30,35 @@ fn authenticate_user(
 ) -> Result<bool, Error> {
     let user_info = CachedUserInfo::new()?;
 
-    if !user_info.is_active_user(&username) {
+    if !user_info.is_active_user(&userid) {
         bail!("user account disabled or expired.");
     }
 
-    let ticket_lifetime = tools::ticket::TICKET_LIFETIME;
-
     if password.starts_with("PBS:") {
-        if let Ok((_age, Some(ticket_username))) = tools::ticket::verify_rsa_ticket(public_auth_key(), "PBS", password, None, -300, ticket_lifetime) {
-            if ticket_username == username {
+        if let Ok(ticket_userid) = Ticket::<Userid>::parse(password)
+            .and_then(|ticket| ticket.verify(public_auth_key(), "PBS", None))
+        {
+            if *userid == ticket_userid {
                 return Ok(true);
-            } else {
-                bail!("ticket login failed - wrong username");
             }
+            bail!("ticket login failed - wrong userid");
         }
     } else if password.starts_with("PBSTERM:") {
        if path.is_none() || privs.is_none() || port.is_none() {
            bail!("cannot check termnal ticket without path, priv and port");
        }
 
-        let path = path.unwrap();
-        let privilege_name = privs.unwrap();
-        let port = port.unwrap();
+        let path = path.ok_or_else(|| format_err!("missing path for termproxy ticket"))?;
+        let privilege_name = privs
+            .ok_or_else(|| format_err!("missing privilege name for termproxy ticket"))?;
+        let port = port.ok_or_else(|| format_err!("missing port for termproxy ticket"))?;
 
-        if let Ok((_age, _data)) =
-            tools::ticket::verify_term_ticket(public_auth_key(), &username, &path, port, password)
+        if let Ok(Empty) = Ticket::parse(password)
+            .and_then(|ticket| ticket.verify(
+                public_auth_key(),
+                ticket::TERM_PREFIX,
+                Some(&ticket::term_aad(userid, &path, port)),
+            ))
         {
             for (name, privilege) in PRIVILEGES {
                 if *name == privilege_name {
@ -66,7 +69,7 @@ fn authenticate_user(
|
||||
}
|
||||
}
|
||||
|
||||
user_info.check_privs(username, &path_vec, *privilege, false)?;
|
||||
user_info.check_privs(userid, &path_vec, *privilege, false)?;
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
@ -75,7 +78,7 @@ fn authenticate_user(
|
||||
}
|
||||
}
|
||||
|
||||
let _ = crate::auth::authenticate_user(username, password)?;
|
||||
let _ = crate::auth::authenticate_user(userid, password)?;
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
@ -83,7 +86,7 @@ fn authenticate_user(
|
||||
input: {
|
||||
properties: {
|
||||
username: {
|
||||
schema: PROXMOX_USER_ID_SCHEMA,
|
||||
type: Userid,
|
||||
},
|
||||
password: {
|
||||
schema: PASSWORD_SCHEMA,
|
||||
@ -130,7 +133,7 @@ fn authenticate_user(
|
||||
///
|
||||
/// Returns: An authentication ticket with additional infos.
|
||||
fn create_ticket(
|
||||
username: String,
|
||||
username: Userid,
|
||||
password: String,
|
||||
path: Option<String>,
|
||||
privs: Option<String>,
|
||||
@ -138,7 +141,7 @@ fn create_ticket(
|
||||
) -> Result<Value, Error> {
|
||||
match authenticate_user(&username, &password, path, privs, port) {
|
||||
Ok(true) => {
|
||||
let ticket = assemble_rsa_ticket(private_auth_key(), "PBS", Some(&username), None)?;
|
||||
let ticket = Ticket::new("PBS", &username)?.sign(private_auth_key(), None)?;
|
||||
|
||||
let token = assemble_csrf_prevention_token(csrf_secret(), &username);
|
||||
|
||||
@ -156,7 +159,7 @@ fn create_ticket(
|
||||
Err(err) => {
|
||||
let client_ip = "unknown"; // $rpcenv->get_client_ip() || '';
|
||||
log::error!("authentication failure; rhost={} user={} msg={}", client_ip, username, err.to_string());
|
||||
Err(http_err!(UNAUTHORIZED, "permission check failed.".into()))
|
||||
Err(http_err!(UNAUTHORIZED, "permission check failed."))
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -165,7 +168,7 @@ fn create_ticket(
|
||||
input: {
|
||||
properties: {
|
||||
userid: {
|
||||
schema: PROXMOX_USER_ID_SCHEMA,
|
||||
type: Userid,
|
||||
},
|
||||
password: {
|
||||
schema: PASSWORD_SCHEMA,
|
||||
@ -183,13 +186,15 @@ fn create_ticket(
|
||||
/// Each user is allowed to change his own password. Superuser
|
||||
/// can change all passwords.
|
||||
fn change_password(
|
||||
userid: String,
|
||||
userid: Userid,
|
||||
password: String,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let current_user = rpcenv.get_user()
|
||||
.ok_or_else(|| format_err!("unknown user"))?;
|
||||
let current_user: Userid = rpcenv
|
||||
.get_user()
|
||||
.ok_or_else(|| format_err!("unknown user"))?
|
||||
.parse()?;
|
||||
|
||||
let mut allowed = userid == current_user;
|
||||
|
||||
@ -205,9 +210,8 @@ fn change_password(
|
||||
bail!("you are not authorized to change the password.");
|
||||
}
|
||||
|
||||
let (username, realm) = crate::auth::parse_userid(&userid)?;
|
||||
let authenticator = crate::auth::lookup_authenticator(&realm)?;
|
||||
authenticator.store_password(&username, &password)?;
|
||||
let authenticator = crate::auth::lookup_authenticator(userid.realm())?;
|
||||
authenticator.store_password(userid.name(), &password)?;
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
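Note: the rewritten authenticate_user binds terminal tickets to additional authenticated data (userid, path, port) via ticket::term_aad, so a ticket issued for one console cannot be replayed against another. The toy sketch below illustrates only the AAD-binding idea, using a plain keyed hash from the standard library; the real code uses RSA-signed tickets and a proper encoding, and toy_sign/toy_verify are made-up names, not crate APIs.

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Build the additional authenticated data a terminal ticket is bound to,
/// mirroring the `term_aad(userid, path, port)` idea from the diff.
fn term_aad(userid: &str, path: &str, port: u16) -> String {
    format!("{}:{}:{}", userid, path, port) // toy: real code uses an unambiguous encoding
}

/// Toy "signature": NOT cryptographically secure, demonstration only.
fn toy_sign(secret: u64, prefix: &str, aad: &str) -> u64 {
    let mut h = DefaultHasher::new();
    secret.hash(&mut h);
    prefix.hash(&mut h);
    aad.hash(&mut h);
    h.finish()
}

fn toy_verify(secret: u64, prefix: &str, aad: &str, sig: u64) -> bool {
    toy_sign(secret, prefix, aad) == sig
}

fn main() {
    let secret = 0xdead_beef_u64;
    let sig = toy_sign(secret, "PBSTERM", &term_aad("root@pam", "/vms/100", 5900));
    // same user/path/port verifies...
    assert!(toy_verify(secret, "PBSTERM", &term_aad("root@pam", "/vms/100", 5900), sig));
    // ...but the same ticket replayed for another path does not
    assert!(!toy_verify(secret, "PBSTERM", &term_aad("root@pam", "/vms/101", 5900), sig));
}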
@@ -2,6 +2,7 @@ use anyhow::{bail, Error};
use ::serde::{Deserialize, Serialize};
use proxmox::api::{api, Router, RpcEnvironment, Permission};
use proxmox::tools::fs::open_file_locked;
use crate::api2::types::*;
use crate::config::acl;
@@ -141,7 +142,7 @@ pub fn read_acl(
},
userid: {
optional: true,
schema: PROXMOX_USER_ID_SCHEMA,
type: Userid,
},
group: {
optional: true,
@@ -167,14 +168,14 @@ pub fn update_acl(
path: String,
role: String,
propagate: Option<bool>,
userid: Option<String>,
userid: Option<Userid>,
group: Option<String>,
delete: Option<bool>,
digest: Option<String>,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let _lock = open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut tree, expected_digest) = acl::config()?;
@@ -192,7 +193,7 @@ pub fn update_acl(
} else if let Some(ref userid) = userid {
if !delete { // Note: we allow to delete non-existent users
let user_cfg = crate::config::user::cached_config()?;
if user_cfg.sections.get(userid).is_none() {
if user_cfg.sections.get(&userid.to_string()).is_none() {
bail!("no such user.");
}
}
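Note: the config modules now take open_file_locked from proxmox::tools::fs and keep the returned value alive in a `_lock` binding for the duration of the read-modify-write. Below is a minimal sketch of that guard pattern using libc::flock (the libc crate is an assumed dependency here; the real helper additionally takes a timeout).

use std::fs::File;
use std::os::unix::io::AsRawFd;

/// Holds an exclusive flock; the lock is released when the File is dropped.
fn open_file_locked(path: &str) -> std::io::Result<File> {
    let file = File::options().read(true).write(true).create(true).open(path)?;
    // block until we hold an exclusive lock on the config lock file
    if unsafe { libc::flock(file.as_raw_fd(), libc::LOCK_EX) } != 0 {
        return Err(std::io::Error::last_os_error());
    }
    Ok(file)
}

fn main() -> std::io::Result<()> {
    let _lock = open_file_locked("/tmp/demo.lck")?;
    // ... read, modify and save the guarded configuration here ...
    Ok(()) // `_lock` dropped: the flock is released when the fd closes
}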
@@ -3,6 +3,7 @@ use serde_json::Value;
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::schema::{Schema, StringSchema};
use proxmox::tools::fs::open_file_locked;
use crate::api2::types::*;
use crate::config::user;
@@ -48,7 +49,7 @@ pub fn list_users(
input: {
properties: {
userid: {
schema: PROXMOX_USER_ID_SCHEMA,
type: Userid,
},
comment: {
schema: SINGLE_LINE_COMMENT_SCHEMA,
@@ -87,25 +88,24 @@ pub fn list_users(
/// Create new user.
pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let user: user::User = serde_json::from_value(param)?;
let (mut config, _digest) = user::config()?;
if let Some(_) = config.sections.get(&user.userid) {
if let Some(_) = config.sections.get(user.userid.as_str()) {
bail!("user '{}' already exists.", user.userid);
}
let (username, realm) = crate::auth::parse_userid(&user.userid)?;
let authenticator = crate::auth::lookup_authenticator(&realm)?;
let authenticator = crate::auth::lookup_authenticator(&user.userid.realm())?;
config.set_data(&user.userid, "user", &user)?;
config.set_data(user.userid.as_str(), "user", &user)?;
user::save_config(&config)?;
if let Some(password) = password {
authenticator.store_password(&username, &password)?;
authenticator.store_password(user.userid.name(), &password)?;
}
Ok(())
@@ -115,7 +115,7 @@ pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error>
input: {
properties: {
userid: {
schema: PROXMOX_USER_ID_SCHEMA,
type: Userid,
},
},
},
@@ -128,9 +128,9 @@ pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error>
},
)]
/// Read user configuration data.
pub fn read_user(userid: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<user::User, Error> {
pub fn read_user(userid: Userid, mut rpcenv: &mut dyn RpcEnvironment) -> Result<user::User, Error> {
let (config, digest) = user::config()?;
let user = config.lookup("user", &userid)?;
let user = config.lookup("user", userid.as_str())?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(user)
}
@@ -140,7 +140,7 @@ pub fn read_user(userid: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<
input: {
properties: {
userid: {
schema: PROXMOX_USER_ID_SCHEMA,
type: Userid,
},
comment: {
optional: true,
@@ -182,7 +182,7 @@ pub fn read_user(userid: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<
)]
/// Update user configuration.
pub fn update_user(
userid: String,
userid: Userid,
comment: Option<String>,
enable: Option<bool>,
expire: Option<i64>,
@@ -193,7 +193,7 @@ pub fn update_user(
digest: Option<String>,
) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut config, expected_digest) = user::config()?;
@@ -202,7 +202,7 @@ pub fn update_user(
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let mut data: user::User = config.lookup("user", &userid)?;
let mut data: user::User = config.lookup("user", userid.as_str())?;
if let Some(comment) = comment {
let comment = comment.trim().to_string();
@@ -222,9 +222,8 @@ pub fn update_user(
}
if let Some(password) = password {
let (username, realm) = crate::auth::parse_userid(&userid)?;
let authenticator = crate::auth::lookup_authenticator(&realm)?;
authenticator.store_password(&username, &password)?;
let authenticator = crate::auth::lookup_authenticator(userid.realm())?;
authenticator.store_password(userid.name(), &password)?;
}
if let Some(firstname) = firstname {
@@ -238,7 +237,7 @@ pub fn update_user(
data.email = if email.is_empty() { None } else { Some(email) };
}
config.set_data(&userid, "user", &data)?;
config.set_data(userid.as_str(), "user", &data)?;
user::save_config(&config)?;
@@ -250,7 +249,7 @@ pub fn update_user(
input: {
properties: {
userid: {
schema: PROXMOX_USER_ID_SCHEMA,
type: Userid,
},
digest: {
optional: true,
@@ -263,9 +262,9 @@ pub fn update_user(
},
)]
/// Remove a user from the configuration file.
pub fn delete_user(userid: String, digest: Option<String>) -> Result<(), Error> {
pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut config, expected_digest) = user::config()?;
@@ -274,8 +273,8 @@ pub fn delete_user(userid: String, digest: Option<String>) -> Result<(), Error>
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
match config.sections.get(&userid) {
Some(_) => { config.sections.remove(&userid); },
match config.sections.get(userid.as_str()) {
Some(_) => { config.sections.remove(userid.as_str()); },
None => bail!("user '{}' does not exist.", userid),
}
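Note: create/update/delete above all follow the same pattern: take the lock, load the config together with its digest, compare the caller-supplied digest via detect_modified_configuration_file, then save. A self-contained sketch of that optimistic-concurrency check is below (a toy 64-bit hash stands in for the real SHA-256 digest).

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn digest(config_text: &str) -> u64 {
    let mut h = DefaultHasher::new();
    config_text.hash(&mut h);
    h.finish()
}

/// Fail if the config changed since the client read it (optimistic locking).
fn detect_modified_configuration_file(expected: u64, current: u64) -> Result<(), String> {
    if expected != current {
        return Err("detected modified configuration - file changed by other user? Try again.".into());
    }
    Ok(())
}

fn main() {
    let on_disk = "user: root@pam\n";
    let client_digest = digest(on_disk); // digest handed to the client with the read
    // another writer changes the file in between:
    let changed = "user: root@pam\nuser: backup@pam\n";
    assert!(detect_modified_configuration_file(client_digest, digest(changed)).is_err());
    assert!(detect_modified_configuration_file(client_digest, digest(on_disk)).is_ok());
}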
@@ -1,6 +1,7 @@
use std::collections::{HashSet, HashMap};
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::sync::{Arc, Mutex};
use anyhow::{bail, format_err, Error};
use futures::*;
@@ -10,7 +11,8 @@ use serde_json::{json, Value};
use proxmox::api::{
api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
RpcEnvironment, RpcEnvironmentType, Permission, UserInformation};
RpcEnvironment, RpcEnvironmentType, Permission
};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
use proxmox::tools::fs::{replace_file, CreateOptions};
@@ -36,7 +38,11 @@ use crate::config::acl::{
PRIV_DATASTORE_BACKUP,
};
fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> Result<(), Error> {
fn check_backup_owner(
store: &DataStore,
group: &BackupGroup,
userid: &Userid,
) -> Result<(), Error> {
let owner = store.get_owner(group)?;
if &owner != userid {
bail!("backup owner check failed ({} != {})", userid, owner);
@@ -44,9 +50,12 @@ fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> R
Ok(())
}
fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
fn read_backup_index(
store: &DataStore,
backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
let (manifest, manifest_crypt_mode, index_size) = store.load_manifest(backup_dir)?;
let (manifest, index_size) = store.load_manifest(backup_dir)?;
let mut result = Vec::new();
for item in manifest.files() {
@@ -59,18 +68,22 @@ fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<Ba
result.push(BackupContent {
filename: MANIFEST_BLOB_NAME.to_string(),
crypt_mode: Some(manifest_crypt_mode),
crypt_mode: match manifest.signature {
Some(_) => Some(CryptMode::SignOnly),
None => Some(CryptMode::None),
},
size: Some(index_size),
});
Ok(result)
Ok((manifest, result))
}
fn get_all_snapshot_files(
store: &DataStore,
info: &BackupInfo,
) -> Result<Vec<BackupContent>, Error> {
let mut files = read_backup_index(&store, &info.backup_dir)?;
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;
let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
acc.insert(item.filename.clone());
@@ -86,7 +99,7 @@ fn get_all_snapshot_files(
});
}
Ok(files)
Ok((manifest, files))
}
fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
@@ -130,9 +143,9 @@ fn list_groups(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
let username = rpcenv.get_user().unwrap();
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(&store)?;
@@ -153,7 +166,7 @@ fn list_groups(
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
let owner = datastore.get_owner(group)?;
if !list_all {
if owner != username { continue; }
if owner != userid { continue; }
}
let result_item = GroupListItem {
@@ -211,20 +224,22 @@ pub fn list_snapshot_files(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
let username = rpcenv.get_user().unwrap();
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(&store)?;
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
get_all_snapshot_files(&datastore, &info)
let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;
Ok(files)
}
#[api(
@@ -261,18 +276,18 @@ fn delete_snapshot(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let username = rpcenv.get_user().unwrap();
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
let datastore = DataStore::lookup_datastore(&store)?;
let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
datastore.remove_backup_dir(&snapshot)?;
datastore.remove_backup_dir(&snapshot, false)?;
Ok(Value::Null)
}
@@ -317,9 +332,9 @@ pub fn list_snapshots (
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
let username = rpcenv.get_user().unwrap();
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(&store)?;
@@ -342,27 +357,46 @@ pub fn list_snapshots (
let owner = datastore.get_owner(group)?;
if !list_all {
if owner != username { continue; }
if owner != userid { continue; }
}
let mut size = None;
let files = match get_all_snapshot_files(&datastore, &info) {
Ok(files) => {
let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
Ok((manifest, files)) => {
size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
files
// extract the first line from notes
let comment: Option<String> = manifest.unprotected["notes"]
.as_str()
.and_then(|notes| notes.lines().next())
.map(String::from);
let verify = manifest.unprotected["verify_state"].clone();
let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
Ok(verify) => verify,
Err(err) => {
eprintln!("error parsing verification state : '{}'", err);
None
}
};
(comment, verify, files)
},
Err(err) => {
eprintln!("error during snapshot file listing: '{}'", err);
info
.files
.iter()
.map(|x| BackupContent {
filename: x.to_string(),
size: None,
crypt_mode: None,
})
.collect()
(
None,
None,
info
.files
.iter()
.map(|x| BackupContent {
filename: x.to_string(),
size: None,
crypt_mode: None,
})
.collect()
)
},
};
@@ -370,6 +404,8 @@ pub fn list_snapshots (
backup_type: group.backup_type().to_string(),
backup_id: group.backup_id().to_string(),
backup_time: info.backup_dir.backup_time().timestamp(),
comment,
verification,
files,
size,
owner: Some(owner),
@@ -465,27 +501,50 @@ pub fn verify(
(None, None, None) => {
worker_id = store.clone();
}
_ => bail!("parameters do not spefify a backup group or snapshot"),
_ => bail!("parameters do not specify a backup group or snapshot"),
}
let username = rpcenv.get_user().unwrap();
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let upid_str = WorkerTask::new_thread(
"verify", Some(worker_id.clone()), &username, to_stdout, move |worker|
{
let success = if let Some(backup_dir) = backup_dir {
verify_backup_dir(&datastore, &backup_dir, &worker)?
"verify",
Some(worker_id.clone()),
userid,
to_stdout,
move |worker| {
let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
let failed_dirs = if let Some(backup_dir) = backup_dir {
let mut res = Vec::new();
if !verify_backup_dir(datastore, &backup_dir, verified_chunks, corrupt_chunks, worker.clone())? {
res.push(backup_dir.to_string());
}
res
} else if let Some(backup_group) = backup_group {
verify_backup_group(&datastore, &backup_group, &worker)?
let (_count, failed_dirs) = verify_backup_group(
datastore,
&backup_group,
verified_chunks,
corrupt_chunks,
None,
worker.clone(),
)?;
failed_dirs
} else {
verify_all_backups(&datastore, &worker)?
verify_all_backups(datastore, worker.clone())?
};
if !success {
bail!("verfication failed - please check the log for details");
if failed_dirs.len() > 0 {
worker.log("Failed to verify following snapshots:");
for dir in failed_dirs {
worker.log(format!("\t{}", dir));
}
bail!("verification failed - please check the log for details");
}
Ok(())
})?;
},
)?;
Ok(json!(upid_str))
}
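Note: the reworked verify handler shares verified_chunks and corrupt_chunks sets across every snapshot of a group, so each chunk is read and checksummed at most once per task, and failures are collected per snapshot instead of aborting on the first error. A reduced sketch of that sharing pattern is below; the types and the "verification" itself are stand-ins, the real code checks chunk digests against the datastore.

use std::collections::HashSet;
use std::sync::{Arc, Mutex};

type ChunkDigest = [u8; 32];

/// Verify one chunk, skipping work already done earlier in this task.
fn verify_chunk(
    digest: ChunkDigest,
    verified: &Arc<Mutex<HashSet<ChunkDigest>>>,
    corrupt: &Arc<Mutex<HashSet<ChunkDigest>>>,
) -> bool {
    if verified.lock().unwrap().contains(&digest) {
        return true; // already checked for an earlier snapshot
    }
    if corrupt.lock().unwrap().contains(&digest) {
        return false; // known bad, do not re-read it
    }
    let ok = digest[0] != 0xff; // toy stand-in for the real read+checksum
    if ok {
        verified.lock().unwrap().insert(digest);
    } else {
        corrupt.lock().unwrap().insert(digest);
    }
    ok
}

fn main() {
    let verified = Arc::new(Mutex::new(HashSet::with_capacity(1024 * 16)));
    let corrupt = Arc::new(Mutex::new(HashSet::with_capacity(64)));
    let mut failed_dirs: Vec<String> = Vec::new();

    for (snapshot, chunks) in [
        ("vm/100", [[0u8; 32], [1u8; 32]]),
        ("vm/101", [[1u8; 32], [0xffu8; 32]]), // second chunk is "corrupt"
    ] {
        if !chunks.iter().all(|c| verify_chunk(*c, &verified, &corrupt)) {
            failed_dirs.push(snapshot.to_string());
        }
    }
    assert_eq!(failed_dirs, ["vm/101"]);
}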
@@ -570,9 +629,9 @@ fn prune(
let backup_type = tools::required_string_param(&param, "backup-type")?;
let backup_id = tools::required_string_param(&param, "backup-id")?;
let username = rpcenv.get_user().unwrap();
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let dry_run = param["dry-run"].as_bool().unwrap_or(false);
@@ -581,7 +640,7 @@ fn prune(
let datastore = DataStore::lookup_datastore(&store)?;
let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
if !allowed { check_backup_owner(&datastore, &group, &username)?; }
if !allowed { check_backup_owner(&datastore, &group, &userid)?; }
let prune_options = PruneOptions {
keep_last: param["keep-last"].as_u64(),
@@ -623,7 +682,7 @@ fn prune(
// We use a WorkerTask just to have a task log, but run synchrounously
let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;
let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;
let result = try_block! {
if keep_all {
@@ -660,7 +719,7 @@ fn prune(
}));
if !(dry_run || keep) {
datastore.remove_backup_dir(&info.backup_dir)?;
datastore.remove_backup_dir(&info.backup_dir, true)?;
}
}
@@ -705,11 +764,15 @@ fn start_garbage_collection(
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let upid_str = WorkerTask::new_thread(
"garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
{
"garbage_collection",
Some(store.clone()),
Userid::root_userid().clone(),
to_stdout,
move |worker| {
worker.log(format!("starting garbage collection on store {}", store));
datastore.garbage_collection(&worker)
})?;
},
)?;
Ok(json!(upid_str))
}
@@ -773,13 +836,13 @@ fn get_datastore_list(
let (config, _digest) = datastore::config()?;
let username = rpcenv.get_user().unwrap();
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let mut list = Vec::new();
for (store, (_, data)) in &config.sections {
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
if allowed {
let mut entry = json!({ "store": store });
@@ -824,9 +887,9 @@ fn download_file(
let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(store)?;
let username = rpcenv.get_user().unwrap();
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
@@ -837,7 +900,7 @@ fn download_file(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
@@ -846,8 +909,8 @@ fn download_file(
path.push(&file_name);
let file = tokio::fs::File::open(&path)
.map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
.await?;
.await
.map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
.map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
@@ -897,9 +960,9 @@ fn download_file_decoded(
let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(store)?;
let username = rpcenv.get_user().unwrap();
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
@@ -910,9 +973,9 @@ fn download_file_decoded(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
let files = read_backup_index(&datastore, &backup_dir)?;
let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
for file in files {
if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
bail!("cannot decode '{}' - is encrypted", file_name);
@@ -931,8 +994,10 @@ fn download_file_decoded(
"didx" => {
let index = DynamicIndexReader::open(&path)
.map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
let (csum, size) = index.compute_csum();
manifest.verify_file(&file_name, &csum, size)?;
let chunk_reader = LocalChunkReader::new(datastore, None);
let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
let reader = AsyncIndexReader::new(index, chunk_reader);
Body::wrap_stream(AsyncReaderStream::new(reader)
.map_err(move |err| {
@@ -944,7 +1009,10 @@ fn download_file_decoded(
let index = FixedIndexReader::open(&path)
.map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;
let chunk_reader = LocalChunkReader::new(datastore, None);
let (csum, size) = index.compute_csum();
manifest.verify_file(&file_name, &csum, size)?;
let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
let reader = AsyncIndexReader::new(index, chunk_reader);
Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
.map_err(move |err| {
@@ -954,7 +1022,9 @@ fn download_file_decoded(
},
"blob" => {
let file = std::fs::File::open(&path)
.map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))?;
.map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
// FIXME: load full blob to verify index checksum?
Body::wrap_stream(
WrappedReaderStream::new(DataBlobReader::new(file, None)?)
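Note: download_file_decoded now recomputes the index checksum and checks it against the manifest before streaming anything (manifest.verify_file(&file_name, &csum, size)), so a tampered or bit-rotted index is rejected up front. A small sketch of such a verify step is below; the Manifest struct and the toy u64 checksum are stand-ins for the real manifest with its SHA-256 sums.

use std::collections::HashMap;

struct Manifest {
    // file name -> (expected checksum, expected size)
    files: HashMap<String, (u64, u64)>,
}

impl Manifest {
    /// Refuse to serve a file whose on-disk index does not match the manifest.
    fn verify_file(&self, name: &str, csum: u64, size: u64) -> Result<(), String> {
        match self.files.get(name) {
            Some(&(ec, es)) if (ec, es) == (csum, size) => Ok(()),
            Some(_) => Err(format!("manifest mismatch for '{}'", name)),
            None => Err(format!("file '{}' not in manifest", name)),
        }
    }
}

fn main() {
    let mut files = HashMap::new();
    files.insert("catalog.pcat1.didx".to_string(), (0xabcd, 4096));
    let manifest = Manifest { files };

    // (csum, size) as recomputed from the index on disk:
    assert!(manifest.verify_file("catalog.pcat1.didx", 0xabcd, 4096).is_ok());
    assert!(manifest.verify_file("catalog.pcat1.didx", 0xffff, 4096).is_err());
}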
@@ -1015,8 +1085,8 @@ fn upload_backup_log(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
let username = rpcenv.get_user().unwrap();
check_backup_owner(&datastore, backup_dir.group(), &username)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
check_backup_owner(&datastore, backup_dir.group(), &userid)?;
let mut path = datastore.base_path();
path.push(backup_dir.relative_path());
@@ -1037,11 +1107,10 @@ fn upload_backup_log(
})
.await?;
let blob = DataBlob::from_raw(data)?;
// always verify CRC at server side
blob.verify_crc()?;
let raw_data = blob.raw_data();
replace_file(&path, raw_data, CreateOptions::new())?;
// always verify blob/CRC at server side
let blob = DataBlob::load_from_reader(&mut &data[..])?;
replace_file(&path, blob.raw_data(), CreateOptions::new())?;
// fixme: use correct formatter
Ok(crate::server::formatter::json_response(Ok(Value::Null)))
@@ -1086,23 +1155,35 @@ fn catalog(
) -> Result<Value, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
let username = rpcenv.get_user().unwrap();
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
let file_name = CATALOG_NAME;
let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
for file in files {
if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
bail!("cannot decode '{}' - is encrypted", file_name);
}
}
let mut path = datastore.base_path();
path.push(backup_dir.relative_path());
path.push(CATALOG_NAME);
path.push(file_name);
let index = DynamicIndexReader::open(&path)
.map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
let chunk_reader = LocalChunkReader::new(datastore, None);
let (csum, size) = index.compute_csum();
manifest.verify_file(&file_name, &csum, size)?;
let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
let reader = BufferedDynamicReader::new(index, chunk_reader);
let mut catalog_reader = CatalogReader::new(reader);
@@ -1158,7 +1239,7 @@ fn catalog(
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
&ApiHandler::AsyncHttp(&pxar_file_download),
&ObjectSchema::new(
"Download single file from pxar file of a bacup snapshot. Only works if it's not encrypted.",
"Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
&sorted!([
("store", false, &DATASTORE_SCHEMA),
("backup-type", false, &BACKUP_TYPE_SCHEMA),
@@ -1185,9 +1266,9 @@ fn pxar_file_download(
let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(&store)?;
let username = rpcenv.get_user().unwrap();
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
@@ -1198,10 +1279,7 @@ fn pxar_file_download(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
let mut path = datastore.base_path();
path.push(backup_dir.relative_path());
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
let mut components = base64::decode(&filepath)?;
if components.len() > 0 && components[0] == '/' as u8 {
@@ -1209,15 +1287,26 @@ fn pxar_file_download(
}
let mut split = components.splitn(2, |c| *c == '/' as u8);
let pxar_name = split.next().unwrap();
let pxar_name = std::str::from_utf8(split.next().unwrap())?;
let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
for file in files {
if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
bail!("cannot decode '{}' - is encrypted", pxar_name);
}
}
path.push(OsStr::from_bytes(&pxar_name));
let mut path = datastore.base_path();
path.push(backup_dir.relative_path());
path.push(pxar_name);
let index = DynamicIndexReader::open(&path)
.map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
let chunk_reader = LocalChunkReader::new(datastore, None);
let (csum, size) = index.compute_csum();
manifest.verify_file(&pxar_name, &csum, size)?;
let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
let reader = BufferedDynamicReader::new(index, chunk_reader);
let archive_size = reader.archive_size();
let reader = LocalDynamicReadAt::new(reader);
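Note: pxar_file_download decodes the base64 filepath parameter and splits the raw bytes once at the first '/' into the archive name and the path inside the archive. The sketch below isolates that byte-level split (std only); split_components is a hypothetical helper name, the handler above does this inline.

fn split_components(components: &[u8]) -> Result<(&str, &[u8]), String> {
    // strip one leading '/' if present, as the handler above does
    let components = components.strip_prefix(b"/").unwrap_or(components);
    let mut split = components.splitn(2, |c| *c == b'/');
    // first element of splitn always exists, even for an empty input
    let pxar_name = std::str::from_utf8(split.next().unwrap())
        .map_err(|e| e.to_string())?;
    let file_path = split
        .next()
        .ok_or_else(|| "filepath looks strange".to_string())?;
    Ok((pxar_name, file_path))
}

fn main() {
    let decoded = b"/root.pxar/etc/hosts"; // what base64::decode would yield
    let (pxar_name, file_path) = split_components(decoded).unwrap();
    assert_eq!(pxar_name, "root.pxar");
    assert_eq!(file_path, &b"etc/hosts"[..]);
}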
@@ -1293,6 +1382,108 @@ fn get_rrd_stats(
)
}
#[api(
input: {
properties: {
store: {
schema: DATASTORE_SCHEMA,
},
"backup-type": {
schema: BACKUP_TYPE_SCHEMA,
},
"backup-id": {
schema: BACKUP_ID_SCHEMA,
},
"backup-time": {
schema: BACKUP_TIME_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
},
)]
/// Get "notes" for a specific backup
fn get_notes(
store: String,
backup_type: String,
backup_id: String,
backup_time: i64,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
let manifest = datastore.load_manifest_json(&backup_dir)?;
let notes = manifest["unprotected"]["notes"]
.as_str()
.unwrap_or("");
Ok(String::from(notes))
}
#[api(
input: {
properties: {
store: {
schema: DATASTORE_SCHEMA,
},
"backup-type": {
schema: BACKUP_TYPE_SCHEMA,
},
"backup-id": {
schema: BACKUP_ID_SCHEMA,
},
"backup-time": {
schema: BACKUP_TIME_SCHEMA,
},
notes: {
description: "A multiline text.",
},
},
},
access: {
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
},
)]
/// Set "notes" for a specific backup
fn set_notes(
store: String,
backup_type: String,
backup_id: String,
backup_time: i64,
notes: String,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let datastore = DataStore::lookup_datastore(&store)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
let mut manifest = datastore.load_manifest_json(&backup_dir)?;
manifest["unprotected"]["notes"] = notes.into();
datastore.store_manifest(&backup_dir, manifest)?;
Ok(())
}
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
(
@@ -1326,6 +1517,12 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
&Router::new()
.get(&API_METHOD_LIST_GROUPS)
),
(
"notes",
&Router::new()
.get(&API_METHOD_GET_NOTES)
.put(&API_METHOD_SET_NOTES)
),
(
"prune",
&Router::new()
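Note: the new notes endpoints store free-form text under the manifest's "unprotected" object, the part of the manifest not covered by its signature, so editing a note never invalidates a signed manifest. A minimal sketch of the same reads and writes with serde_json (assumed dependency) is below; set_notes/get_notes here are free functions operating on a plain Value, unlike the API handlers above.

use serde_json::{json, Value};

fn set_notes(manifest: &mut Value, notes: &str) {
    manifest["unprotected"]["notes"] = notes.into();
}

fn get_notes(manifest: &Value) -> &str {
    manifest["unprotected"]["notes"].as_str().unwrap_or("")
}

fn main() {
    let mut manifest = json!({
        "files": [],
        "unprotected": {} // not covered by the manifest signature
    });
    set_notes(&mut manifest, "nightly backup\nfull system");
    assert_eq!(get_notes(&manifest), "nightly backup\nfull system");
    // the snapshot list shows only the first line as the comment:
    assert_eq!(get_notes(&manifest).lines().next(), Some("nightly backup"));
}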
@@ -1,15 +1,15 @@
use anyhow::{Error};
use anyhow::{format_err, Error};
use serde_json::Value;
use std::collections::HashMap;
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
use proxmox::api::router::SubdirMap;
use proxmox::{list_subdirs_api_method, sortable};
use crate::api2::types::*;
use crate::api2::pull::{get_pull_parameters};
use crate::api2::pull::do_sync_job;
use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
use crate::server::{self, TaskListInfo, WorkerTask};
use crate::server::UPID;
use crate::config::jobstate::{Job, JobState};
use crate::tools::systemd::time::{
parse_calendar_event, compute_next_event};
@@ -33,33 +33,26 @@ pub fn list_sync_jobs(
let mut list: Vec<SyncJobStatus> = config.convert_to_typed_array("sync")?;
let mut last_tasks: HashMap<String, &TaskListInfo> = HashMap::new();
let tasks = server::read_task_list()?;
for info in tasks.iter() {
let worker_id = match &info.upid.worker_id {
Some(id) => id,
_ => { continue; },
};
if let Some(last) = last_tasks.get(worker_id) {
if last.upid.starttime < info.upid.starttime {
last_tasks.insert(worker_id.to_string(), &info);
}
} else {
last_tasks.insert(worker_id.to_string(), &info);
}
}
for job in &mut list {
let mut last = 0;
if let Some(task) = last_tasks.get(&job.id) {
job.last_run_upid = Some(task.upid_str.clone());
if let Some((endtime, status)) = &task.state {
job.last_run_state = Some(String::from(status));
job.last_run_endtime = Some(*endtime);
last = *endtime;
}
}
let last_state = JobState::load("syncjob", &job.id)
.map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
let (upid, endtime, state, starttime) = match last_state {
JobState::Created { time } => (None, None, None, time),
JobState::Started { upid } => {
let parsed_upid: UPID = upid.parse()?;
(Some(upid), None, None, parsed_upid.starttime)
},
JobState::Finished { upid, state } => {
let parsed_upid: UPID = upid.parse()?;
(Some(upid), Some(state.endtime()), Some(state.to_string()), parsed_upid.starttime)
},
};
job.last_run_upid = upid;
job.last_run_state = state;
job.last_run_endtime = endtime;
let last = job.last_run_endtime.unwrap_or_else(|| starttime);
job.next_run = (|| -> Option<i64> {
let schedule = job.schedule.as_ref()?;
@@ -83,7 +76,7 @@ pub fn list_sync_jobs(
}
)]
/// Runs the sync jobs manually.
async fn run_sync_job(
fn run_sync_job(
id: String,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
@@ -92,21 +85,11 @@ async fn run_sync_job(
let (config, _digest) = sync::config()?;
let sync_job: SyncJobConfig = config.lookup("sync", &id)?;
let username = rpcenv.get_user().unwrap();
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let delete = sync_job.remove_vanished.unwrap_or(true);
let (client, src_repo, tgt_store) = get_pull_parameters(&sync_job.store, &sync_job.remote, &sync_job.remote_store).await?;
let job = Job::new("syncjob", &id)?;
let upid_str = WorkerTask::spawn("syncjob", Some(id.clone()), &username.clone(), false, move |worker| async move {
worker.log(format!("sync job '{}' start", &id));
crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, String::from("backup@pam")).await?;
worker.log(format!("sync job '{}' end", &id));
Ok(())
})?;
let upid_str = do_sync_job(job, sync_job, &userid, None)?;
Ok(upid_str)
}
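Note: the sync-job listing now derives each job's status from a persisted JobState instead of scanning the whole task list on every request. A reduced sketch of that state mapping follows; the enum shape mirrors the diff, but here the start time is carried directly in the variants, whereas the real code parses it out of the UPID string.

enum JobState {
    Created { time: i64 },
    Started { upid: String, starttime: i64 },
    Finished { upid: String, starttime: i64, endtime: i64, state: String },
}

/// Map the persisted state onto (last upid, end time, state text, start time),
/// as the new list_sync_jobs does.
fn job_status(s: JobState) -> (Option<String>, Option<i64>, Option<String>, i64) {
    match s {
        JobState::Created { time } => (None, None, None, time),
        JobState::Started { upid, starttime } => (Some(upid), None, None, starttime),
        JobState::Finished { upid, starttime, endtime, state } =>
            (Some(upid), Some(endtime), Some(state), starttime),
    }
}

fn main() {
    let (upid, endtime, state, starttime) = job_status(JobState::Finished {
        upid: "UPID:demo".into(),
        starttime: 100,
        endtime: 160,
        state: "OK".into(),
    });
    // "last run" falls back to the start time while no end time exists yet
    let last = endtime.unwrap_or(starttime);
    assert_eq!((upid.as_deref(), state.as_deref(), last), (Some("UPID:demo"), Some("OK"), 160));
}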
@@ -16,6 +16,7 @@ use crate::backup::*;
use crate::api2::types::*;
use crate::config::acl::PRIV_DATASTORE_BACKUP;
use crate::config::cached_user_info::CachedUserInfo;
use crate::tools::fs::lock_dir_noblock;
mod environment;
use environment::*;
@@ -56,12 +57,12 @@ fn upgrade_to_backup_protocol(
async move {
let debug = param["debug"].as_bool().unwrap_or(false);
let username = rpcenv.get_user().unwrap();
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let store = tools::required_string_param(&param, "store")?.to_owned();
let user_info = CachedUserInfo::new()?;
user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
user_info.check_privs(&userid, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
let datastore = DataStore::lookup_datastore(&store)?;
@@ -88,30 +89,36 @@ async move {
let env_type = rpcenv.env_type();
let backup_group = BackupGroup::new(backup_type, backup_id);
let owner = datastore.create_backup_group(&backup_group, &username)?;
// lock backup group to only allow one backup per group at a time
let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &userid)?;
// permission check
if owner != username { // only the owner is allowed to create additional snapshots
bail!("backup owner check failed ({} != {})", username, owner);
if owner != userid { // only the owner is allowed to create additional snapshots
bail!("backup owner check failed ({} != {})", userid, owner);
}
let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group).unwrap_or(None);
let backup_dir = BackupDir::new_with_group(backup_group, backup_time);
let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true).unwrap_or(None);
let backup_dir = BackupDir::new_with_group(backup_group.clone(), backup_time);
if let Some(last) = &last_backup {
let _last_guard = if let Some(last) = &last_backup {
if backup_dir.backup_time() <= last.backup_dir.backup_time() {
bail!("backup timestamp is older than last backup.");
}
// fixme: abort if last backup is still running - howto test?
// Idea: write upid into a file inside snapshot dir. then test if
// it is still running here.
}
let (path, is_new) = datastore.create_backup_dir(&backup_dir)?;
// lock last snapshot to prevent forgetting/pruning it during backup
let full_path = datastore.snapshot_path(&last.backup_dir);
Some(lock_dir_noblock(&full_path, "snapshot", "base snapshot is already locked by another operation")?)
} else {
None
};
let (path, is_new, _snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
if !is_new { bail!("backup directory already exists."); }
WorkerTask::spawn("backup", Some(worker_id), &username.clone(), true, move |worker| {
WorkerTask::spawn("backup", Some(worker_id), userid.clone(), true, move |worker| {
let mut env = BackupEnvironment::new(
env_type, username.clone(), worker.clone(), datastore, backup_dir);
env_type, userid, worker.clone(), datastore, backup_dir);
env.debug = debug;
env.last_backup = last_backup;
@@ -144,6 +151,11 @@ async move {
.map(|_| Err(format_err!("task aborted")));
async move {
// keep flock until task ends
let _group_guard = _group_guard;
let _snap_guard = _snap_guard;
let _last_guard = _last_guard;
let res = select!{
req = req_fut => req,
abrt = abort_future => abrt,
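Note: the backup handler now takes three directory locks and explicitly moves the guards into the task future (`let _group_guard = _group_guard;`) so they stay held until the task finishes, not just until setup returns. The same ownership trick, shown with a thread and a toy Drop-based guard; DirLockGuard is a made-up type standing in for the flock guards from lock_dir_noblock.

struct DirLockGuard(&'static str);

impl Drop for DirLockGuard {
    fn drop(&mut self) {
        println!("released lock on {}", self.0); // a real guard would funlock the dir fd here
    }
}

fn main() {
    let group_guard = DirLockGuard("group dir");
    let snap_guard = DirLockGuard("snapshot dir");

    let task = std::thread::spawn(move || {
        // moving the guards into the closure keeps the locks alive for the
        // whole task, exactly like `let _group_guard = _group_guard;` inside
        // the async block in the diff
        let _group_guard = group_guard;
        let _snap_guard = snap_guard;
        println!("backup task running with locks held");
    }); // guards dropped when the task body ends, not when setup ends

    task.join().unwrap();
}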
@@ -1,18 +1,21 @@
use anyhow::{bail, Error};
use anyhow::{bail, format_err, Error};
use std::sync::{Arc, Mutex};
use std::collections::HashMap;
use ::serde::{Serialize};
use serde_json::{json, Value};
use proxmox::tools::digest_to_hex;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
use crate::server::WorkerTask;
use crate::api2::types::Userid;
use crate::backup::*;
use crate::server::WorkerTask;
use crate::server::formatter::*;
use hyper::{Body, Response};
#[derive(Copy, Clone, Serialize)]
struct UploadStatistic {
count: u64,
size: u64,
@@ -31,6 +34,19 @@ impl UploadStatistic {
}
}
impl std::ops::Add for UploadStatistic {
type Output = Self;
fn add(self, other: Self) -> Self {
Self {
count: self.count + other.count,
size: self.size + other.size,
compressed_size: self.compressed_size + other.compressed_size,
duplicates: self.duplicates + other.duplicates,
}
}
}
struct DynamicWriterState {
name: String,
index: DynamicIndexWriter,
@@ -57,6 +73,8 @@ struct SharedBackupState {
dynamic_writers: HashMap<usize, DynamicWriterState>,
fixed_writers: HashMap<usize, FixedWriterState>,
known_chunks: HashMap<[u8;32], u32>,
backup_size: u64, // sums up size of all files
backup_stat: UploadStatistic,
}
impl SharedBackupState {
@@ -82,7 +100,7 @@ impl SharedBackupState {
pub struct BackupEnvironment {
env_type: RpcEnvironmentType,
result_attributes: Value,
user: String,
user: Userid,
pub debug: bool,
pub formatter: &'static OutputFormatter,
pub worker: Arc<WorkerTask>,
@@ -95,7 +113,7 @@ pub struct BackupEnvironment {
impl BackupEnvironment {
pub fn new(
env_type: RpcEnvironmentType,
user: String,
user: Userid,
worker: Arc<WorkerTask>,
datastore: Arc<DataStore>,
backup_dir: BackupDir,
@@ -108,6 +126,8 @@ impl BackupEnvironment {
dynamic_writers: HashMap::new(),
fixed_writers: HashMap::new(),
known_chunks: HashMap::new(),
backup_size: 0,
backup_stat: UploadStatistic::new(),
};
Self {
@@ -353,7 +373,6 @@ impl BackupEnvironment {
let expected_csum = data.index.close()?;
println!("server checksum {:?} client: {:?}", expected_csum, csum);
if csum != expected_csum {
bail!("dynamic writer '{}' close failed - got unexpected checksum", data.name);
}
@@ -361,6 +380,8 @@ impl BackupEnvironment {
self.log_upload_stat(&data.name, &csum, &uuid, size, chunk_count, &data.upload_stat);
state.file_counter += 1;
state.backup_size += size;
state.backup_stat = state.backup_stat + data.upload_stat;
Ok(())
}
@@ -395,7 +416,6 @@ impl BackupEnvironment {
let uuid = data.index.uuid;
let expected_csum = data.index.close()?;
println!("server checksum: {:?} client: {:?} (incremental: {})", expected_csum, csum, data.incremental);
if csum != expected_csum {
bail!("fixed writer '{}' close failed - got unexpected checksum", data.name);
}
@@ -403,6 +423,8 @@ impl BackupEnvironment {
self.log_upload_stat(&data.name, &expected_csum, &uuid, size, chunk_count, &data.upload_stat);
state.file_counter += 1;
state.backup_size += size;
state.backup_stat = state.backup_stat + data.upload_stat;
Ok(())
}
@@ -416,9 +438,8 @@ impl BackupEnvironment {
let blob_len = data.len();
let orig_len = data.len(); // fixme:
let blob = DataBlob::from_raw(data)?;
// always verify CRC at server side
blob.verify_crc()?;
// always verify blob/CRC at server side
let blob = DataBlob::load_from_reader(&mut &data[..])?;
let raw_data = blob.raw_data();
replace_file(&path, raw_data, CreateOptions::new())?;
@@ -427,6 +448,8 @@ impl BackupEnvironment {
let mut state = self.state.lock().unwrap();
state.file_counter += 1;
state.backup_size += orig_len as u64;
state.backup_stat.size += blob_len as u64;
Ok(())
}
@@ -446,6 +469,28 @@ impl BackupEnvironment {
bail!("backup does not contain valid files (file count == 0)");
}
// check manifest
let mut manifest = self.datastore.load_manifest_json(&self.backup_dir)
.map_err(|err| format_err!("unable to load manifest blob - {}", err))?;
let stats = serde_json::to_value(state.backup_stat)?;
manifest["unprotected"]["chunk_upload_stats"] = stats;
self.datastore.store_manifest(&self.backup_dir, manifest)
.map_err(|err| format_err!("unable to store manifest blob - {}", err))?;
if let Some(base) = &self.last_backup {
let path = self.datastore.snapshot_path(&base.backup_dir);
if !path.exists() {
bail!(
"base snapshot {} was removed during backup, cannot finish as chunks might be missing",
base.backup_dir
);
}
}
// marks the backup as successful
state.finished = true;
Ok(())
@@ -480,7 +525,7 @@ impl BackupEnvironment {
let mut state = self.state.lock().unwrap();
state.finished = true;
self.datastore.remove_backup_dir(&self.backup_dir)?;
self.datastore.remove_backup_dir(&self.backup_dir, true)?;
Ok(())
}
@@ -505,7 +550,7 @@ impl RpcEnvironment for BackupEnvironment {
}
fn get_user(&self) -> Option<String> {
Some(self.user.clone())
Some(self.user.to_string())
}
}
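Note: the new `impl std::ops::Add for UploadStatistic` lets per-writer statistics be folded into the shared backup state with a plain `+`, as in `state.backup_stat = state.backup_stat + data.upload_stat;` above. A self-contained version of the struct and impl from the diff, plus a usage line (derives and the concrete numbers are added here for the demo):

#[derive(Copy, Clone, Debug, PartialEq)]
struct UploadStatistic {
    count: u64,
    size: u64,
    compressed_size: u64,
    duplicates: u64,
}

impl std::ops::Add for UploadStatistic {
    type Output = Self;

    fn add(self, other: Self) -> Self {
        Self {
            count: self.count + other.count,
            size: self.size + other.size,
            compressed_size: self.compressed_size + other.compressed_size,
            duplicates: self.duplicates + other.duplicates,
        }
    }
}

fn main() {
    let backup_stat = UploadStatistic { count: 10, size: 4096, compressed_size: 1024, duplicates: 2 };
    let upload_stat = UploadStatistic { count: 5, size: 2048, compressed_size: 512, duplicates: 1 };
    // as in the diff: state.backup_stat = state.backup_stat + data.upload_stat;
    let total = backup_stat + upload_stat;
    assert_eq!(total, UploadStatistic { count: 15, size: 6144, compressed_size: 1536, duplicates: 3 });
}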
@@ -243,7 +243,7 @@ pub const API_METHOD_UPLOAD_BLOB: ApiMethod = ApiMethod::new(
&sorted!([
("file-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA),
("encoded-size", false, &IntegerSchema::new("Encoded blob size.")
.minimum((std::mem::size_of::<DataBlobHeader>() as isize) +1)
.minimum(std::mem::size_of::<DataBlobHeader>() as isize)
.maximum(1024*1024*16+(std::mem::size_of::<EncryptedDataBlobHeader>() as isize))
.schema()
)
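Note: dropping the `+1` from the minimum lets a blob that is exactly one header long, i.e. one with an empty payload, pass the schema check. A quick illustration of that bound with std::mem::size_of; the header layout here is a stand-in, not the crate's actual DataBlobHeader.

#[repr(C)]
struct DataBlobHeader {
    magic: [u8; 8],
    crc: [u8; 4],
}

fn main() {
    let minimum = std::mem::size_of::<DataBlobHeader>(); // 12 bytes for this stand-in layout
    let empty_blob_len = minimum; // header only, zero-length payload
    // the old bound `minimum + 1` rejected this; the new bound accepts it
    assert!(empty_blob_len >= minimum);
    println!("minimum encoded size: {} bytes", minimum);
}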
@ -5,6 +5,7 @@ use serde_json::Value;
|
||||
use ::serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::{api, Router, RpcEnvironment, Permission};
|
||||
use proxmox::tools::fs::open_file_locked;
|
||||
|
||||
use crate::api2::types::*;
|
||||
use crate::backup::*;
|
||||
@ -99,7 +100,7 @@ pub fn list_datastores(
|
||||
/// Create new datastore config.
|
||||
pub fn create_datastore(param: Value) -> Result<(), Error> {
|
||||
|
||||
let _lock = crate::tools::open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
|
||||
let datastore: datastore::DataStoreConfig = serde_json::from_value(param.clone())?;
|
||||
|
||||
@ -253,7 +254,7 @@ pub fn update_datastore(
|
||||
digest: Option<String>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let _lock = crate::tools::open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
|
||||
// pass/compare digest
|
||||
let (mut config, expected_digest) = datastore::config()?;
|
||||
@ -327,7 +328,7 @@ pub fn update_datastore(
|
||||
/// Remove a datastore configuration.
|
||||
pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> {
|
||||
|
||||
let _lock = crate::tools::open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
|
||||
let (mut config, expected_digest) = datastore::config()?;
|
||||
|
||||
|
@ -4,6 +4,7 @@ use ::serde::{Deserialize, Serialize};
|
||||
use base64;
|
||||
|
||||
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
|
||||
use proxmox::tools::fs::open_file_locked;
|
||||
|
||||
use crate::api2::types::*;
|
||||
use crate::config::remote;
|
||||
@ -60,7 +61,7 @@ pub fn list_remotes(
|
||||
schema: DNS_NAME_OR_IP_SCHEMA,
|
||||
},
|
||||
userid: {
|
||||
schema: PROXMOX_USER_ID_SCHEMA,
|
||||
type: Userid,
|
||||
},
|
||||
password: {
|
||||
schema: remote::REMOTE_PASSWORD_SCHEMA,
|
||||
@ -78,7 +79,7 @@ pub fn list_remotes(
|
||||
/// Create new remote.
|
||||
pub fn create_remote(password: String, param: Value) -> Result<(), Error> {
|
||||
|
||||
let _lock = crate::tools::open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
|
||||
let mut data = param.clone();
|
||||
data["password"] = Value::from(base64::encode(password.as_bytes()));
|
||||
@ -154,7 +155,7 @@ pub enum DeletableProperty {
|
||||
},
|
||||
userid: {
|
||||
optional: true,
|
||||
schema: PROXMOX_USER_ID_SCHEMA,
|
||||
type: Userid,
|
||||
},
|
||||
password: {
|
||||
optional: true,
|
||||
@ -187,14 +188,14 @@ pub fn update_remote(
|
||||
name: String,
|
||||
comment: Option<String>,
|
||||
host: Option<String>,
|
||||
userid: Option<String>,
|
||||
userid: Option<Userid>,
|
||||
password: Option<String>,
|
||||
fingerprint: Option<String>,
|
||||
delete: Option<Vec<DeletableProperty>>,
|
||||
digest: Option<String>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let _lock = crate::tools::open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
|
||||
let (mut config, expected_digest) = remote::config()?;
|
||||
|
||||
@ -255,7 +256,7 @@ pub fn update_remote(
|
||||
/// Remove a remote from the configuration file.
|
||||
pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error> {
|
||||
|
||||
let _lock = crate::tools::open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
|
||||
let (mut config, expected_digest) = remote::config()?;
|
||||
|
||||
|
@ -3,6 +3,7 @@ use serde_json::Value;
|
||||
use ::serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::{api, Router, RpcEnvironment};
|
||||
use proxmox::tools::fs::open_file_locked;
|
||||
|
||||
use crate::api2::types::*;
|
||||
use crate::config::sync::{self, SyncJobConfig};
|
||||
@ -68,7 +69,7 @@ pub fn list_sync_jobs(
|
||||
/// Create a new sync job.
|
||||
pub fn create_sync_job(param: Value) -> Result<(), Error> {
|
||||
|
||||
let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
|
||||
let sync_job: sync::SyncJobConfig = serde_json::from_value(param.clone())?;
|
||||
|
||||
@ -82,6 +83,8 @@ pub fn create_sync_job(param: Value) -> Result<(), Error> {
|
||||
|
||||
sync::save_config(&config)?;
|
||||
|
||||
crate::config::jobstate::create_state_file("syncjob", &sync_job.id)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -184,7 +187,7 @@ pub fn update_sync_job(
|
||||
digest: Option<String>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
|
||||
// pass/compare digest
|
||||
let (mut config, expected_digest) = sync::config()?;
|
||||
@ -247,7 +250,7 @@ pub fn update_sync_job(
|
||||
/// Remove a sync job configuration
|
||||
pub fn delete_sync_job(id: String, digest: Option<String>) -> Result<(), Error> {
|
||||
|
||||
let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
|
||||
let (mut config, expected_digest) = sync::config()?;
|
||||
|
||||
@ -263,6 +266,8 @@ pub fn delete_sync_job(id: String, digest: Option<String>) -> Result<(), Error>
|
||||
|
||||
sync::save_config(&config)?;
|
||||
|
||||
crate::config::jobstate::remove_state_file("syncjob", &id)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
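Note on the repeated `_lock` hunks above: every config mutator now takes the flock through `proxmox::tools::fs::open_file_locked` instead of the crate-local helper. The guard is simply the returned file handle held in `_lock`, so the lock lives until the end of the function. A minimal sketch of the pattern, with the signature assumed from the call sites above and a placeholder lock-file path:

    use std::time::Duration;

    use anyhow::Error;
    use proxmox::tools::fs::open_file_locked;

    fn modify_config() -> Result<(), Error> {
        // Wait up to 10 seconds for the flock; the returned handle is kept
        // only for its Drop impl, which releases the lock.
        let _lock = open_file_locked("/run/example/my.cfg.lock", Duration::new(10, 0))?;

        // ... read, modify and write the config while the lock is held ...

        Ok(())
    } // _lock dropped here, lock released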
@@ -1,18 +1,19 @@
 use std::path::PathBuf;

 use anyhow::Error;
-use futures::*;
+use futures::stream::TryStreamExt;
 use hyper::{Body, Response, StatusCode, header};
-use proxmox::http_err;
+use proxmox::http_bail;

 pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, Error> {
-    let file = tokio::fs::File::open(path.clone())
-        .map_err(move |err| {
-            match err.kind() {
-                std::io::ErrorKind::NotFound => http_err!(NOT_FOUND, format!("open file {:?} failed - not found", path.clone())),
-                _ => http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path.clone(), err)),
-            }
-        })
-        .await?;
+    let file = match tokio::fs::File::open(path.clone()).await {
+        Ok(file) => file,
+        Err(ref err) if err.kind() == std::io::ErrorKind::NotFound => {
+            http_bail!(NOT_FOUND, "open file {:?} failed - not found", path);
+        }
+        Err(err) => http_bail!(BAD_REQUEST, "open file {:?} failed: {}", path, err),
+    };

     let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
         .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
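The download-response rewrite above drops the combinator-style `map_err` chain in favour of a plain `match` on the awaited result, and switches from `http_err!` wrapping a `format!` to `http_bail!`, which takes format arguments directly and returns early. The same shape with stock `anyhow` standing in for the proxmox HTTP macros, as a sketch:

    use anyhow::{bail, Error};

    async fn open_or_fail(path: std::path::PathBuf) -> Result<tokio::fs::File, Error> {
        // Match on the awaited Result instead of chaining map_err combinators.
        match tokio::fs::File::open(&path).await {
            Ok(file) => Ok(file),
            Err(ref err) if err.kind() == std::io::ErrorKind::NotFound => {
                bail!("open file {:?} failed - not found", path)
            }
            Err(err) => bail!("open file {:?} failed: {}", path, err),
        }
    }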
@@ -2,10 +2,7 @@ use std::net::TcpListener;
 use std::os::unix::io::AsRawFd;

 use anyhow::{bail, format_err, Error};
-use futures::{
-    future::{FutureExt, TryFutureExt},
-    select,
-};
+use futures::future::{FutureExt, TryFutureExt};
 use hyper::body::Body;
 use hyper::http::request::Parts;
 use hyper::upgrade::Upgraded;
@@ -25,18 +22,21 @@ use crate::api2::types::*;
 use crate::config::acl::PRIV_SYS_CONSOLE;
 use crate::server::WorkerTask;
 use crate::tools;
+use crate::tools::ticket::{self, Empty, Ticket};

 pub mod disks;
 pub mod dns;
-mod journal;
 pub mod network;
-pub mod tasks;

 pub(crate) mod rrd;

+mod apt;
+mod journal;
 mod services;
 mod status;
 mod subscription;
-mod apt;
 mod syslog;
+pub mod tasks;
 mod time;

 pub const SHELL_CMD_SCHEMA: Schema = StringSchema::new("The command to run.")
@@ -91,12 +91,12 @@ async fn termproxy(
     cmd: Option<String>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-    let userid = rpcenv
+    let userid: Userid = rpcenv
         .get_user()
-        .ok_or_else(|| format_err!("unknown user"))?;
-    let (username, realm) = crate::auth::parse_userid(&userid)?;
+        .ok_or_else(|| format_err!("unknown user"))?
+        .parse()?;

-    if realm != "pam" {
+    if userid.realm() != "pam" {
         bail!("only pam users can use the console");
     }

@@ -106,12 +106,11 @@ async fn termproxy(
     let listener = TcpListener::bind("localhost:0")?;
     let port = listener.local_addr()?.port();

-    let ticket = tools::ticket::assemble_term_ticket(
-        crate::auth_helpers::private_auth_key(),
-        &userid,
-        &path,
-        port,
-    )?;
+    let ticket = Ticket::new(ticket::TERM_PREFIX, &Empty)?
+        .sign(
+            crate::auth_helpers::private_auth_key(),
+            Some(&ticket::term_aad(&userid, &path, port)),
+        )?;

     let mut command = Vec::new();
     match cmd.as_ref().map(|x| x.as_str()) {
@@ -134,10 +133,11 @@ async fn termproxy(
         _ => bail!("invalid command"),
     };

+    let username = userid.name().to_owned();
     let upid = WorkerTask::spawn(
         "termproxy",
         None,
-        &username,
+        userid,
         false,
         move |worker| async move {
             // move inside the worker so that it survives and does not close the port
@@ -170,7 +170,6 @@ async fn termproxy(
             let mut cmd = tokio::process::Command::new("/usr/bin/termproxy");

             cmd.args(&arguments)
-                .kill_on_drop(true)
                 .stdout(std::process::Stdio::piped())
                 .stderr(std::process::Stdio::piped());

@@ -197,8 +196,9 @@ async fn termproxy(
                 Ok::<(), Error>(())
             };

-            select!{
-                res = child.fuse() => {
+            let mut needs_kill = false;
+            let res = tokio::select!{
+                res = &mut child => {
                     let exit_code = res?;
                     if !exit_code.success() {
                         match exit_code.code() {
@@ -208,13 +208,33 @@ async fn termproxy(
                         }
                     }
                     Ok(())
                 },
-                res = stdout_fut.fuse() => res,
-                res = stderr_fut.fuse() => res,
-                res = worker.abort_future().fuse() => res.map_err(Error::from),
+                res = stdout_fut => res,
+                res = stderr_fut => res,
+                res = worker.abort_future() => {
+                    needs_kill = true;
+                    res.map_err(Error::from)
+                }
             };

+            if needs_kill {
+                if res.is_ok() {
+                    child.kill()?;
+                    child.await?;
+                    return Ok(());
+                }
+
+                if let Err(err) = child.kill() {
+                    worker.warn(format!("error killing termproxy: {}", err));
+                } else if let Err(err) = child.await {
+                    worker.warn(format!("error awaiting termproxy: {}", err));
+                }
+            }
+
+            res
         },
     )?;

+    // FIXME: We're returning the user NAME only?
     Ok(json!({
         "user": username,
         "ticket": ticket,
@@ -252,18 +272,17 @@ fn upgrade_to_websocket(
     rpcenv: Box<dyn RpcEnvironment>,
 ) -> ApiResponseFuture {
     async move {
-        let username = rpcenv.get_user().unwrap();
-        let ticket = tools::required_string_param(&param, "vncticket")?.to_owned();
+        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+        let ticket = tools::required_string_param(&param, "vncticket")?;
         let port: u16 = tools::required_integer_param(&param, "port")? as u16;

         // will be checked again by termproxy
-        tools::ticket::verify_term_ticket(
-            crate::auth_helpers::public_auth_key(),
-            &username,
-            &"/system",
-            port,
-            &ticket,
-        )?;
+        Ticket::<Empty>::parse(ticket)?
+            .verify(
+                crate::auth_helpers::public_auth_key(),
+                ticket::TERM_PREFIX,
+                Some(&ticket::term_aad(&userid, "/system", port)),
+            )?;

         let (ws, response) = WebSocket::new(parts.headers)?;
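Two things happen in the termproxy hunks above: the one-off `assemble_term_ticket`/`verify_term_ticket` helpers give way to a generic `Ticket` signed over an `Empty` payload, with the user, path and port bound as additional authenticated data via `ticket::term_aad`; and the abort handling moves from `futures::select!` over `fuse()`d futures to `tokio::select!`, polling the child by mutable reference so it can still be killed and reaped afterwards. A reduced sketch of that kill-on-abort shape (tokio 0.2-era `Child` API assumed, matching the `child.await` and synchronous `kill()` in the diff; the sleep stands in for `/usr/bin/termproxy`):

    use anyhow::{format_err, Error};
    use tokio::process::Command;
    use tokio::sync::oneshot;

    async fn run_child(mut abort: oneshot::Receiver<()>) -> Result<(), Error> {
        let mut child = Command::new("/bin/sleep").arg("3600").spawn()?;

        let mut needs_kill = false;
        let res = tokio::select! {
            status = &mut child => {        // poll by reference; `child` stays usable
                let _ = status?;
                Ok(())
            }
            _ = &mut abort => {
                needs_kill = true;          // remember to kill and reap below
                Err(format_err!("aborted"))
            }
        };

        if needs_kill {
            let _ = child.kill();           // best effort, like the worker above
            let _ = child.await;            // reap the process
        }
        res
    }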
@@ -9,7 +9,7 @@ use proxmox::api::router::{Router, SubdirMap};
 use crate::server::WorkerTask;

 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
-use crate::api2::types::{APTUpdateInfo, NODE_SCHEMA, UPID_SCHEMA};
+use crate::api2::types::{APTUpdateInfo, NODE_SCHEMA, Userid, UPID_SCHEMA};

 const_regex! {
     VERSION_EPOCH_REGEX = r"^\d+:";
@@ -233,11 +233,11 @@ pub fn apt_update_database(
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
     let quiet = quiet.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_QUIET);

-    let upid_str = WorkerTask::new_thread("aptupdate", None, &username.clone(), to_stdout, move |worker| {
+    let upid_str = WorkerTask::new_thread("aptupdate", None, userid, to_stdout, move |worker| {
         if !quiet { worker.log("starting apt-get update") }

         // TODO: set proxy /etc/apt/apt.conf.d/76pbsproxy like PVE

@@ -13,7 +13,7 @@ use crate::tools::disks::{
 };
 use crate::server::WorkerTask;

-use crate::api2::types::{UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};
+use crate::api2::types::{Userid, UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};

 pub mod directory;
 pub mod zfs;
@@ -140,7 +140,7 @@ pub fn initialize_disk(

     let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

     let info = get_disk_usage_info(&disk, true)?;

@@ -149,7 +149,7 @@ pub fn initialize_disk(
     }

     let upid_str = WorkerTask::new_thread(
-        "diskinit", Some(disk.clone()), &username.clone(), to_stdout, move |worker|
+        "diskinit", Some(disk.clone()), userid, to_stdout, move |worker|
         {
             worker.log(format!("initialize disk {}", disk));

@@ -16,6 +16,7 @@ use crate::tools::systemd::{self, types::*};
 use crate::server::WorkerTask;

 use crate::api2::types::*;
+use crate::config::datastore::DataStoreConfig;

 #[api(
     properties: {
@@ -133,7 +134,7 @@ pub fn create_datastore_disk(

     let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

     let info = get_disk_usage_info(&disk, true)?;

@@ -142,7 +143,7 @@ pub fn create_datastore_disk(
     }

     let upid_str = WorkerTask::new_thread(
-        "dircreate", Some(name.clone()), &username.clone(), to_stdout, move |worker|
+        "dircreate", Some(name.clone()), userid, to_stdout, move |worker|
         {
             worker.log(format!("create datastore '{}' on disk {}", name, disk));

@@ -175,9 +176,69 @@ pub fn create_datastore_disk(
     Ok(upid_str)
 }

+#[api(
+    protected: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            name: {
+                schema: DATASTORE_SCHEMA,
+            },
+        }
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
+    },
+)]
+/// Remove a Filesystem mounted under '/mnt/datastore/<name>'.
+pub fn delete_datastore_disk(name: String) -> Result<(), Error> {
+
+    let path = format!("/mnt/datastore/{}", name);
+    // path of datastore cannot be changed
+    let (config, _) = crate::config::datastore::config()?;
+    let datastores: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
+    let conflicting_datastore: Option<DataStoreConfig> = datastores.into_iter()
+        .filter(|ds| ds.path == path)
+        .next();
+
+    if let Some(conflicting_datastore) = conflicting_datastore {
+        bail!("Can't remove '{}' since it's required by datastore '{}'",
+              conflicting_datastore.path, conflicting_datastore.name);
+    }
+
+    // disable systemd mount-unit
+    let mut mount_unit_name = systemd::escape_unit(&path, true);
+    mount_unit_name.push_str(".mount");
+    systemd::disable_unit(&mount_unit_name)?;
+
+    // delete .mount-file
+    let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);
+    let full_path = std::path::Path::new(&mount_unit_path);
+    log::info!("removing systemd mount unit {:?}", full_path);
+    std::fs::remove_file(&full_path)?;
+
+    // try to unmount, if that fails tell the user to reboot or unmount manually
+    let mut command = std::process::Command::new("umount");
+    command.arg(&path);
+    match crate::tools::run_command(command, None) {
+        Err(_) => bail!(
+            "Could not umount '{}' since it is busy. It will stay mounted \
+             until the next reboot or until unmounted manually!",
+            path
+        ),
+        Ok(_) => Ok(())
+    }
+}
+
+const ITEM_ROUTER: Router = Router::new()
+    .delete(&API_METHOD_DELETE_DATASTORE_DISK);
+
 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_DATASTORE_MOUNTS)
-    .post(&API_METHOD_CREATE_DATASTORE_DISK);
+    .post(&API_METHOD_CREATE_DATASTORE_DISK)
+    .match_all("name", &ITEM_ROUTER);


 fn create_datastore_mount_unit(
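The new `delete_datastore_disk` above derives the systemd mount unit from the mount path via `systemd::escape_unit`. For a plain path, systemd's escaping turns each '/' into '-', so the unit file name is predictable. A hand-rolled illustration of just that mapping (the real helper also escapes unusual characters, which this sketch ignores):

    /// Illustration only: map a mount point to its .mount unit name.
    fn mount_unit_name(path: &str) -> String {
        // "/mnt/datastore/store1" -> "mnt-datastore-store1.mount"
        let escaped = path.trim_matches('/').replace('/', "-");
        format!("{}.mount", escaped)
    }

    fn main() {
        assert_eq!(
            mount_unit_name("/mnt/datastore/store1"),
            "mnt-datastore-store1.mount"
        );
    }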
@@ -254,7 +254,7 @@ pub fn create_zpool(

     let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

     let add_datastore = add_datastore.unwrap_or(false);

@@ -314,7 +314,7 @@ pub fn create_zpool(
     }

     let upid_str = WorkerTask::new_thread(
-        "zfscreate", Some(name.clone()), &username.clone(), to_stdout, move |worker|
+        "zfscreate", Some(name.clone()), userid, to_stdout, move |worker|
         {
             worker.log(format!("create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text));

@@ -4,6 +4,7 @@ use ::serde::{Deserialize, Serialize};

 use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
 use proxmox::api::schema::parse_property_string;
+use proxmox::tools::fs::open_file_locked;

 use crate::config::network::{self, NetworkConfig};
 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
@@ -230,7 +231,7 @@ pub fn create_interface(
     let interface_type = crate::tools::required_string_param(&param, "type")?;
     let interface_type: NetworkInterfaceType = serde_json::from_value(interface_type.into())?;

-    let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;

     let (mut config, _digest) = network::config()?;

@@ -463,7 +464,7 @@ pub fn update_interface(
     param: Value,
 ) -> Result<(), Error> {

-    let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;

     let (mut config, expected_digest) = network::config()?;

@@ -586,7 +587,7 @@ pub fn update_interface(
 /// Remove network interface configuration.
 pub fn delete_interface(iface: String, digest: Option<String>) -> Result<(), Error> {

-    let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;

     let (mut config, expected_digest) = network::config()?;

@@ -624,9 +625,9 @@ pub async fn reload_network_config(

     network::assert_ifupdown2_installed()?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

-    let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), &username.clone(), true, |_worker| async {
+    let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), userid, true, |_worker| async {

         let _ = std::fs::rename(network::NETWORK_INTERFACES_NEW_FILENAME, network::NETWORK_INTERFACES_FILENAME);
@@ -4,12 +4,13 @@ use anyhow::{bail, Error};
 use serde_json::{json, Value};

 use proxmox::{sortable, identity, list_subdirs_api_method};
-use proxmox::api::{api, Router, Permission};
+use proxmox::api::{api, Router, Permission, RpcEnvironment};
 use proxmox::api::router::SubdirMap;
 use proxmox::api::schema::*;

 use crate::api2::types::*;
 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
+use crate::server::WorkerTask;

 static SERVICE_NAME_LIST: [&str; 7] = [
     "proxmox-backup",
@@ -181,30 +182,43 @@ fn get_service_state(
     Ok(json_service_state(&service, status))
 }

-fn run_service_command(service: &str, cmd: &str) -> Result<Value, Error> {
+fn run_service_command(service: &str, cmd: &str, userid: Userid) -> Result<Value, Error> {

-    // fixme: run background worker (fork_worker) ???
+    let workerid = format!("srv{}", &cmd);

-    match cmd {
-        "start"|"stop"|"restart"|"reload" => {},
+    let cmd = match cmd {
+        "start"|"stop"|"restart"=> cmd.to_string(),
+        "reload" => "try-reload-or-restart".to_string(), // some services do not implement reload
         _ => bail!("unknown service command '{}'", cmd),
-    }
+    };
+    let service = service.to_string();

-    if service == "proxmox-backup" && cmd != "restart" {
-        bail!("invalid service cmd '{} {}'", service, cmd);
-    }
+    let upid = WorkerTask::new_thread(
+        &workerid,
+        Some(service.clone()),
+        userid,
+        false,
+        move |_worker| {

-    let real_service_name = real_service_name(service);
+            if service == "proxmox-backup" && cmd == "stop" {
+                bail!("invalid service cmd '{} {}' cannot stop essential service!", service, cmd);
+            }

-    let status = Command::new("systemctl")
-        .args(&[cmd, real_service_name])
-        .status()?;
+            let real_service_name = real_service_name(&service);

-    if !status.success() {
-        bail!("systemctl {} failed with {}", cmd, status);
-    }
+            let status = Command::new("systemctl")
+                .args(&[&cmd, real_service_name])
+                .status()?;

-    Ok(Value::Null)
+            if !status.success() {
+                bail!("systemctl {} failed with {}", cmd, status);
+            }
+
+            Ok(())
+        }
+    )?;
+
+    Ok(upid.into())
 }

@@ -227,11 +241,14 @@ fn run_service_command(service: &str, cmd: &str) -> Result<Value, Error> {
 fn start_service(
     service: String,
     _param: Value,
+    rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+
     log::info!("starting service {}", service);

-    run_service_command(&service, "start")
+    run_service_command(&service, "start", userid)
 }

@@ -254,11 +271,14 @@ fn start_service(
 fn stop_service(
     service: String,
     _param: Value,
+    rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+
     log::info!("stopping service {}", service);

-    run_service_command(&service, "stop")
+    run_service_command(&service, "stop", userid)
 }

@@ -281,15 +301,18 @@ fn stop_service(
 fn restart_service(
     service: String,
     _param: Value,
+    rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+
     log::info!("re-starting service {}", service);

     if &service == "proxmox-backup-proxy" {
         // special case, avoid aborting running tasks
-        run_service_command(&service, "reload")
+        run_service_command(&service, "reload", userid)
     } else {
-        run_service_command(&service, "restart")
+        run_service_command(&service, "restart", userid)
     }
 }

@@ -313,11 +336,14 @@ fn restart_service(
 fn reload_service(
     service: String,
     _param: Value,
+    rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+
     log::info!("reloading service {}", service);

-    run_service_command(&service, "reload")
+    run_service_command(&service, "reload", userid)
 }
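Worth noting in the services rework above: `reload` is now translated to systemd's `try-reload-or-restart`, which reloads units that support it and restarts the rest, so services without a reload handler no longer fail; and the command runs inside a `WorkerTask` thread, so it shows up as a tracked task owned by the calling user. The systemctl invocation in isolation (unit name is a placeholder):

    use std::process::Command;

    use anyhow::{bail, Error};

    fn reload_or_restart(unit: &str) -> Result<(), Error> {
        // systemd reloads the unit if it implements reload, otherwise restarts it
        let status = Command::new("systemctl")
            .args(&["try-reload-or-restart", unit])
            .status()?;
        if !status.success() {
            bail!("systemctl try-reload-or-restart failed with {}", status);
        }
        Ok(())
    }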
@@ -4,13 +4,13 @@ use std::io::{BufRead, BufReader};
 use anyhow::{Error};
 use serde_json::{json, Value};

-use proxmox::api::{api, Router, RpcEnvironment, Permission, UserInformation};
+use proxmox::api::{api, Router, RpcEnvironment, Permission};
 use proxmox::api::router::SubdirMap;
 use proxmox::{identity, list_subdirs_api_method, sortable};

 use crate::tools;
 use crate::api2::types::*;
-use crate::server::{self, UPID};
+use crate::server::{self, UPID, TaskState};
 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
 use crate::config::cached_user_info::CachedUserInfo;

@@ -84,11 +84,11 @@ async fn get_task_status(

     let upid = extract_upid(&param)?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

-    if username != upid.username {
+    if userid != upid.userid {
         let user_info = CachedUserInfo::new()?;
-        user_info.check_privs(&username, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
+        user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
     }

     let mut result = json!({
@@ -99,15 +99,15 @@ async fn get_task_status(
         "starttime": upid.starttime,
         "type": upid.worker_type,
         "id": upid.worker_id,
-        "user": upid.username,
+        "user": upid.userid,
     });

     if crate::server::worker_is_active(&upid).await? {
         result["status"] = Value::from("running");
     } else {
-        let exitstatus = crate::server::upid_read_status(&upid).unwrap_or(String::from("unknown"));
+        let exitstatus = crate::server::upid_read_status(&upid).unwrap_or(TaskState::Unknown { endtime: 0 });
         result["status"] = Value::from("stopped");
-        result["exitstatus"] = Value::from(exitstatus);
+        result["exitstatus"] = Value::from(exitstatus.to_string());
     };

     Ok(result)
@@ -161,11 +161,11 @@ async fn read_task_log(

     let upid = extract_upid(&param)?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

-    if username != upid.username {
+    if userid != upid.userid {
         let user_info = CachedUserInfo::new()?;
-        user_info.check_privs(&username, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
+        user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
     }

     let test_status = param["test-status"].as_bool().unwrap_or(false);
@@ -234,11 +234,11 @@ fn stop_task(

     let upid = extract_upid(&param)?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

-    if username != upid.username {
+    if userid != upid.userid {
         let user_info = CachedUserInfo::new()?;
-        user_info.check_privs(&username, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
+        user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
     }

     server::abort_worker_async(upid);
@@ -281,7 +281,7 @@ fn stop_task(
             default: false,
         },
         userfilter: {
-            optional:true,
+            optional: true,
             type: String,
             description: "Only list tasks from this user.",
         },
@@ -307,9 +307,9 @@ pub fn list_tasks(
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<TaskListItem>, Error> {

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["system", "tasks"]);
+    let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);

     let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;

@@ -324,11 +324,11 @@ pub fn list_tasks(
     let mut count = 0;

     for info in list {
-        if !list_all && info.upid.username != username { continue; }
+        if !list_all && info.upid.userid != userid { continue; }


-        if let Some(username) = userfilter {
-            if !info.upid.username.contains(username) { continue; }
+        if let Some(userid) = userfilter {
+            if !info.upid.userid.as_str().contains(userid) { continue; }
         }

         if let Some(store) = store {
@@ -352,8 +352,9 @@ pub fn list_tasks(

         if let Some(ref state) = info.state {
             if running { continue; }
-            if errors && state.1 == "OK" {
-                continue;
+            match state {
+                crate::server::TaskState::OK { .. } if errors => continue,
+                _ => {},
             }
         }
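The task hunks above replace free-form status strings with a structured `crate::server::TaskState`, so `list_tasks` can match on variants instead of comparing `state.1 == "OK"`, and `get_task_status` falls back to `TaskState::Unknown { endtime: 0 }`. Only the `OK` and `Unknown` variants are visible in this diff; the cut-down mirror below adds a plausible error variant purely for illustration:

    // Hypothetical, trimmed-down mirror of the TaskState idea; the real
    // enum lives in crate::server and may carry different variants.
    enum TaskState {
        OK { endtime: i64 },
        Error { message: String, endtime: i64 },
        Unknown { endtime: i64 },
    }

    fn skip_when_listing_errors(state: &TaskState) -> bool {
        // With the `errors` filter set, successful tasks are skipped,
        // mirroring the match in list_tasks above.
        matches!(state, TaskState::OK { .. })
    }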
@@ -2,6 +2,7 @@
 use std::sync::{Arc};

 use anyhow::{format_err, Error};
+use futures::{select, future::FutureExt};

 use proxmox::api::api;
 use proxmox::api::{ApiMethod, Router, RpcEnvironment, Permission};
@@ -12,13 +13,15 @@ use crate::client::{HttpClient, HttpClientOptions, BackupRepository, pull::pull_
 use crate::api2::types::*;
 use crate::config::{
     remote,
+    sync::SyncJobConfig,
+    jobstate::Job,
     acl::{PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ},
     cached_user_info::CachedUserInfo,
 };


 pub fn check_pull_privs(
-    username: &str,
+    userid: &Userid,
     store: &str,
     remote: &str,
     remote_store: &str,
@@ -27,11 +30,11 @@ pub fn check_pull_privs(

     let user_info = CachedUserInfo::new()?;

-    user_info.check_privs(username, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
-    user_info.check_privs(username, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;
+    user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
+    user_info.check_privs(userid, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;

     if delete {
-        user_info.check_privs(username, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
+        user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
     }

     Ok(())
@@ -62,6 +65,68 @@ pub async fn get_pull_parameters(
     Ok((client, src_repo, tgt_store))
 }

+pub fn do_sync_job(
+    mut job: Job,
+    sync_job: SyncJobConfig,
+    userid: &Userid,
+    schedule: Option<String>,
+) -> Result<String, Error> {
+
+    let job_id = job.jobname().to_string();
+    let worker_type = job.jobtype().to_string();
+
+    let upid_str = WorkerTask::spawn(
+        &worker_type,
+        Some(job.jobname().to_string()),
+        userid.clone(),
+        false,
+        move |worker| async move {
+
+            job.start(&worker.upid().to_string())?;
+
+            let worker2 = worker.clone();
+
+            let worker_future = async move {
+
+                let delete = sync_job.remove_vanished.unwrap_or(true);
+                let (client, src_repo, tgt_store) = get_pull_parameters(&sync_job.store, &sync_job.remote, &sync_job.remote_store).await?;
+
+                worker.log(format!("Starting datastore sync job '{}'", job_id));
+                if let Some(event_str) = schedule {
+                    worker.log(format!("task triggered by schedule '{}'", event_str));
+                }
+                worker.log(format!("Sync datastore '{}' from '{}/{}'",
+                        sync_job.store, sync_job.remote, sync_job.remote_store));
+
+                crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, Userid::backup_userid().clone()).await?;
+
+                worker.log(format!("sync job '{}' end", &job_id));
+
+                Ok(())
+            };
+
+            let mut abort_future = worker2.abort_future().map(|_| Err(format_err!("sync aborted")));
+
+            let res = select!{
+                worker = worker_future.fuse() => worker,
+                abort = abort_future => abort,
+            };
+
+            let status = worker2.create_state(&res);
+
+            match job.finish(status) {
+                Ok(_) => {},
+                Err(err) => {
+                    eprintln!("could not finish job state: {}", err);
+                }
+            }
+
+            res
+        })?;
+
+    Ok(upid_str)
+}
+
 #[api(
     input: {
         properties: {
@@ -99,19 +164,19 @@ async fn pull (
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let delete = remove_vanished.unwrap_or(true);

-    check_pull_privs(&username, &store, &remote, &remote_store, delete)?;
+    check_pull_privs(&userid, &store, &remote, &remote_store, delete)?;

     let (client, src_repo, tgt_store) = get_pull_parameters(&store, &remote, &remote_store).await?;

     // fixme: set to_stdout to false?
-    let upid_str = WorkerTask::spawn("sync", Some(store.clone()), &username.clone(), true, move |worker| async move {
+    let upid_str = WorkerTask::spawn("sync", Some(store.clone()), userid.clone(), true, move |worker| async move {

         worker.log(format!("sync datastore '{}' start", store));

-        pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, username).await?;
+        pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, userid).await?;

         worker.log(format!("sync datastore '{}' end", store));

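`do_sync_job` above races the actual sync future against the worker's abort future with `futures::select!` and then persists the outcome through `job.finish`. The race in isolation, with a oneshot channel standing in for `worker.abort_future()`:

    use anyhow::{format_err, Error};
    use futures::{select, future::FutureExt};
    use tokio::sync::oneshot;

    async fn race_against_abort(abort: oneshot::Receiver<()>) -> Result<(), Error> {
        // Box::pin because plain async blocks are !Unpin; select! needs
        // fused, Unpin futures.
        let mut work = Box::pin(async {
            // ... the actual sync would run here ...
            Ok::<(), Error>(())
        }.fuse());
        let mut abort = abort.fuse();

        let res = select! {
            res = work => res,
            _ = abort => Err(format_err!("sync aborted")),
        };

        res // the real code records this via job.finish(worker.create_state(&res))
    }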
@@ -55,11 +55,11 @@ fn upgrade_to_backup_reader_protocol(
     async move {
         let debug = param["debug"].as_bool().unwrap_or(false);

-        let username = rpcenv.get_user().unwrap();
+        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
         let store = tools::required_string_param(&param, "store")?.to_owned();

         let user_info = CachedUserInfo::new()?;
-        user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_READ, false)?;
+        user_info.check_privs(&userid, &["datastore", &store], PRIV_DATASTORE_READ, false)?;

         let datastore = DataStore::lookup_datastore(&store)?;

@@ -90,9 +90,14 @@ fn upgrade_to_backup_reader_protocol(

         let worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_dir.backup_time().timestamp());

-        WorkerTask::spawn("reader", Some(worker_id), &username.clone(), true, move |worker| {
+        WorkerTask::spawn("reader", Some(worker_id), userid.clone(), true, move |worker| {
             let mut env = ReaderEnvironment::new(
-                env_type, username.clone(), worker.clone(), datastore, backup_dir);
+                env_type,
+                userid,
+                worker.clone(),
+                datastore,
+                backup_dir,
+            );

             env.debug = debug;

@@ -225,8 +230,8 @@ fn download_chunk(
     env.debug(format!("download chunk {:?}", path));

     let data = tokio::fs::read(path)
-        .map_err(move |err| http_err!(BAD_REQUEST, format!("reading file {:?} failed: {}", path2, err)))
-        .await?;
+        .await
+        .map_err(move |err| http_err!(BAD_REQUEST, "reading file {:?} failed: {}", path2, err))?;

     let body = Body::from(data);

@@ -260,7 +265,7 @@ fn download_chunk_old(
     let path3 = path.clone();

     let response_future = tokio::fs::File::open(path)
-        .map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path2, err)))
+        .map_err(move |err| http_err!(BAD_REQUEST, "open file {:?} failed: {}", path2, err))
        .and_then(move |file| {
             env2.debug(format!("download chunk {:?}", path3));
             let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())

@@ -5,9 +5,10 @@ use serde_json::{json, Value};

 use proxmox::api::{RpcEnvironment, RpcEnvironmentType};

-use crate::server::WorkerTask;
+use crate::api2::types::Userid;
 use crate::backup::*;
 use crate::server::formatter::*;
+use crate::server::WorkerTask;

 //use proxmox::tools;

@@ -16,7 +17,7 @@ use crate::server::formatter::*;
 pub struct ReaderEnvironment {
     env_type: RpcEnvironmentType,
     result_attributes: Value,
-    user: String,
+    user: Userid,
     pub debug: bool,
     pub formatter: &'static OutputFormatter,
     pub worker: Arc<WorkerTask>,
@@ -28,7 +29,7 @@ pub struct ReaderEnvironment {
 impl ReaderEnvironment {
     pub fn new(
         env_type: RpcEnvironmentType,
-        user: String,
+        user: Userid,
         worker: Arc<WorkerTask>,
         datastore: Arc<DataStore>,
         backup_dir: BackupDir,
@@ -77,7 +78,7 @@ impl RpcEnvironment for ReaderEnvironment {
     }

     fn get_user(&self) -> Option<String> {
-        Some(self.user.clone())
+        Some(self.user.to_string())
     }
 }
@@ -10,14 +10,14 @@ use proxmox::api::{
     Router,
     RpcEnvironment,
     SubdirMap,
-    UserInformation,
 };

 use crate::api2::types::{
     DATASTORE_SCHEMA,
     RRDMode,
     RRDTimeFrameResolution,
-    TaskListItem
+    TaskListItem,
+    Userid,
 };

 use crate::server;
@@ -84,13 +84,13 @@ fn datastore_status(

     let (config, _digest) = datastore::config()?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;

     let mut list = Vec::new();

     for (store, (_, _)) in &config.sections {
-        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
         let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
         if !allowed {
             continue;
@@ -202,9 +202,9 @@ pub fn list_tasks(
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<TaskListItem>, Error> {

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["system", "tasks"]);
+    let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);

     let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;

@@ -212,7 +212,7 @@ pub fn list_tasks(
     let list: Vec<TaskListItem> = server::read_task_list()?
         .into_iter()
         .map(TaskListItem::from)
-        .filter(|entry| list_all || entry.user == username)
+        .filter(|entry| list_all || entry.user == userid)
         .collect();

     Ok(list.into())
src/api2/types/macros.rs (new file, 4 lines)
@@ -0,0 +1,4 @@
+//! Macros exported from api2::types.
+
+#[macro_export]
+macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => (r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)") }
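`#[macro_export]` hoists the macro to the crate root, which is what lets both this module and the new `userid` submodule below reuse the same regex fragment. A small sketch of how such fragments compose at compile time via `concat!` (names taken from this diff):

    #[macro_export]
    macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => (r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)") }

    macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }

    fn main() {
        // concat! evaluates at compile time to a single &'static str
        let user_id_re = concat!(r"^", USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!(), r"$");
        println!("{}", user_id_re);
    }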
@@ -1,11 +1,22 @@
-use anyhow::{bail};
-use ::serde::{Deserialize, Serialize};
+use anyhow::bail;
+use serde::{Deserialize, Serialize};

 use proxmox::api::{api, schema::*};
 use proxmox::const_regex;
 use proxmox::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};

+use crate::backup::CryptMode;
+use crate::server::UPID;
+
+#[macro_use]
+mod macros;
+
+#[macro_use]
+mod userid;
+pub use userid::{Realm, RealmRef};
+pub use userid::{Username, UsernameRef};
+pub use userid::Userid;
+pub use userid::PROXMOX_GROUP_ID_SCHEMA;
+
 // File names: may not contain slashes, may not start with "."
 pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
@@ -21,19 +32,6 @@ pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
 macro_rules! DNS_LABEL { () => (r"(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
 macro_rules! DNS_NAME { () => (concat!(r"(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!())) }

-// we only allow a limited set of characters
-// colon is not allowed, because we store usernames in
-// colon separated lists)!
-// slash is not allowed because it is used as pve API delimiter
-// also see "man useradd"
-macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }
-macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }
-
-macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) }
-
-#[macro_export]
-macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => (r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)") }
-
 macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) }
 macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) }

@@ -67,12 +65,8 @@ const_regex!{

     pub DNS_NAME_OR_IP_REGEX = concat!(r"^", DNS_NAME!(), "|", IPRE!(), r"$");

-    pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_ID_REGEX_STR!(), r"$");
-
     pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE!() ,"):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");

-    pub PROXMOX_GROUP_ID_REGEX = concat!(r"^", GROUP_NAME_REGEX_STR!(), r"$");
-
     pub CERT_FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";

     pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
@@ -115,12 +109,6 @@ pub const DNS_NAME_FORMAT: ApiStringFormat =
 pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX);

-pub const PROXMOX_USER_ID_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&PROXMOX_USER_ID_REGEX);
-
-pub const PROXMOX_GROUP_ID_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&PROXMOX_GROUP_ID_REGEX);
-
 pub const PASSWORD_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&PASSWORD_REGEX);

@@ -343,24 +331,6 @@ pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP addr
     .format(&DNS_NAME_OR_IP_FORMAT)
     .schema();

-pub const PROXMOX_AUTH_REALM_SCHEMA: Schema = StringSchema::new("Authentication domain ID")
-    .format(&PROXMOX_SAFE_ID_FORMAT)
-    .min_length(3)
-    .max_length(32)
-    .schema();
-
-pub const PROXMOX_USER_ID_SCHEMA: Schema = StringSchema::new("User ID")
-    .format(&PROXMOX_USER_ID_FORMAT)
-    .min_length(3)
-    .max_length(64)
-    .schema();
-
-pub const PROXMOX_GROUP_ID_SCHEMA: Schema = StringSchema::new("Group ID")
-    .format(&PROXMOX_GROUP_ID_FORMAT)
-    .min_length(3)
-    .max_length(64)
-    .schema();
-
 pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name (/sys/block/<name>).")
     .format(&BLOCKDEVICE_NAME_FORMAT)
     .min_length(3)
@@ -388,6 +358,10 @@ pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name
                 schema: BACKUP_ARCHIVE_NAME_SCHEMA
             },
         },
+        owner: {
+            type: Userid,
+            optional: true,
+        },
     },
 )]
 #[derive(Serialize, Deserialize)]
@@ -403,7 +377,26 @@ pub struct GroupListItem {
     pub files: Vec<String>,
     /// The owner of group
     #[serde(skip_serializing_if="Option::is_none")]
-    pub owner: Option<String>,
+    pub owner: Option<Userid>,
 }

+#[api(
+    properties: {
+        upid: {
+            schema: UPID_SCHEMA
+        },
+        state: {
+            type: String
+        },
+    },
+)]
+#[derive(Serialize, Deserialize)]
+/// Task properties.
+pub struct SnapshotVerifyState {
+    /// UPID of the verify task
+    pub upid: UPID,
+    /// State of the verification. "failed" or "ok"
+    pub state: String,
+}
+
 #[api(
@@ -417,11 +410,23 @@ pub struct GroupListItem {
         "backup-time": {
             schema: BACKUP_TIME_SCHEMA,
         },
+        comment: {
+            schema: SINGLE_LINE_COMMENT_SCHEMA,
+            optional: true,
+        },
+        verification: {
+            type: SnapshotVerifyState,
+            optional: true,
+        },
         files: {
             items: {
                 schema: BACKUP_ARCHIVE_NAME_SCHEMA
             },
         },
+        owner: {
+            type: Userid,
+            optional: true,
+        },
     },
 )]
 #[derive(Serialize, Deserialize)]
@@ -431,6 +436,12 @@ pub struct SnapshotListItem {
     pub backup_type: String, // enum
     pub backup_id: String,
     pub backup_time: i64,
+    /// The first line from manifest "notes"
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub comment: Option<String>,
+    /// The result of the last run verify task
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub verification: Option<SnapshotVerifyState>,
     /// List of contained archive files.
     pub files: Vec<BackupContent>,
     /// Overall snapshot size (sum of all archive sizes).
@@ -438,7 +449,7 @@ pub struct SnapshotListItem {
     pub size: Option<u64>,
     /// The owner of the snapshots group
     #[serde(skip_serializing_if="Option::is_none")]
-    pub owner: Option<String>,
+    pub owner: Option<Userid>,
 }

 #[api(
@@ -581,7 +592,8 @@ pub struct StorageStatus {

 #[api(
     properties: {
-        "upid": { schema: UPID_SCHEMA },
+        upid: { schema: UPID_SCHEMA },
+        user: { type: Userid },
     },
 )]
 #[derive(Serialize, Deserialize)]
@@ -601,7 +613,7 @@ pub struct TaskListItem {
     /// Worker ID (arbitrary ASCII string)
     pub worker_id: Option<String>,
     /// The user who started the task
-    pub user: String,
+    pub user: Userid,
     /// The task end time (Epoch)
     #[serde(skip_serializing_if="Option::is_none")]
     pub endtime: Option<i64>,
@@ -614,7 +626,7 @@ impl From<crate::server::TaskListInfo> for TaskListItem {
     fn from(info: crate::server::TaskListInfo) -> Self {
         let (endtime, status) = info
             .state
-            .map_or_else(|| (None, None), |(a,b)| (Some(a), Some(b)));
+            .map_or_else(|| (None, None), |a| (Some(a.endtime()), Some(a.to_string())));

         TaskListItem {
             upid: info.upid_str,
@@ -624,7 +636,7 @@ impl From<crate::server::TaskListInfo> for TaskListItem {
             starttime: info.upid.starttime,
             worker_type: info.upid.worker_type,
             worker_id: info.upid.worker_id,
-            user: info.upid.username,
+            user: info.upid.userid,
             endtime,
             status,
         }
@@ -890,9 +902,6 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {

 #[test]
 fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {

-    let schema = PROXMOX_USER_ID_SCHEMA;
-
     let invalid_user_ids = [
         "x", // too short
         "xx", // too short
@@ -906,7 +915,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
     ];

     for name in invalid_user_ids.iter() {
-        if let Ok(_) = parse_simple_value(name, &schema) {
+        if let Ok(_) = parse_simple_value(name, &Userid::API_SCHEMA) {
             bail!("test userid '{}' failed - got Ok() while exception an error.", name);
         }
     }
@@ -920,7 +929,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
     ];

     for name in valid_user_ids.iter() {
-        let v = match parse_simple_value(name, &schema) {
+        let v = match parse_simple_value(name, &Userid::API_SCHEMA) {
             Ok(v) => v,
             Err(err) => {
                 bail!("unable to parse userid '{}' - {}", name, err);
420
src/api2/types/userid.rs
Normal file
420
src/api2/types/userid.rs
Normal file
@ -0,0 +1,420 @@
|
||||
//! Types for user handling.
|
||||
//!
|
||||
//! We have [`Username`]s and [`Realm`]s. To uniquely identify a user, they must be combined into a [`Userid`].
|
||||
//!
|
||||
//! Since they're all string types, they're organized as follows:
|
||||
//!
|
||||
//! * [`Username`]: an owned user name. Internally a `String`.
|
||||
//! * [`UsernameRef`]: a borrowed user name. Pairs with a `Username` the same way a `str` pairs
|
||||
//! with `String`, meaning you can only make references to it.
|
||||
//! * [`Realm`]: an owned realm (`String` equivalent).
|
||||
//! * [`RealmRef`]: a borrowed realm (`str` equivalent).
|
||||
//! * [`Userid`]: an owned user id (`"user@realm"`). Note that this does not have a separate
|
||||
//! borrowed type.
|
||||
//!
|
||||
//! Note that `Username`s are not unique, therefore they do not implement `Eq` and cannot be
|
||||
//! compared directly. If a direct comparison is really required, they can be compared as strings
|
||||
//! via the `as_str()` method. [`Realm`]s and [`Userid`]s on the other hand can be compared with
|
||||
//! each other, as in those two cases the comparison has meaning.
|
||||
|
||||
use std::borrow::Borrow;
|
||||
use std::convert::TryFrom;
|
||||
use std::fmt;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use lazy_static::lazy_static;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::api;
|
||||
use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema};
|
||||
use proxmox::const_regex;
|
||||
|
||||
// we only allow a limited set of characters
|
||||
// colon is not allowed, because we store usernames in
|
||||
// colon separated lists)!
|
||||
// slash is not allowed because it is used as pve API delimiter
|
||||
// also see "man useradd"
|
||||
macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }
|
||||
macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }
|
||||
macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) }
|
||||
|
||||
const_regex! {
|
||||
pub PROXMOX_USER_NAME_REGEX = concat!(r"^", USER_NAME_REGEX_STR!(), r"$");
|
||||
pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_ID_REGEX_STR!(), r"$");
|
||||
pub PROXMOX_GROUP_ID_REGEX = concat!(r"^", GROUP_NAME_REGEX_STR!(), r"$");
|
||||
}
|
||||
|
||||
pub const PROXMOX_USER_NAME_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&PROXMOX_USER_NAME_REGEX);
|
||||
|
||||
pub const PROXMOX_USER_ID_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&PROXMOX_USER_ID_REGEX);
|
||||
|
||||
pub const PROXMOX_GROUP_ID_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&PROXMOX_GROUP_ID_REGEX);
|
||||
|
||||
pub const PROXMOX_GROUP_ID_SCHEMA: Schema = StringSchema::new("Group ID")
|
||||
.format(&PROXMOX_GROUP_ID_FORMAT)
|
||||
.min_length(3)
|
||||
.max_length(64)
|
||||
.schema();
|
||||
|
||||
pub const PROXMOX_AUTH_REALM_STRING_SCHEMA: StringSchema =
|
||||
StringSchema::new("Authentication domain ID")
|
||||
.format(&super::PROXMOX_SAFE_ID_FORMAT)
|
||||
.min_length(3)
|
||||
.max_length(32);
|
||||
pub const PROXMOX_AUTH_REALM_SCHEMA: Schema = PROXMOX_AUTH_REALM_STRING_SCHEMA.schema();
|
||||
|
||||
|
||||
#[api(
|
||||
type: String,
|
||||
format: &PROXMOX_USER_NAME_FORMAT,
|
||||
)]
|
||||
/// The user name part of a user id.
|
||||
///
|
||||
/// This alone does NOT uniquely identify the user and therefore does not implement `Eq`. In order
|
||||
/// to compare user names directly, they need to be explicitly compared as strings by calling
|
||||
/// `.as_str()`.
|
||||
///
|
||||
/// ```compile_fail
|
||||
/// fn test(a: Username, b: Username) -> bool {
|
||||
/// a == b // illegal and does not compile
|
||||
/// }
|
||||
/// ```
|
||||
#[derive(Clone, Debug, Hash, Deserialize, Serialize)]
|
||||
pub struct Username(String);
|
||||
|
||||
/// A reference to a user name part of a user id. This alone does NOT uniquely identify the user.
|
||||
///
|
||||
/// This is like a `str` to the `String` of a [`Username`].
|
||||
#[derive(Debug, Hash)]
|
||||
pub struct UsernameRef(str);
|
||||
|
||||
#[doc(hidden)]
|
||||
/// ```compile_fail
|
||||
/// let a: Username = unsafe { std::mem::zeroed() };
|
||||
/// let b: Username = unsafe { std::mem::zeroed() };
|
||||
/// let _ = <Username as PartialEq>::eq(&a, &b);
|
||||
/// ```
|
||||
///
|
||||
/// ```compile_fail
|
||||
/// let a: &UsernameRef = unsafe { std::mem::zeroed() };
|
||||
/// let b: &UsernameRef = unsafe { std::mem::zeroed() };
|
||||
/// let _ = <&UsernameRef as PartialEq>::eq(a, b);
|
||||
/// ```
|
||||
///
|
||||
/// ```compile_fail
|
||||
/// let a: &UsernameRef = unsafe { std::mem::zeroed() };
|
||||
/// let b: &UsernameRef = unsafe { std::mem::zeroed() };
|
||||
/// let _ = <&UsernameRef as PartialEq>::eq(&a, &b);
|
||||
/// ```
|
||||
struct _AssertNoEqImpl;
|
||||
|
||||
impl UsernameRef {
|
||||
fn new(s: &str) -> &Self {
|
||||
unsafe { &*(s as *const str as *const UsernameRef) }
|
||||
}
|
||||
|
||||
pub fn as_str(&self) -> &str {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl std::ops::Deref for Username {
|
||||
type Target = UsernameRef;
|
||||
|
||||
fn deref(&self) -> &UsernameRef {
|
||||
self.borrow()
|
||||
}
|
||||
}
|
||||
|
||||
impl Borrow<UsernameRef> for Username {
|
||||
fn borrow(&self) -> &UsernameRef {
|
||||
UsernameRef::new(self.as_str())
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<UsernameRef> for Username {
|
||||
fn as_ref(&self) -> &UsernameRef {
|
||||
UsernameRef::new(self.as_str())
|
||||
}
|
||||
}
|
||||
|
||||
impl ToOwned for UsernameRef {
|
||||
type Owned = Username;
|
||||
|
||||
fn to_owned(&self) -> Self::Owned {
|
||||
Username(self.0.to_owned())
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<String> for Username {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(s: String) -> Result<Self, Error> {
|
||||
if !PROXMOX_USER_NAME_REGEX.is_match(&s) {
|
||||
bail!("invalid user name");
|
||||
}
|
||||
|
||||
Ok(Self(s))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> TryFrom<&'a str> for &'a UsernameRef {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(s: &'a str) -> Result<&'a UsernameRef, Error> {
|
||||
if !PROXMOX_USER_NAME_REGEX.is_match(s) {
|
||||
bail!("invalid name in user id");
|
||||
}
|
||||
|
||||
Ok(UsernameRef::new(s))
|
||||
}
|
||||
}
|
||||
|
||||
#[api(schema: PROXMOX_AUTH_REALM_SCHEMA)]
|
||||
/// An authentication realm.
|
||||
#[derive(Clone, Debug, Eq, PartialEq, Hash, Deserialize, Serialize)]
|
||||
pub struct Realm(String);
|
||||
|
||||
/// A reference to an authentication realm.
|
||||
///
|
||||
/// This is like a `str` to the `String` of a `Realm`.
|
||||
#[derive(Debug, Hash, Eq, PartialEq)]
|
||||
pub struct RealmRef(str);
|
||||
|
||||
impl RealmRef {
|
||||
fn new(s: &str) -> &Self {
|
||||
unsafe { &*(s as *const str as *const RealmRef) }
|
||||
}
|
||||
|
||||
pub fn as_str(&self) -> &str {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl std::ops::Deref for Realm {
|
||||
type Target = RealmRef;
|
||||
|
||||
fn deref(&self) -> &RealmRef {
|
||||
self.borrow()
|
||||
}
|
||||
}
|
||||
|
||||
impl Borrow<RealmRef> for Realm {
|
||||
fn borrow(&self) -> &RealmRef {
|
||||
RealmRef::new(self.as_str())
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<RealmRef> for Realm {
|
||||
fn as_ref(&self) -> &RealmRef {
|
||||
RealmRef::new(self.as_str())
|
||||
}
|
||||
}
|
||||
|
||||
impl ToOwned for RealmRef {
|
||||
type Owned = Realm;
|
||||
|
||||
fn to_owned(&self) -> Self::Owned {
|
||||
Realm(self.0.to_owned())
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<String> for Realm {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(s: String) -> Result<Self, Error> {
|
||||
PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(&s)
|
||||
.map_err(|_| format_err!("invalid realm"))?;
|
||||
|
||||
Ok(Self(s))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> TryFrom<&'a str> for &'a RealmRef {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(s: &'a str) -> Result<&'a RealmRef, Error> {
|
||||
PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(s)
|
||||
.map_err(|_| format_err!("invalid realm"))?;
|
||||
|
||||
Ok(RealmRef::new(s))
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq<str> for Realm {
|
||||
fn eq(&self, rhs: &str) -> bool {
|
||||
self.0 == rhs
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq<&str> for Realm {
|
||||
fn eq(&self, rhs: &&str) -> bool {
|
||||
self.0 == *rhs
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq<str> for RealmRef {
|
||||
fn eq(&self, rhs: &str) -> bool {
|
||||
self.0 == *rhs
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq<&str> for RealmRef {
|
||||
fn eq(&self, rhs: &&str) -> bool {
|
||||
self.0 == **rhs
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq<RealmRef> for Realm {
|
||||
fn eq(&self, rhs: &RealmRef) -> bool {
|
||||
self.0 == &rhs.0
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq<Realm> for RealmRef {
|
||||
fn eq(&self, rhs: &Realm) -> bool {
|
||||
self.0 == rhs.0
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq<Realm> for &RealmRef {
|
||||
fn eq(&self, rhs: &Realm) -> bool {
|
||||
(*self).0 == rhs.0
|
||||
}
|
||||
}
|
||||
|
||||
/// A complete user id consisting of a user name and a realm.
#[derive(Clone, Debug, Hash)]
pub struct Userid {
    data: String,
    name_len: usize,
    //name: Username,
    //realm: Realm,
}

impl Userid {
    pub const API_SCHEMA: Schema = StringSchema::new("User ID")
        .format(&PROXMOX_USER_ID_FORMAT)
        .min_length(3)
        .max_length(64)
        .schema();

    const fn new(data: String, name_len: usize) -> Self {
        Self { data, name_len }
    }

    pub fn name(&self) -> &UsernameRef {
        UsernameRef::new(&self.data[..self.name_len])
    }

    pub fn realm(&self) -> &RealmRef {
        RealmRef::new(&self.data[(self.name_len + 1)..])
    }

    pub fn as_str(&self) -> &str {
        &self.data
    }

    /// Get the "backup@pam" user id.
    pub fn backup_userid() -> &'static Self {
        &*BACKUP_USERID
    }

    /// Get the "root@pam" user id.
    pub fn root_userid() -> &'static Self {
        &*ROOT_USERID
    }
}

lazy_static! {
    pub static ref BACKUP_USERID: Userid = Userid::new("backup@pam".to_string(), 6);
    pub static ref ROOT_USERID: Userid = Userid::new("root@pam".to_string(), 4);
}
impl Eq for Userid {}

impl PartialEq for Userid {
    fn eq(&self, rhs: &Self) -> bool {
        self.data == rhs.data && self.name_len == rhs.name_len
    }
}

impl From<(Username, Realm)> for Userid {
    fn from(parts: (Username, Realm)) -> Self {
        Self::from((parts.0.as_ref(), parts.1.as_ref()))
    }
}

impl From<(&UsernameRef, &RealmRef)> for Userid {
    fn from(parts: (&UsernameRef, &RealmRef)) -> Self {
        let data = format!("{}@{}", parts.0.as_str(), parts.1.as_str());
        let name_len = parts.0.as_str().len();
        Self { data, name_len }
    }
}

impl fmt::Display for Userid {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.data.fmt(f)
    }
}

impl std::str::FromStr for Userid {
    type Err = Error;

    fn from_str(id: &str) -> Result<Self, Error> {
        let (name, realm) = match id.as_bytes().iter().rposition(|&b| b == b'@') {
            Some(pos) => (&id[..pos], &id[(pos + 1)..]),
            None => bail!("not a valid user id"),
        };

        PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(realm)
            .map_err(|_| format_err!("invalid realm in user id"))?;

        Ok(Self::from((UsernameRef::new(name), RealmRef::new(realm))))
    }
}

impl TryFrom<String> for Userid {
    type Error = Error;

    fn try_from(data: String) -> Result<Self, Error> {
        let name_len = data
            .as_bytes()
            .iter()
            .rposition(|&b| b == b'@')
            .ok_or_else(|| format_err!("not a valid user id"))?;

        PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(&data[(name_len + 1)..])
            .map_err(|_| format_err!("invalid realm in user id"))?;

        Ok(Self { data, name_len })
    }
}

impl PartialEq<str> for Userid {
    fn eq(&self, rhs: &str) -> bool {
        rhs.len() > self.name_len + 2 // make sure range access below is allowed
            && rhs.starts_with(self.name().as_str())
            && rhs.as_bytes()[self.name_len] == b'@'
            && &rhs[(self.name_len + 1)..] == self.realm().as_str()
    }
}

impl PartialEq<&str> for Userid {
    fn eq(&self, rhs: &&str) -> bool {
        *self == **rhs
    }
}

impl PartialEq<String> for Userid {
    fn eq(&self, rhs: &String) -> bool {
        self == rhs.as_str()
    }
}

proxmox::forward_deserialize_to_from_str!(Userid);
proxmox::forward_serialize_to_display!(Userid);
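Userid stores a single String plus the name length, so name() and realm() are just slices of the same allocation, and the split happens on the last '@' so user names may themselves contain '@'. A compact sketch of that parse-and-slice idea (hypothetical UserId type; anyhow assumed, no schema checks):

    use anyhow::{bail, Error};

    struct UserId { data: String, name_len: usize }

    impl std::str::FromStr for UserId {
        type Err = Error;
        fn from_str(id: &str) -> Result<Self, Error> {
            // split on the *last* '@'
            let name_len = match id.as_bytes().iter().rposition(|&b| b == b'@') {
                Some(pos) => pos,
                None => bail!("not a valid user id"),
            };
            Ok(UserId { data: id.to_string(), name_len })
        }
    }

    fn main() -> Result<(), Error> {
        let uid: UserId = "root@pam".parse()?;
        assert_eq!(&uid.data[..uid.name_len], "root");       // name()
        assert_eq!(&uid.data[uid.name_len + 1..], "pam");    // realm()
        Ok(())
    }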
67 src/auth.rs
@ -10,39 +10,54 @@ use base64;
use anyhow::{bail, format_err, Error};
use serde_json::json;

use crate::api2::types::{Userid, UsernameRef, RealmRef};

pub trait ProxmoxAuthenticator {
    fn authenticate_user(&self, username: &str, password: &str) -> Result<(), Error>;
    fn store_password(&self, username: &str, password: &str) -> Result<(), Error>;
    fn authenticate_user(&self, username: &UsernameRef, password: &str) -> Result<(), Error>;
    fn store_password(&self, username: &UsernameRef, password: &str) -> Result<(), Error>;
}

pub struct PAM();

impl ProxmoxAuthenticator for PAM {

    fn authenticate_user(&self, username: &str, password: &str) -> Result<(), Error> {
    fn authenticate_user(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
        let mut auth = pam::Authenticator::with_password("proxmox-backup-auth").unwrap();
        auth.get_handler().set_credentials(username, password);
        auth.get_handler().set_credentials(username.as_str(), password);
        auth.authenticate()?;
        return Ok(());

    }

    fn store_password(&self, username: &str, password: &str) -> Result<(), Error> {
    fn store_password(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
        let mut child = Command::new("passwd")
            .arg(username)
            .arg(username.as_str())
            .stdin(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()
            .or_else(|err| Err(format_err!("unable to set password for '{}' - execute passwd failed: {}", username, err)))?;
            .map_err(|err| format_err!(
                "unable to set password for '{}' - execute passwd failed: {}",
                username.as_str(),
                err,
            ))?;

        // Note: passwd reads password twice from stdin (for verify)
        writeln!(child.stdin.as_mut().unwrap(), "{}\n{}", password, password)?;

        let output = child.wait_with_output()
            .or_else(|err| Err(format_err!("unable to set password for '{}' - wait failed: {}", username, err)))?;
        let output = child
            .wait_with_output()
            .map_err(|err| format_err!(
                "unable to set password for '{}' - wait failed: {}",
                username.as_str(),
                err,
            ))?;

        if !output.status.success() {
            bail!("unable to set password for '{}' - {}", username, String::from_utf8_lossy(&output.stderr));
            bail!(
                "unable to set password for '{}' - {}",
                username.as_str(),
                String::from_utf8_lossy(&output.stderr),
            );
        }

        Ok(())
@ -90,23 +105,23 @@ pub fn verify_crypt_pw(password: &str, enc_password: &str) -> Result<(), Error>
    Ok(())
}

const SHADOW_CONFIG_FILENAME: &str = "/etc/proxmox-backup/shadow.json";
const SHADOW_CONFIG_FILENAME: &str = configdir!("/shadow.json");

impl ProxmoxAuthenticator for PBS {

    fn authenticate_user(&self, username: &str, password: &str) -> Result<(), Error> {
    fn authenticate_user(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
        let data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
        match data[username].as_str() {
        match data[username.as_str()].as_str() {
            None => bail!("no password set"),
            Some(enc_password) => verify_crypt_pw(password, enc_password)?,
        }
        Ok(())
    }

    fn store_password(&self, username: &str, password: &str) -> Result<(), Error> {
    fn store_password(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
        let enc_password = encrypt_pw(password)?;
        let mut data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
        data[username] = enc_password.into();
        data[username.as_str()] = enc_password.into();

        let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
        let options = proxmox::tools::fs::CreateOptions::new()
@ -121,28 +136,18 @@ impl ProxmoxAuthenticator for PBS {
    }
}

pub fn parse_userid(userid: &str) -> Result<(String, String), Error> {
    let data: Vec<&str> = userid.rsplitn(2, '@').collect();

    if data.len() != 2 {
        bail!("userid '{}' has no realm", userid);
    }
    Ok((data[1].to_owned(), data[0].to_owned()))
}

/// Lookup the authenticator for the specified realm
pub fn lookup_authenticator(realm: &str) -> Result<Box<dyn ProxmoxAuthenticator>, Error> {
    match realm {
pub fn lookup_authenticator(realm: &RealmRef) -> Result<Box<dyn ProxmoxAuthenticator>, Error> {
    match realm.as_str() {
        "pam" => Ok(Box::new(PAM())),
        "pbs" => Ok(Box::new(PBS())),
        _ => bail!("unknown realm '{}'", realm),
        _ => bail!("unknown realm '{}'", realm.as_str()),
    }
}

/// Authenticate users
pub fn authenticate_user(userid: &str, password: &str) -> Result<(), Error> {
    let (username, realm) = parse_userid(userid)?;
pub fn authenticate_user(userid: &Userid, password: &str) -> Result<(), Error> {

    lookup_authenticator(&realm)?
        .authenticate_user(&username, password)
    lookup_authenticator(userid.realm())?
        .authenticate_user(userid.name(), password)
}
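lookup_authenticator returns a boxed trait object, so the realm is matched once and callers then use a uniform interface. A stripped-down sketch of that dispatch (hypothetical Authenticator trait; anyhow assumed):

    use anyhow::{bail, Error};

    trait Authenticator {
        fn authenticate(&self, user: &str, password: &str) -> Result<(), Error>;
    }

    struct Pam;
    struct Pbs;

    impl Authenticator for Pam {
        fn authenticate(&self, _user: &str, _password: &str) -> Result<(), Error> { Ok(()) }
    }
    impl Authenticator for Pbs {
        fn authenticate(&self, _user: &str, _password: &str) -> Result<(), Error> { Ok(()) }
    }

    fn lookup(realm: &str) -> Result<Box<dyn Authenticator>, Error> {
        match realm {
            "pam" => Ok(Box::new(Pam)),
            "pbs" => Ok(Box::new(Pbs)),
            _ => bail!("unknown realm '{}'", realm),
        }
    }

    fn main() -> Result<(), Error> {
        lookup("pam")?.authenticate("root", "secret")
    }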
@ -10,16 +10,17 @@ use std::path::PathBuf;
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
use proxmox::try_block;

use crate::api2::types::Userid;
use crate::tools::epoch_now_u64;

fn compute_csrf_secret_digest(
    timestamp: i64,
    secret: &[u8],
    username: &str,
    userid: &Userid,
) -> String {

    let mut hasher = sha::Sha256::new();
    let data = format!("{:08X}:{}:", timestamp, username);
    let data = format!("{:08X}:{}:", timestamp, userid);
    hasher.update(data.as_bytes());
    hasher.update(secret);

@ -28,19 +29,19 @@ fn compute_csrf_secret_digest(

pub fn assemble_csrf_prevention_token(
    secret: &[u8],
    username: &str,
    userid: &Userid,
) -> String {

    let epoch = epoch_now_u64().unwrap() as i64;

    let digest = compute_csrf_secret_digest(epoch, secret, username);
    let digest = compute_csrf_secret_digest(epoch, secret, userid);

    format!("{:08X}:{}", epoch, digest)
}

pub fn verify_csrf_prevention_token(
    secret: &[u8],
    username: &str,
    userid: &Userid,
    token: &str,
    min_age: i64,
    max_age: i64,
@ -62,7 +63,7 @@ pub fn verify_csrf_prevention_token(
    let ttime = i64::from_str_radix(timestamp, 16).
        map_err(|err| format_err!("timestamp format error - {}", err))?;

    let digest = compute_csrf_secret_digest(ttime, secret, username);
    let digest = compute_csrf_secret_digest(ttime, secret, userid);

    if digest != sig {
        bail!("invalid signature.");
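The CSRF token is a hex timestamp plus a digest over timestamp, user id and a server secret, so it can be re-derived and age-checked without server-side state. A self-contained sketch under those assumptions (openssl, base64 0.x and anyhow crates assumed; a production version should compare signatures in constant time):

    use anyhow::{bail, Error};
    use openssl::sha;

    fn digest(ts: i64, secret: &[u8], userid: &str) -> String {
        let mut h = sha::Sha256::new();
        h.update(format!("{:08X}:{}:", ts, userid).as_bytes());
        h.update(secret);
        base64::encode(&h.finish())
    }

    fn assemble(secret: &[u8], userid: &str, now: i64) -> String {
        format!("{:08X}:{}", now, digest(now, secret, userid))
    }

    fn verify(secret: &[u8], userid: &str, token: &str, now: i64, max_age: i64) -> Result<(), Error> {
        let (ts_hex, sig) = match token.find(':') {
            Some(pos) => (&token[..pos], &token[pos + 1..]),
            None => bail!("malformed token"),
        };
        let ts = i64::from_str_radix(ts_hex, 16)?;
        if digest(ts, secret, userid) != sig {
            bail!("invalid signature"); // note: not a constant-time compare
        }
        if now - ts > max_age {
            bail!("token expired");
        }
        Ok(())
    }

    fn main() -> Result<(), Error> {
        let secret = b"server secret";
        let token = assemble(secret, "root@pam", 1_600_000_000);
        verify(secret, "root@pam", &token, 1_600_000_100, 3600)
    }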
@ -120,6 +120,8 @@ macro_rules! PROXMOX_BACKUP_READER_PROTOCOL_ID_V1 {

/// Unix system user used by proxmox-backup-proxy
pub const BACKUP_USER_NAME: &str = "backup";
/// Unix system group used by proxmox-backup-proxy
pub const BACKUP_GROUP_NAME: &str = "backup";

/// Return User info for the 'backup' user (``getpwnam_r(3)``)
pub fn backup_user() -> Result<nix::unistd::User, Error> {
@ -129,6 +131,14 @@ pub fn backup_user() -> Result<nix::unistd::User, Error> {
    }
}

/// Return Group info for the 'backup' group (``getgrnam(3)``)
pub fn backup_group() -> Result<nix::unistd::Group, Error> {
    match nix::unistd::Group::from_name(BACKUP_GROUP_NAME)? {
        Some(group) => Ok(group),
        None => bail!("Unable to lookup backup user."),
    }
}

mod file_formats;
pub use file_formats::*;
@ -45,6 +45,31 @@ pub struct BackupGroup {
    backup_id: String,
}

impl std::cmp::Ord for BackupGroup {

    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        let type_order = self.backup_type.cmp(&other.backup_type);
        if type_order != std::cmp::Ordering::Equal {
            return type_order;
        }
        // try to compare IDs numerically
        let id_self = self.backup_id.parse::<u64>();
        let id_other = other.backup_id.parse::<u64>();
        match (id_self, id_other) {
            (Ok(id_self), Ok(id_other)) => id_self.cmp(&id_other),
            (Ok(_), Err(_)) => std::cmp::Ordering::Less,
            (Err(_), Ok(_)) => std::cmp::Ordering::Greater,
            _ => self.backup_id.cmp(&other.backup_id),
        }
    }
}

impl std::cmp::PartialOrd for BackupGroup {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl BackupGroup {

    pub fn new<T: Into<String>, U: Into<String>>(backup_type: T, backup_id: U) -> Self {
@ -173,7 +198,7 @@ impl std::str::FromStr for BackupGroup {
/// Uniquely identify a Backup (relative to data store)
///
/// We also call this a backup snapshot.
#[derive(Debug, Clone)]
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct BackupDir {
    /// Backup group
    group: BackupGroup,
@ -272,9 +297,13 @@ impl BackupInfo {
    }

    /// Finds the latest backup inside a backup group
    pub fn last_backup(base_path: &Path, group: &BackupGroup) -> Result<Option<BackupInfo>, Error> {
    pub fn last_backup(base_path: &Path, group: &BackupGroup, only_finished: bool)
        -> Result<Option<BackupInfo>, Error>
    {
        let backups = group.list_backups(base_path)?;
        Ok(backups.into_iter().max_by_key(|item| item.backup_dir.backup_time()))
        Ok(backups.into_iter()
            .filter(|item| !only_finished || item.is_finished())
            .max_by_key(|item| item.backup_dir.backup_time()))
    }

    pub fn sort_list(list: &mut Vec<BackupInfo>, ascendending: bool) {
@ -317,6 +346,11 @@ impl BackupInfo {
        })?;
        Ok(list)
    }

    pub fn is_finished(&self) -> bool {
        // backup is considered unfinished if there is no manifest
        self.files.iter().any(|name| name == super::MANIFEST_BLOB_NAME)
    }
}

fn list_backup_files<P: ?Sized + nix::NixPath>(dirfd: RawFd, path: &P) -> Result<Vec<String>, Error> {
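The new Ord impl sorts numeric backup IDs numerically and falls back to string order for the rest, so "2" sorts before "10". The effect in isolation (std only):

    use std::cmp::Ordering;

    fn cmp_ids(a: &str, b: &str) -> Ordering {
        match (a.parse::<u64>(), b.parse::<u64>()) {
            (Ok(a), Ok(b)) => a.cmp(&b),        // both numeric: 2 < 10
            (Ok(_), Err(_)) => Ordering::Less,  // numeric IDs sort first
            (Err(_), Ok(_)) => Ordering::Greater,
            _ => a.cmp(b),                      // fall back to string order
        }
    }

    fn main() {
        let mut ids = vec!["10", "2", "alpha"];
        ids.sort_by(|a, b| cmp_ids(a, b));
        assert_eq!(ids, ["2", "10", "alpha"]); // plain string sort would yield ["10", "2", "alpha"]
    }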
@ -3,7 +3,7 @@ use std::ffi::{CStr, CString, OsStr, OsString};
use std::future::Future;
use std::io::Write;
use std::mem;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::ffi::{OsStrExt, OsStringExt};
use std::path::{Path, PathBuf};
use std::pin::Pin;

@ -1073,6 +1073,7 @@ impl<'a> ExtractorState<'a> {
        }
        self.path.extend(&entry.name);

        self.extractor.set_path(OsString::from_vec(self.path.clone()));
        self.handle_entry(entry).await?;
    }
@ -104,7 +104,7 @@ impl ChunkStore {
            }
            let percentage = (i*100)/(64*1024);
            if percentage != last_percentage {
                eprintln!("Percentage done: {}", percentage);
                eprintln!("{}%", percentage);
                last_percentage = percentage;
            }
        }
@ -184,22 +184,6 @@ impl ChunkStore {
        Ok(true)
    }

    pub fn read_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {

        let (chunk_path, digest_str) = self.chunk_path(digest);
        let mut file = std::fs::File::open(&chunk_path)
            .map_err(|err| {
                format_err!(
                    "store '{}', unable to read chunk '{}' - {}",
                    self.name,
                    digest_str,
                    err,
                )
            })?;

        DataBlob::load(&mut file)
    }

    pub fn get_chunk_iterator(
        &self,
    ) -> Result<
@ -291,14 +275,13 @@ impl ChunkStore {
    pub fn sweep_unused_chunks(
        &self,
        oldest_writer: i64,
        phase1_start_time: i64,
        status: &mut GarbageCollectionStatus,
        worker: &WorkerTask,
    ) -> Result<(), Error> {
        use nix::sys::stat::fstatat;

        let now = unsafe { libc::time(std::ptr::null_mut()) };

        let mut min_atime = now - 3600*24; // at least 24h (see mount option relatime)
        let mut min_atime = phase1_start_time - 3600*24; // at least 24h (see mount option relatime)

        if oldest_writer < min_atime {
            min_atime = oldest_writer;
@ -312,7 +295,7 @@ impl ChunkStore {
        for (entry, percentage) in self.get_chunk_iterator()? {
            if last_percentage != percentage {
                last_percentage = percentage;
                worker.log(format!("percentage done: {}, chunk count: {}", percentage, chunk_count));
                worker.log(format!("percentage done: phase2 {}% (processed {} chunks)", percentage, chunk_count));
            }

            worker.fail_on_abort()?;
@ -36,6 +36,11 @@ impl DataBlob {
        &self.raw_data
    }

    /// Returns raw_data size
    pub fn raw_size(&self) -> u64 {
        self.raw_data.len() as u64
    }

    /// Consume self and returns raw_data
    pub fn into_inner(self) -> Vec<u8> {
        self.raw_data
@ -66,8 +71,8 @@ impl DataBlob {
        hasher.finalize()
    }

    /// verify the CRC32 checksum
    pub fn verify_crc(&self) -> Result<(), Error> {
    // verify the CRC32 checksum
    fn verify_crc(&self) -> Result<(), Error> {
        let expected_crc = self.compute_crc();
        if expected_crc != self.crc() {
            bail!("Data blob has wrong CRC checksum.");
@ -180,16 +185,23 @@ impl DataBlob {
    }

    /// Decode blob data
    pub fn decode(&self, config: Option<&CryptConfig>) -> Result<Vec<u8>, Error> {
    pub fn decode(&self, config: Option<&CryptConfig>, digest: Option<&[u8; 32]>) -> Result<Vec<u8>, Error> {

        let magic = self.magic();

        if magic == &UNCOMPRESSED_BLOB_MAGIC_1_0 {
            let data_start = std::mem::size_of::<DataBlobHeader>();
            Ok(self.raw_data[data_start..].to_vec())
            let data = self.raw_data[data_start..].to_vec();
            if let Some(digest) = digest {
                Self::verify_digest(&data, None, digest)?;
            }
            Ok(data)
        } else if magic == &COMPRESSED_BLOB_MAGIC_1_0 {
            let data_start = std::mem::size_of::<DataBlobHeader>();
            let data = zstd::block::decompress(&self.raw_data[data_start..], MAX_BLOB_SIZE)?;
            if let Some(digest) = digest {
                Self::verify_digest(&data, None, digest)?;
            }
            Ok(data)
        } else if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
            let header_len = std::mem::size_of::<EncryptedDataBlobHeader>();
@ -203,6 +215,9 @@ impl DataBlob {
            } else {
                config.decode_uncompressed_chunk(&self.raw_data[header_len..], &head.iv, &head.tag)?
            };
            if let Some(digest) = digest {
                Self::verify_digest(&data, Some(config), digest)?;
            }
            Ok(data)
        } else {
            bail!("unable to decrypt blob - missing CryptConfig");
@ -212,13 +227,17 @@ impl DataBlob {
        }
    }

    /// Load blob from ``reader``
    pub fn load(reader: &mut dyn std::io::Read) -> Result<Self, Error> {
    /// Load blob from ``reader``, verify CRC
    pub fn load_from_reader(reader: &mut dyn std::io::Read) -> Result<Self, Error> {

        let mut data = Vec::with_capacity(1024*1024);
        reader.read_to_end(&mut data)?;

        Self::from_raw(data)
        let blob = Self::from_raw(data)?;

        blob.verify_crc()?;

        Ok(blob)
    }

    /// Create Instance from raw data
@ -254,7 +273,7 @@ impl DataBlob {
    /// To do that, we need to decompress data first. Please note that
    /// this is not possible for encrypted chunks. This function simply returns Ok
    /// for encrypted chunks.
    /// Note: This does not call verify_crc
    /// Note: This does not call verify_crc, because this is usually done in load
    pub fn verify_unencrypted(
        &self,
        expected_chunk_size: usize,
@ -267,12 +286,26 @@ impl DataBlob {
            return Ok(());
        }

        let data = self.decode(None)?;
        // verifies digest!
        let data = self.decode(None, Some(expected_digest))?;

        if expected_chunk_size != data.len() {
            bail!("detected chunk with wrong length ({} != {})", expected_chunk_size, data.len());
        }
        let digest = openssl::sha::sha256(&data);

        Ok(())
    }

    fn verify_digest(
        data: &[u8],
        config: Option<&CryptConfig>,
        expected_digest: &[u8; 32],
    ) -> Result<(), Error> {

        let digest = match config {
            Some(config) => config.compute_digest(data),
            None => openssl::sha::sha256(data),
        };
        if &digest != expected_digest {
            bail!("detected chunk with wrong digest.");
        }
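verify_digest recomputes the sha256 of the decoded data and compares it with the digest the index promised. The unencrypted case in isolation (openssl and anyhow crates assumed):

    use anyhow::{bail, Error};

    fn verify_digest(data: &[u8], expected: &[u8; 32]) -> Result<(), Error> {
        let digest = openssl::sha::sha256(data);
        if &digest != expected {
            bail!("detected chunk with wrong digest");
        }
        Ok(())
    }

    fn main() -> Result<(), Error> {
        let payload = b"chunk payload";
        let expected = openssl::sha::sha256(payload);
        verify_digest(payload, &expected) // passes; any bit flip would fail
    }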
@ -7,6 +7,9 @@ use std::convert::TryFrom;
use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
use chrono::{DateTime, Utc};
use serde_json::Value;

use proxmox::tools::fs::{replace_file, CreateOptions};

use super::backup_info::{BackupGroup, BackupDir};
use super::chunk_store::ChunkStore;
@ -15,11 +18,12 @@ use super::fixed_index::{FixedIndexReader, FixedIndexWriter};
use super::manifest::{MANIFEST_BLOB_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
use super::index::*;
use super::{DataBlob, ArchiveType, archive_type};
use crate::backup::CryptMode;
use crate::config::datastore;
use crate::server::WorkerTask;
use crate::tools;
use crate::api2::types::GarbageCollectionStatus;
use crate::tools::format::HumanByte;
use crate::tools::fs::{lock_dir_noblock, DirLockGuard};
use crate::api2::types::{GarbageCollectionStatus, Userid};

lazy_static! {
    static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
@ -197,6 +201,8 @@ impl DataStore {

        let full_path = self.group_path(backup_group);

        let _guard = tools::fs::lock_dir_noblock(&full_path, "backup group", "possible running backup")?;

        log::info!("removing backup group {:?}", full_path);
        std::fs::remove_dir_all(&full_path)
            .map_err(|err| {
@ -211,10 +217,15 @@
    }

    /// Remove a backup directory including all content
    pub fn remove_backup_dir(&self, backup_dir: &BackupDir) -> Result<(), Error> {
    pub fn remove_backup_dir(&self, backup_dir: &BackupDir, force: bool) -> Result<(), Error> {

        let full_path = self.snapshot_path(backup_dir);

        let _guard;
        if !force {
            _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or used as base")?;
        }

        log::info!("removing backup snapshot {:?}", full_path);
        std::fs::remove_dir_all(&full_path)
            .map_err(|err| {
@ -246,16 +257,21 @@
    /// Returns the backup owner.
    ///
    /// The backup owner is the user who first created the backup group.
    pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<String, Error> {
    pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Userid, Error> {
        let mut full_path = self.base_path();
        full_path.push(backup_group.group_path());
        full_path.push("owner");
        let owner = proxmox::tools::fs::file_read_firstline(full_path)?;
        Ok(owner.trim_end().to_string()) // remove trailing newline
        Ok(owner.trim_end().parse()?) // remove trailing newline
    }

    /// Set the backup owner.
    pub fn set_owner(&self, backup_group: &BackupGroup, userid: &str, force: bool) -> Result<(), Error> {
    pub fn set_owner(
        &self,
        backup_group: &BackupGroup,
        userid: &Userid,
        force: bool,
    ) -> Result<(), Error> {
        let mut path = self.base_path();
        path.push(backup_group.group_path());
        path.push("owner");
@ -279,12 +295,17 @@ impl DataStore {
        Ok(())
    }

    /// Create a backup group if it does not already exist.
    /// Create (if it does not already exist) and lock a backup group
    ///
    /// And set the owner to 'userid'. If the group already exists, it returns the
    /// current owner (instead of setting the owner).
    pub fn create_backup_group(&self, backup_group: &BackupGroup, userid: &str) -> Result<String, Error> {

    ///
    /// This also acquires an exclusive lock on the directory and returns the lock guard.
    pub fn create_locked_backup_group(
        &self,
        backup_group: &BackupGroup,
        userid: &Userid,
    ) -> Result<(Userid, DirLockGuard), Error> {
        // create intermediate path first:
        let base_path = self.base_path();
@ -297,13 +318,15 @@ impl DataStore {
        // create the last component now
        match std::fs::create_dir(&full_path) {
            Ok(_) => {
                let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
                self.set_owner(backup_group, userid, false)?;
                let owner = self.get_owner(backup_group)?; // just to be sure
                Ok(owner)
                Ok((owner, guard))
            }
            Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
                let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
                let owner = self.get_owner(backup_group)?; // just to be sure
                Ok(owner)
                Ok((owner, guard))
            }
            Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
        }
@ -312,15 +335,20 @@
    /// Creates a new backup snapshot inside a BackupGroup
    ///
    /// The BackupGroup directory needs to exist.
    pub fn create_backup_dir(&self, backup_dir: &BackupDir) -> Result<(PathBuf, bool), io::Error> {
    pub fn create_locked_backup_dir(&self, backup_dir: &BackupDir)
        -> Result<(PathBuf, bool, DirLockGuard), Error>
    {
        let relative_path = backup_dir.relative_path();
        let mut full_path = self.base_path();
        full_path.push(&relative_path);

        let lock = ||
            lock_dir_noblock(&full_path, "snapshot", "internal error - tried creating snapshot that's already in use");

        match std::fs::create_dir(&full_path) {
            Ok(_) => Ok((relative_path, true)),
            Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false)),
            Err(e) => Err(e)
            Ok(_) => Ok((relative_path, true, lock()?)),
            Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false, lock()?)),
            Err(e) => Err(e.into())
        }
    }
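remove_backup_dir above uses a deferred guard binding, so the directory lock is only taken when force is false but still lives until the removal finishes. The shape of that pattern, with a hypothetical FileGuard standing in for DirLockGuard:

    struct FileGuard; // stands in for a real lock guard type

    fn lock_dir() -> Result<FileGuard, std::io::Error> {
        Ok(FileGuard)
    }

    fn remove(force: bool) -> Result<(), std::io::Error> {
        let _guard; // declared first, so it lives until the end of the function
        if !force {
            _guard = lock_dir()?; // unlock (drop) happens after the work below
        }
        // ... do the actual removal while the lock (if any) is held ...
        Ok(())
    }

    fn main() {
        remove(false).unwrap();
    }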
@ -391,8 +419,8 @@ impl DataStore {
            tools::fail_on_shutdown()?;
            let digest = index.index_digest(pos).unwrap();
            if let Err(err) = self.chunk_store.touch_chunk(digest) {
                bail!("unable to access chunk {}, required by {:?} - {}",
                      proxmox::tools::digest_to_hex(digest), file_name, err);
                worker.warn(&format!("warning: unable to access chunk {}, required by {:?} - {}",
                      proxmox::tools::digest_to_hex(digest), file_name, err));
            }
        }
        Ok(())
@ -402,6 +430,12 @@

        let image_list = self.list_images()?;

        let image_count = image_list.len();

        let mut done = 0;

        let mut last_percentage: usize = 0;

        for path in image_list {

            worker.fail_on_abort()?;
@ -416,6 +450,14 @@
                    self.index_mark_used_chunks(index, &path, status, worker)?;
                }
            }
            done += 1;

            let percentage = done*100/image_count;
            if percentage > last_percentage {
                worker.log(format!("percentage done: phase1 {}% ({} of {} index files)",
                    percentage, done, image_count));
                last_percentage = percentage;
            }
        }

        Ok(())
@ -435,9 +477,8 @@

        let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;

        let now = unsafe { libc::time(std::ptr::null_mut()) };

        let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(now);
        let phase1_start_time = unsafe { libc::time(std::ptr::null_mut()) };
        let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(phase1_start_time);

        let mut gc_status = GarbageCollectionStatus::default();
        gc_status.upid = Some(worker.to_string());
@ -447,26 +488,26 @@
        self.mark_used_chunks(&mut gc_status, &worker)?;

        worker.log("Start GC phase2 (sweep unused chunks)");
        self.chunk_store.sweep_unused_chunks(oldest_writer, &mut gc_status, &worker)?;
        self.chunk_store.sweep_unused_chunks(oldest_writer, phase1_start_time, &mut gc_status, &worker)?;

        worker.log(&format!("Removed bytes: {}", gc_status.removed_bytes));
        worker.log(&format!("Removed garbage: {}", HumanByte::from(gc_status.removed_bytes)));
        worker.log(&format!("Removed chunks: {}", gc_status.removed_chunks));
        if gc_status.pending_bytes > 0 {
            worker.log(&format!("Pending removals: {} bytes ({} chunks)", gc_status.pending_bytes, gc_status.pending_chunks));
            worker.log(&format!("Pending removals: {} (in {} chunks)", HumanByte::from(gc_status.pending_bytes), gc_status.pending_chunks));
        }

        worker.log(&format!("Original data bytes: {}", gc_status.index_data_bytes));
        worker.log(&format!("Original data usage: {}", HumanByte::from(gc_status.index_data_bytes)));

        if gc_status.index_data_bytes > 0 {
            let comp_per = (gc_status.disk_bytes*100)/gc_status.index_data_bytes;
            worker.log(&format!("Disk bytes: {} ({} %)", gc_status.disk_bytes, comp_per));
            let comp_per = (gc_status.disk_bytes as f64 * 100.)/gc_status.index_data_bytes as f64;
            worker.log(&format!("On-Disk usage: {} ({:.2}%)", HumanByte::from(gc_status.disk_bytes), comp_per));
        }

        worker.log(&format!("Disk chunks: {}", gc_status.disk_chunks));
        worker.log(&format!("On-Disk chunks: {}", gc_status.disk_chunks));

        if gc_status.disk_chunks > 0 {
            let avg_chunk = gc_status.disk_bytes/(gc_status.disk_chunks as u64);
            worker.log(&format!("Average chunk size: {}", avg_chunk));
            worker.log(&format!("Average chunk size: {}", HumanByte::from(avg_chunk)));
        }

        *self.last_gc_status.lock().unwrap() = gc_status;
@ -498,31 +539,69 @@ impl DataStore {
        self.chunk_store.insert_chunk(chunk, digest)
    }

    pub fn verify_stored_chunk(&self, digest: &[u8; 32], expected_chunk_size: u64) -> Result<(), Error> {
        let blob = self.chunk_store.read_chunk(digest)?;
        blob.verify_crc()?;
        blob.verify_unencrypted(expected_chunk_size as usize, digest)?;
        Ok(())
    }

    pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<(DataBlob, u64), Error> {
    pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<DataBlob, Error> {
        let mut path = self.base_path();
        path.push(backup_dir.relative_path());
        path.push(filename);

        let raw_data = proxmox::tools::fs::file_get_contents(&path)?;
        let raw_size = raw_data.len() as u64;
        let blob = DataBlob::from_raw(raw_data)?;
        Ok((blob, raw_size))
        proxmox::try_block!({
            let mut file = std::fs::File::open(&path)?;
            DataBlob::load_from_reader(&mut file)
        }).map_err(|err| format_err!("unable to load blob '{:?}' - {}", path, err))
    }

    pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {

        let (chunk_path, digest_str) = self.chunk_store.chunk_path(digest);

        proxmox::try_block!({
            let mut file = std::fs::File::open(&chunk_path)?;
            DataBlob::load_from_reader(&mut file)
        }).map_err(|err| format_err!(
            "store '{}', unable to load chunk '{}' - {}",
            self.name(),
            digest_str,
            err,
        ))
    }

    pub fn load_manifest(
        &self,
        backup_dir: &BackupDir,
    ) -> Result<(BackupManifest, CryptMode, u64), Error> {
        let (blob, raw_size) = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
        let crypt_mode = blob.crypt_mode()?;
    ) -> Result<(BackupManifest, u64), Error> {
        let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
        let raw_size = blob.raw_size();
        let manifest = BackupManifest::try_from(blob)?;
        Ok((manifest, crypt_mode, raw_size))
        Ok((manifest, raw_size))
    }

    pub fn load_manifest_json(
        &self,
        backup_dir: &BackupDir,
    ) -> Result<Value, Error> {
        let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
        // no expected digest available
        let manifest_data = blob.decode(None, None)?;
        let manifest: Value = serde_json::from_slice(&manifest_data[..])?;
        Ok(manifest)
    }

    pub fn store_manifest(
        &self,
        backup_dir: &BackupDir,
        manifest: Value,
    ) -> Result<(), Error> {
        let manifest = serde_json::to_string_pretty(&manifest)?;
        let blob = DataBlob::encode(manifest.as_bytes(), None, true)?;
        let raw_data = blob.raw_data();

        let mut path = self.base_path();
        path.push(backup_dir.relative_path());
        path.push(MANIFEST_BLOB_NAME);

        replace_file(&path, raw_data, CreateOptions::new())?;

        Ok(())
    }
}
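load_blob and load_chunk both use proxmox::try_block! so a single map_err attaches context to every fallible step inside. Without the macro, the same effect comes from an immediately-invoked closure (a sketch, anyhow assumed):

    use anyhow::{format_err, Error};
    use std::io::Read;

    fn load_file(path: &str) -> Result<Vec<u8>, Error> {
        (|| -> Result<Vec<u8>, Error> {
            // both the open and the read error get the same context below
            let mut file = std::fs::File::open(path)?;
            let mut data = Vec::new();
            file.read_to_end(&mut data)?;
            Ok(data)
        })()
        .map_err(|err| format_err!("unable to load blob '{}' - {}", path, err))
    }

    fn main() {
        // prints one well-contextualized error for either failure mode
        if let Err(err) = load_file("/nonexistent/blob") {
            eprintln!("{}", err);
        }
    }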
@ -11,7 +11,6 @@ use anyhow::{bail, format_err, Error};

use proxmox::tools::io::ReadExt;
use proxmox::tools::uuid::Uuid;
use proxmox::tools::vec;
use proxmox::tools::mmap::Mmap;
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

@ -41,6 +40,24 @@ proxmox::static_assert_size!(DynamicIndexHeader, 4096);
// pub data: DynamicIndexHeaderData,
// }

impl DynamicIndexHeader {
    /// Convenience method to allocate a zero-initialized header struct.
    pub fn zeroed() -> Box<Self> {
        unsafe {
            Box::from_raw(std::alloc::alloc_zeroed(std::alloc::Layout::new::<Self>()) as *mut Self)
        }
    }

    pub fn as_bytes(&self) -> &[u8] {
        unsafe {
            std::slice::from_raw_parts(
                self as *const Self as *const u8,
                std::mem::size_of::<Self>(),
            )
        }
    }
}

#[derive(Clone, Debug)]
#[repr(C)]
pub struct DynamicEntry {
@ -489,27 +506,16 @@ impl DynamicIndexWriter {

        let mut writer = BufWriter::with_capacity(1024 * 1024, file);

        let header_size = std::mem::size_of::<DynamicIndexHeader>();

        // todo: use static assertion when available in rust
        if header_size != 4096 {
            panic!("got unexpected header size");
        }

        let ctime = epoch_now_u64()?;

        let uuid = Uuid::generate();

        let mut buffer = vec::zeroed(header_size);
        let header = crate::tools::map_struct_mut::<DynamicIndexHeader>(&mut buffer)?;

        let mut header = DynamicIndexHeader::zeroed();
        header.magic = super::DYNAMIC_SIZED_CHUNK_INDEX_1_0;
        header.ctime = u64::to_le(ctime);
        header.uuid = *uuid.as_bytes();

        header.index_csum = [0u8; 32];

        writer.write_all(&buffer)?;
        // header.index_csum = [0u8; 32];
        writer.write_all(header.as_bytes())?;

        let csum = Some(openssl::sha::Sha256::new());
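DynamicIndexHeader::zeroed trades the old map_struct_mut-over-buffer approach for a heap allocation that is zero-initialized by construction. A reduced sketch of the technique (sound only for repr(C) types where all-zero bytes are a valid value for every field; the fields here are illustrative):

    use std::alloc::{alloc_zeroed, Layout};

    #[repr(C)]
    struct Header {
        magic: [u8; 8],
        ctime: u64,
        csum: [u8; 32],
    }

    fn zeroed_header() -> Box<Header> {
        // alloc_zeroed gives memory that is already a valid all-zero Header
        unsafe { Box::from_raw(alloc_zeroed(Layout::new::<Header>()) as *mut Header) }
    }

    fn as_bytes(h: &Header) -> &[u8] {
        // view the struct as raw bytes for writing to disk
        unsafe {
            std::slice::from_raw_parts(h as *const Header as *const u8, std::mem::size_of::<Header>())
        }
    }

    fn main() {
        let mut h = zeroed_header();
        h.magic = *b"EXAMPLE\0";
        assert_eq!(as_bytes(&h).len(), std::mem::size_of::<Header>());
    }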
@ -49,6 +49,20 @@ pub struct FileInfo {
    pub csum: [u8; 32],
}

impl FileInfo {

    /// Return expected CryptMode of referenced chunks
    ///
    /// Encrypted Indices should only reference encrypted chunks, while signed or plain indices
    /// should only reference plain chunks.
    pub fn chunk_crypt_mode(&self) -> CryptMode {
        match self.crypt_mode {
            CryptMode::Encrypt => CryptMode::Encrypt,
            CryptMode::SignOnly | CryptMode::None => CryptMode::None,
        }
    }
}

#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
pub struct BackupManifest {
@ -58,6 +72,7 @@ pub struct BackupManifest {
    files: Vec<FileInfo>,
    #[serde(default="empty_value")] // to be compatible with < 0.8.0 backups
    pub unprotected: Value,
    pub signature: Option<String>,
}

#[derive(PartialEq)]
@ -91,6 +106,7 @@ impl BackupManifest {
            backup_time: snapshot.backup_time().timestamp(),
            files: Vec::new(),
            unprotected: json!({}),
            signature: None,
        }
    }

@ -129,7 +145,7 @@ impl BackupManifest {
        Ok(())
    }

    // Generate cannonical json
    // Generate canonical json
    fn to_canonical_json(value: &Value) -> Result<Vec<u8>, Error> {
        let mut data = Vec::new();
        Self::write_canonical_json(value, &mut data)?;
@ -160,12 +176,12 @@ impl BackupManifest {
                keys.sort();
                let mut iter = keys.into_iter();
                if let Some(key) = iter.next() {
                    Self::write_canonical_json(&key.into(), output)?;
                    serde_json::to_writer(&mut *output, &key)?;
                    output.push(b':');
                    Self::write_canonical_json(&map[key], output)?;
                    for key in iter {
                        output.push(b',');
                        Self::write_canonical_json(&key.into(), output)?;
                        serde_json::to_writer(&mut *output, &key)?;
                        output.push(b':');
                        Self::write_canonical_json(&map[key], output)?;
                    }
@ -238,7 +254,8 @@ impl TryFrom<super::DataBlob> for BackupManifest {
    type Error = Error;

    fn try_from(blob: super::DataBlob) -> Result<Self, Error> {
        let data = blob.decode(None)
        // no expected digest available
        let data = blob.decode(None, None)
            .map_err(|err| format_err!("decode backup manifest blob failed - {}", err))?;
        let json: Value = serde_json::from_slice(&data[..])
            .map_err(|err| format_err!("unable to parse backup manifest json - {}", err))?;
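Canonical JSON gives a byte-stable encoding for signing: object keys are emitted in sorted order with no whitespace, and serde_json handles key escaping. A compact sketch of such a writer, mirroring the write_canonical_json idea above (serde_json and anyhow assumed):

    use anyhow::Error;
    use serde_json::Value;

    fn write_canonical(value: &Value, out: &mut Vec<u8>) -> Result<(), Error> {
        match value {
            Value::Object(map) => {
                out.push(b'{');
                let mut keys: Vec<&String> = map.keys().collect();
                keys.sort(); // stable key order is the whole point
                for (i, key) in keys.iter().enumerate() {
                    if i > 0 { out.push(b','); }
                    serde_json::to_writer(&mut *out, key)?; // quoted/escaped key
                    out.push(b':');
                    write_canonical(&map[*key], out)?;
                }
                out.push(b'}');
            }
            Value::Array(list) => {
                out.push(b'[');
                for (i, item) in list.iter().enumerate() {
                    if i > 0 { out.push(b','); }
                    write_canonical(item, out)?;
                }
                out.push(b']');
            }
            _ => serde_json::to_writer(&mut *out, value)?,
        }
        Ok(())
    }

    fn main() -> Result<(), Error> {
        let v: Value = serde_json::from_str(r#"{"b":1,"a":[true,null]}"#)?;
        let mut out = Vec::new();
        write_canonical(&v, &mut out)?;
        assert_eq!(out, br#"{"a":[true,null],"b":1}"#);
        Ok(())
    }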
@ -53,7 +53,7 @@ fn remove_incomplete_snapshots(
    let mut keep_unfinished = true;
    for info in list.iter() {
        // backup is considered unfinished if there is no manifest
        if info.files.iter().any(|name| name == super::MANIFEST_BLOB_NAME) {
        if info.is_finished() {
            // There is a new finished backup, so there is no need
            // to keep older unfinished backups.
            keep_unfinished = false;
@ -2,9 +2,9 @@ use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;

use anyhow::Error;
use anyhow::{bail, Error};

use super::crypt_config::CryptConfig;
use super::crypt_config::{CryptConfig, CryptMode};
use super::data_blob::DataBlob;
use super::datastore::DataStore;

@ -21,33 +21,47 @@ pub trait ReadChunk {
pub struct LocalChunkReader {
    store: Arc<DataStore>,
    crypt_config: Option<Arc<CryptConfig>>,
    crypt_mode: CryptMode,
}

impl LocalChunkReader {
    pub fn new(store: Arc<DataStore>, crypt_config: Option<Arc<CryptConfig>>) -> Self {
    pub fn new(store: Arc<DataStore>, crypt_config: Option<Arc<CryptConfig>>, crypt_mode: CryptMode) -> Self {
        Self {
            store,
            crypt_config,
            crypt_mode,
        }
    }

    fn ensure_crypt_mode(&self, chunk_mode: CryptMode) -> Result<(), Error> {
        match self.crypt_mode {
            CryptMode::Encrypt => {
                match chunk_mode {
                    CryptMode::Encrypt => Ok(()),
                    CryptMode::SignOnly | CryptMode::None => bail!("Index and chunk CryptMode don't match."),
                }
            },
            CryptMode::SignOnly | CryptMode::None => {
                match chunk_mode {
                    CryptMode::Encrypt => bail!("Index and chunk CryptMode don't match."),
                    CryptMode::SignOnly | CryptMode::None => Ok(()),
                }
            },
        }
    }
}

impl ReadChunk for LocalChunkReader {
    fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
        let (path, _) = self.store.chunk_path(digest);
        let raw_data = proxmox::tools::fs::file_get_contents(&path)?;
        let chunk = DataBlob::from_raw(raw_data)?;
        chunk.verify_crc()?;

        let chunk = self.store.load_chunk(digest)?;
        self.ensure_crypt_mode(chunk.crypt_mode()?)?;
        Ok(chunk)
    }

    fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
        let chunk = ReadChunk::read_raw_chunk(self, digest)?;

        let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;

        // fixme: verify digest?
        let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;

        Ok(raw_data)
    }
@ -76,8 +90,9 @@ impl AsyncReadChunk for LocalChunkReader {
            let (path, _) = self.store.chunk_path(digest);

            let raw_data = tokio::fs::read(&path).await?;
            let chunk = DataBlob::from_raw(raw_data)?;
            chunk.verify_crc()?;

            let chunk = DataBlob::load_from_reader(&mut &raw_data[..])?;
            self.ensure_crypt_mode(chunk.crypt_mode()?)?;

            Ok(chunk)
        })
@ -90,7 +105,7 @@ impl AsyncReadChunk for LocalChunkReader {
        Box::pin(async move {
            let chunk = AsyncReadChunk::read_raw_chunk(self, digest).await?;

            let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
            let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;

            // fixme: verify digest?
@ -1,58 +1,186 @@
use anyhow::{bail, Error};
use std::collections::HashSet;
use std::sync::{Arc, Mutex};
use std::sync::atomic::{Ordering, AtomicUsize};
use std::time::Instant;

use anyhow::{bail, format_err, Error};

use crate::server::WorkerTask;
use crate::api2::types::*;

use super::{
    DataStore, BackupGroup, BackupDir, BackupInfo, IndexFile,
    ENCR_COMPR_BLOB_MAGIC_1_0, ENCRYPTED_BLOB_MAGIC_1_0,
    DataStore, DataBlob, BackupGroup, BackupDir, BackupInfo, IndexFile,
    CryptMode,
    FileInfo, ArchiveType, archive_type,
};

fn verify_blob(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
fn verify_blob(datastore: Arc<DataStore>, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {

    let (blob, raw_size) = datastore.load_blob(backup_dir, &info.filename)?;
    let blob = datastore.load_blob(backup_dir, &info.filename)?;

    let csum = openssl::sha::sha256(blob.raw_data());
    let raw_size = blob.raw_size();
    if raw_size != info.size {
        bail!("wrong size ({} != {})", info.size, raw_size);
    }

    let csum = openssl::sha::sha256(blob.raw_data());
    if csum != info.csum {
        bail!("wrong index checksum");
    }

    blob.verify_crc()?;

    let magic = blob.magic();

    if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
        return Ok(());
    match blob.crypt_mode()? {
        CryptMode::Encrypt => Ok(()),
        CryptMode::None => {
            // digest already verified above
            blob.decode(None, None)?;
            Ok(())
        },
        CryptMode::SignOnly => bail!("Invalid CryptMode for blob"),
    }
}

    blob.decode(None)?;
// We use a separate thread to read/load chunks, so that we can do
// load and verify in parallel to increase performance.
fn chunk_reader_thread(
    datastore: Arc<DataStore>,
    index: Box<dyn IndexFile + Send>,
    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
    errors: Arc<AtomicUsize>,
    worker: Arc<WorkerTask>,
) -> std::sync::mpsc::Receiver<(DataBlob, [u8;32], u64)> {

    Ok(())
    let (sender, receiver) = std::sync::mpsc::sync_channel(3); // buffer up to 3 chunks

    std::thread::spawn(move|| {
        for pos in 0..index.index_count() {
            let info = index.chunk_info(pos).unwrap();
            let size = info.range.end - info.range.start;

            if verified_chunks.lock().unwrap().contains(&info.digest) {
                continue; // already verified
            }

            if corrupt_chunks.lock().unwrap().contains(&info.digest) {
                let digest_str = proxmox::tools::digest_to_hex(&info.digest);
                worker.log(format!("chunk {} was marked as corrupt", digest_str));
                errors.fetch_add(1, Ordering::SeqCst);
                continue;
            }

            match datastore.load_chunk(&info.digest) {
                Err(err) => {
                    corrupt_chunks.lock().unwrap().insert(info.digest);
                    worker.log(format!("can't verify chunk, load failed - {}", err));
                    errors.fetch_add(1, Ordering::SeqCst);
                    continue;
                }
                Ok(chunk) => {
                    if sender.send((chunk, info.digest, size)).is_err() {
                        break; // receiver gone - simply stop
                    }
                }
            }
        }
    });

    receiver
}
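chunk_reader_thread decouples disk reads from verification through a bounded channel, so I/O and CPU work overlap while at most three chunks are buffered. The pipeline shape in isolation (std only; the Vec<u8> items stand in for loaded chunks):

    use std::sync::mpsc::sync_channel;
    use std::thread;

    fn main() {
        let (sender, receiver) = sync_channel::<Vec<u8>>(3); // buffer up to 3 items

        let producer = thread::spawn(move || {
            for i in 0..10u8 {
                let chunk = vec![i; 4]; // stands in for loading a chunk from disk
                if sender.send(chunk).is_err() {
                    break; // receiver gone - simply stop
                }
            }
            // the channel closes when sender is dropped here
        });

        // consumer: receives until the channel is closed
        while let Ok(chunk) = receiver.recv() {
            assert_eq!(chunk.len(), 4); // stands in for digest verification
        }

        producer.join().unwrap();
    }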
fn verify_index_chunks(
    datastore: &DataStore,
    index: Box<dyn IndexFile>,
    worker: &WorkerTask,
    datastore: Arc<DataStore>,
    index: Box<dyn IndexFile + Send>,
    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
    corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
    crypt_mode: CryptMode,
    worker: Arc<WorkerTask>,
) -> Result<(), Error> {

    for pos in 0..index.index_count() {
    let errors = Arc::new(AtomicUsize::new(0));

    let start_time = Instant::now();

    let chunk_channel = chunk_reader_thread(
        datastore,
        index,
        verified_chunks.clone(),
        corrupt_chunks.clone(),
        errors.clone(),
        worker.clone(),
    );

    let mut read_bytes = 0;
    let mut decoded_bytes = 0;

    loop {

        worker.fail_on_abort()?;
        crate::tools::fail_on_shutdown()?;

        let info = index.chunk_info(pos).unwrap();
        let size = info.range.end - info.range.start;
        datastore.verify_stored_chunk(&info.digest, size)?;
        let (chunk, digest, size) = match chunk_channel.recv() {
            Ok(tuple) => tuple,
            Err(std::sync::mpsc::RecvError) => break,
        };

        read_bytes += chunk.raw_size();
        decoded_bytes += size;

        let chunk_crypt_mode = match chunk.crypt_mode() {
            Err(err) => {
                corrupt_chunks.lock().unwrap().insert(digest);
                worker.log(format!("can't verify chunk, unknown CryptMode - {}", err));
                errors.fetch_add(1, Ordering::SeqCst);
                continue;
            },
            Ok(mode) => mode,
        };

        if chunk_crypt_mode != crypt_mode {
            worker.log(format!(
                "chunk CryptMode {:?} does not match index CryptMode {:?}",
                chunk_crypt_mode,
                crypt_mode
            ));
            errors.fetch_add(1, Ordering::SeqCst);
        }

        if let Err(err) = chunk.verify_unencrypted(size as usize, &digest) {
            corrupt_chunks.lock().unwrap().insert(digest);
            worker.log(format!("{}", err));
            errors.fetch_add(1, Ordering::SeqCst);
        } else {
            verified_chunks.lock().unwrap().insert(digest);
        }
    }

    let elapsed = start_time.elapsed().as_secs_f64();

    let read_bytes_mib = (read_bytes as f64)/(1024.0*1024.0);
    let decoded_bytes_mib = (decoded_bytes as f64)/(1024.0*1024.0);

    let read_speed = read_bytes_mib/elapsed;
    let decode_speed = decoded_bytes_mib/elapsed;

    let error_count = errors.load(Ordering::SeqCst);

    worker.log(format!(" verified {:.2}/{:.2} Mib in {:.2} seconds, speed {:.2}/{:.2} Mib/s ({} errors)",
        read_bytes_mib, decoded_bytes_mib, elapsed, read_speed, decode_speed, error_count));

    if errors.load(Ordering::SeqCst) > 0 {
        bail!("chunks could not be verified");
    }

    Ok(())
}

fn verify_fixed_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {
fn verify_fixed_index(
    datastore: Arc<DataStore>,
    backup_dir: &BackupDir,
    info: &FileInfo,
    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
    worker: Arc<WorkerTask>,
) -> Result<(), Error> {

    let mut path = backup_dir.relative_path();
    path.push(&info.filename);
@ -68,10 +196,18 @@ fn verify_fixed_index(datastore: &DataStore, backup_dir: &BackupDir, info: &File
        bail!("wrong index checksum");
    }

    verify_index_chunks(datastore, Box::new(index), worker)
    verify_index_chunks(datastore, Box::new(index), verified_chunks, corrupt_chunks, info.chunk_crypt_mode(), worker)
}

fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {
fn verify_dynamic_index(
    datastore: Arc<DataStore>,
    backup_dir: &BackupDir,
    info: &FileInfo,
    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
    worker: Arc<WorkerTask>,
) -> Result<(), Error> {

    let mut path = backup_dir.relative_path();
    path.push(&info.filename);

@ -86,7 +222,7 @@ fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &Fi
        bail!("wrong index checksum");
    }

    verify_index_chunks(datastore, Box::new(index), worker)
    verify_index_chunks(datastore, Box::new(index), verified_chunks, corrupt_chunks, info.chunk_crypt_mode(), worker)
}

/// Verify a single backup snapshot
@ -98,10 +234,16 @@ fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &Fi
/// - Ok(true) if verify is successful
/// - Ok(false) if there were verification errors
/// - Err(_) if task was aborted
pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker: &WorkerTask) -> Result<bool, Error> {
pub fn verify_backup_dir(
    datastore: Arc<DataStore>,
    backup_dir: &BackupDir,
    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
    worker: Arc<WorkerTask>
) -> Result<bool, Error> {

    let manifest = match datastore.load_manifest(&backup_dir) {
        Ok((manifest, _crypt_mode, _)) => manifest,
    let mut manifest = match datastore.load_manifest(&backup_dir) {
        Ok((manifest, _)) => manifest,
        Err(err) => {
            worker.log(format!("verify {}:{} - manifest load error: {}", datastore.name(), backup_dir, err));
            return Ok(false);
@ -112,24 +254,53 @@ pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker:

    let mut error_count = 0;

    let mut verify_result = "ok";
    for info in manifest.files() {
        let result = proxmox::try_block!({
            worker.log(format!(" check {}", info.filename));
            match archive_type(&info.filename)? {
                ArchiveType::FixedIndex => verify_fixed_index(&datastore, &backup_dir, info, worker),
                ArchiveType::DynamicIndex => verify_dynamic_index(&datastore, &backup_dir, info, worker),
                ArchiveType::Blob => verify_blob(&datastore, &backup_dir, info),
                ArchiveType::FixedIndex =>
                    verify_fixed_index(
                        datastore.clone(),
                        &backup_dir,
                        info,
                        verified_chunks.clone(),
                        corrupt_chunks.clone(),
                        worker.clone(),
                    ),
                ArchiveType::DynamicIndex =>
                    verify_dynamic_index(
                        datastore.clone(),
                        &backup_dir,
                        info,
                        verified_chunks.clone(),
                        corrupt_chunks.clone(),
                        worker.clone(),
                    ),
                ArchiveType::Blob => verify_blob(datastore.clone(), &backup_dir, info),
            }
        });

        worker.fail_on_abort()?;
        crate::tools::fail_on_shutdown()?;

        if let Err(err) = result {
            worker.log(format!("verify {}:{}/{} failed: {}", datastore.name(), backup_dir, info.filename, err));
            error_count += 1;
            verify_result = "failed";
        }

    }

    let verify_state = SnapshotVerifyState {
        state: verify_result.to_string(),
        upid: worker.upid().clone(),
    };
    manifest.unprotected["verify_state"] = serde_json::to_value(verify_state)?;
    datastore.store_manifest(&backup_dir, serde_json::to_value(manifest)?)
        .map_err(|err| format_err!("unable to store manifest blob - {}", err))?;

    Ok(error_count == 0)
}
@ -138,31 +309,45 @@ pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker:
/// Errors are logged to the worker log.
///
/// Returns
/// - Ok(true) if verify is successful
/// - Ok(false) if there were verification errors
/// - Ok((count, failed_dirs)) where failed_dirs had verification errors
/// - Err(_) if task was aborted
pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &WorkerTask) -> Result<bool, Error> {
pub fn verify_backup_group(
    datastore: Arc<DataStore>,
    group: &BackupGroup,
    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
    progress: Option<(usize, usize)>, // (done, snapshot_count)
    worker: Arc<WorkerTask>,
) -> Result<(usize, Vec<String>), Error> {

    let mut errors = Vec::new();
    let mut list = match group.list_backups(&datastore.base_path()) {
        Ok(list) => list,
        Err(err) => {
            worker.log(format!("verify group {}:{} - unable to list backups: {}", datastore.name(), group, err));
            return Ok(false);
            return Ok((0, errors));
        }
    };

    worker.log(format!("verify group {}:{}", datastore.name(), group));

    let mut error_count = 0;
    let (done, snapshot_count) = progress.unwrap_or((0, list.len()));

    let mut count = 0;
    BackupInfo::sort_list(&mut list, false); // newest first
    for info in list {
        if !verify_backup_dir(datastore, &info.backup_dir, worker)? {
            error_count += 1;
        count += 1;
        if !verify_backup_dir(datastore.clone(), &info.backup_dir, verified_chunks.clone(), corrupt_chunks.clone(), worker.clone())?{
            errors.push(info.backup_dir.to_string());
        }
        if snapshot_count != 0 {
            let pos = done + count;
            let percentage = ((pos as f64) * 100.0)/(snapshot_count as f64);
            worker.log(format!("percentage done: {:.2}% ({} of {} snapshots)", percentage, pos, snapshot_count));
        }
    }

    Ok(error_count == 0)
    Ok((count, errors))
}

/// Verify all backups inside a datastore
@ -170,27 +355,49 @@ pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &
|
||||
/// Errors are logged to the worker log.
|
||||
///
|
||||
/// Returns
|
||||
/// - Ok(true) if verify is successful
|
||||
/// - Ok(false) if there were verification errors
|
||||
/// - Ok(failed_dirs) where failed_dirs had verification errors
|
||||
/// - Err(_) if task was aborted
|
||||
pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> Result<bool, Error> {
|
||||
pub fn verify_all_backups(datastore: Arc<DataStore>, worker: Arc<WorkerTask>) -> Result<Vec<String>, Error> {
|
||||
|
||||
let list = match BackupGroup::list_groups(&datastore.base_path()) {
|
||||
let mut errors = Vec::new();
|
||||
|
||||
let mut list = match BackupGroup::list_groups(&datastore.base_path()) {
|
||||
Ok(list) => list,
|
||||
Err(err) => {
|
||||
worker.log(format!("verify datastore {} - unable to list backups: {}", datastore.name(), err));
|
||||
return Ok(false);
|
||||
return Ok(errors);
|
||||
}
|
||||
};
|
||||
|
||||
worker.log(format!("verify datastore {}", datastore.name()));
|
||||
list.sort_unstable();
|
||||
|
||||
let mut error_count = 0;
|
||||
for group in list {
|
||||
if !verify_backup_group(datastore, &group, worker)? {
|
||||
error_count += 1;
|
||||
}
|
||||
let mut snapshot_count = 0;
|
||||
for group in list.iter() {
|
||||
snapshot_count += group.list_backups(&datastore.base_path())?.len();
|
||||
}
|
||||
|
||||
Ok(error_count == 0)
|
||||
// start with 16384 chunks (up to 65GB)
|
||||
let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
|
||||
|
||||
// start with 64 chunks since we assume there are few corrupt ones
|
||||
let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
|
||||
|
||||
worker.log(format!("verify datastore {} ({} snapshots)", datastore.name(), snapshot_count));
|
||||
|
||||
let mut done = 0;
|
||||
for group in list {
|
||||
let (count, mut group_errors) = verify_backup_group(
|
||||
datastore.clone(),
|
||||
&group,
|
||||
verified_chunks.clone(),
|
||||
corrupt_chunks.clone(),
|
||||
Some((done, snapshot_count)),
|
||||
worker.clone(),
|
||||
)?;
|
||||
errors.append(&mut group_errors);
|
||||
|
||||
done += count;
|
||||
}
|
||||
|
||||
Ok(errors)
|
||||
}
|
||||
|
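The new `verify_all_backups` signature returns the list of failed snapshot paths instead of a plain boolean. A minimal sketch of how a caller might consume it; the surrounding worker-task wiring here is assumed, not part of this hunk:

    // Editor's sketch (not part of the diff): driving the new verify API.
    use std::sync::Arc;

    fn run_verify(datastore: Arc<DataStore>, worker: Arc<WorkerTask>) -> Result<(), Error> {
        let failed_dirs = verify_all_backups(datastore, worker.clone())?;
        if failed_dirs.is_empty() {
            worker.log("verify OK".to_string());
        } else {
            worker.log("Failed to verify these snapshots:".to_string());
            for dir in failed_dirs {
                worker.log(format!("\t{}", dir));
            }
        }
        Ok(())
    }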
@ -37,6 +37,7 @@ async fn run() -> Result<(), Error> {
    config::update_self_signed_cert(false)?;

    proxmox_backup::rrd::create_rrdb_dir()?;
    proxmox_backup::config::jobstate::create_jobstate_dir()?;

    if let Err(err) = generate_auth_key() {
        bail!("unable to generate auth key - {}", err);
@ -184,7 +184,7 @@ pub fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<
    result
}

fn connect(server: &str, userid: &str) -> Result<HttpClient, Error> {
fn connect(server: &str, userid: &Userid) -> Result<HttpClient, Error> {

    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();

@ -1120,12 +1120,12 @@ async fn create_backup(
    }

    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = "rsa-encrypted.key";
        let target = "rsa-encrypted.key.blob";
        println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
        let stats = client
            .upload_blob_from_data(rsa_encrypted_key, target, false, false)
            .await?;
        manifest.add_file(format!("{}.blob", target), stats.size, stats.csum, crypt_mode)?;
        manifest.add_file(target.to_string(), stats.size, stats.csum, crypt_mode)?;

        // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
        /*
@ -1136,7 +1136,6 @@ async fn create_backup(
        println!("TEST {} {:?}", len, buffer2);
        */
    }

    // create manifest (index.json)
    // manifests are never encrypted, but include a signature
    let manifest = manifest.to_string(crypt_config.as_ref().map(Arc::as_ref))
@ -1183,6 +1182,7 @@ fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<Str
async fn dump_image<W: Write>(
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    crypt_mode: CryptMode,
    index: FixedIndexReader,
    mut writer: W,
    verbose: bool,
@ -1190,7 +1190,7 @@ async fn dump_image<W: Write>(

    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, crypt_mode, most_used);

    // Note: we avoid using BufferedFixedReader, because that add an additional buffer/copy
    // and thus slows down reading. Instead, directly use RemoteChunkReader
@ -1341,7 +1341,12 @@ async fn restore(param: Value) -> Result<Value, Error> {
            .map_err(|err| format_err!("unable to pipe data - {}", err))?;
    }

    } else if archive_type == ArchiveType::Blob {
        return Ok(Value::Null);
    }

    let file_info = manifest.lookup_file_info(&archive_name)?;

    if archive_type == ArchiveType::Blob {

        let mut reader = client.download_blob(&manifest, &archive_name).await?;

@ -1366,7 +1371,7 @@ async fn restore(param: Value) -> Result<Value, Error> {

    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);

    let mut reader = BufferedDynamicReader::new(index, chunk_reader);

@ -1375,6 +1380,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
        pxar::decoder::Decoder::from_std(reader)?,
        Path::new(target),
        &[],
        true,
        proxmox_backup::pxar::Flags::DEFAULT,
        allow_existing_dirs,
        |path| {
@ -1382,6 +1388,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
            println!("{:?}", path);
        }
        },
        None,
    )
    .map_err(|err| format_err!("error extracting archive - {}", err))?;
} else {
@ -1411,7 +1418,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
        .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
    };

    dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose).await?;
    dump_image(client.clone(), crypt_config.clone(), file_info.chunk_crypt_mode(), index, &mut writer, verbose).await?;
}

Ok(Value::Null)
@ -9,7 +9,7 @@ use proxmox_backup::tools;
use proxmox_backup::config;
use proxmox_backup::api2::{self, types::* };
use proxmox_backup::client::*;
use proxmox_backup::tools::ticket::*;
use proxmox_backup::tools::ticket::Ticket;
use proxmox_backup::auth_helpers::*;

mod proxmox_backup_manager;
@ -59,12 +59,13 @@ fn connect() -> Result<HttpClient, Error> {
        .verify_cert(false); // not required for connection to localhost

    let client = if uid.is_root() {
        let ticket = assemble_rsa_ticket(private_auth_key(), "PBS", Some("root@pam"), None)?;
        let ticket = Ticket::new("PBS", Userid::root_userid())?
            .sign(private_auth_key(), None)?;
        options = options.password(Some(ticket));
        HttpClient::new("localhost", "root@pam", options)?
        HttpClient::new("localhost", Userid::root_userid(), options)?
    } else {
        options = options.ticket_cache(true).interactive(true);
        HttpClient::new("localhost", "root@pam", options)?
        HttpClient::new("localhost", Userid::root_userid(), options)?
    };

    Ok(client)
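The switch from `assemble_rsa_ticket` to the `Ticket` builder keeps the same flow: create a ticket for the root user, sign it with the private auth key, and hand it to the client as a password. A rough sketch of that flow, assuming `Ticket::new(..)?.sign(..)` yields the signed ticket string as in the hunk above:

    // Editor's sketch (not part of the diff): localhost client via signed ticket.
    let options = HttpClientOptions::new()
        .verify_cert(false); // localhost connection

    let ticket = Ticket::new("PBS", Userid::root_userid())?
        .sign(private_auth_key(), None)?;
    let options = options.password(Some(ticket));
    let client = HttpClient::new("localhost", Userid::root_userid(), options)?;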
@ -9,6 +9,7 @@ use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;

use proxmox_backup::api2::types::Userid;
use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
@ -17,13 +18,21 @@ use proxmox_backup::server::{ApiConfig, rest::*};
use proxmox_backup::auth_helpers::*;
use proxmox_backup::tools::disks::{ DiskManage, zfs_pool_stats };

fn main() {
use proxmox_backup::api2::pull::do_sync_job;

fn main() -> Result<(), Error> {
    proxmox_backup::tools::setup_safe_path_env();

    if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
        eprintln!("Error: {}", err);
        std::process::exit(-1);
    let backup_uid = proxmox_backup::backup::backup_user()?.uid;
    let backup_gid = proxmox_backup::backup::backup_group()?.gid;
    let running_uid = nix::unistd::Uid::effective();
    let running_gid = nix::unistd::Gid::effective();

    if running_uid != backup_uid || running_gid != backup_gid {
        bail!("proxy not running as backup user or group (got uid {} gid {})", running_uid, running_gid);
    }

    proxmox_backup::tools::runtime::main(run())
}

async fn run() -> Result<(), Error> {
@ -40,11 +49,6 @@ async fn run() -> Result<(), Error> {
    let mut config = ApiConfig::new(
        buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;

    // add default dirs which includes jquery and bootstrap
    // my $base = '/usr/share/libpve-http-server-perl';
    // add_dirs($self->{dirs}, '/css/' => "$base/css/");
    // add_dirs($self->{dirs}, '/js/' => "$base/js/");
    // add_dirs($self->{dirs}, '/fonts/' => "$base/fonts/");
    config.add_alias("novnc", "/usr/share/novnc-pve");
    config.add_alias("extjs", "/usr/share/javascript/extjs");
    config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
@ -318,7 +322,7 @@ async fn schedule_datastore_garbage_collection() {
    if let Err(err) = WorkerTask::new_thread(
        worker_type,
        Some(store.clone()),
        "backup@pam",
        Userid::backup_userid().clone(),
        false,
        move |worker| {
            worker.log(format!("starting garbage collection on store {}", store));
@ -429,7 +433,7 @@ async fn schedule_datastore_prune() {
    if let Err(err) = WorkerTask::new_thread(
        worker_type,
        Some(store.clone()),
        "backup@pam",
        Userid::backup_userid().clone(),
        false,
        move |worker| {
            worker.log(format!("Starting datastore prune on store \"{}\"", store));
@ -455,7 +459,7 @@ async fn schedule_datastore_prune() {
                BackupDir::backup_time_to_string(info.backup_dir.backup_time())));

            if !keep {
                datastore.remove_backup_dir(&info.backup_dir)?;
                datastore.remove_backup_dir(&info.backup_dir, true)?;
            }
        }
    }
@ -471,10 +475,7 @@ async fn schedule_datastore_prune() {
async fn schedule_datastore_sync_jobs() {

    use proxmox_backup::{
        backup::DataStore,
        client::{ HttpClient, HttpClientOptions, BackupRepository, pull::pull_store },
        server::{ WorkerTask },
        config::{ sync::{self, SyncJobConfig}, remote::{self, Remote} },
        config::{ sync::{self, SyncJobConfig}, jobstate::{self, Job} },
        tools::systemd::time::{ parse_calendar_event, compute_next_event },
    };

@ -486,14 +487,6 @@ async fn schedule_datastore_sync_jobs() {
        Ok((config, _digest)) => config,
    };

    let remote_config = match remote::config() {
        Err(err) => {
            eprintln!("unable to read remote config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (job_id, (_, job_config)) in config.sections {
        let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
            Ok(c) => c,
@ -518,16 +511,10 @@ async fn schedule_datastore_sync_jobs() {

        let worker_type = "syncjob";

        let last = match lookup_last_worker(worker_type, &job_id) {
            Ok(Some(upid)) => {
                if proxmox_backup::server::worker_is_active_local(&upid) {
                    continue;
                }
                upid.starttime
            },
            Ok(None) => 0,
        let last = match jobstate::last_run_time(worker_type, &job_id) {
            Ok(time) => time,
            Err(err) => {
                eprintln!("lookup_last_job_start failed: {}", err);
                eprintln!("could not get last run time of {} {}: {}", worker_type, job_id, err);
                continue;
            }
        };
@ -549,57 +536,15 @@ async fn schedule_datastore_sync_jobs() {
        };
        if next > now { continue; }

        let job_id2 = job_id.clone();

        let tgt_store = match DataStore::lookup_datastore(&job_config.store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore '{}' failed - {}", job_config.store, err);
                continue;
            }
        let job = match Job::new(worker_type, &job_id) {
            Ok(job) => job,
            Err(_) => continue, // could not get lock
        };

        let remote: Remote = match remote_config.lookup("remote", &job_config.remote) {
            Ok(remote) => remote,
            Err(err) => {
                eprintln!("remote_config lookup failed: {}", err);
                continue;
            }
        };
        let userid = Userid::backup_userid().clone();

        let username = String::from("backup@pam");

        let delete = job_config.remove_vanished.unwrap_or(true);

        if let Err(err) = WorkerTask::spawn(
            worker_type,
            Some(job_id.clone()),
            &username.clone(),
            false,
            move |worker| async move {
                worker.log(format!("Starting datastore sync job '{}'", job_id));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                worker.log(format!("Sync datastore '{}' from '{}/{}'",
                    job_config.store, job_config.remote, job_config.remote_store));

                let options = HttpClientOptions::new()
                    .password(Some(remote.password.clone()))
                    .fingerprint(remote.fingerprint.clone());

                let client = HttpClient::new(&remote.host, &remote.userid, options)?;
                let _auth_info = client.login() // make sure we can auth
                    .await
                    .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;

                let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), job_config.remote_store);

                pull_store(&worker, &client, &src_repo, tgt_store, delete, username).await?;

                Ok(())
            }
        ) {
            eprintln!("unable to start datastore sync job {} - {}", job_id2, err);
        if let Err(err) = do_sync_job(job, job_config, &userid, Some(event_str)) {
            eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
        }
    }
}
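The scheduling loop now leans on the jobstate module: read the last run time from the statefile, only act when the calendar event is due, take the per-job lock, and hand the locked `Job` to `do_sync_job`. A condensed sketch of that loop body (names as in the hunk above; error handling shortened, so this is an excerpt of the per-job `for` loop, not a standalone function):

    // Editor's sketch (not part of the diff): jobstate-driven scheduling pattern.
    let last = match jobstate::last_run_time(worker_type, &job_id) {
        Ok(time) => time,
        Err(_) => continue, // cannot determine last run, skip this cycle
    };

    let next = match compute_next_event(&event, last, false) {
        Ok(next) => next,
        Err(_) => continue,
    };
    if next > now { continue; }

    // Job::new() takes the lock; if another instance holds it, skip.
    let job = match Job::new(worker_type, &job_id) {
        Ok(job) => job,
        Err(_) => continue,
    };

    if let Err(err) = do_sync_job(job, job_config, &userid, Some(event_str)) {
        eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
    }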
@ -68,7 +68,7 @@ struct Speed {
struct BenchmarkResult {
    /// TLS upload speed
    tls: Speed,
    /// SHA256 checksum comptation speed
    /// SHA256 checksum computation speed
    sha256: Speed,
    /// ZStd level 1 compression speed
    compress: Speed,
@ -187,7 +187,7 @@ fn render_result(
        .header("TLS (maximal backup upload speed)")
        .right_align(false).renderer(render_speed))
    .column(ColumnConfig::new("sha256")
        .header("SHA256 checksum comptation speed")
        .header("SHA256 checksum computation speed")
        .right_align(false).renderer(render_speed))
    .column(ColumnConfig::new("compress")
        .header("ZStd level 1 compression speed")
@ -97,7 +97,9 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {

    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
    let file_info = manifest.lookup_file_info(&CATALOG_NAME)?;

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);

    let mut reader = BufferedDynamicReader::new(index, chunk_reader);

@ -200,7 +202,9 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {

    let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
    let most_used = index.find_most_used_chunks(8);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);

    let file_info = manifest.lookup_file_info(&server_archive_name)?;
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), file_info.chunk_crypt_mode(), most_used);
    let reader = BufferedDynamicReader::new(index, chunk_reader);
    let archive_size = reader.archive_size();
    let reader: proxmox_backup::pxar::fuse::Reader =
@ -216,7 +220,9 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
    manifest.verify_file(CATALOG_NAME, &csum, size)?;

    let most_used = index.find_most_used_chunks(8);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    let file_info = manifest.lookup_file_info(&CATALOG_NAME)?;
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
    let mut reader = BufferedDynamicReader::new(index, chunk_reader);
    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
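The same pattern repeats in each client entry point: look up the archive's `FileInfo` in the manifest first, then construct the `RemoteChunkReader` with that file's chunk crypt mode so mismatched chunks are rejected early. Schematically (assuming `client`, `manifest`, `index` and `crypt_config` are set up as in the surrounding code):

    // Editor's sketch (not part of the diff): recurring lookup pattern.
    let most_used = index.find_most_used_chunks(8);
    let file_info = manifest.lookup_file_info(&archive_name)?;
    let chunk_reader = RemoteChunkReader::new(
        client.clone(),
        crypt_config.clone(),
        file_info.chunk_crypt_mode(), // crypt mode recorded in the manifest
        most_used,
    );
    let reader = BufferedDynamicReader::new(index, chunk_reader);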
@ -141,10 +141,12 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {

    let (manifest, _) = client.download_manifest().await?;

    let file_info = manifest.lookup_file_info(&archive_name)?;

    if server_archive_name.ends_with(".didx") {
        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
        let most_used = index.find_most_used_chunks(8);
        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader: proxmox_backup::pxar::fuse::Reader =
@ -239,7 +239,7 @@ pub fn zpool_commands() -> CommandLineInterface {
    .insert("create",
        CliCommand::new(&API_METHOD_CREATE_ZPOOL)
            .arg_param(&["name"])
            .completion_cb("devices", complete_disk_name) // fixme: comlete the list
            .completion_cb("devices", complete_disk_name) // fixme: complete the list
    );

    cmd_def.into()
@ -3,8 +3,10 @@ use std::ffi::OsStr;
use std::fs::OpenOptions;
use std::os::unix::fs::OpenOptionsExt;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};

use anyhow::{format_err, Error};
use anyhow::{bail, format_err, Error};
use futures::future::FutureExt;
use futures::select;
use tokio::signal::unix::{signal, SignalKind};
@ -24,11 +26,14 @@ fn extract_archive_from_reader<R: std::io::Read>(
    allow_existing_dirs: bool,
    verbose: bool,
    match_list: &[MatchEntry],
    extract_match_default: bool,
    on_error: Option<Box<dyn FnMut(Error) -> Result<(), Error> + Send>>,
) -> Result<(), Error> {
    proxmox_backup::pxar::extract_archive(
        pxar::decoder::Decoder::from_std(reader)?,
        Path::new(target),
        &match_list,
        extract_match_default,
        feature_flags,
        allow_existing_dirs,
        |path| {
@ -36,6 +41,7 @@ fn extract_archive_from_reader<R: std::io::Read>(
            println!("{:?}", path);
        }
        },
        on_error,
    )
}

@ -102,6 +108,11 @@ fn extract_archive_from_reader<R: std::io::Read>(
            optional: true,
            default: false,
        },
        strict: {
            description: "Stop on errors. Otherwise most errors will simply warn.",
            optional: true,
            default: false,
        },
    },
},
)]
@ -119,6 +130,7 @@ fn extract_archive(
    no_device_nodes: bool,
    no_fifos: bool,
    no_sockets: bool,
    strict: bool,
) -> Result<(), Error> {
    let mut feature_flags = Flags::DEFAULT;
    if no_xattrs {
@ -162,6 +174,22 @@ fn extract_archive(
        );
    }

    let extract_match_default = match_list.is_empty();

    let was_ok = Arc::new(AtomicBool::new(true));
    let on_error = if strict {
        // by default errors are propagated up
        None
    } else {
        let was_ok = Arc::clone(&was_ok);
        // otherwise we want to log them but not act on them
        Some(Box::new(move |err| {
            was_ok.store(false, Ordering::Release);
            eprintln!("error: {}", err);
            Ok(())
        }) as Box<dyn FnMut(Error) -> Result<(), Error> + Send>)
    };

    if archive == "-" {
        let stdin = std::io::stdin();
        let mut reader = stdin.lock();
@ -172,6 +200,8 @@ fn extract_archive(
            allow_existing_dirs,
            verbose,
            &match_list,
            extract_match_default,
            on_error,
        )?;
    } else {
        if verbose {
@ -186,9 +216,15 @@ fn extract_archive(
            allow_existing_dirs,
            verbose,
            &match_list,
            extract_match_default,
            on_error,
        )?;
    }

    if !was_ok.load(Ordering::Acquire) {
        bail!("there were errors");
    }

    Ok(())
}
@ -129,9 +129,9 @@ impl BackupReader {

    let mut raw_data = Vec::with_capacity(64 * 1024);
    self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
    let blob = DataBlob::from_raw(raw_data)?;
    blob.verify_crc()?;
    let data = blob.decode(None)?;
    let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
    // no expected digest available
    let data = blob.decode(None, None)?;

    let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
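`DataBlob::load_from_reader` folds the CRC check into loading, and `decode` now takes an optional expected digest so the content can be verified where one is known. A sketch of the difference as used across these hunks; the manifest download has no expected digest, a chunk read does:

    // Editor's sketch (not part of the diff), assuming the signatures above.
    // Manifest blob: only the (untrusted) CRC is checked while loading;
    // no digest is available, so decode gets `None`.
    let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
    let data = blob.decode(None, None)?;

    // Chunk blob: the index knows the expected digest (`digest: &[u8; 32]`),
    // so decode can verify the content against it.
    let chunk = DataBlob::load_from_reader(&mut &chunk_data[..])?;
    let raw = chunk.decode(crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;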
@ -1,3 +1,4 @@
use std::convert::TryFrom;
use std::fmt;

use anyhow::{format_err, Error};
@ -15,7 +16,7 @@ pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_RE
#[derive(Debug)]
pub struct BackupRepository {
    /// The user name used for Authentication
    user: Option<String>,
    user: Option<Userid>,
    /// The host name or IP address
    host: Option<String>,
    /// The name of the datastore
@ -24,15 +25,15 @@ pub struct BackupRepository {

impl BackupRepository {

    pub fn new(user: Option<String>, host: Option<String>, store: String) -> Self {
    pub fn new(user: Option<Userid>, host: Option<String>, store: String) -> Self {
        Self { user, host, store }
    }

    pub fn user(&self) -> &str {
    pub fn user(&self) -> &Userid {
        if let Some(ref user) = self.user {
            return user;
            return &user;
        }
        "root@pam"
        Userid::root_userid()
    }

    pub fn host(&self) -> &str {
@ -73,7 +74,7 @@ impl std::str::FromStr for BackupRepository {
            .ok_or_else(|| format_err!("unable to parse repository url '{}'", url))?;

        Ok(Self {
            user: cap.get(1).map(|m| m.as_str().to_owned()),
            user: cap.get(1).map(|m| Userid::try_from(m.as_str().to_owned())).transpose()?,
            host: cap.get(2).map(|m| m.as_str().to_owned()),
            store: cap[3].to_owned(),
        })
@ -266,7 +266,7 @@ impl BackupWriter {
    if archive_name != CATALOG_NAME {
        let speed: HumanByte = ((uploaded * 1_000_000) / (duration.as_micros() as usize)).into();
        let uploaded: HumanByte = uploaded.into();
        println!("{}: had to upload {} of {} in {:.2}s, avgerage speed {}/s).", archive, uploaded, vsize_h, duration.as_secs_f64(), speed);
        println!("{}: had to upload {} of {} in {:.2}s, average speed {}/s).", archive, uploaded, vsize_h, duration.as_secs_f64(), speed);
    } else {
        println!("Uploaded backup catalog ({})", vsize_h);
    }
@ -479,9 +479,9 @@ impl BackupWriter {
    let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
    self.h2.download("previous", Some(param), &mut raw_data).await?;

    let blob = DataBlob::from_raw(raw_data)?;
    blob.verify_crc()?;
    let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
    let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
    // no expected digest available
    let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref), None)?;

    let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;

@ -629,7 +629,7 @@ impl BackupWriter {
    })
}

/// Upload speed test - prints result ot stderr
/// Upload speed test - prints result to stderr
pub async fn upload_speedtest(&self, verbose: bool) -> Result<f64, Error> {

    let mut data = vec![];
@ -24,6 +24,7 @@ use proxmox::{
};

use super::pipe_to_stream::PipeToSendStream;
use crate::api2::types::Userid;
use crate::tools::async_io::EitherStream;
use crate::tools::{self, BroadcastFuture, DEFAULT_ENCODE_SET};

@ -104,7 +105,7 @@ pub struct HttpClient {
}

/// Delete stored ticket data (logout)
pub fn delete_ticket_info(prefix: &str, server: &str, username: &str) -> Result<(), Error> {
pub fn delete_ticket_info(prefix: &str, server: &str, username: &Userid) -> Result<(), Error> {

    let base = BaseDirectories::with_prefix(prefix)?;

@ -116,7 +117,7 @@ pub fn delete_ticket_info(prefix: &str, server: &str, username: &str) -> Result<
    let mut data = file_get_json(&path, Some(json!({})))?;

    if let Some(map) = data[server].as_object_mut() {
        map.remove(username);
        map.remove(username.as_str());
    }

    replace_file(path, data.to_string().as_bytes(), CreateOptions::new().perm(mode))?;
@ -223,7 +224,7 @@ fn store_ticket_info(prefix: &str, server: &str, username: &str, ticket: &str, t
    Ok(())
}

fn load_ticket_info(prefix: &str, server: &str, username: &str) -> Option<(String, String)> {
fn load_ticket_info(prefix: &str, server: &str, userid: &Userid) -> Option<(String, String)> {
    let base = BaseDirectories::with_prefix(prefix).ok()?;

    // usually /run/user/<uid>/...
@ -231,7 +232,7 @@ fn load_ticket_info(prefix: &str, server: &str, username: &str) -> Option<(Strin
    let data = file_get_json(&path, None).ok()?;
    let now = Utc::now().timestamp();
    let ticket_lifetime = tools::ticket::TICKET_LIFETIME - 60;
    let uinfo = data[server][username].as_object()?;
    let uinfo = data[server][userid.as_str()].as_object()?;
    let timestamp = uinfo["timestamp"].as_i64()?;
    let age = now - timestamp;

@ -245,8 +246,11 @@ fn load_ticket_info(prefix: &str, server: &str, username: &str) -> Option<(Strin
}

impl HttpClient {

    pub fn new(server: &str, username: &str, mut options: HttpClientOptions) -> Result<Self, Error> {
    pub fn new(
        server: &str,
        userid: &Userid,
        mut options: HttpClientOptions,
    ) -> Result<Self, Error> {

        let verified_fingerprint = Arc::new(Mutex::new(None));

@ -306,20 +310,20 @@ impl HttpClient {
        } else {
            let mut ticket_info = None;
            if use_ticket_cache {
                ticket_info = load_ticket_info(options.prefix.as_ref().unwrap(), server, username);
                ticket_info = load_ticket_info(options.prefix.as_ref().unwrap(), server, userid);
            }
            if let Some((ticket, _token)) = ticket_info {
                ticket
            } else {
                Self::get_password(&username, options.interactive)?
                Self::get_password(userid, options.interactive)?
            }
        };

        let login_future = Self::credentials(
            client.clone(),
            server.to_owned(),
            username.to_owned(),
            password,
            userid.to_owned(),
            password.to_owned(),
        ).map_ok({
            let server = server.to_string();
            let prefix = options.prefix.clone();
@ -355,7 +359,7 @@ impl HttpClient {
        (*self.fingerprint.lock().unwrap()).clone()
    }

    fn get_password(username: &str, interactive: bool) -> Result<String, Error> {
    fn get_password(username: &Userid, interactive: bool) -> Result<String, Error> {
        // If we're on a TTY, query the user for a password
        if interactive && tty::stdin_isatty() {
            let msg = format!("Password for \"{}\": ", username);
@ -579,7 +583,7 @@ impl HttpClient {
    async fn credentials(
        client: Client<HttpsConnector>,
        server: String,
        username: String,
        username: Userid,
        password: String,
    ) -> Result<AuthInfo, Error> {
        let data = json!({ "username": username, "password": password });
@ -27,16 +27,18 @@ async fn pull_index_chunks<I: IndexFile>(

    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let chunk_exists = target.cond_touch_chunk(digest, false)?;
        let info = index.chunk_info(pos).unwrap();
        let chunk_exists = target.cond_touch_chunk(&info.digest, false)?;
        if chunk_exists {
            //worker.log(format!("chunk {} exists {}", pos, proxmox::tools::digest_to_hex(digest)));
            continue;
        }
        //worker.log(format!("sync {} chunk {}", pos, proxmox::tools::digest_to_hex(digest)));
        let chunk = chunk_reader.read_raw_chunk(&digest).await?;
        let chunk = chunk_reader.read_raw_chunk(&info.digest).await?;

        target.insert_chunk(&chunk, &digest)?;
        chunk.verify_unencrypted(info.size() as usize, &info.digest)?;

        target.insert_chunk(&chunk, &info.digest)?;
    }

    Ok(())
@ -60,15 +62,32 @@ async fn download_manifest(
    Ok(tmp_manifest_file)
}

fn verify_archive(
    info: &FileInfo,
    csum: &[u8; 32],
    size: u64,
) -> Result<(), Error> {
    if size != info.size {
        bail!("wrong size for file '{}' ({} != {})", info.filename, info.size, size);
    }

    if csum != &info.csum {
        bail!("wrong checksum for file '{}'", info.filename);
    }

    Ok(())
}

async fn pull_single_archive(
    worker: &WorkerTask,
    reader: &BackupReader,
    chunk_reader: &mut RemoteChunkReader,
    tgt_store: Arc<DataStore>,
    snapshot: &BackupDir,
    archive_name: &str,
    archive_info: &FileInfo,
) -> Result<(), Error> {

    let archive_name = &archive_info.filename;
    let mut path = tgt_store.base_path();
    path.push(snapshot.relative_path());
    path.push(archive_name);
@ -89,16 +108,23 @@ async fn pull_single_archive(
    ArchiveType::DynamicIndex => {
        let index = DynamicIndexReader::new(tmpfile)
            .map_err(|err| format_err!("unable to read dynamic index {:?} - {}", tmp_path, err))?;
        let (csum, size) = index.compute_csum();
        verify_archive(archive_info, &csum, size)?;

        pull_index_chunks(worker, chunk_reader, tgt_store.clone(), index).await?;
    }
    ArchiveType::FixedIndex => {
        let index = FixedIndexReader::new(tmpfile)
            .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", tmp_path, err))?;
        let (csum, size) = index.compute_csum();
        verify_archive(archive_info, &csum, size)?;

        pull_index_chunks(worker, chunk_reader, tgt_store.clone(), index).await?;
    }
    ArchiveType::Blob => { /* nothing to do */ }
    ArchiveType::Blob => {
        let (csum, size) = compute_file_csum(&mut tmpfile)?;
        verify_archive(archive_info, &csum, size)?;
    }
}
if let Err(err) = std::fs::rename(&tmp_path, &path) {
    bail!("Atomic rename file {:?} failed - {}", path, err);
@ -174,16 +200,14 @@ async fn pull_snapshot(
        };
    },
};
let tmp_manifest_blob = DataBlob::load(&mut tmp_manifest_file)?;
tmp_manifest_blob.verify_crc()?;
let tmp_manifest_blob = DataBlob::load_from_reader(&mut tmp_manifest_file)?;

if manifest_name.exists() {
    let manifest_blob = proxmox::try_block!({
        let mut manifest_file = std::fs::File::open(&manifest_name)
            .map_err(|err| format_err!("unable to open local manifest {:?} - {}", manifest_name, err))?;

        let manifest_blob = DataBlob::load(&mut manifest_file)?;
        manifest_blob.verify_crc()?;
        let manifest_blob = DataBlob::load_from_reader(&mut manifest_file)?;
        Ok(manifest_blob)
    }).map_err(|err: Error| {
        format_err!("unable to read local manifest {:?} - {}", manifest_name, err)
@ -200,8 +224,6 @@ async fn pull_snapshot(

let manifest = BackupManifest::try_from(tmp_manifest_blob)?;

let mut chunk_reader = RemoteChunkReader::new(reader.clone(), None, HashMap::new());

for item in manifest.files() {
    let mut path = tgt_store.base_path();
    path.push(snapshot.relative_path());
@ -242,13 +264,15 @@ async fn pull_snapshot(
        }
    }

    let mut chunk_reader = RemoteChunkReader::new(reader.clone(), None, item.chunk_crypt_mode(), HashMap::new());

    pull_single_archive(
        worker,
        &reader,
        &mut chunk_reader,
        tgt_store.clone(),
        snapshot,
        &item.filename,
        &item,
    ).await?;
}

@ -273,13 +297,13 @@ pub async fn pull_snapshot_from(
    snapshot: &BackupDir,
) -> Result<(), Error> {

    let (_path, is_new) = tgt_store.create_backup_dir(&snapshot)?;
    let (_path, is_new, _snap_lock) = tgt_store.create_locked_backup_dir(&snapshot)?;

    if is_new {
        worker.log(format!("sync snapshot {:?}", snapshot.relative_path()));

        if let Err(err) = pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await {
            if let Err(cleanup_err) = tgt_store.remove_backup_dir(&snapshot) {
            if let Err(cleanup_err) = tgt_store.remove_backup_dir(&snapshot, true) {
                worker.log(format!("cleanup error - {}", cleanup_err));
            }
            return Err(err);
@ -364,7 +388,7 @@ pub async fn pull_group(
    let backup_time = info.backup_dir.backup_time();
    if remote_snapshots.contains(&backup_time) { continue; }
    worker.log(format!("delete vanished snapshot {:?}", info.backup_dir.relative_path()));
    tgt_store.remove_backup_dir(&info.backup_dir)?;
    tgt_store.remove_backup_dir(&info.backup_dir, false)?;
    }
}

@ -377,7 +401,7 @@ pub async fn pull_store(
    src_repo: &BackupRepository,
    tgt_store: Arc<DataStore>,
    delete: bool,
    username: String,
    userid: Userid,
) -> Result<(), Error> {

    // explicit create shared lock to prevent GC on newly created chunks
@ -408,11 +432,11 @@ pub async fn pull_store(
    for item in list {
        let group = BackupGroup::new(&item.backup_type, &item.backup_id);

        let owner = tgt_store.create_backup_group(&group, &username)?;
        let (owner, _lock_guard) = tgt_store.create_locked_backup_group(&group, &userid)?;
        // permission check
        if owner != username { // only the owner is allowed to create additional snapshots
        if userid != owner { // only the owner is allowed to create additional snapshots
            worker.log(format!("sync group {}/{} failed - owner check failed ({} != {})",
                item.backup_type, item.backup_id, username, owner));
                item.backup_type, item.backup_id, userid, owner));
            errors = true;
            continue; // do not stop here, instead continue
        }
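The new `verify_archive` helper gives every pulled file the same size/checksum gate before it is renamed into place. In outline (an editor's sketch, with ownership and error-mapping details elided from the actual hunk above):

    // Editor's sketch (not part of the diff): the verification step per archive type.
    let (csum, size) = match archive_type(archive_name)? {
        ArchiveType::DynamicIndex => DynamicIndexReader::new(tmpfile)?.compute_csum(),
        ArchiveType::FixedIndex => FixedIndexReader::new(tmpfile)?.compute_csum(),
        ArchiveType::Blob => compute_file_csum(&mut tmpfile)?,
    };
    verify_archive(archive_info, &csum, size)?;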
@ -3,10 +3,10 @@ use std::collections::HashMap;
use std::pin::Pin;
use std::sync::{Arc, Mutex};

use anyhow::Error;
use anyhow::{bail, Error};

use super::BackupReader;
use crate::backup::{AsyncReadChunk, CryptConfig, DataBlob, ReadChunk};
use crate::backup::{AsyncReadChunk, CryptConfig, CryptMode, DataBlob, ReadChunk};
use crate::tools::runtime::block_on;

/// Read chunks from remote host using ``BackupReader``
@ -14,6 +14,7 @@ use crate::tools::runtime::block_on;
pub struct RemoteChunkReader {
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    crypt_mode: CryptMode,
    cache_hint: HashMap<[u8; 32], usize>,
    cache: Arc<Mutex<HashMap<[u8; 32], Vec<u8>>>>,
}
@ -25,16 +26,20 @@ impl RemoteChunkReader {
    pub fn new(
        client: Arc<BackupReader>,
        crypt_config: Option<Arc<CryptConfig>>,
        crypt_mode: CryptMode,
        cache_hint: HashMap<[u8; 32], usize>,
    ) -> Self {
        Self {
            client,
            crypt_config,
            crypt_mode,
            cache_hint,
            cache: Arc::new(Mutex::new(HashMap::new())),
        }
    }

    /// Downloads raw chunk. This only verifies the (untrusted) CRC32, use
    /// DataBlob::verify_unencrypted or DataBlob::decode before storing/processing further.
    pub async fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
        let mut chunk_data = Vec::with_capacity(4 * 1024 * 1024);

@ -42,10 +47,22 @@ impl RemoteChunkReader {
            .download_chunk(&digest, &mut chunk_data)
            .await?;

        let chunk = DataBlob::from_raw(chunk_data)?;
        chunk.verify_crc()?;
        let chunk = DataBlob::load_from_reader(&mut &chunk_data[..])?;

        Ok(chunk)
        match self.crypt_mode {
            CryptMode::Encrypt => {
                match chunk.crypt_mode()? {
                    CryptMode::Encrypt => Ok(chunk),
                    CryptMode::SignOnly | CryptMode::None => bail!("Index and chunk CryptMode don't match."),
                }
            },
            CryptMode::SignOnly | CryptMode::None => {
                match chunk.crypt_mode()? {
                    CryptMode::Encrypt => bail!("Index and chunk CryptMode don't match."),
                    CryptMode::SignOnly | CryptMode::None => Ok(chunk),
                }
            },
        }
    }
}

@ -61,9 +78,7 @@ impl ReadChunk for RemoteChunkReader {

    let chunk = ReadChunk::read_raw_chunk(self, digest)?;

    let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;

    // fixme: verify digest?
    let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;

    let use_cache = self.cache_hint.contains_key(digest);
    if use_cache {
@ -93,9 +108,7 @@ impl AsyncReadChunk for RemoteChunkReader {

    let chunk = Self::read_raw_chunk(self, digest).await?;

    let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;

    // fixme: verify digest?
    let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;

    let use_cache = self.cache_hint.contains_key(digest);
    if use_cache {
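The intent of the nested match above is simply "the index's crypt mode and the chunk's crypt mode must agree on whether data is encrypted", with `SignOnly` and `None` both counting as unencrypted. An equivalent, more compact formulation (an editor's sketch, not the code in this diff):

    // Editor's sketch (not part of the diff): same check, collapsed.
    let index_encrypted = matches!(self.crypt_mode, CryptMode::Encrypt);
    let chunk_encrypted = matches!(chunk.crypt_mode()?, CryptMode::Encrypt);
    if index_encrypted != chunk_encrypted {
        bail!("Index and chunk CryptMode don't match.");
    }
    Ok(chunk)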
@ -15,13 +15,14 @@ use proxmox::try_block;

use crate::buildcfg;

pub mod datastore;
pub mod remote;
pub mod user;
pub mod acl;
pub mod cached_user_info;
pub mod datastore;
pub mod jobstate;
pub mod network;
pub mod remote;
pub mod sync;
pub mod user;

/// Check configuration directory permissions
///
@ -15,6 +15,8 @@ use proxmox::tools::{fs::replace_file, fs::CreateOptions};
use proxmox::constnamemap;
use proxmox::api::{api, schema::*};

use crate::api2::types::Userid;

// define Privilege bitfield

constnamemap! {
@ -224,7 +226,7 @@ pub struct AclTree {
}

pub struct AclTreeNode {
    pub users: HashMap<String, HashMap<String, bool>>,
    pub users: HashMap<Userid, HashMap<String, bool>>,
    pub groups: HashMap<String, HashMap<String, bool>>,
    pub children: BTreeMap<String, AclTreeNode>,
}
@ -239,7 +241,7 @@ impl AclTreeNode {
    }
}

    pub fn extract_roles(&self, user: &str, all: bool) -> HashSet<String> {
    pub fn extract_roles(&self, user: &Userid, all: bool) -> HashSet<String> {
        let user_roles = self.extract_user_roles(user, all);
        if !user_roles.is_empty() {
            // user privs always override group privs
@ -249,7 +251,7 @@ impl AclTreeNode {
        self.extract_group_roles(user, all)
    }

    pub fn extract_user_roles(&self, user: &str, all: bool) -> HashSet<String> {
    pub fn extract_user_roles(&self, user: &Userid, all: bool) -> HashSet<String> {

        let mut set = HashSet::new();

@ -273,7 +275,7 @@ impl AclTreeNode {
        set
    }

    pub fn extract_group_roles(&self, _user: &str, all: bool) -> HashSet<String> {
    pub fn extract_group_roles(&self, _user: &Userid, all: bool) -> HashSet<String> {

        let mut set = HashSet::new();

@ -305,7 +307,7 @@ impl AclTreeNode {
        roles.remove(role);
    }

    pub fn delete_user_role(&mut self, userid: &str, role: &str) {
    pub fn delete_user_role(&mut self, userid: &Userid, role: &str) {
        let roles = match self.users.get_mut(userid) {
            Some(r) => r,
            None => return,
@ -324,7 +326,7 @@ impl AclTreeNode {
        }
    }

    pub fn insert_user_role(&mut self, user: String, role: String, propagate: bool) {
    pub fn insert_user_role(&mut self, user: Userid, role: String, propagate: bool) {
        let map = self.users.entry(user).or_insert_with(|| HashMap::new());
        if role == ROLE_NAME_NO_ACCESS {
            map.clear();
@ -376,7 +378,7 @@ impl AclTree {
        node.delete_group_role(group, role);
    }

    pub fn delete_user_role(&mut self, path: &str, userid: &str, role: &str) {
    pub fn delete_user_role(&mut self, path: &str, userid: &Userid, role: &str) {
        let path = split_acl_path(path);
        let node = match self.get_node(&path) {
            Some(n) => n,
@ -391,10 +393,10 @@ impl AclTree {
        node.insert_group_role(group.to_string(), role.to_string(), propagate);
    }

    pub fn insert_user_role(&mut self, path: &str, user: &str, role: &str, propagate: bool) {
    pub fn insert_user_role(&mut self, path: &str, user: &Userid, role: &str, propagate: bool) {
        let path = split_acl_path(path);
        let node = self.get_or_insert_node(&path);
        node.insert_user_role(user.to_string(), role.to_string(), propagate);
        node.insert_user_role(user.to_owned(), role.to_string(), propagate);
    }

    fn write_node_config(
@ -521,7 +523,7 @@ impl AclTree {
        let group = &user_or_group[1..];
        node.insert_group_role(group.to_string(), role.to_string(), propagate);
    } else {
        node.insert_user_role(user_or_group.to_string(), role.to_string(), propagate);
        node.insert_user_role(user_or_group.parse()?, role.to_string(), propagate);
    }
    }
}
@ -569,7 +571,7 @@ impl AclTree {
    Ok(tree)
}

    pub fn roles(&self, userid: &str, path: &[&str]) -> HashSet<String> {
    pub fn roles(&self, userid: &Userid, path: &[&str]) -> HashSet<String> {

        let mut node = &self.root;
        let mut role_set = node.extract_roles(userid, path.is_empty());
@ -665,13 +667,14 @@ pub fn save_config(acl: &AclTree) -> Result<(), Error> {

#[cfg(test)]
mod test {

    use anyhow::{Error};
    use super::AclTree;

    use crate::api2::types::Userid;

    fn check_roles(
        tree: &AclTree,
        user: &str,
        user: &Userid,
        path: &str,
        expected_roles: &str,
    ) {
@ -686,22 +689,23 @@ mod test {
    }

    #[test]
    fn test_acl_line_compression() -> Result<(), Error> {
    fn test_acl_line_compression() {

        let tree = AclTree::from_raw(r###"
acl:0:/store/store2:user1:Admin
acl:0:/store/store2:user2:Admin
acl:0:/store/store2:user1:DatastoreBackup
acl:0:/store/store2:user2:DatastoreBackup
"###)?;
        let tree = AclTree::from_raw(
            "\
            acl:0:/store/store2:user1@pbs:Admin\n\
            acl:0:/store/store2:user2@pbs:Admin\n\
            acl:0:/store/store2:user1@pbs:DatastoreBackup\n\
            acl:0:/store/store2:user2@pbs:DatastoreBackup\n\
            ",
        )
        .expect("failed to parse acl tree");

        let mut raw: Vec<u8> = Vec::new();
        tree.write_config(&mut raw)?;
        let raw = std::str::from_utf8(&raw)?;
        tree.write_config(&mut raw).expect("failed to write acl tree");
        let raw = std::str::from_utf8(&raw).expect("acl tree is not valid utf8");

        assert_eq!(raw, "acl:0:/store/store2:user1,user2:Admin,DatastoreBackup\n");

        Ok(())
        assert_eq!(raw, "acl:0:/store/store2:user1@pbs,user2@pbs:Admin,DatastoreBackup\n");
    }

    #[test]
@ -712,15 +716,17 @@ acl:1:/storage:user1@pbs:Admin
acl:1:/storage/store1:user1@pbs:DatastoreBackup
acl:1:/storage/store2:user2@pbs:DatastoreBackup
"###)?;
        check_roles(&tree, "user1@pbs", "/", "");
        check_roles(&tree, "user1@pbs", "/storage", "Admin");
        check_roles(&tree, "user1@pbs", "/storage/store1", "DatastoreBackup");
        check_roles(&tree, "user1@pbs", "/storage/store2", "Admin");
        let user1: Userid = "user1@pbs".parse()?;
        check_roles(&tree, &user1, "/", "");
        check_roles(&tree, &user1, "/storage", "Admin");
        check_roles(&tree, &user1, "/storage/store1", "DatastoreBackup");
        check_roles(&tree, &user1, "/storage/store2", "Admin");

        check_roles(&tree, "user2@pbs", "/", "");
        check_roles(&tree, "user2@pbs", "/storage", "");
        check_roles(&tree, "user2@pbs", "/storage/store1", "");
        check_roles(&tree, "user2@pbs", "/storage/store2", "DatastoreBackup");
        let user2: Userid = "user2@pbs".parse()?;
        check_roles(&tree, &user2, "/", "");
        check_roles(&tree, &user2, "/storage", "");
        check_roles(&tree, &user2, "/storage/store1", "");
        check_roles(&tree, &user2, "/storage/store2", "DatastoreBackup");

        Ok(())
    }
@ -733,22 +739,23 @@ acl:1:/:user1@pbs:Admin
acl:1:/storage:user1@pbs:NoAccess
acl:1:/storage/store1:user1@pbs:DatastoreBackup
"###)?;
        check_roles(&tree, "user1@pbs", "/", "Admin");
        check_roles(&tree, "user1@pbs", "/storage", "NoAccess");
        check_roles(&tree, "user1@pbs", "/storage/store1", "DatastoreBackup");
        check_roles(&tree, "user1@pbs", "/storage/store2", "NoAccess");
        check_roles(&tree, "user1@pbs", "/system", "Admin");
        let user1: Userid = "user1@pbs".parse()?;
        check_roles(&tree, &user1, "/", "Admin");
        check_roles(&tree, &user1, "/storage", "NoAccess");
        check_roles(&tree, &user1, "/storage/store1", "DatastoreBackup");
        check_roles(&tree, &user1, "/storage/store2", "NoAccess");
        check_roles(&tree, &user1, "/system", "Admin");

        let tree = AclTree::from_raw(r###"
acl:1:/:user1@pbs:Admin
acl:0:/storage:user1@pbs:NoAccess
acl:1:/storage/store1:user1@pbs:DatastoreBackup
"###)?;
        check_roles(&tree, "user1@pbs", "/", "Admin");
        check_roles(&tree, "user1@pbs", "/storage", "NoAccess");
        check_roles(&tree, "user1@pbs", "/storage/store1", "DatastoreBackup");
        check_roles(&tree, "user1@pbs", "/storage/store2", "Admin");
        check_roles(&tree, "user1@pbs", "/system", "Admin");
        check_roles(&tree, &user1, "/", "Admin");
        check_roles(&tree, &user1, "/storage", "NoAccess");
        check_roles(&tree, &user1, "/storage/store1", "DatastoreBackup");
        check_roles(&tree, &user1, "/storage/store2", "Admin");
        check_roles(&tree, &user1, "/system", "Admin");

        Ok(())
    }
@ -758,13 +765,15 @@ acl:1:/storage/store1:user1@pbs:DatastoreBackup

        let mut tree = AclTree::new();

        tree.insert_user_role("/", "user1@pbs", "Admin", true);
        tree.insert_user_role("/", "user1@pbs", "Audit", true);
        let user1: Userid = "user1@pbs".parse()?;

        check_roles(&tree, "user1@pbs", "/", "Admin,Audit");
        tree.insert_user_role("/", &user1, "Admin", true);
        tree.insert_user_role("/", &user1, "Audit", true);

        tree.insert_user_role("/", "user1@pbs", "NoAccess", true);
        check_roles(&tree, "user1@pbs", "/", "NoAccess");
        check_roles(&tree, &user1, "/", "Admin,Audit");

        tree.insert_user_role("/", &user1, "NoAccess", true);
        check_roles(&tree, &user1, "/", "NoAccess");

        let mut raw: Vec<u8> = Vec::new();
        tree.write_config(&mut raw)?;
@ -780,20 +789,21 @@ acl:1:/storage/store1:user1@pbs:DatastoreBackup

        let mut tree = AclTree::new();

        tree.insert_user_role("/storage", "user1@pbs", "NoAccess", true);
        let user1: Userid = "user1@pbs".parse()?;

        check_roles(&tree, "user1@pbs", "/storage", "NoAccess");
        tree.insert_user_role("/storage", &user1, "NoAccess", true);

        tree.insert_user_role("/storage", "user1@pbs", "Admin", true);
        tree.insert_user_role("/storage", "user1@pbs", "Audit", true);
        check_roles(&tree, &user1, "/storage", "NoAccess");

        check_roles(&tree, "user1@pbs", "/storage", "Admin,Audit");
        tree.insert_user_role("/storage", &user1, "Admin", true);
        tree.insert_user_role("/storage", &user1, "Audit", true);

        tree.insert_user_role("/storage", "user1@pbs", "NoAccess", true);
        check_roles(&tree, &user1, "/storage", "Admin,Audit");

        check_roles(&tree, "user1@pbs", "/storage", "NoAccess");
        tree.insert_user_role("/storage", &user1, "NoAccess", true);

        check_roles(&tree, &user1, "/storage", "NoAccess");

        Ok(())
    }

}
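With `Userid` threaded through the ACL tree, user identifiers are parsed once at the boundary and passed by reference afterwards. A small usage sketch of the typed API (an editor's illustration, not part of this diff):

    // Editor's sketch: building a tree and querying roles with typed Userids.
    let mut tree = AclTree::new();
    let user: Userid = "user1@pbs".parse()?;

    tree.insert_user_role("/storage", &user, "DatastoreBackup", true);

    let roles = tree.roles(&user, &["storage"]);
    assert!(roles.contains("DatastoreBackup"));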
@ -10,6 +10,7 @@ use proxmox::api::UserInformation;

use super::acl::{AclTree, ROLE_NAMES, ROLE_ADMIN};
use super::user::User;
use crate::api2::types::Userid;

/// Cache User/Group/Acl configuration data for fast permission tests
pub struct CachedUserInfo {
@ -57,8 +58,8 @@ impl CachedUserInfo {
}

    /// Test if a user account is enabled and not expired
    pub fn is_active_user(&self, userid: &str) -> bool {
        if let Ok(info) = self.user_cfg.lookup::<User>("user", &userid) {
    pub fn is_active_user(&self, userid: &Userid) -> bool {
        if let Ok(info) = self.user_cfg.lookup::<User>("user", userid.as_str()) {
            if !info.enable.unwrap_or(true) {
                return false;
            }
@ -77,12 +78,12 @@ impl CachedUserInfo {

    pub fn check_privs(
        &self,
        userid: &str,
        userid: &Userid,
        path: &[&str],
        required_privs: u64,
        partial: bool,
    ) -> Result<(), Error> {
        let user_privs = self.lookup_privs(userid, path);
        let user_privs = self.lookup_privs(&userid, path);
        let allowed = if partial {
            (user_privs & required_privs) != 0
        } else {
@ -97,18 +98,20 @@ impl CachedUserInfo {
    }
}

impl UserInformation for CachedUserInfo {
    fn is_superuser(&self, userid: &str) -> bool {
impl CachedUserInfo {
    pub fn is_superuser(&self, userid: &Userid) -> bool {
        userid == "root@pam"
    }

    fn is_group_member(&self, _userid: &str, _group: &str) -> bool {
    pub fn is_group_member(&self, _userid: &Userid, _group: &str) -> bool {
        false
    }

    fn lookup_privs(&self, userid: &str, path: &[&str]) -> u64 {
    pub fn lookup_privs(&self, userid: &Userid, path: &[&str]) -> u64 {

        if self.is_superuser(userid) { return ROLE_ADMIN; }
        if self.is_superuser(userid) {
            return ROLE_ADMIN;
        }

        let roles = self.acl_tree.roles(userid, path);
        let mut privs: u64 = 0;
@ -120,3 +123,20 @@ impl UserInformation for CachedUserInfo {
        privs
    }
}

impl UserInformation for CachedUserInfo {
    fn is_superuser(&self, userid: &str) -> bool {
        userid == "root@pam"
    }

    fn is_group_member(&self, _userid: &str, _group: &str) -> bool {
        false
    }

    fn lookup_privs(&self, userid: &str, path: &[&str]) -> u64 {
        match userid.parse::<Userid>() {
            Ok(userid) => Self::lookup_privs(self, &userid, path),
            Err(_) => 0,
        }
    }
}
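Callers that previously passed a `&str` now hold a typed `Userid`; the string-based `UserInformation` impl only remains as a compatibility shim that parses and delegates. A hedged example of the typed entry point (the privilege constant is assumed to be one of the bits defined in acl.rs):

    // Editor's sketch (not part of the diff): typed permission check.
    let user_info = CachedUserInfo::new()?;
    let userid: Userid = "backup@pam".parse()?;

    // Errors unless the user holds at least one of the required privileges
    // on /datastore/store1 (partial = true).
    user_info.check_privs(&userid, &["datastore", "store1"], PRIV_DATASTORE_BACKUP, true)?;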
src/config/jobstate.rs (new file, 263 lines)
@ -0,0 +1,263 @@
//! Generic JobState handling
//!
//! A 'Job' can have 3 states
//! - Created, when a schedule was created but never executed
//! - Started, when a job is running right now
//! - Finished, when a job was running in the past
//!
//! and is identified by 2 values: jobtype and jobname (e.g. 'syncjob' and 'myfirstsyncjob')
//!
//! This module provides 2 helper structs to handle those conditions
//! 'Job' which handles locking and writing to a file
//! 'JobState' which is the actual state
//!
//! an example usage would be
//! ```no_run
//! # use anyhow::{bail, Error};
//! # use proxmox_backup::server::TaskState;
//! # use proxmox_backup::config::jobstate::*;
//! # fn some_code() -> TaskState { TaskState::OK { endtime: 0 } }
//! # fn code() -> Result<(), Error> {
//! // locks the correct file under /var/lib
//! // or fails if someone else holds the lock
//! let mut job = match Job::new("jobtype", "jobname") {
//!     Ok(job) => job,
//!     Err(err) => bail!("could not lock jobstate"),
//! };
//!
//! // job holds the lock, we can start it
//! job.start("someupid")?;
//! // do something
//! let task_state = some_code();
//! job.finish(task_state)?;
//!
//! // release the lock
//! drop(job);
//! # Ok(())
//! # }
//!
//! ```
use std::fs::File;
use std::path::{Path, PathBuf};
use std::time::Duration;

use anyhow::{bail, format_err, Error};
use proxmox::tools::fs::{
    create_path, file_read_optional_string, open_file_locked, replace_file, CreateOptions,
};
use serde::{Deserialize, Serialize};

use crate::server::{upid_read_status, worker_is_active_local, TaskState, UPID};
use crate::tools::epoch_now_u64;

#[serde(rename_all = "kebab-case")]
#[derive(Serialize, Deserialize)]
/// Represents the State of a specific Job
pub enum JobState {
    /// A job was created at 'time', but never started/finished
    Created { time: i64 },
    /// The Job was last started in 'upid',
    Started { upid: String },
    /// The Job was last started in 'upid', which finished with 'state'
    Finished { upid: String, state: TaskState },
}

/// Represents a Job and holds the correct lock
pub struct Job {
    jobtype: String,
    jobname: String,
    /// The State of the job
    pub state: JobState,
    _lock: File,
}

const JOB_STATE_BASEDIR: &str = "/var/lib/proxmox-backup/jobstates";

/// Create jobstate stat dir with correct permission
pub fn create_jobstate_dir() -> Result<(), Error> {
    let backup_user = crate::backup::backup_user()?;
    let opts = CreateOptions::new()
        .owner(backup_user.uid)
        .group(backup_user.gid);

    create_path(JOB_STATE_BASEDIR, None, Some(opts))
        .map_err(|err: Error| format_err!("unable to create jobstate dir - {}", err))?;

    Ok(())
}

fn get_path(jobtype: &str, jobname: &str) -> PathBuf {
    let mut path = PathBuf::from(JOB_STATE_BASEDIR);
    path.push(format!("{}-{}.json", jobtype, jobname));
    path
}

fn get_lock<P>(path: P) -> Result<File, Error>
where
    P: AsRef<Path>,
{
    let mut path = path.as_ref().to_path_buf();
    path.set_extension("lck");
    let lock = open_file_locked(&path, Duration::new(10, 0))?;
    let backup_user = crate::backup::backup_user()?;
    nix::unistd::chown(&path, Some(backup_user.uid), Some(backup_user.gid))?;
    Ok(lock)
}

/// Removes the statefile of a job, this is useful if we delete a job
pub fn remove_state_file(jobtype: &str, jobname: &str) -> Result<(), Error> {
    let mut path = get_path(jobtype, jobname);
    let _lock = get_lock(&path)?;
    std::fs::remove_file(&path).map_err(|err| {
        format_err!(
            "cannot remove statefile for {} - {}: {}",
            jobtype,
            jobname,
            err
        )
    })?;
    path.set_extension("lck");
    // ignore errors
    let _ = std::fs::remove_file(&path).map_err(|err| {
        format_err!(
            "cannot remove lockfile for {} - {}: {}",
            jobtype,
            jobname,
            err
        )
    });
    Ok(())
}

/// Creates the statefile with the state 'Created'
/// overwrites if it exists already
pub fn create_state_file(jobtype: &str, jobname: &str) -> Result<(), Error> {
    let mut job = Job::new(jobtype, jobname)?;
    job.write_state()
}

/// Returns the last run time of a job by reading the statefile
/// Note that this is not locked
pub fn last_run_time(jobtype: &str, jobname: &str) -> Result<i64, Error> {
    match JobState::load(jobtype, jobname)? {
        JobState::Created { time } => Ok(time),
        JobState::Started { upid } | JobState::Finished { upid, .. } => {
            let upid: UPID = upid
                .parse()
                .map_err(|err| format_err!("could not parse upid from state: {}", err))?;
            Ok(upid.starttime)
        }
    }
}

impl JobState {
    /// Loads and deserializes the jobstate from type and name.
    /// When the loaded state indicates a started UPID,
    /// we go and check if it has already stopped, and
    /// return the correct state.
    ///
    /// This does not update the state in the file.
    pub fn load(jobtype: &str, jobname: &str) -> Result<Self, Error> {
        if let Some(state) = file_read_optional_string(get_path(jobtype, jobname))? {
            match serde_json::from_str(&state)? {
                JobState::Started { upid } => {
                    let parsed: UPID = upid
                        .parse()
                        .map_err(|err| format_err!("error parsing upid: {}", err))?;

                    if !worker_is_active_local(&parsed) {
                        let state = upid_read_status(&parsed)
                            .map_err(|err| format_err!("error reading upid log status: {}", err))?;

                        Ok(JobState::Finished { upid, state })
                    } else {
                        Ok(JobState::Started { upid })
                    }
                }
                other => Ok(other),
            }
        } else {
            Ok(JobState::Created {
                time: epoch_now_u64()? as i64 - 30,
            })
        }
    }
}

impl Job {
    /// Creates a new instance of a job with the correct lock held
    /// (will be held until the job is dropped again).
    ///
    /// This does not load the state from the file, to do that,
    /// 'load' must be called
    pub fn new(jobtype: &str, jobname: &str) -> Result<Self, Error> {
        let path = get_path(jobtype, jobname);

        let _lock = get_lock(&path)?;

        Ok(Self {
            jobtype: jobtype.to_string(),
            jobname: jobname.to_string(),
            state: JobState::Created {
                time: epoch_now_u64()? as i64,
            },
            _lock,
        })
    }

    /// Start the job and update the statefile accordingly
    /// Fails if the job was already started
    pub fn start(&mut self, upid: &str) -> Result<(), Error> {
        match self.state {
            JobState::Started { .. } => {
                bail!("cannot start job that is started!");
            }
            _ => {}
        }

        self.state = JobState::Started {
            upid: upid.to_string(),
        };

        self.write_state()
    }

    /// Finish the job and update the statefile accordingly with the given taskstate
    /// Fails if the job was not yet started
    pub fn finish(&mut self, state: TaskState) -> Result<(), Error> {
        let upid = match &self.state {
            JobState::Created { .. } => bail!("cannot finish when not started"),
            JobState::Started { upid } => upid,
            JobState::Finished { upid, .. } => upid,
        }
        .to_string();

        self.state = JobState::Finished { upid, state };

        self.write_state()
    }

    pub fn jobtype(&self) -> &str {
        &self.jobtype
    }

    pub fn jobname(&self) -> &str {
        &self.jobname
    }

    fn write_state(&mut self) -> Result<(), Error> {
        let serialized = serde_json::to_string(&self.state)?;
        let path = get_path(&self.jobtype, &self.jobname);

        let backup_user = crate::backup::backup_user()?;
        let mode = nix::sys::stat::Mode::from_bits_truncate(0o0644);
|
||||
// set the correct owner/group/permissions while saving file
|
||||
// owner(rw) = backup, group(r)= backup
|
||||
let options = CreateOptions::new()
|
||||
.perm(mode)
|
||||
.owner(backup_user.uid)
|
||||
.group(backup_user.gid);
|
||||
|
||||
replace_file(path, serialized.as_bytes(), options)
|
||||
}
|
||||
}
|
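Note: a minimal sketch of how this new jobstate API is meant to be driven by a caller. The job type and name strings here are made up for illustration; everything else uses only items defined in the module above:

    // Hypothetical caller; "garbage_collection"/"store1" are example names.
    fn run_example_job(upid_str: &str) -> Result<(), anyhow::Error> {
        // Takes the per-job .lck file; it stays locked until `job` drops.
        let mut job = Job::new("garbage_collection", "store1")?;

        // Persist the Started state (bails if the job is already started).
        job.start(upid_str)?;

        // ... perform the actual work, then persist the outcome:
        let state = TaskState::OK { endtime: epoch_now_u64()? as i64 };
        job.finish(state)
    }
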
@@ -600,4 +600,101 @@ mod test {

        Ok(())
    }

    #[test]
    fn test_network_config_parser_no_blank_1() -> Result<(), Error> {
        let input = "auto lo\n\
            iface lo inet loopback\n\
            iface lo inet6 loopback\n\
            auto ens18\n\
            iface ens18 inet static\n\
            \taddress 192.168.20.144/20\n\
            \tgateway 192.168.16.1\n\
            # comment\n\
            iface ens20 inet static\n\
            \taddress 192.168.20.145/20\n\
            iface ens21 inet manual\n\
            iface ens22 inet manual\n";

        let mut parser = NetworkParser::new(&input.as_bytes()[..]);

        let config = parser.parse_interfaces(None)?;

        let output = String::try_from(config)?;

        let expected = "auto lo\n\
            iface lo inet loopback\n\
            \n\
            iface lo inet6 loopback\n\
            \n\
            auto ens18\n\
            iface ens18 inet static\n\
            \taddress 192.168.20.144/20\n\
            \tgateway 192.168.16.1\n\
            #comment\n\
            \n\
            iface ens20 inet static\n\
            \taddress 192.168.20.145/20\n\
            \n\
            iface ens21 inet manual\n\
            \n\
            iface ens22 inet manual\n\
            \n";
        assert_eq!(output, expected);

        Ok(())
    }

    #[test]
    fn test_network_config_parser_no_blank_2() -> Result<(), Error> {
        // Adapted from bug 2926
        let input = "### Hetzner Online GmbH installimage\n\
            \n\
            source /etc/network/interfaces.d/*\n\
            \n\
            auto lo\n\
            iface lo inet loopback\n\
            iface lo inet6 loopback\n\
            \n\
            auto enp4s0\n\
            iface enp4s0 inet static\n\
            \taddress 10.10.10.10/24\n\
            \tgateway 10.10.10.1\n\
            \t# route 10.10.20.10/24 via 10.10.20.1\n\
            \tup route add -net 10.10.20.10 netmask 255.255.255.0 gw 10.10.20.1 dev enp4s0\n\
            \n\
            iface enp4s0 inet6 static\n\
            \taddress fe80::5496:35ff:fe99:5a6a/64\n\
            \tgateway fe80::1\n";

        let mut parser = NetworkParser::new(&input.as_bytes()[..]);

        let config = parser.parse_interfaces(None)?;

        let output = String::try_from(config)?;

        let expected = "### Hetzner Online GmbH installimage\n\
            \n\
            source /etc/network/interfaces.d/*\n\
            \n\
            auto lo\n\
            iface lo inet loopback\n\
            \n\
            iface lo inet6 loopback\n\
            \n\
            auto enp4s0\n\
            iface enp4s0 inet static\n\
            \taddress 10.10.10.10/24\n\
            \tgateway 10.10.10.1\n\
            \t# route 10.10.20.10/24 via 10.10.20.1\n\
            \tup route add -net 10.10.20.10 netmask 255.255.255.0 gw 10.10.20.1 dev enp4s0\n\
            \n\
            iface enp4s0 inet6 static\n\
            \taddress fe80::5496:35ff:fe99:5a6a/64\n\
            \tgateway fe80::1\n\
            \n";
        assert_eq!(output, expected);

        Ok(())
    }
}

@@ -210,9 +210,7 @@ impl <R: BufRead> NetworkParser<R> {
                    self.eat(Token::Newline)?;
                    continue;
                }
                Token::Newline => break,
                Token::EOF => break,
                unexpected => bail!("unexpected token {:?} (expected iface attribute)", unexpected),
                _ => break,
            }

            match self.peek()? {

@@ -40,7 +40,7 @@ pub const REMOTE_PASSWORD_SCHEMA: Schema = StringSchema::new("Password or auth t
            schema: DNS_NAME_OR_IP_SCHEMA,
        },
        userid: {
            schema: PROXMOX_USER_ID_SCHEMA,
            type: Userid,
        },
        password: {
            schema: REMOTE_PASSWORD_SCHEMA,

@@ -58,7 +58,7 @@ pub struct Remote {
    #[serde(skip_serializing_if="Option::is_none")]
    pub comment: Option<String>,
    pub host: String,
    pub userid: String,
    pub userid: Userid,
    #[serde(skip_serializing_if="String::is_empty")]
    #[serde(with = "proxmox::tools::serde::string_as_base64")]
    pub password: String,

@@ -56,7 +56,7 @@ pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
#[api(
    properties: {
        userid: {
            schema: PROXMOX_USER_ID_SCHEMA,
            type: Userid,
        },
        comment: {
            optional: true,
@@ -87,7 +87,7 @@ pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
#[derive(Serialize,Deserialize)]
/// User properties.
pub struct User {
    pub userid: String,
    pub userid: Userid,
    #[serde(skip_serializing_if="Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
@@ -109,7 +109,7 @@ fn init() -> SectionConfig {
    };

    let plugin = SectionConfigPlugin::new("user".to_string(), Some("userid".to_string()), obj_schema);
    let mut config = SectionConfig::new(&PROXMOX_USER_ID_SCHEMA);
    let mut config = SectionConfig::new(&Userid::API_SCHEMA);

    config.register_plugin(plugin);

@@ -129,7 +129,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {

    if data.sections.get("root@pam").is_none() {
        let user: User = User {
            userid: "root@pam".to_string(),
            userid: Userid::root_userid().clone(),
            comment: Some("Superuser".to_string()),
            enable: None,
            expire: None,

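Note: the String-to-Userid switch in these hunks relies on Userid being parseable and having a builtin root constant. A small sketch of the intended usage; the method names come from the calls visible above, but the comparison trait is an assumption:

    fn userid_demo() -> Result<(), anyhow::Error> {
        // `root_userid()` returns the builtin root@pam user, as used in config().
        let root = Userid::root_userid().clone();

        // Arbitrary userids parse via FromStr; invalid ones now fail early
        // instead of living on as unchecked strings.
        let other: Userid = "backup@pbs".parse()?;
        assert_ne!(root.as_str(), other.as_str()); // assumes as_str(), seen above
        Ok(())
    }
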
@@ -1,5 +1,4 @@
use std::collections::{HashSet, HashMap};
use std::convert::TryFrom;
use std::ffi::{CStr, CString, OsStr};
use std::fmt;
use std::io::{self, Read, Write};
@@ -259,34 +258,40 @@ impl<'a, 'b> Archiver<'a, 'b> {
        oflags: OFlag,
        existed: bool,
    ) -> Result<Option<Fd>, Error> {
        match Fd::openat(
            &unsafe { RawFdNum::from_raw_fd(parent) },
            file_name,
            oflags,
            Mode::empty(),
        ) {
            Ok(fd) => Ok(Some(fd)),
            Err(nix::Error::Sys(Errno::ENOENT)) => {
                if existed {
                    self.report_vanished_file()?;
        // common flags we always want to use:
        let oflags = oflags | OFlag::O_CLOEXEC | OFlag::O_NOCTTY;

        let mut noatime = OFlag::O_NOATIME;
        loop {
            return match Fd::openat(
                &unsafe { RawFdNum::from_raw_fd(parent) },
                file_name,
                oflags | noatime,
                Mode::empty(),
            ) {
                Ok(fd) => Ok(Some(fd)),
                Err(nix::Error::Sys(Errno::ENOENT)) => {
                    if existed {
                        self.report_vanished_file()?;
                    }
                    Ok(None)
                }
                Ok(None)
                Err(nix::Error::Sys(Errno::EACCES)) => {
                    writeln!(self.errors, "failed to open file: {:?}: access denied", file_name)?;
                    Ok(None)
                }
                Err(nix::Error::Sys(Errno::EPERM)) if !noatime.is_empty() => {
                    // Retry without O_NOATIME:
                    noatime = OFlag::empty();
                    continue;
                }
                Err(other) => Err(Error::from(other)),
            }
            Err(nix::Error::Sys(Errno::EACCES)) => {
                writeln!(self.errors, "failed to open file: {:?}: access denied", file_name)?;
                Ok(None)
            }
            Err(other) => Err(Error::from(other)),
        }
    }

    fn read_pxar_excludes(&mut self, parent: RawFd) -> Result<(), Error> {
        let fd = self.open_file(
            parent,
            c_str!(".pxarexclude"),
            OFlag::O_RDONLY | OFlag::O_CLOEXEC | OFlag::O_NOCTTY,
            false,
        )?;
        let fd = self.open_file(parent, c_str!(".pxarexclude"), OFlag::O_RDONLY, false)?;

        let old_pattern_count = self.patterns.len();

@@ -480,7 +485,7 @@ impl<'a, 'b> Archiver<'a, 'b> {
        let fd = self.open_file(
            parent,
            c_file_name,
            open_mode | OFlag::O_RDONLY | OFlag::O_NOFOLLOW | OFlag::O_CLOEXEC | OFlag::O_NOCTTY,
            open_mode | OFlag::O_RDONLY | OFlag::O_NOFOLLOW,
            true,
        )?;

@@ -696,16 +701,16 @@ fn get_metadata(fd: RawFd, stat: &FileStat, flags: Flags, fs_magic: i64) -> Resu
    // required for some of these
    let proc_path = Path::new("/proc/self/fd/").join(fd.to_string());

    let mtime = u64::try_from(stat.st_mtime * 1_000_000_000 + stat.st_mtime_nsec)
        .map_err(|_| format_err!("file with negative mtime"))?;

    let mut meta = Metadata {
        stat: pxar::Stat {
            mode: u64::from(stat.st_mode),
            flags: 0,
            uid: stat.st_uid,
            gid: stat.st_gid,
            mtime,
            mtime: pxar::format::StatxTimestamp {
                secs: stat.st_mtime,
                nanos: stat.st_mtime_nsec as u32,
            },
        },
        ..Default::default()
    };

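Note: the retry loop above boils down to a standalone pattern -- attempt the open with O_NOATIME (cheaper for backup reads), and if the kernel refuses with EPERM (O_NOATIME is only allowed for the file owner or CAP_FOWNER), retry once without it. A self-contained sketch against the same nix-era error API used in the hunk:

    use nix::errno::Errno;
    use nix::fcntl::{open, OFlag};
    use nix::sys::stat::Mode;
    use std::os::unix::io::RawFd;

    fn open_readonly_noatime(path: &str) -> nix::Result<RawFd> {
        let base = OFlag::O_RDONLY | OFlag::O_CLOEXEC | OFlag::O_NOCTTY;
        match open(path, base | OFlag::O_NOATIME, Mode::empty()) {
            // Fall back to a plain open if O_NOATIME is not permitted:
            Err(nix::Error::Sys(Errno::EPERM)) => open(path, base, Mode::empty()),
            other => other,
        }
    }
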
@@ -6,6 +6,7 @@ use std::io;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::path::Path;
use std::sync::{Arc, Mutex};

use anyhow::{bail, format_err, Error};
use nix::dir::Dir;
@@ -20,16 +21,18 @@ use proxmox::c_result;
use proxmox::tools::fs::{create_path, CreateOptions};

use crate::pxar::dir_stack::PxarDirStack;
use crate::pxar::Flags;
use crate::pxar::metadata;
use crate::pxar::Flags;

pub fn extract_archive<T, F>(
    mut decoder: pxar::decoder::Decoder<T>,
    destination: &Path,
    match_list: &[MatchEntry],
    extract_match_default: bool,
    feature_flags: Flags,
    allow_existing_dirs: bool,
    mut callback: F,
    on_error: Option<Box<dyn FnMut(Error) -> Result<(), Error> + Send>>,
) -> Result<(), Error>
where
    T: pxar::decoder::SeqRead,
@@ -68,8 +71,13 @@ where
        feature_flags,
    );

    if let Some(on_error) = on_error {
        extractor.on_error(on_error);
    }

    let mut match_stack = Vec::new();
    let mut current_match = true;
    let mut err_path_stack = vec![OsString::from("/")];
    let mut current_match = extract_match_default;
    while let Some(entry) = decoder.next() {
        use pxar::EntryKind;

@@ -87,6 +95,8 @@ where

        let metadata = entry.metadata();

        extractor.set_path(entry.path().as_os_str().to_owned());

        let match_result = match_list.matches(
            entry.path().as_os_str().as_bytes(),
            Some(metadata.file_type() as u32),
@@ -102,17 +112,32 @@ where
                callback(entry.path());

                let create = current_match && match_result != Some(MatchType::Exclude);
                extractor.enter_directory(file_name_os.to_owned(), metadata.clone(), create)?;
                extractor
                    .enter_directory(file_name_os.to_owned(), metadata.clone(), create)
                    .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;

                // We're starting a new directory, push our old matching state and replace it with
                // our new one:
                match_stack.push(current_match);
                current_match = did_match;

                // When we hit the goodbye table we'll try to apply metadata to the directory, but
                // the Goodbye entry will not contain the path, so push it to our path stack for
                // error messages:
                err_path_stack.push(extractor.clone_path());

                Ok(())
            }
            (_, EntryKind::GoodbyeTable) => {
                // go up a directory

                extractor.set_path(err_path_stack.pop().ok_or_else(|| {
                    format_err!(
                        "error at entry {:?}: unexpected end of directory",
                        file_name_os
                    )
                })?);

                extractor
                    .leave_directory()
                    .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
@@ -181,6 +206,13 @@ pub(crate) struct Extractor {
    feature_flags: Flags,
    allow_existing_dirs: bool,
    dir_stack: PxarDirStack,

    /// For better error output we need to track the current path in the Extractor state.
    current_path: Arc<Mutex<OsString>>,

    /// Error callback. Includes `current_path` in the reformatted error, should return `Ok` to
    /// continue extracting or the passed error as `Err` to bail out.
    on_error: Box<dyn FnMut(Error) -> Result<(), Error> + Send>,
}

impl Extractor {
@@ -195,9 +227,30 @@ impl Extractor {
            dir_stack: PxarDirStack::new(root_dir, metadata),
            allow_existing_dirs,
            feature_flags,
            current_path: Arc::new(Mutex::new(OsString::new())),
            on_error: Box::new(|err| Err(err)),
        }
    }

    /// We call this on errors. The error will be reformatted to include `current_path`. The
    /// callback should decide whether this error was fatal (simply return it) to bail out early,
    /// or log/remember/accumulate errors somewhere and return `Ok(())` in its place to continue
    /// extracting.
    pub fn on_error(&mut self, mut on_error: Box<dyn FnMut(Error) -> Result<(), Error> + Send>) {
        let path = Arc::clone(&self.current_path);
        self.on_error = Box::new(move |err: Error| -> Result<(), Error> {
            on_error(format_err!("error at {:?}: {}", path.lock().unwrap(), err))
        });
    }

    pub fn set_path(&mut self, path: OsString) {
        *self.current_path.lock().unwrap() = path;
    }

    pub fn clone_path(&self) -> OsString {
        self.current_path.lock().unwrap().clone()
    }

    /// When encountering a directory during extraction, this is used to keep track of it. If
    /// `create` is true it is immediately created and its metadata will be updated once we leave
    /// it. If `create` is false it will only be created if it is going to have any actual content.
@@ -216,7 +269,7 @@ impl Extractor {
        Ok(())
    }

    /// When done with a directory we need to make sure we're
    /// When done with a directory we can apply its metadata if it has been created.
    pub fn leave_directory(&mut self) -> Result<(), Error> {
        let dir = self
            .dir_stack
@@ -230,6 +283,7 @@ impl Extractor {
                dir.metadata(),
                fd,
                &CString::new(dir.file_name().as_bytes())?,
                &mut self.on_error,
            )
            .map_err(|err| format_err!("failed to apply directory metadata: {}", err))?;
        }
@@ -255,14 +309,16 @@ impl Extractor {
    ) -> Result<(), Error> {
        let parent = self.parent_fd()?;
        nix::unistd::symlinkat(link, Some(parent), file_name)?;
        metadata::apply_at(self.feature_flags, metadata, parent, file_name)
        metadata::apply_at(
            self.feature_flags,
            metadata,
            parent,
            file_name,
            &mut self.on_error,
        )
    }

    pub fn extract_hardlink(
        &mut self,
        file_name: &CStr,
        link: &OsStr,
    ) -> Result<(), Error> {
    pub fn extract_hardlink(&mut self, file_name: &CStr, link: &OsStr) -> Result<(), Error> {
        crate::pxar::tools::assert_relative_path(link)?;

        let parent = self.parent_fd()?;
@@ -306,7 +362,13 @@ impl Extractor {
        unsafe { c_result!(libc::mknodat(parent, file_name.as_ptr(), mode, device)) }
            .map_err(|err| format_err!("failed to create device node: {}", err))?;

        metadata::apply_at(self.feature_flags, metadata, parent, file_name)
        metadata::apply_at(
            self.feature_flags,
            metadata,
            parent,
            file_name,
            &mut self.on_error,
        )
    }

    pub fn extract_file(
@@ -318,16 +380,23 @@ impl Extractor {
    ) -> Result<(), Error> {
        let parent = self.parent_fd()?;
        let mut file = unsafe {
            std::fs::File::from_raw_fd(nix::fcntl::openat(
                parent,
                file_name,
                OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
                Mode::from_bits(0o600).unwrap(),
            std::fs::File::from_raw_fd(
                nix::fcntl::openat(
                    parent,
                    file_name,
                    OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
                    Mode::from_bits(0o600).unwrap(),
                )
                .map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?,
            )
            .map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?)
        };

        metadata::apply_initial_flags(self.feature_flags, metadata, file.as_raw_fd())?;
        metadata::apply_initial_flags(
            self.feature_flags,
            metadata,
            file.as_raw_fd(),
            &mut self.on_error,
        )?;

        let extracted = io::copy(&mut *contents, &mut file)
            .map_err(|err| format_err!("failed to copy file contents: {}", err))?;
@@ -335,7 +404,13 @@ impl Extractor {
            bail!("extracted {} bytes of a file of {} bytes", extracted, size);
        }

        metadata::apply(self.feature_flags, metadata, file.as_raw_fd(), file_name)
        metadata::apply(
            self.feature_flags,
            metadata,
            file.as_raw_fd(),
            file_name,
            &mut self.on_error,
        )
    }

    pub async fn async_extract_file<T: tokio::io::AsyncRead + Unpin>(
@@ -347,16 +422,23 @@ impl Extractor {
    ) -> Result<(), Error> {
        let parent = self.parent_fd()?;
        let mut file = tokio::fs::File::from_std(unsafe {
            std::fs::File::from_raw_fd(nix::fcntl::openat(
                parent,
                file_name,
                OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
                Mode::from_bits(0o600).unwrap(),
            std::fs::File::from_raw_fd(
                nix::fcntl::openat(
                    parent,
                    file_name,
                    OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
                    Mode::from_bits(0o600).unwrap(),
                )
                .map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?,
            )
            .map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?)
        });

        metadata::apply_initial_flags(self.feature_flags, metadata, file.as_raw_fd())?;
        metadata::apply_initial_flags(
            self.feature_flags,
            metadata,
            file.as_raw_fd(),
            &mut self.on_error,
        )?;

        let extracted = tokio::io::copy(&mut *contents, &mut file)
            .await
@@ -365,6 +447,12 @@ impl Extractor {
            bail!("extracted {} bytes of a file of {} bytes", extracted, size);
        }

        metadata::apply(self.feature_flags, metadata, file.as_raw_fd(), file_name)
        metadata::apply(
            self.feature_flags,
            metadata,
            file.as_raw_fd(),
            file_name,
            &mut self.on_error,
        )
    }
}

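Note: with the new `on_error` hook, callers decide between fail-fast and log-and-continue. A sketch of the log-and-continue wiring, assuming an `extractor: Extractor` from this module is in scope:

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Arc;

    let error_count = Arc::new(AtomicUsize::new(0));
    let counter = Arc::clone(&error_count);
    extractor.on_error(Box::new(move |err| {
        // The hook already prefixed `err` with the current path.
        eprintln!("{}", err);
        counter.fetch_add(1, Ordering::Relaxed);
        Ok(()) // Ok(()) continues extraction; returning Err(err) would abort.
    }));
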
@@ -673,11 +673,6 @@ fn to_stat(inode: u64, entry: &pxar::Entry) -> Result<libc::stat, Error> {

    let metadata = entry.metadata();

    let time = i64::try_from(metadata.stat.mtime)
        .map_err(|_| format_err!("mtime does not fit into a signed 64 bit integer"))?;
    let sec = time / 1_000_000_000;
    let nsec = time % 1_000_000_000;

    let mut stat: libc::stat = unsafe { mem::zeroed() };
    stat.st_ino = inode;
    stat.st_nlink = nlink;
@@ -687,11 +682,11 @@ fn to_stat(inode: u64, entry: &pxar::Entry) -> Result<libc::stat, Error> {
        .map_err(|err| format_err!("size does not fit into st_size field: {}", err))?;
    stat.st_uid = metadata.stat.uid;
    stat.st_gid = metadata.stat.gid;
    stat.st_atime = sec;
    stat.st_atime_nsec = nsec;
    stat.st_mtime = sec;
    stat.st_mtime_nsec = nsec;
    stat.st_ctime = sec;
    stat.st_ctime_nsec = nsec;
    stat.st_atime = metadata.stat.mtime.secs;
    stat.st_atime_nsec = metadata.stat.mtime.nanos as _;
    stat.st_mtime = metadata.stat.mtime.secs;
    stat.st_mtime_nsec = metadata.stat.mtime.nanos as _;
    stat.st_ctime = metadata.stat.mtime.secs;
    stat.st_ctime_nsec = metadata.stat.mtime.nanos as _;
    Ok(stat)
}

@@ -37,26 +37,20 @@ fn allow_notsupp_remember<E: SysError>(err: E, not_supp: &mut bool) -> Result<()
    }
}

fn nsec_to_update_timespec(mtime_nsec: u64) -> [libc::timespec; 2] {
fn timestamp_to_update_timespec(mtime: &pxar::format::StatxTimestamp) -> [libc::timespec; 2] {
    // restore mtime
    const UTIME_OMIT: i64 = (1 << 30) - 2;
    const NANOS_PER_SEC: i64 = 1_000_000_000;

    let sec = (mtime_nsec as i64) / NANOS_PER_SEC;
    let nsec = (mtime_nsec as i64) % NANOS_PER_SEC;

    let times: [libc::timespec; 2] = [
    [
        libc::timespec {
            tv_sec: 0,
            tv_nsec: UTIME_OMIT,
        },
        libc::timespec {
            tv_sec: sec,
            tv_nsec: nsec,
            tv_sec: mtime.secs,
            tv_nsec: mtime.nanos as _,
        },
    ];

    times
    ]
}

//
@@ -68,6 +62,7 @@ pub fn apply_at(
    metadata: &Metadata,
    parent: RawFd,
    file_name: &CStr,
    on_error: &mut (dyn FnMut(Error) -> Result<(), Error> + Send),
) -> Result<(), Error> {
    let fd = proxmox::tools::fd::Fd::openat(
        &unsafe { RawFdNum::from_raw_fd(parent) },
@@ -76,20 +71,32 @@ pub fn apply_at(
        Mode::empty(),
    )?;

    apply(flags, metadata, fd.as_raw_fd(), file_name)
    apply(flags, metadata, fd.as_raw_fd(), file_name, on_error)
}

pub fn apply_initial_flags(
    flags: Flags,
    metadata: &Metadata,
    fd: RawFd,
    on_error: &mut (dyn FnMut(Error) -> Result<(), Error> + Send),
) -> Result<(), Error> {
    let entry_flags = Flags::from_bits_truncate(metadata.stat.flags);
    apply_chattr(fd, entry_flags.to_initial_chattr(), flags.to_initial_chattr())?;
    apply_chattr(
        fd,
        entry_flags.to_initial_chattr(),
        flags.to_initial_chattr(),
    )
    .or_else(on_error)?;
    Ok(())
}

pub fn apply(flags: Flags, metadata: &Metadata, fd: RawFd, file_name: &CStr) -> Result<(), Error> {
pub fn apply(
    flags: Flags,
    metadata: &Metadata,
    fd: RawFd,
    file_name: &CStr,
    on_error: &mut (dyn FnMut(Error) -> Result<(), Error> + Send),
) -> Result<(), Error> {
    let c_proc_path = CString::new(format!("/proc/self/fd/{}", fd)).unwrap();

    unsafe {
@@ -101,15 +108,18 @@ pub fn apply(flags: Flags, metadata: &Metadata, fd: RawFd, file_name: &CStr) ->
        ))
        .map(drop)
        .or_else(allow_notsupp)
        .map_err(|err| format_err!("failed to set ownership: {}", err))?;
        .map_err(|err| format_err!("failed to set ownership: {}", err))
        .or_else(&mut *on_error)?;
    }

    let mut skip_xattrs = false;
    apply_xattrs(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs)?;
    add_fcaps(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs)?;
    apply_xattrs(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs)
        .or_else(&mut *on_error)?;
    add_fcaps(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs).or_else(&mut *on_error)?;
    apply_acls(flags, &c_proc_path, metadata)
        .map_err(|err| format_err!("failed to apply acls: {}", err))?;
    apply_quota_project_id(flags, fd, metadata)?;
        .map_err(|err| format_err!("failed to apply acls: {}", err))
        .or_else(&mut *on_error)?;
    apply_quota_project_id(flags, fd, metadata).or_else(&mut *on_error)?;

    // Finally mode and time. We may lose access with mode, but changing the mode also
    // affects times.
@@ -119,31 +129,32 @@
    })
    .map(drop)
    .or_else(allow_notsupp)
    .map_err(|err| format_err!("failed to change file mode: {}", err))?;
    .map_err(|err| format_err!("failed to change file mode: {}", err))
    .or_else(&mut *on_error)?;
    }

    if metadata.stat.flags != 0 {
        apply_flags(flags, fd, metadata.stat.flags)?;
        apply_flags(flags, fd, metadata.stat.flags).or_else(&mut *on_error)?;
    }

    let res = c_result!(unsafe {
        libc::utimensat(
            libc::AT_FDCWD,
            c_proc_path.as_ptr(),
            nsec_to_update_timespec(metadata.stat.mtime).as_ptr(),
            timestamp_to_update_timespec(&metadata.stat.mtime).as_ptr(),
            0,
        )
    });
    match res {
        Ok(_) => (),
        Err(ref err) if err.is_errno(Errno::EOPNOTSUPP) => (),
        Err(ref err) if err.is_errno(Errno::EPERM) => {
            println!(
        Err(err) => {
            on_error(format_err!(
                "failed to restore mtime attribute on {:?}: {}",
                file_name, err
            );
                file_name,
                err
            ))?;
        }
        Err(err) => return Err(err.into()),
    }

    Ok(())
@@ -195,7 +206,7 @@ fn apply_xattrs(
    }

    if !xattr::is_valid_xattr_name(xattr.name()) {
        println!("skipping invalid xattr named {:?}", xattr.name());
        eprintln!("skipping invalid xattr named {:?}", xattr.name());
        continue;
    }

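Note: the `[timespec; 2]` shape above is dictated by utimensat(2), which takes `[atime, mtime]`; a `tv_nsec` of UTIME_OMIT tells the kernel to leave that slot untouched. A minimal sketch of the same construction:

    // UTIME_OMIT, as defined in linux/stat.h: ((1 << 30) - 2)
    const UTIME_OMIT: i64 = (1 << 30) - 2;

    fn mtime_only(secs: i64, nanos: u32) -> [libc::timespec; 2] {
        [
            // atime slot: skipped entirely thanks to UTIME_OMIT
            libc::timespec { tv_sec: 0, tv_nsec: UTIME_OMIT },
            // mtime slot: the timestamp we want to restore
            libc::timespec { tv_sec: secs, tv_nsec: nanos as _ },
        ]
    }
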
@@ -120,8 +120,7 @@ pub fn format_single_line_entry(entry: &Entry) -> String {
    let mode_string = mode_string(entry);

    let meta = entry.metadata();
    let mtime = meta.mtime_as_duration();
    let mtime = chrono::Local.timestamp(mtime.as_secs() as i64, mtime.subsec_nanos());
    let mtime = chrono::Local.timestamp(meta.stat.mtime.secs, meta.stat.mtime.nanos);

    let (size, link) = match entry.kind() {
        EntryKind::File { size, .. } => (format!("{}", *size), String::new()),
@@ -148,8 +147,7 @@ pub fn format_multi_line_entry(entry: &Entry) -> String {
    let mode_string = mode_string(entry);

    let meta = entry.metadata();
    let mtime = meta.mtime_as_duration();
    let mtime = chrono::Local.timestamp(mtime.as_secs() as i64, mtime.subsec_nanos());
    let mtime = chrono::Local.timestamp(meta.stat.mtime.secs, meta.stat.mtime.nanos);

    let (size, link, type_name) = match entry.kind() {
        EntryKind::File { size, .. } => (format!("{}", *size), String::new(), "file"),

@@ -44,7 +44,7 @@ impl <E: RpcEnvironment + Clone> H2Service<E> {

        let (path, components) = match tools::normalize_uri_path(parts.uri.path()) {
            Ok((p,c)) => (p, c),
            Err(err) => return future::err(http_err!(BAD_REQUEST, err.to_string())).boxed(),
            Err(err) => return future::err(http_err!(BAD_REQUEST, "{}", err)).boxed(),
        };

        self.debug(format!("{} {}", method, path));
@@ -55,7 +55,7 @@ impl <E: RpcEnvironment + Clone> H2Service<E> {

        match self.router.find_method(&components, method, &mut uri_param) {
            None => {
                let err = http_err!(NOT_FOUND, format!("Path '{}' not found.", path).to_string());
                let err = http_err!(NOT_FOUND, "Path '{}' not found.", path);
                future::ok((formatter.format_error)(err)).boxed()
            }
            Some(api_method) => {

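Note: these call sites track an http_err! macro that now accepts format arguments directly, so the old `format!(...).to_string()` detour is gone (macro shape inferred from the call sites, so treat this as a sketch):

    // before: http_err!(NOT_FOUND, format!("Path '{}' not found.", path).to_string())
    // after:
    let err = http_err!(NOT_FOUND, "Path '{}' not found.", path);
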
@@ -27,7 +27,9 @@ use super::formatter::*;
use super::ApiConfig;

use crate::auth_helpers::*;
use crate::api2::types::Userid;
use crate::tools;
use crate::tools::ticket::Ticket;
use crate::config::cached_user_info::CachedUserInfo;

extern "C" { fn tzset(); }
@@ -204,13 +206,13 @@ async fn get_request_parameters<S: 'static + BuildHasher + Send>(
    }

    let body = req_body
        .map_err(|err| http_err!(BAD_REQUEST, format!("Problems reading request body: {}", err)))
        .map_err(|err| http_err!(BAD_REQUEST, "Problems reading request body: {}", err))
        .try_fold(Vec::new(), |mut acc, chunk| async move {
            if acc.len() + chunk.len() < 64*1024 { //fixme: max request body size?
                acc.extend_from_slice(&*chunk);
                Ok(acc)
            } else {
                Err(http_err!(BAD_REQUEST, "Request body too large".to_string()))
                Err(http_err!(BAD_REQUEST, "Request body too large"))
            }
        }).await?;

@@ -311,10 +313,10 @@ pub async fn handle_api_request<Env: RpcEnvironment, S: 'static + BuildHasher +
    Ok(resp)
}

fn get_index(username: Option<String>, token: Option<String>, api: &Arc<ApiConfig>, parts: Parts) -> Response<Body> {
fn get_index(userid: Option<Userid>, token: Option<String>, api: &Arc<ApiConfig>, parts: Parts) -> Response<Body> {

    let nodename = proxmox::tools::nodename();
    let username = username.unwrap_or_else(|| String::from(""));
    let userid = userid.as_ref().map(|u| u.as_str()).unwrap_or("");

    let token = token.unwrap_or_else(|| String::from(""));

@@ -333,7 +335,7 @@ fn get_index(username: Option<String>, token: Option<String>, api: &Arc<ApiConfi

    let data = json!({
        "NodeName": nodename,
        "UserName": username,
        "UserName": userid,
        "CSRFPreventionToken": token,
        "debug": debug,
    });
@@ -392,12 +394,12 @@ async fn simple_static_file_download(filename: PathBuf) -> Result<Response<Body>

    let mut file = File::open(filename)
        .await
        .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))?;
        .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

    let mut data: Vec<u8> = Vec::new();
    file.read_to_end(&mut data)
        .await
        .map_err(|err| http_err!(BAD_REQUEST, format!("File read failed: {}", err)))?;
        .map_err(|err| http_err!(BAD_REQUEST, "File read failed: {}", err))?;

    let mut response = Response::new(data.into());
    response.headers_mut().insert(
@@ -411,7 +413,7 @@ async fn chuncked_static_file_download(filename: PathBuf) -> Result<Response<Bod

    let file = File::open(filename)
        .await
        .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))?;
        .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

    let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
@@ -429,7 +431,7 @@ async fn chuncked_static_file_download(filename: PathBuf) -> Result<Response<Bod
async fn handle_static_file_download(filename: PathBuf) -> Result<Response<Body>, Error> {

    let metadata = tokio::fs::metadata(filename.clone())
        .map_err(|err| http_err!(BAD_REQUEST, format!("File access problems: {}", err)))
        .map_err(|err| http_err!(BAD_REQUEST, "File access problems: {}", err))
        .await?;

    if metadata.len() < 1024*32 {
@@ -461,33 +463,27 @@ fn check_auth(
    ticket: &Option<String>,
    token: &Option<String>,
    user_info: &CachedUserInfo,
) -> Result<String, Error> {

) -> Result<Userid, Error> {
    let ticket_lifetime = tools::ticket::TICKET_LIFETIME;

    let username = match ticket {
        Some(ticket) => match tools::ticket::verify_rsa_ticket(public_auth_key(), "PBS", &ticket, None, -300, ticket_lifetime) {
            Ok((_age, Some(username))) => username.to_owned(),
            Ok((_, None)) => bail!("ticket without username."),
            Err(err) => return Err(err),
        }
        None => bail!("missing ticket"),
    };
    let ticket = ticket.as_ref().map(String::as_str);
    let userid: Userid = Ticket::parse(&ticket.ok_or_else(|| format_err!("missing ticket"))?)?
        .verify_with_time_frame(public_auth_key(), "PBS", None, -300..ticket_lifetime)?;

    if !user_info.is_active_user(&username) {
    if !user_info.is_active_user(&userid) {
        bail!("user account disabled or expired.");
    }

    if method != hyper::Method::GET {
        if let Some(token) = token {
            println!("CSRF prevention token: {:?}", token);
            verify_csrf_prevention_token(csrf_secret(), &username, &token, -300, ticket_lifetime)?;
            verify_csrf_prevention_token(csrf_secret(), &userid, &token, -300, ticket_lifetime)?;
        } else {
            bail!("missing CSRF prevention token");
        }
    }

    Ok(username)
    Ok(userid)
}

pub async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<Response<Body>, Error> {
@@ -532,10 +528,10 @@ pub async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<R
    } else {
        let (ticket, token) = extract_auth_data(&parts.headers);
        match check_auth(&method, &ticket, &token, &user_info) {
            Ok(username) => rpcenv.set_user(Some(username)),
            Ok(userid) => rpcenv.set_user(Some(userid.to_string())),
            Err(err) => {
                // always delay unauthorized calls by 3 seconds (from start of request)
                let err = http_err!(UNAUTHORIZED, format!("authentication failed - {}", err));
                let err = http_err!(UNAUTHORIZED, "authentication failed - {}", err);
                tokio::time::delay_until(Instant::from_std(delay_unauth_time)).await;
                return Ok((formatter.format_error)(err));
            }
@@ -544,13 +540,13 @@ pub async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<R

    match api.find_method(&components[2..], method, &mut uri_param) {
        None => {
            let err = http_err!(NOT_FOUND, format!("Path '{}' not found.", path).to_string());
            let err = http_err!(NOT_FOUND, "Path '{}' not found.", path);
            return Ok((formatter.format_error)(err));
        }
        Some(api_method) => {
            let user = rpcenv.get_user();
            if !check_api_permission(api_method.access.permission, user.as_deref(), &uri_param, user_info.as_ref()) {
                let err = http_err!(FORBIDDEN, format!("permission check failed"));
                let err = http_err!(FORBIDDEN, "permission check failed");
                tokio::time::delay_until(Instant::from_std(access_forbidden_time)).await;
                return Ok((formatter.format_error)(err));
            }
@@ -580,9 +576,9 @@ pub async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<R
    let (ticket, token) = extract_auth_data(&parts.headers);
    if ticket != None {
        match check_auth(&method, &ticket, &token, &user_info) {
            Ok(username) => {
                let new_token = assemble_csrf_prevention_token(csrf_secret(), &username);
                return Ok(get_index(Some(username), Some(new_token), &api, parts));
            Ok(userid) => {
                let new_token = assemble_csrf_prevention_token(csrf_secret(), &userid);
                return Ok(get_index(Some(userid), Some(new_token), &api, parts));
            }
            _ => {
                tokio::time::delay_until(Instant::from_std(delay_unauth_time)).await;
@@ -598,5 +594,5 @@ pub async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<R
    }
}

    Err(http_err!(NOT_FOUND, format!("Path '{}' not found.", path).to_string()))
    Err(http_err!(NOT_FOUND, "Path '{}' not found.", path))
}

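Note: check_auth now goes through the new typed Ticket API. Roughly, the flow is the following; the signatures are inferred from the call above, so treat this as a sketch rather than the authoritative interface:

    fn verify_ticket(ticket_str: &str) -> Result<Userid, Error> {
        let lifetime = tools::ticket::TICKET_LIFETIME;
        // Parsing and verification are separate, typed steps now; the
        // -300..lifetime range replaces the old (min_age, max_age) pair.
        let userid: Userid = Ticket::parse(ticket_str)?
            .verify_with_time_frame(public_auth_key(), "PBS", None, -300..lifetime)?;
        Ok(userid)
    }
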
@@ -19,7 +19,7 @@ pub struct ServerState {
    pub shutdown_listeners: BroadcastData<()>,
    pub last_worker_listeners: BroadcastData<()>,
    pub worker_count: usize,
    pub task_count: usize,
    pub internal_task_count: usize,
    pub reload_request: bool,
}

@@ -29,7 +29,7 @@ lazy_static! {
        shutdown_listeners: BroadcastData::new(),
        last_worker_listeners: BroadcastData::new(),
        worker_count: 0,
        task_count: 0,
        internal_task_count: 0,
        reload_request: false,
    });
}
@@ -111,7 +111,7 @@ pub fn set_worker_count(count: usize) {
pub fn check_last_worker() {
    let mut data = SERVER_STATE.lock().unwrap();

    if !(data.mode == ServerMode::Shutdown && data.worker_count == 0 && data.task_count == 0) { return; }
    if !(data.mode == ServerMode::Shutdown && data.worker_count == 0 && data.internal_task_count == 0) { return; }

    data.last_worker_listeners.notify_listeners(Ok(()));
}
@@ -125,15 +125,15 @@ where
    T::Output: Send + 'static,
{
    let mut data = SERVER_STATE.lock().unwrap();
    data.task_count += 1;
    data.internal_task_count += 1;

    tokio::spawn(async move {
        let _ = tokio::spawn(task).await; // ignore errors

        { // drop mutex
            let mut data = SERVER_STATE.lock().unwrap();
            if data.task_count > 0 {
                data.task_count -= 1;
            if data.internal_task_count > 0 {
                data.internal_task_count -= 1;
            }
        }

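Note: renaming `task_count` to `internal_task_count` separates internally spawned helper futures from externally visible workers, so shutdown waits on the right counter. The function name wrapping the `tokio::spawn` above is not visible in this hunk, so the following is only pseudocode for the bookkeeping pattern:

    fn spawn_counted<T>(task: T)
    where
        T: std::future::Future + Send + 'static,
        T::Output: Send + 'static,
    {
        SERVER_STATE.lock().unwrap().internal_task_count += 1;
        tokio::spawn(async move {
            let _ = tokio::spawn(task).await; // ignore errors
            let mut data = SERVER_STATE.lock().unwrap();
            if data.internal_task_count > 0 {
                data.internal_task_count -= 1;
            }
            drop(data); // release before re-checking shutdown state
            check_last_worker();
        });
    }
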
@@ -1,23 +1,26 @@
use anyhow::{bail, Error};
use lazy_static::lazy_static;
use regex::Regex;
use chrono::Local;

use std::sync::atomic::{AtomicUsize, Ordering};

use anyhow::{bail, Error};
use chrono::Local;

use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema};
use proxmox::const_regex;
use proxmox::sys::linux::procfs;

use crate::api2::types::Userid;

/// Unique Process/Task Identifier
///
/// We use this to uniquely identify worker tasks. UPIDs have a short
/// string representation, which gives additional information about the
/// type of the task. For example:
/// ```text
/// UPID:{node}:{pid}:{pstart}:{task_id}:{starttime}:{worker_type}:{worker_id}:{username}:
/// UPID:{node}:{pid}:{pstart}:{task_id}:{starttime}:{worker_type}:{worker_id}:{userid}:
/// UPID:elsa:00004F37:0039E469:00000000:5CA78B83:garbage_collection::root@pam:
/// ```
/// Please note that we use tokio, so a single thread can run multiple
/// tasks.
// #[api] - manually implemented API type
#[derive(Debug, Clone)]
pub struct UPID {
    /// The Unix PID
@@ -33,15 +36,38 @@ pub struct UPID {
    /// Worker ID (arbitrary ASCII string)
    pub worker_id: Option<String>,
    /// The user who started the task
    pub username: String,
    pub userid: Userid,
    /// The node name.
    pub node: String,
}

proxmox::forward_serialize_to_display!(UPID);
proxmox::forward_deserialize_to_from_str!(UPID);

const_regex! {
    pub PROXMOX_UPID_REGEX = concat!(
        r"^UPID:(?P<node>[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?):(?P<pid>[0-9A-Fa-f]{8}):",
        r"(?P<pstart>[0-9A-Fa-f]{8,9}):(?P<task_id>[0-9A-Fa-f]{8,16}):(?P<starttime>[0-9A-Fa-f]{8}):",
        r"(?P<wtype>[^:\s]+):(?P<wid>[^:\s]*):(?P<userid>[^:\s]+):$"
    );
}

pub const PROXMOX_UPID_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PROXMOX_UPID_REGEX);

impl UPID {
    pub const API_SCHEMA: Schema = StringSchema::new("Unique Process/Task Identifier")
        .min_length("UPID:N:12345678:12345678:12345678:::".len())
        .max_length(128) // arbitrary
        .format(&PROXMOX_UPID_FORMAT)
        .schema();

    /// Create a new UPID
    pub fn new(worker_type: &str, worker_id: Option<String>, username: &str) -> Result<Self, Error> {
    pub fn new(
        worker_type: &str,
        worker_id: Option<String>,
        userid: Userid,
    ) -> Result<Self, Error> {

        let pid = unsafe { libc::getpid() };

@@ -67,7 +93,7 @@ impl UPID {
            task_id,
            worker_type: worker_type.to_owned(),
            worker_id,
            username: username.to_owned(),
            userid,
            node: proxmox::tools::nodename().to_owned(),
        })
    }
@@ -86,17 +112,7 @@ impl std::str::FromStr for UPID {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {

        lazy_static! {
            static ref REGEX: Regex = Regex::new(concat!(
                r"^UPID:(?P<node>[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?):(?P<pid>[0-9A-Fa-f]{8}):",
                r"(?P<pstart>[0-9A-Fa-f]{8,9}):(?P<task_id>[0-9A-Fa-f]{8,16}):(?P<starttime>[0-9A-Fa-f]{8}):",
                r"(?P<wtype>[^:\s]+):(?P<wid>[^:\s]*):(?P<username>[^:\s]+):$"
            )).unwrap();
        }

        if let Some(cap) = REGEX.captures(s) {

        if let Some(cap) = PROXMOX_UPID_REGEX.captures(s) {
            Ok(UPID {
                pid: i32::from_str_radix(&cap["pid"], 16).unwrap(),
                pstart: u64::from_str_radix(&cap["pstart"], 16).unwrap(),
@@ -104,7 +120,7 @@ impl std::str::FromStr for UPID {
                task_id: usize::from_str_radix(&cap["task_id"], 16).unwrap(),
                worker_type: cap["wtype"].to_string(),
                worker_id: if cap["wid"].is_empty() { None } else { Some(cap["wid"].to_string()) },
                username: cap["username"].to_string(),
                userid: cap["userid"].parse()?,
                node: cap["node"].to_string(),
            })
        } else {
@@ -124,6 +140,6 @@ impl std::fmt::Display for UPID {
        // more than 8 characters for pstart

        write!(f, "UPID:{}:{:08X}:{:08X}:{:08X}:{:08X}:{}:{}:{}:",
            self.node, self.pid, self.pstart, self.task_id, self.starttime, self.worker_type, wid, self.username)
            self.node, self.pid, self.pstart, self.task_id, self.starttime, self.worker_type, wid, self.userid)
    }
}

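Note: a round-trip of the example from the doc comment, exercising both the new shared PROXMOX_UPID_REGEX parse path and the Display impl (a sketch relying only on items shown above; Display on Userid is implied by the write! call):

    fn upid_roundtrip() -> Result<(), Error> {
        let s = "UPID:elsa:00004F37:0039E469:00000000:5CA78B83:garbage_collection::root@pam:";
        let upid: UPID = s.parse()?;
        assert_eq!(upid.node, "elsa");
        assert_eq!(upid.worker_type, "garbage_collection");
        assert_eq!(upid.userid.to_string(), "root@pam"); // userid is now typed
        assert_eq!(upid.to_string(), s); // Display reproduces the input
        Ok(())
    }
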
@ -11,15 +11,17 @@ use futures::*;
|
||||
use lazy_static::lazy_static;
|
||||
use nix::unistd::Pid;
|
||||
use serde_json::{json, Value};
|
||||
use serde::{Serialize, Deserialize};
|
||||
use tokio::sync::oneshot;
|
||||
|
||||
use proxmox::sys::linux::procfs;
|
||||
use proxmox::try_block;
|
||||
use proxmox::tools::fs::{create_path, replace_file, CreateOptions};
|
||||
use proxmox::tools::fs::{create_path, open_file_locked, replace_file, CreateOptions};
|
||||
|
||||
use super::UPID;
|
||||
|
||||
use crate::tools::FileLogger;
|
||||
use crate::api2::types::Userid;
|
||||
|
||||
macro_rules! PROXMOX_BACKUP_VAR_RUN_DIR_M { () => ("/run/proxmox-backup") }
|
||||
macro_rules! PROXMOX_BACKUP_LOG_DIR_M { () => ("/var/log/proxmox-backup") }
|
||||
@ -154,7 +156,7 @@ pub async fn abort_worker(upid: UPID) -> Result<(), Error> {
|
||||
super::send_command(socketname, cmd).map_ok(|_| ()).await
|
||||
}
|
||||
|
||||
fn parse_worker_status_line(line: &str) -> Result<(String, UPID, Option<(i64, String)>), Error> {
|
||||
fn parse_worker_status_line(line: &str) -> Result<(String, UPID, Option<TaskState>), Error> {
|
||||
|
||||
let data = line.splitn(3, ' ').collect::<Vec<&str>>();
|
||||
|
||||
@ -164,7 +166,8 @@ fn parse_worker_status_line(line: &str) -> Result<(String, UPID, Option<(i64, St
|
||||
1 => Ok((data[0].to_owned(), data[0].parse::<UPID>()?, None)),
|
||||
3 => {
|
||||
let endtime = i64::from_str_radix(data[1], 16)?;
|
||||
Ok((data[0].to_owned(), data[0].parse::<UPID>()?, Some((endtime, data[2].to_owned()))))
|
||||
let state = TaskState::from_endtime_and_message(endtime, data[2])?;
|
||||
Ok((data[0].to_owned(), data[0].parse::<UPID>()?, Some(state)))
|
||||
}
|
||||
_ => bail!("wrong number of components"),
|
||||
}
|
||||
@ -188,9 +191,12 @@ pub fn create_task_log_dirs() -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Read exits status from task log file
|
||||
pub fn upid_read_status(upid: &UPID) -> Result<String, Error> {
|
||||
let mut status = String::from("unknown");
|
||||
/// Read endtime (time of last log line) and exitstatus from task log file
|
||||
/// If there is not a single line with at valid datetime, we assume the
|
||||
/// starttime to be the endtime
|
||||
pub fn upid_read_status(upid: &UPID) -> Result<TaskState, Error> {
|
||||
let mut endtime = upid.starttime;
|
||||
let mut status = TaskState::Unknown { endtime };
|
||||
|
||||
let path = upid.log_path();
|
||||
|
||||
@ -206,17 +212,19 @@ pub fn upid_read_status(upid: &UPID) -> Result<String, Error> {
|
||||
for line in reader.lines() {
|
||||
let line = line?;
|
||||
|
||||
let mut iter = line.splitn(2, ": TASK ");
|
||||
if iter.next() == None { continue; }
|
||||
match iter.next() {
|
||||
let mut iter = line.splitn(2, ": ");
|
||||
if let Some(time_str) = iter.next() {
|
||||
endtime = chrono::DateTime::parse_from_rfc3339(time_str)
|
||||
.map_err(|err| format_err!("cannot parse '{}': {}", time_str, err))?
|
||||
.timestamp();
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
match iter.next().and_then(|rest| rest.strip_prefix("TASK ")) {
|
||||
None => continue,
|
||||
Some(rest) => {
|
||||
if rest == "OK" {
|
||||
status = String::from(rest);
|
||||
} else if rest.starts_with("WARNINGS: ") {
|
||||
status = String::from(rest);
|
||||
} else if rest.starts_with("ERROR: ") {
|
||||
status = String::from(&rest[7..]);
|
||||
if let Ok(state) = TaskState::from_endtime_and_message(endtime, rest) {
|
||||
status = state;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -225,6 +233,76 @@ pub fn upid_read_status(upid: &UPID) -> Result<String, Error> {
|
||||
Ok(status)
|
||||
}
|
||||
|
||||
/// Task State
|
||||
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub enum TaskState {
|
||||
/// The Task ended with an undefined state
|
||||
Unknown { endtime: i64 },
|
||||
/// The Task ended and there were no errors or warnings
|
||||
OK { endtime: i64 },
|
||||
/// The Task had 'count' amount of warnings and no errors
|
||||
Warning { count: u64, endtime: i64 },
|
||||
/// The Task ended with the error described in 'message'
|
||||
Error { message: String, endtime: i64 },
|
||||
}
|
||||
|
||||
impl TaskState {
|
||||
pub fn endtime(&self) -> i64 {
|
||||
match *self {
|
||||
TaskState::Unknown { endtime } => endtime,
|
||||
TaskState::OK { endtime } => endtime,
|
||||
TaskState::Warning { endtime, .. } => endtime,
|
||||
TaskState::Error { endtime, .. } => endtime,
|
||||
}
|
||||
}
|
||||
|
||||
fn result_text(&self) -> String {
|
||||
match self {
|
||||
TaskState::Error { message, .. } => format!("TASK ERROR: {}", message),
|
||||
other => format!("TASK {}", other),
|
||||
}
|
||||
}
|
||||
|
||||
fn from_endtime_and_message(endtime: i64, s: &str) -> Result<Self, Error> {
|
||||
if s == "unknown" {
|
||||
Ok(TaskState::Unknown { endtime })
|
||||
} else if s == "OK" {
|
||||
Ok(TaskState::OK { endtime })
|
||||
} else if s.starts_with("WARNINGS: ") {
|
||||
let count: u64 = s[10..].parse()?;
|
||||
Ok(TaskState::Warning{ count, endtime })
|
||||
} else if s.len() > 0 {
|
||||
let message = if s.starts_with("ERROR: ") { &s[7..] } else { s }.to_string();
|
||||
Ok(TaskState::Error{ message, endtime })
|
||||
} else {
|
||||
bail!("unable to parse Task Status '{}'", s);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::cmp::PartialOrd for TaskState {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
|
||||
Some(self.endtime().cmp(&other.endtime()))
|
||||
}
|
||||
}
|
||||
|
||||
impl std::cmp::Ord for TaskState {
|
||||
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
||||
self.endtime().cmp(&other.endtime())
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for TaskState {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
TaskState::Unknown { .. } => write!(f, "unknown"),
|
||||
TaskState::OK { .. }=> write!(f, "OK"),
|
||||
TaskState::Warning { count, .. } => write!(f, "WARNINGS: {}", count),
|
||||
TaskState::Error { message, .. } => write!(f, "{}", message),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Task details including parsed UPID
|
||||
///
|
||||
/// If there is no `state`, the task is still running.
|
||||
@ -235,9 +313,7 @@ pub struct TaskListInfo {
|
||||
/// UPID string representation
|
||||
pub upid_str: String,
|
||||
/// Task `(endtime, status)` if already finished
|
||||
///
|
||||
/// The `status` is either `unknown`, `OK`, `WARN`, or `ERROR: ...`
|
||||
pub state: Option<(i64, String)>, // endtime, status
|
||||
pub state: Option<TaskState>, // endtime, status
|
||||
}
|
||||
|
||||
// atomically read/update the task list, update status of finished tasks
|
||||
@ -247,7 +323,7 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<Vec<TaskListInfo>, E
|
||||
|
||||
let backup_user = crate::backup::backup_user()?;
|
||||
|
||||
let lock = crate::tools::open_file_locked(PROXMOX_BACKUP_TASK_LOCK_FN, std::time::Duration::new(10, 0))?;
|
||||
let lock = open_file_locked(PROXMOX_BACKUP_TASK_LOCK_FN, std::time::Duration::new(10, 0))?;
|
||||
nix::unistd::chown(PROXMOX_BACKUP_TASK_LOCK_FN, Some(backup_user.uid), Some(backup_user.gid))?;
|
||||
|
||||
let reader = match File::open(PROXMOX_BACKUP_ACTIVE_TASK_FN) {
|
||||
@ -277,14 +353,14 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<Vec<TaskListInfo>, E
|
||||
None => {
|
||||
println!("Detected stopped UPID {}", upid_str);
|
||||
let status = upid_read_status(&upid)
|
||||
.unwrap_or_else(|_| String::from("unknown"));
|
||||
.unwrap_or_else(|_| TaskState::Unknown { endtime: Local::now().timestamp() });
|
||||
finish_list.push(TaskListInfo {
|
||||
upid, upid_str, state: Some((Local::now().timestamp(), status))
|
||||
upid, upid_str, state: Some(status)
|
||||
});
|
||||
},
|
||||
Some((endtime, status)) => {
|
||||
Some(status) => {
|
||||
finish_list.push(TaskListInfo {
|
||||
upid, upid_str, state: Some((endtime, status))
|
||||
upid, upid_str, state: Some(status)
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -320,7 +396,7 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<Vec<TaskListInfo>, E
|
||||
|
||||
task_list.sort_unstable_by(|b, a| { // lastest on top
|
||||
match (&a.state, &b.state) {
|
||||
(Some(s1), Some(s2)) => s1.0.cmp(&s2.0),
|
||||
(Some(s1), Some(s2)) => s1.cmp(&s2),
|
||||
(Some(_), None) => std::cmp::Ordering::Less,
|
||||
(None, Some(_)) => std::cmp::Ordering::Greater,
|
||||
                _ => a.upid.starttime.cmp(&b.upid.starttime),

@ -329,8 +405,8 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<Vec<TaskListInfo>, E

    let mut raw = String::new();
    for info in &task_list {
        if let Some((endtime, status)) = &info.state {
            raw.push_str(&format!("{} {:08X} {}\n", info.upid_str, endtime, status));
        if let Some(status) = &info.state {
            raw.push_str(&format!("{} {:08X} {}\n", info.upid_str, status.endtime(), status));
        } else {
            raw.push_str(&info.upid_str);
            raw.push('\n');

@ -394,10 +470,10 @@ impl Drop for WorkerTask {

impl WorkerTask {

    pub fn new(worker_type: &str, worker_id: Option<String>, username: &str, to_stdout: bool) -> Result<Arc<Self>, Error> {
    pub fn new(worker_type: &str, worker_id: Option<String>, userid: Userid, to_stdout: bool) -> Result<Arc<Self>, Error> {
        println!("register worker");

        let upid = UPID::new(worker_type, worker_id, username)?;
        let upid = UPID::new(worker_type, worker_id, userid)?;
        let task_id = upid.task_id;

        let mut path = std::path::PathBuf::from(PROXMOX_BACKUP_TASK_DIR);

@ -442,14 +518,14 @@ impl WorkerTask {
    pub fn spawn<F, T>(
        worker_type: &str,
        worker_id: Option<String>,
        username: &str,
        userid: Userid,
        to_stdout: bool,
        f: F,
    ) -> Result<String, Error>
        where F: Send + 'static + FnOnce(Arc<WorkerTask>) -> T,
              T: Send + 'static + Future<Output = Result<(), Error>>,
    {
        let worker = WorkerTask::new(worker_type, worker_id, username, to_stdout)?;
        let worker = WorkerTask::new(worker_type, worker_id, userid, to_stdout)?;
        let upid_str = worker.upid.to_string();
        let f = f(worker.clone());
        tokio::spawn(async move {

@ -464,7 +540,7 @@ impl WorkerTask {
    pub fn new_thread<F>(
        worker_type: &str,
        worker_id: Option<String>,
        username: &str,
        userid: Userid,
        to_stdout: bool,
        f: F,
    ) -> Result<String, Error>

@ -472,9 +548,7 @@ impl WorkerTask {
    {
        println!("register worker thread");

        let (p, c) = oneshot::channel::<()>();

        let worker = WorkerTask::new(worker_type, worker_id, username, to_stdout)?;
        let worker = WorkerTask::new(worker_type, worker_id, userid, to_stdout)?;
        let upid_str = worker.upid.to_string();

        let _child = std::thread::Builder::new().name(upid_str.clone()).spawn(move || {

@ -494,25 +568,30 @@ impl WorkerTask {
            };

            worker.log_result(&result);
            p.send(()).unwrap();
        });

        tokio::spawn(c.map(|_| ()));

        Ok(upid_str)
    }

    /// create state from self and a result
    pub fn create_state(&self, result: &Result<(), Error>) -> TaskState {
        let warn_count = self.data.lock().unwrap().warn_count;

        let endtime = Local::now().timestamp();

        if let Err(err) = result {
            TaskState::Error { message: err.to_string(), endtime }
        } else if warn_count > 0 {
            TaskState::Warning { count: warn_count, endtime }
        } else {
            TaskState::OK { endtime }
        }
    }

    /// Log task result, remove task from running list
    pub fn log_result(&self, result: &Result<(), Error>) {

        let warn_count = self.data.lock().unwrap().warn_count;
        if let Err(err) = result {
            self.log(&format!("TASK ERROR: {}", err));
        } else if warn_count > 0 {
            self.log(format!("TASK WARNINGS: {}", warn_count));
        } else {
            self.log("TASK OK");
        }
        let state = self.create_state(result);
        self.log(state.result_text());

        WORKER_TASK_LIST.lock().unwrap().remove(&self.upid.task_id);
        let _ = update_active_workers(None);

@ -583,4 +662,8 @@ impl WorkerTask {
        }
        rx
    }

    pub fn upid(&self) -> &UPID {
        &self.upid
    }
}
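For orientation, a call site for the new `Userid`-based `spawn` might look like this; a minimal sketch with an invented worker type, id, and log text, reusing `Userid::root_userid()` from the ticket tests later in this diff (assumed to be clonable):

```rust
// Hypothetical call site; worker_type, worker_id, and log text are invented.
let upid_str = WorkerTask::spawn(
    "example-task",
    Some(String::from("store1")),
    Userid::root_userid().clone(),   // Userid value instead of the old &str username
    false,                           // to_stdout
    |worker| async move {
        worker.log("doing work");
        Ok(())                       // Ok + zero warnings => TaskState::OK
    },
)?;
println!("started task {}", upid_str);
```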
85 src/tools.rs
@ -4,9 +4,9 @@

use std::any::Any;
use std::collections::HashMap;
use std::hash::BuildHasher;
use std::fs::{File, OpenOptions};
use std::fs::File;
use std::io::{self, BufRead, ErrorKind, Read};
use std::os::unix::io::{AsRawFd, RawFd};
use std::os::unix::io::RawFd;
use std::path::Path;
use std::time::Duration;
use std::time::{SystemTime, SystemTimeError, UNIX_EPOCH};

@ -31,7 +31,6 @@ pub mod format;
pub mod lru_cache;
pub mod runtime;
pub mod ticket;
pub mod timer;
pub mod statistics;
pub mod systemd;
pub mod nom;

@ -63,86 +62,6 @@ pub trait BufferedRead {
    fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error>;
}

/// Directly map a type into a binary buffer. This is mostly useful
/// for reading structured data from a byte stream (file). You need to
/// make sure that the buffer location does not change, so please
/// avoid vec resize while you use such map.
///
/// This function fails if the buffer is not large enough.
pub fn map_struct<T>(buffer: &[u8]) -> Result<&T, Error> {
    if buffer.len() < ::std::mem::size_of::<T>() {
        bail!("unable to map struct - buffer too small");
    }
    Ok(unsafe { &*(buffer.as_ptr() as *const T) })
}

/// Directly map a type into a mutable binary buffer. This is mostly
/// useful for writing structured data into a byte stream (file). You
/// need to make sure that the buffer location does not change, so
/// please avoid vec resize while you use such map.
///
/// This function fails if the buffer is not large enough.
pub fn map_struct_mut<T>(buffer: &mut [u8]) -> Result<&mut T, Error> {
    if buffer.len() < ::std::mem::size_of::<T>() {
        bail!("unable to map struct - buffer too small");
    }
    Ok(unsafe { &mut *(buffer.as_ptr() as *mut T) })
}
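Before their removal here, the mapping helpers were used along these lines; a minimal sketch with an invented `#[repr(C)]` layout (the functions return an error for short buffers):

```rust
// Invented example layout; any #[repr(C)] type works.
#[repr(C)]
struct ChunkHeader {
    magic: [u8; 8],
    size: u64,
}

let buffer = std::fs::read("chunk.bin")?;          // path is illustrative
let header: &ChunkHeader = map_struct(&buffer)?;   // bails if buffer < size_of::<ChunkHeader>()
```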
/// Create a file lock using fcntl. This function allows you to specify
/// a timeout if you want to avoid infinite blocking.
pub fn lock_file<F: AsRawFd>(
    file: &mut F,
    exclusive: bool,
    timeout: Option<Duration>,
) -> Result<(), Error> {
    let lockarg = if exclusive {
        nix::fcntl::FlockArg::LockExclusive
    } else {
        nix::fcntl::FlockArg::LockShared
    };

    let timeout = match timeout {
        None => {
            nix::fcntl::flock(file.as_raw_fd(), lockarg)?;
            return Ok(());
        }
        Some(t) => t,
    };

    // unblock the timeout signal temporarily
    let _sigblock_guard = timer::unblock_timeout_signal();

    // setup a timeout timer
    let mut timer = timer::Timer::create(
        timer::Clock::Realtime,
        timer::TimerEvent::ThisThreadSignal(timer::SIGTIMEOUT),
    )?;

    timer.arm(
        timer::TimerSpec::new()
            .value(Some(timeout))
            .interval(Some(Duration::from_millis(10))),
    )?;

    nix::fcntl::flock(file.as_raw_fd(), lockarg)?;
    Ok(())
}

/// Open or create a lock file (append mode). Then try to
/// acquire a lock using `lock_file()`.
pub fn open_file_locked<P: AsRef<Path>>(path: P, timeout: Duration) -> Result<File, Error> {
    let path = path.as_ref();
    let mut file = match OpenOptions::new().create(true).append(true).open(path) {
        Ok(file) => file,
        Err(err) => bail!("Unable to open lock {:?} - {}", path, err),
    };
    match lock_file(&mut file, true, Some(timeout)) {
        Ok(_) => Ok(file),
        Err(err) => bail!("Unable to acquire lock {:?} - {}", path, err),
    }
}

/// Split a file into equal sized chunks. The last chunk may be
/// smaller. Note: We cannot implement an `Iterator`, because iterators
/// cannot return a borrowed buffer ref (we want zero-copy)
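These locking helpers leave `tools.rs` in this diff; the same functionality lives on in the proxmox crate (`proxmox::tools::fs::lock_file` is used by `lock_dir_noblock` below). For reference, the usage pattern was (lock path invented):

```rust
use std::time::Duration;

// Wait at most 10 seconds for the flock instead of blocking forever.
let file = open_file_locked("/run/proxmox-backup/example.lck", Duration::new(10, 0))?;
// ... critical section; the lock is released when `file` is dropped/closed ...
```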
@ -133,7 +133,7 @@ impl DiskManage {
        })
    }

    /// Information about file system type and unsed device for a path
    /// Information about file system type and used device for a path
    ///
    /// Returns tuple (fs_type, device, mount_source)
    pub fn find_mounted_device(

@ -825,6 +825,10 @@ pub fn get_disks(
        };
    }

    if usage == DiskUsageType::Unused && disk.has_holders()? {
        usage = DiskUsageType::DeviceMapper;
    }

    let mut status = SmartStatus::Unknown;
    let mut wearout = None;
@ -8,7 +8,7 @@ use lazy_static::lazy_static;
lazy_static!{
    static ref LVM_UUIDS: HashSet<&'static str> = {
        let mut set = HashSet::new();
        set.insert("e6d6d379-f507-44c2-a23c-238f2a3df928");
        set.insert("e6d6d379-f507-44c2-a23c-238f2a3df928");
        set
    };
}
@ -155,7 +155,7 @@ pub fn get_smart_data(
    if let Some(list) = output["nvme_smart_health_information_log"].as_object() {
        for (name, value) in list {
            if name == "percentage_used" {
                // extract wearout from nvme text, allow for decimal values
                // extract wearout from nvme text, allow for decimal values
                if let Some(v) = value.as_f64() {
                    if v <= 100.0 {
                        wearout = Some(100.0 - v);
@ -10,8 +10,8 @@ use super::*;
lazy_static!{
    static ref ZFS_UUIDS: HashSet<&'static str> = {
        let mut set = HashSet::new();
        set.insert("6a898cc3-1dd2-11b2-99a6-080020736631"); // apple
        set.insert("516e7cba-6ecf-11d6-8ff8-00022d09712b"); // bsd
        set.insert("6a898cc3-1dd2-11b2-99a6-080020736631"); // apple
        set.insert("516e7cba-6ecf-11d6-8ff8-00022d09712b"); // bsd
        set
    };
}
@ -111,7 +111,7 @@ fn parse_zpool_list_item(i: &str) -> IResult<&str, ZFSPoolInfo> {
    Ok((i, stat))
}

/// Parse zpool list outout
/// Parse zpool list output
///
/// Note: This does not reveal any details on how the pool uses the devices, because
/// the zpool list output format is not really defined...
@ -53,7 +53,7 @@ fn parse_zpool_status_vdev(i: &str) -> IResult<&str, ZFSPoolVDevState> {

    let (i, vdev_name) = notspace1(i)?;

    if let Ok((n, _)) = preceded(multispace0, line_ending)(i) { // sepecial device
    if let Ok((n, _)) = preceded(multispace0, line_ending)(i) { // special device
        let vdev = ZFSPoolVDevState {
            name: vdev_name.to_string(),
            lvl: indent_level,

@ -67,6 +67,19 @@ fn parse_zpool_status_vdev(i: &str) -> IResult<&str, ZFSPoolVDevState> {
    }

    let (i, state) = preceded(multispace1, notspace1)(i)?;
    if let Ok((n, _)) = preceded(multispace0, line_ending)(i) { // spares
        let vdev = ZFSPoolVDevState {
            name: vdev_name.to_string(),
            lvl: indent_level,
            state: Some(state.to_string()),
            read: None,
            write: None,
            cksum: None,
            msg: None,
        };
        return Ok((n, vdev));
    }

    let (i, read) = preceded(multispace1, parse_u64)(i)?;
    let (i, write) = preceded(multispace1, parse_u64)(i)?;
    let (i, cksum) = preceded(multispace1, parse_u64)(i)?;

@ -465,3 +478,40 @@ errors: No known data errors

    Ok(())
}

#[test]
fn test_zpool_status_parser_spares() -> Result<(), Error> {

    let output = r###" pool: tank
 state: ONLINE
  scan: none requested
config:

        NAME            STATE     READ WRITE CKSUM
        tank            ONLINE       0     0     0
          mirror-0      ONLINE       0     0     0
            /dev/sda1   ONLINE       0     0     0
            /dev/sda2   ONLINE       0     0     0
          mirror-1      ONLINE       0     0     0
            /dev/sda3   ONLINE       0     0     0
            /dev/sda4   ONLINE       0     0     0
        logs
          /dev/sda5     ONLINE       0     0     0
        spares
          /dev/sdb        AVAIL
          /dev/sdc        AVAIL

errors: No known data errors
"###;

    let key_value_list = parse_zpool_status(&output)?;
    for (k, v) in key_value_list {
        println!("{} => {}", k, v);
        if k == "config" {
            let vdev_list = parse_zpool_status_config_tree(&v)?;
            let _tree = vdev_list_to_tree(&vdev_list);
        }
    }

    Ok(())
}
@ -80,6 +80,11 @@ impl From<usize> for HumanByte {
        HumanByte { b: v }
    }
}
impl From<u64> for HumanByte {
    fn from(v: u64) -> Self {
        HumanByte { b: v as usize }
    }
}

#[test]
fn correct_byte_convert() {
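With the added impl, 64-bit byte counters convert without a manual cast; a minimal sketch:

```rust
let a: HumanByte = 2048usize.into(); // existing impl
let b: HumanByte = 4096u64.into();   // new impl above (stores the value as usize)
```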
@ -7,10 +7,18 @@ use std::os::unix::io::{AsRawFd, RawFd};

use anyhow::{format_err, Error};
use nix::dir;
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;

use regex::Regex;

use proxmox::sys::error::SysError;

use crate::tools::borrow::Tied;

pub type DirLockGuard = Dir;

/// This wraps nix::dir::Entry with the parent directory's file descriptor.
pub struct ReadDirEntry {
    entry: dir::Entry,

@ -94,9 +102,6 @@ impl Iterator for ReadDir {
/// Create an iterator over sub directory entries.
/// This uses `openat` on `dirfd`, so `path` can be relative to that or an absolute path.
pub fn read_subdir<P: ?Sized + nix::NixPath>(dirfd: RawFd, path: &P) -> nix::Result<ReadDir> {
    use nix::fcntl::OFlag;
    use nix::sys::stat::Mode;

    let dir = Dir::openat(dirfd, path, OFlag::O_RDONLY, Mode::empty())?;
    let fd = dir.as_raw_fd();
    let iter = Tied::new(dir, |dir| {
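A minimal sketch of driving `read_subdir`, assuming the `ReadDir` iterator yields `Result<ReadDirEntry>` items (directory name invented, error handling abbreviated):

```rust
for entry in read_subdir(libc::AT_FDCWD, "some/dir")? {
    let entry = entry?; // ReadDirEntry: a nix::dir::Entry plus its parent fd
    // inspect `entry` here
}
```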
@ -259,3 +264,31 @@ impl Default for FSXAttr {
        }
    }
}

pub fn lock_dir_noblock(
    path: &std::path::Path,
    what: &str,
    would_block_msg: &str,
) -> Result<DirLockGuard, Error> {
    let mut handle = Dir::open(path, OFlag::O_RDONLY, Mode::empty())
        .map_err(|err| {
            format_err!("unable to open {} directory {:?} for locking - {}", what, path, err)
        })?;

    // acquire in non-blocking mode, no point in waiting here since other
    // backups could still take a very long time
    proxmox::tools::fs::lock_file(&mut handle, true, Some(std::time::Duration::from_nanos(0)))
        .map_err(|err| {
            format_err!(
                "unable to acquire lock on {} directory {:?} - {}", what, path,
                if err.would_block() {
                    String::from(would_block_msg)
                } else {
                    err.to_string()
                }
            )
        })?;

    Ok(handle)
}
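A plausible call site for the new helper (path and messages invented); the returned `Dir` acts as the lock guard:

```rust
let _guard: DirLockGuard = lock_dir_noblock(
    std::path::Path::new("/path/to/backup-group"),
    "backup group",
    "another backup is still running",
)?;
// the directory stays locked for as long as `_guard` is alive
```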
@ -83,6 +83,17 @@ pub fn reload_daemon() -> Result<(), Error> {
    Ok(())
}

pub fn disable_unit(unit: &str) -> Result<(), Error> {

    let mut command = std::process::Command::new("systemctl");
    command.arg("disable");
    command.arg(unit);

    crate::tools::run_command(command, None)?;

    Ok(())
}

pub fn enable_unit(unit: &str) -> Result<(), Error> {

    let mut command = std::process::Command::new("systemctl");
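Both helpers just shell out to systemctl; a call looks like this (unit name invented):

```rust
enable_unit("example.service")?;  // runs: systemctl enable example.service
disable_unit("example.service")?; // runs: systemctl disable example.service
```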
@ -301,6 +301,9 @@ mod test {
    const THURSDAY_00_00: i64 = make_test_time(0, 0, 0);
    const THURSDAY_15_00: i64 = make_test_time(0, 15, 0);

    const JUL_31_2020: i64 = 1596153600; // Friday, 2020-07-31 00:00:00
    const DEC_31_2020: i64 = 1609372800; // Thursday, 2020-12-31 00:00:00

    test_value("*:0", THURSDAY_00_00, THURSDAY_00_00 + HOUR)?;
    test_value("*:*", THURSDAY_00_00, THURSDAY_00_00 + MIN)?;
    test_value("*:*:*", THURSDAY_00_00, THURSDAY_00_00 + 1)?;

@ -317,6 +320,24 @@ mod test {
    test_value("sat", THURSDAY_00_00, THURSDAY_00_00 + 2*DAY)?;
    test_value("sun", THURSDAY_00_00, THURSDAY_00_00 + 3*DAY)?;

    // test month wrapping
    test_value("sat", JUL_31_2020, JUL_31_2020 + 1*DAY)?;
    test_value("sun", JUL_31_2020, JUL_31_2020 + 2*DAY)?;
    test_value("mon", JUL_31_2020, JUL_31_2020 + 3*DAY)?;
    test_value("tue", JUL_31_2020, JUL_31_2020 + 4*DAY)?;
    test_value("wed", JUL_31_2020, JUL_31_2020 + 5*DAY)?;
    test_value("thu", JUL_31_2020, JUL_31_2020 + 6*DAY)?;
    test_value("fri", JUL_31_2020, JUL_31_2020 + 7*DAY)?;

    // test year wrapping
    test_value("fri", DEC_31_2020, DEC_31_2020 + 1*DAY)?;
    test_value("sat", DEC_31_2020, DEC_31_2020 + 2*DAY)?;
    test_value("sun", DEC_31_2020, DEC_31_2020 + 3*DAY)?;
    test_value("mon", DEC_31_2020, DEC_31_2020 + 4*DAY)?;
    test_value("tue", DEC_31_2020, DEC_31_2020 + 5*DAY)?;
    test_value("wed", DEC_31_2020, DEC_31_2020 + 6*DAY)?;
    test_value("thu", DEC_31_2020, DEC_31_2020 + 7*DAY)?;

    test_value("daily", THURSDAY_00_00, THURSDAY_00_00 + DAY)?;
    test_value("daily", THURSDAY_00_00+1, THURSDAY_00_00 + DAY)?;
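The wrap tests lean on plain epoch arithmetic: `JUL_31_2020` is a Friday, so the next Saturday is exactly one day later and already falls into August. A self-contained check of that arithmetic (assuming the module's `DAY` is 86400 seconds):

```rust
const DAY: i64 = 24 * 3600;
const JUL_31_2020: i64 = 1596153600;       // Friday, 2020-07-31 00:00:00
assert_eq!(JUL_31_2020 + DAY, 1596240000); // Saturday, 2020-08-01 00:00:00
```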
@ -123,7 +123,6 @@ impl TmEditor {
            if self.t.tm_mday < days_in_mon { break; }
            // Wrap one month
            self.t.tm_mday -= days_in_mon;
            self.t.tm_wday += 7 - (days_in_mon % 7);
            self.t.tm_mon += 1;
            self.changes.insert(TMChanges::MDAY|TMChanges::WDAY|TMChanges::MON);
        }
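The dropped weekday adjustment relied on a small modular identity: adding `7 - (days_in_mon % 7)` shifts `tm_wday` by `-days_in_mon` modulo 7 while keeping the intermediate value non-negative. A standalone check of the identity (not the `TmEditor` API):

```rust
for wday in 0..7i32 {
    for days_in_mon in 28..=31i32 {
        assert_eq!(
            (wday + 7 - days_in_mon % 7) % 7,   // removed adjustment, reduced mod 7
            (wday - days_in_mon).rem_euclid(7), // plain subtraction, normalized
        );
    }
}
```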
@ -1,149 +1,321 @@

//! Generate and verify Authentication tickets

use anyhow::{bail, Error};
use base64;
use std::borrow::Cow;
use std::io;
use std::marker::PhantomData;

use openssl::pkey::{PKey, Public, Private};
use openssl::sign::{Signer, Verifier};
use anyhow::{bail, format_err, Error};
use openssl::hash::MessageDigest;
use openssl::pkey::{HasPublic, PKey, Private};
use openssl::sign::{Signer, Verifier};
use percent_encoding::{percent_decode_str, percent_encode, AsciiSet};

use crate::api2::types::Userid;
use crate::tools::epoch_now_u64;

pub const TICKET_LIFETIME: i64 = 3600*2; // 2 hours
pub const TICKET_LIFETIME: i64 = 3600 * 2; // 2 hours

const TERM_PREFIX: &str = "PBSTERM";
pub const TERM_PREFIX: &str = "PBSTERM";

pub fn assemble_term_ticket(
    keypair: &PKey<Private>,
    username: &str,
    path: &str,
    port: u16,
) -> Result<String, Error> {
    assemble_rsa_ticket(
        keypair,
        TERM_PREFIX,
        None,
        Some(&format!("{}{}{}", username, path, port)),
    )
/// Stringified ticket data must not contain colons...
const TICKET_ASCIISET: &AsciiSet = &percent_encoding::CONTROLS.add(b':');

/// An empty type implementing [`ToString`] and [`FromStr`](std::str::FromStr), used for tickets
/// with no data.
pub struct Empty;

impl ToString for Empty {
    fn to_string(&self) -> String {
        String::new()
    }
}

pub fn verify_term_ticket(
    keypair: &PKey<Public>,
    username: &str,
    path: &str,
    port: u16,
    ticket: &str,
) -> Result<(i64, Option<String>), Error> {
    verify_rsa_ticket(
        keypair,
        TERM_PREFIX,
        ticket,
        Some(&format!("{}{}{}", username, path, port)),
        -300,
        TICKET_LIFETIME,
    )
}
impl std::str::FromStr for Empty {
    type Err = Error;

pub fn assemble_rsa_ticket(
    keypair: &PKey<Private>,
    prefix: &str,
    data: Option<&str>,
    secret_data: Option<&str>,
) -> Result<String, Error> {

    let epoch = epoch_now_u64()?;

    let timestamp = format!("{:08X}", epoch);

    let mut plain = prefix.to_owned();
    plain.push(':');

    if let Some(data) = data {
        plain.push_str(data);
        plain.push(':');
    }

    plain.push_str(&timestamp);

    let mut full = plain.clone();
    if let Some(secret) = secret_data {
        full.push(':');
        full.push_str(secret);
    }

    let mut signer = Signer::new(MessageDigest::sha256(), &keypair)?;
    signer.update(full.as_bytes())?;
    let sign = signer.sign_to_vec()?;

    let sign_b64 = base64::encode_config(&sign, base64::STANDARD_NO_PAD);

    Ok(format!("{}::{}", plain, sign_b64))
}

pub fn verify_rsa_ticket(
    keypair: &PKey<Public>,
    prefix: &str,
    ticket: &str,
    secret_data: Option<&str>,
    min_age: i64,
    max_age: i64,
) -> Result<(i64, Option<String>), Error> {

    use std::collections::VecDeque;

    let mut parts: VecDeque<&str> = ticket.split(':').collect();

    match parts.pop_front() {
        Some(text) => if text != prefix { bail!("ticket with invalid prefix"); }
        None => bail!("ticket without prefix"),
    }

    let sign_b64 = match parts.pop_back() {
        Some(v) => v,
        None => bail!("ticket without signature"),
    };

    match parts.pop_back() {
        Some(text) => if text != "" { bail!("ticket with invalid signature separator"); }
        None => bail!("ticket without signature separator"),
    }

    let mut data = None;

    let mut full = match parts.len() {
        2 => {
            data = Some(parts[0].to_owned());
            format!("{}:{}:{}", prefix, parts[0], parts[1])
    fn from_str(s: &str) -> Result<Self, Error> {
        if !s.is_empty() {
            bail!("unexpected ticket data, should be empty");
        }
        1 => format!("{}:{}", prefix, parts[0]),
        _ => bail!("ticket with invalid number of components"),
    };

    if let Some(secret) = secret_data {
        full.push(':');
        full.push_str(secret);
        Ok(Empty)
    }
}

/// An API ticket consists of a ticket type (prefix), type-dependent data, optional additional
/// authentication data, a timestamp and a signature. We store these values in the form
/// `<prefix>:<stringified data>:<timestamp>::<signature>`.
///
/// The signature is made over the string consisting of prefix, data, timestamp and aad joined
/// together by colons. If there is no additional authentication data it will be skipped together
/// with the colon separating it from the timestamp.
pub struct Ticket<T>
where
    T: ToString + std::str::FromStr,
{
    prefix: Cow<'static, str>,
    data: String,
    time: i64,
    signature: Option<Vec<u8>>,
    _type_marker: PhantomData<T>,
}

impl<T> Ticket<T>
where
    T: ToString + std::str::FromStr,
    <T as std::str::FromStr>::Err: std::fmt::Debug,
{
    /// Prepare a new ticket for signing.
    pub fn new(prefix: &'static str, data: &T) -> Result<Self, Error> {
        Ok(Self {
            prefix: Cow::Borrowed(prefix),
            data: data.to_string(),
            time: epoch_now_u64()? as i64,
            signature: None,
            _type_marker: PhantomData,
        })
    }

    /// Get the ticket prefix.
    pub fn prefix(&self) -> &str {
        &self.prefix
    }

    /// Get the ticket's time stamp in seconds since the unix epoch.
    pub fn time(&self) -> i64 {
        self.time
    }

    /// Get the raw string data contained in the ticket. The `verify` method will call `parse()`
    /// on this in the end, so using this method directly is discouraged as it does not verify the
    /// signature.
    pub fn raw_data(&self) -> &str {
        &self.data
    }

    /// Serialize the ticket into a writer.
    ///
    /// This only writes a string. We use `io::write` instead of `fmt::Write` so we can reuse the
    /// same function for openssl's `Verify`, which only implements `io::Write`.
    fn write_data(&self, f: &mut dyn io::Write) -> Result<(), Error> {
        write!(
            f,
            "{}:{}:{:08X}",
            percent_encode(self.prefix.as_bytes(), &TICKET_ASCIISET),
            percent_encode(self.data.as_bytes(), &TICKET_ASCIISET),
            self.time,
        )
        .map_err(Error::from)
    }

    /// Write additional authentication data to the verifier.
    fn write_aad(f: &mut dyn io::Write, aad: Option<&str>) -> Result<(), Error> {
        if let Some(aad) = aad {
            write!(f, ":{}", percent_encode(aad.as_bytes(), &TICKET_ASCIISET))?;
        }
        Ok(())
    }

    /// Change the ticket's time, used mostly for testing.
    #[cfg(test)]
    fn change_time(&mut self, time: i64) -> &mut Self {
        self.time = time;
        self
    }

    /// Sign the ticket.
    pub fn sign(&mut self, keypair: &PKey<Private>, aad: Option<&str>) -> Result<String, Error> {
        let mut output = Vec::<u8>::new();
        let mut signer = Signer::new(MessageDigest::sha256(), &keypair)
            .map_err(|err| format_err!("openssl error creating signer for ticket: {}", err))?;

        self.write_data(&mut output)
            .map_err(|err| format_err!("error creating ticket: {}", err))?;

        signer
            .update(&output)
            .map_err(Error::from)
            .and_then(|()| Self::write_aad(&mut signer, aad))
            .map_err(|err| format_err!("error signing ticket: {}", err))?;

        // See `Self::write_data` for why this is safe
        let mut output = unsafe { String::from_utf8_unchecked(output) };

        let signature = signer
            .sign_to_vec()
            .map_err(|err| format_err!("error finishing ticket signature: {}", err))?;

        use std::fmt::Write;
        write!(
            &mut output,
            "::{}",
            base64::encode_config(&signature, base64::STANDARD_NO_PAD),
        )?;

        self.signature = Some(signature);

        Ok(output)
    }

    /// `verify` with an additional time frame parameter, not usually required since we always use
    /// the same time frame.
    pub fn verify_with_time_frame<P: HasPublic>(
        &self,
        keypair: &PKey<P>,
        prefix: &str,
        aad: Option<&str>,
        time_frame: std::ops::Range<i64>,
    ) -> Result<T, Error> {
        if self.prefix != prefix {
            bail!("ticket with invalid prefix");
        }

        let signature = match self.signature.as_ref() {
            Some(sig) => sig,
            None => bail!("invalid ticket without signature"),
        };

        let age = epoch_now_u64()? as i64 - self.time;
        if age < time_frame.start {
            bail!("invalid ticket - timestamp newer than expected");
        }
        if age > time_frame.end {
            bail!("invalid ticket - expired");
        }

        let mut verifier = Verifier::new(MessageDigest::sha256(), &keypair)?;

        self.write_data(&mut verifier)
            .and_then(|()| Self::write_aad(&mut verifier, aad))
            .map_err(|err| format_err!("error verifying ticket: {}", err))?;

        let is_valid: bool = verifier
            .verify(&signature)
            .map_err(|err| format_err!("openssl error verifying ticket: {}", err))?;

        if !is_valid {
            bail!("ticket with invalid signature");
        }

        self.data
            .parse()
            .map_err(|err| format_err!("failed to parse contained ticket data: {:?}", err))
    }

    /// Verify the ticket with the provided key pair. The additional authentication data needs to
    /// match the one used when generating the ticket, and the ticket's age must fall into the time
    /// frame.
    pub fn verify<P: HasPublic>(
        &self,
        keypair: &PKey<P>,
        prefix: &str,
        aad: Option<&str>,
    ) -> Result<T, Error> {
        self.verify_with_time_frame(keypair, prefix, aad, -300..TICKET_LIFETIME)
    }

    /// Parse a ticket string.
    pub fn parse(ticket: &str) -> Result<Self, Error> {
        let mut parts = ticket.splitn(4, ':');

        let prefix = percent_decode_str(
            parts
                .next()
                .ok_or_else(|| format_err!("ticket without prefix"))?,
        )
        .decode_utf8()
        .map_err(|err| format_err!("invalid ticket, error decoding prefix: {}", err))?;

        let data = percent_decode_str(
            parts
                .next()
                .ok_or_else(|| format_err!("ticket without data"))?,
        )
        .decode_utf8()
        .map_err(|err| format_err!("invalid ticket, error decoding data: {}", err))?;

        let time = i64::from_str_radix(
            parts
                .next()
                .ok_or_else(|| format_err!("ticket without timestamp"))?,
            16,
        )
        .map_err(|err| format_err!("ticket with bad timestamp: {}", err))?;

        let remainder = parts
            .next()
            .ok_or_else(|| format_err!("ticket without signature"))?;
        // <prefix>:<data>:<time>::signature - the 4th `.next()` swallows the first colon in the
        // double-colon!
        if !remainder.starts_with(':') {
            bail!("ticket without signature separator");
        }
        let signature = base64::decode_config(&remainder[1..], base64::STANDARD_NO_PAD)
            .map_err(|err| format_err!("ticket with bad signature: {}", err))?;

        Ok(Self {
            prefix: Cow::Owned(prefix.into_owned()),
            data: data.into_owned(),
            time,
            signature: Some(signature),
            _type_marker: PhantomData,
        })
    }
}

pub fn term_aad(userid: &Userid, path: &str, port: u16) -> String {
    format!("{}{}{}", userid, path, port)
}

#[cfg(test)]
mod test {
    use openssl::pkey::{PKey, Private};

    use super::Ticket;
    use crate::api2::types::Userid;
    use crate::tools::epoch_now_u64;

    fn simple_test<F>(key: &PKey<Private>, aad: Option<&str>, modify: F)
    where
        F: FnOnce(&mut Ticket<Userid>) -> bool,
    {
        let userid = Userid::root_userid();

        let mut ticket = Ticket::new("PREFIX", userid).expect("failed to create Ticket struct");
        let should_work = modify(&mut ticket);
        let ticket = ticket.sign(key, aad).expect("failed to sign test ticket");

        let parsed =
            Ticket::<Userid>::parse(&ticket).expect("failed to parse generated test ticket");
        if should_work {
            let check: Userid = parsed
                .verify(key, "PREFIX", aad)
                .expect("failed to verify test ticket");

            assert_eq!(*userid, check);
        } else {
            parsed
                .verify(key, "PREFIX", aad)
                .expect_err("failed to verify test ticket");
        }
    }

    #[test]
    fn test_tickets() {
        // first we need keys, for testing we use small keys for speed...
        let rsa =
            openssl::rsa::Rsa::generate(1024).expect("failed to generate RSA key for testing");
        let key = openssl::pkey::PKey::<openssl::pkey::Private>::from_rsa(rsa)
            .expect("failed to create PKey for RSA key");

        simple_test(&key, Some("secret aad data"), |_| true);
        simple_test(&key, None, |_| true);
        simple_test(&key, None, |t| {
            t.change_time(0);
            false
        });
        simple_test(&key, None, |t| {
            t.change_time(epoch_now_u64().unwrap() as i64 + 0x1000_0000);
            false
        });
    }

    let sign = base64::decode_config(sign_b64, base64::STANDARD_NO_PAD)?;

    let mut verifier = Verifier::new(MessageDigest::sha256(), &keypair)?;
    verifier.update(full.as_bytes())?;

    if !verifier.verify(&sign)? {
        bail!("ticket with invalid signature");
    }

    let timestamp = i64::from_str_radix(parts.pop_back().unwrap(), 16)?;
    let now = epoch_now_u64()? as i64;

    let age = now - timestamp;
    if age < min_age {
        bail!("invalid ticket - timestamp newer than expected.");
    }

    if age > max_age {
        bail!("invalid ticket - timestamp too old.");
    }

    Ok((age, data))
}
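Putting the new `Ticket` API together, a round trip looks roughly like this; a sketch modeled on `simple_test`/`test_tickets` above (key size and prefix taken from the tests, the printed form abbreviated):

```rust
let rsa = openssl::rsa::Rsa::generate(1024)?;
let key = openssl::pkey::PKey::from_rsa(rsa)?;

let mut ticket = Ticket::new("PREFIX", Userid::root_userid())?;
let signed: String = ticket.sign(&key, None)?;
// signed now looks like "PREFIX:root@pam:5F4C1234::<base64 signature>"

let userid: Userid = Ticket::<Userid>::parse(&signed)?
    .verify(&key, "PREFIX", None)?; // a Private key also satisfies HasPublic
```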
@ -1,370 +0,0 @@

//! POSIX per-process timer interface.
//!
//! This module provides a wrapper around POSIX timers (see `timer_create(2)`) and utilities to
//! setup thread-targeted signaling and signal masks.

use std::mem::MaybeUninit;
use std::time::Duration;
use std::{io, mem};

use libc::{c_int, clockid_t, pid_t};

/// Timers can use various clocks. See `timer_create(2)`.
pub enum Clock {
    /// Use `CLOCK_REALTIME` for the timer.
    Realtime,
    /// Use `CLOCK_MONOTONIC` for the timer.
    Monotonic,
}

/// Strong thread-id type to prevent accidental conversion of pid_t.
pub struct Tid(pid_t);

/// Convenience helper to get the current thread ID suitable to pass to a
/// `TimerEvent::ThreadSignal` entry.
pub fn gettid() -> Tid {
    Tid(unsafe { libc::syscall(libc::SYS_gettid) } as pid_t)
}

/// Strong signal type which is more advanced than nix::sys::signal::Signal as
/// it doesn't prevent you from using signals that the nix crate is unaware
/// of...!
pub struct Signal(c_int);

impl Into<c_int> for Signal {
    fn into(self) -> c_int {
        self.0
    }
}

impl From<c_int> for Signal {
    fn from(v: c_int) -> Signal {
        Signal(v)
    }
}

/// When instantiating a Timer, it needs to have an event type associated with
/// it to be fired whenever the timer expires. Most of the time this will be a
/// `Signal`. Sometimes we need to be able to send signals to specific threads.
pub enum TimerEvent {
    /// This will act like passing `NULL` to `timer_create()`, which maps to
    /// using the same as `Signal(SIGALRM)`.
    None,

    /// When the timer expires, send a specific signal to the current process.
    Signal(Signal),

    /// When the timer expires, send a specific signal to a specific thread.
    ThreadSignal(Tid, Signal),

    /// Convenience value to send a signal to the current thread. This is
    /// equivalent to using `ThreadSignal(gettid(), signal)`.
    ThisThreadSignal(Signal),
}

// timer_t is a pointer type, so we create a strongly typed internal handle
// type for it
#[repr(C)]
struct InternalTimerT(u32);
type TimerT = *mut InternalTimerT;

// These wrappers are defined in -lrt.
#[link(name = "rt")]
extern "C" {
    fn timer_create(clockid: clockid_t, evp: *mut libc::sigevent, timer: *mut TimerT) -> c_int;
    fn timer_delete(timer: TimerT) -> c_int;
    fn timer_settime(
        timerid: TimerT,
        flags: c_int,
        new_value: *const libc::itimerspec,
        old_value: *mut libc::itimerspec,
    ) -> c_int;
}

/// Represents a POSIX per-process timer as created via `timer_create(2)`.
pub struct Timer {
    timer: TimerT,
}

/// Timer specification used to arm a `Timer`.
#[derive(Default)]
pub struct TimerSpec {
    /// The timeout to the next timer event.
    pub value: Option<Duration>,

    /// When a timer expires, it may be automatically rearmed with another
    /// timeout. This will keep happening until this is explicitly disabled
    /// or the timer deleted.
    pub interval: Option<Duration>,
}

// Helpers to convert between libc::timespec and Option<Duration>
fn opt_duration_to_timespec(v: Option<Duration>) -> libc::timespec {
    match v {
        None => libc::timespec {
            tv_sec: 0,
            tv_nsec: 0,
        },
        Some(value) => libc::timespec {
            tv_sec: value.as_secs() as i64,
            tv_nsec: value.subsec_nanos() as i64,
        },
    }
}

fn timespec_to_opt_duration(v: libc::timespec) -> Option<Duration> {
    if v.tv_sec == 0 && v.tv_nsec == 0 {
        None
    } else {
        Some(Duration::new(v.tv_sec as u64, v.tv_nsec as u32))
    }
}

impl TimerSpec {
    // Helpers to convert between TimerSpec and libc::itimerspec
    fn to_itimerspec(&self) -> libc::itimerspec {
        libc::itimerspec {
            it_value: opt_duration_to_timespec(self.value),
            it_interval: opt_duration_to_timespec(self.interval),
        }
    }

    fn from_itimerspec(ts: libc::itimerspec) -> Self {
        TimerSpec {
            value: timespec_to_opt_duration(ts.it_value),
            interval: timespec_to_opt_duration(ts.it_interval),
        }
    }

    /// Create an empty timer specification representing a disabled timer.
    pub fn new() -> Self {
        TimerSpec {
            value: None,
            interval: None,
        }
    }

    /// Change the specification to have a specific value.
    pub fn value(self, value: Option<Duration>) -> Self {
        TimerSpec {
            value,
            interval: self.interval,
        }
    }

    /// Change the specification to have a specific interval.
    pub fn interval(self, interval: Option<Duration>) -> Self {
        TimerSpec {
            value: self.value,
            interval,
        }
    }
}

impl Timer {
    /// Create a Timer object governing a POSIX timer.
    pub fn create(clock: Clock, event: TimerEvent) -> io::Result<Timer> {
        // Map from our clock type to the libc id
        let clkid = match clock {
            Clock::Realtime => libc::CLOCK_REALTIME,
            Clock::Monotonic => libc::CLOCK_MONOTONIC,
        } as clockid_t;

        // Map the TimerEvent to libc::sigevent
        let mut ev: libc::sigevent = unsafe { mem::zeroed() };
        match event {
            TimerEvent::None => ev.sigev_notify = libc::SIGEV_NONE,
            TimerEvent::Signal(signo) => {
                ev.sigev_signo = signo.0;
                ev.sigev_notify = libc::SIGEV_SIGNAL;
            }
            TimerEvent::ThreadSignal(tid, signo) => {
                ev.sigev_signo = signo.0;
                ev.sigev_notify = libc::SIGEV_THREAD_ID;
                ev.sigev_notify_thread_id = tid.0;
            }
            TimerEvent::ThisThreadSignal(signo) => {
                ev.sigev_signo = signo.0;
                ev.sigev_notify = libc::SIGEV_THREAD_ID;
                ev.sigev_notify_thread_id = gettid().0;
            }
        }

        // Create the timer
        let mut timer: TimerT = unsafe { mem::zeroed() };
        let rc = unsafe { timer_create(clkid, &mut ev, &mut timer) };
        if rc != 0 {
            Err(io::Error::last_os_error())
        } else {
            Ok(Timer { timer })
        }
    }

    /// Arm a timer. This returns the previous timer specification.
    pub fn arm(&mut self, spec: TimerSpec) -> io::Result<TimerSpec> {
        let newspec = spec.to_itimerspec();
        let mut oldspec = MaybeUninit::<libc::itimerspec>::uninit();

        let rc = unsafe { timer_settime(self.timer, 0, &newspec, &mut *oldspec.as_mut_ptr()) };
        if rc != 0 {
            return Err(io::Error::last_os_error());
        }

        Ok(TimerSpec::from_itimerspec(unsafe { oldspec.assume_init() }))
    }
}

impl Drop for Timer {
    fn drop(&mut self) {
        unsafe {
            timer_delete(self.timer);
        }
    }
}

/// This is the signal number we use in our timeout implementations. We expect
/// the signal handler for this signal to never be replaced by some other
/// library. If this does happen, we need to find another signal. There should
/// be plenty.
/// Currently this is SIGRTMIN+4, the 5th real-time signal. glibc reserves the
/// first two for pthread internals.
pub const SIGTIMEOUT: Signal = Signal(32 + 4);

// Our timeout handler does exactly nothing. We only need it to interrupt
// system calls.
extern "C" fn sig_timeout_handler(_: c_int) {}

// See setup_timeout_handler().
fn do_setup_timeout_handler() -> io::Result<()> {
    // Unfortunately nix::sys::signal::Signal cannot represent real time
    // signals, so we need to use libc instead...
    //
    // This WOULD be a nicer impl though:
    //nix::sys::signal::sigaction(
    //    SIGTIMEOUT,
    //    nix::sys::signal::SigAction::new(
    //        nix::sys::signal::SigHandler::Handler(sig_timeout_handler),
    //        nix::sys::signal::SaFlags::empty(),
    //        nix::sys::signal::SigSet::all()))
    //    .map(|_|())

    unsafe {
        let mut sa_mask = MaybeUninit::<libc::sigset_t>::uninit();
        if libc::sigemptyset(&mut *sa_mask.as_mut_ptr()) != 0
            || libc::sigaddset(&mut *sa_mask.as_mut_ptr(), SIGTIMEOUT.0) != 0
        {
            return Err(io::Error::last_os_error());
        }

        let sa = libc::sigaction {
            sa_sigaction:
                // libc::sigaction uses `usize` for the function pointer...
                sig_timeout_handler as *const extern "C" fn(i32) as usize,
            sa_mask: sa_mask.assume_init(),
            sa_flags: 0,
            sa_restorer: None,
        };
        if libc::sigaction(SIGTIMEOUT.0, &sa, std::ptr::null_mut()) != 0 {
            return Err(io::Error::last_os_error());
        }
    }
    Ok(())
}

// The first time we unblock SIGTIMEOUT should cause appropriate initialization:
static SETUP_TIMEOUT_HANDLER: std::sync::Once = std::sync::Once::new();

/// Setup our timeout-signal workflow. This establishes the signal handler for
/// our `SIGTIMEOUT` and should be called once during initialization.
#[inline]
pub fn setup_timeout_handler() {
    SETUP_TIMEOUT_HANDLER.call_once(|| {
        // We unwrap here.
        // If setting up this handler fails you have other problems already,
        // plus, if setting up fails you can't *use* it either, so everything
        // goes to die.
        do_setup_timeout_handler().unwrap();
    });
}

/// This guards the state of the timeout signal: We want it blocked usually.
pub struct TimeoutBlockGuard(bool);
impl Drop for TimeoutBlockGuard {
    fn drop(&mut self) {
        if self.0 {
            block_timeout_signal();
        } else {
            unblock_timeout_signal().forget();
        }
    }
}

impl TimeoutBlockGuard {
    /// Convenience helper to "forget" to restore the signal block mask.
    #[inline(always)]
    pub fn forget(self) {
        std::mem::forget(self);
    }

    /// Convenience helper to trigger the guard behavior immediately.
    #[inline(always)]
    pub fn trigger(self) {
        std::mem::drop(self); // be explicit here...
    }
}

/// Unblock the timeout signal for the current thread. By default we block the
/// signal; this behavior should be restored when done using timeouts, therefore this
/// returns a guard:
#[inline(always)]
pub fn unblock_timeout_signal() -> TimeoutBlockGuard {
    // This calls std::sync::Once:
    setup_timeout_handler();
    //let mut set = nix::sys::signal::SigSet::empty();
    //set.add(SIGTIMEOUT.0);
    //set.thread_unblock()?;
    //Ok(TimeoutBlockGuard{})
    // Again, nix crate and its signal limitations...

    // NOTE:
    // sigsetops(3) and pthread_sigmask(3) can only fail if invalid memory is
    // passed to the kernel, or signal numbers are "invalid", since we know
    // neither is the case we will panic on error...
    let was_blocked = unsafe {
        let mut mask = MaybeUninit::<libc::sigset_t>::uninit();
        let mut oldset = MaybeUninit::<libc::sigset_t>::uninit();
        if libc::sigemptyset(&mut *mask.as_mut_ptr()) != 0
            || libc::sigaddset(&mut *mask.as_mut_ptr(), SIGTIMEOUT.0) != 0
            || libc::pthread_sigmask(
                libc::SIG_UNBLOCK,
                &mask.assume_init(),
                &mut *oldset.as_mut_ptr(),
            ) != 0
        {
            panic!("Impossibly failed to unblock SIGTIMEOUT");
            //return Err(io::Error::last_os_error());
        }

        libc::sigismember(&oldset.assume_init(), SIGTIMEOUT.0) == 1
    };
    TimeoutBlockGuard(was_blocked)
}

/// Block the timeout signal for the current thread. This is the default.
#[inline(always)]
pub fn block_timeout_signal() {
    //let mut set = nix::sys::signal::SigSet::empty();
    //set.add(SIGTIMEOUT);
    //set.thread_block()
    unsafe {
        let mut mask = MaybeUninit::<libc::sigset_t>::uninit();
        if libc::sigemptyset(&mut *mask.as_mut_ptr()) != 0
            || libc::sigaddset(&mut *mask.as_mut_ptr(), SIGTIMEOUT.0) != 0
            || libc::pthread_sigmask(libc::SIG_BLOCK, &mask.assume_init(), std::ptr::null_mut())
                != 0
        {
            panic!("Impossibly failed to block SIGTIMEOUT");
            //return Err(io::Error::last_os_error());
        }
    }
}
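For reference, the deleted module was driven exactly as in the `lock_file()` implementation removed from `src/tools.rs` earlier in this diff; the pattern was:

```rust
use std::time::Duration;

// Temporarily allow SIGTIMEOUT for this thread (restored when the guard drops).
let _sigblock_guard = unblock_timeout_signal();

// Fires after 5s, then re-fires every 10ms, so a raced syscall cannot block forever.
let mut timer = Timer::create(
    Clock::Realtime,
    TimerEvent::ThisThreadSignal(SIGTIMEOUT),
)?;
timer.arm(
    TimerSpec::new()
        .value(Some(Duration::from_secs(5)))
        .interval(Some(Duration::from_millis(10))),
)?;

// A blocking syscall issued here is interrupted with EINTR once the timer fires.
```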
Some files were not shown because too many files have changed in this diff.