Compare commits
124 Commits
SHA1
beaa683a52
33a88dafb9
224c65f8de
f2b4b4b9fe
ea9e559fc4
0cf14984cc
7d07b73def
3d3670d786
14291179ce
e744de0eb0
98b1733760
fdac28fcec
653e2031d2
01ca99da2d
1c2f842a98
a4d1675513
2ab5acac5a
27fde64794
fa3f0584bb
d12720c796
a4e86972a4
3a3af6e2b6
482409641f
9688f6de0f
5b32820e93
f40b4fb05a
6e1deb158a
50ec1a8712
a74b026baa
7e42ccdaf2
e713ee5c56
ec5f9d3525
d0463b67ca
2ff4c2cd5f
c3b090ac8a
c47e294ea7
25455bd06d
c1c4a18f48
91f5594c08
86f6f74114
13d9fe3a6c
41e4388005
06a94edcf6
ef496e2c20
113c9b5981
956295cefe
a26c27c8e6
0c1c492d48
255ed62166
b96b11cdb7
faa8e6948a
8314ca9c10
538c2b6dcf
e9b44bec01
65418a0763
aef4976801
295d4f4116
c47a900ceb
1b1110581a
eb13d9151a
449e4a66fe
217c22c754
ba5b8a3e76
ac5e9e770b
b25deec0be
cdf1da2872
3cfc56f5c2
37e53b4c07
77d634710e
5c5181a252
67042466e8
757d0ccc76
4a55fa87d5
032cd1b862
ec2434fe3c
34389132d9
78ee20d72d
601e42ac35
e1897b363b
cf063c1973
f58233a73a
d257c2ecbd
e4ee7b7ac8
1f0d23f792
bfcef26a99
ec01eeadc6
660a34892d
d86034afec
62593aba1e
0eaef8eb84
e39974afbf
dde18bbb85
a40e1b0e8b
a0eb0cd372
41067870c6
33a87bc39a
bed3e15f16
c687da9e8e
be30e7d269
106603c58f
7ba2c1c386
4327a8462a
e193544b8e
323b2f3dd6
7884e7ef4f
fae11693f0
22231524e2
9634ca07db
62f6a7e3d9
86443141b5
f6e964b96e
c8bed1b4d7
a3970d6c1e
cc83c13660
bf7e2a4648
e284073e4a
3ec99affc8
a9649ddc44
4f9096a211
c3a4b5e2e1
7957fabff2
20a4e4e252
2774566b03
4459ffe30e
.gitignore (1 change)
@@ -3,3 +3,4 @@ local.mak
 **/*.rs.bk
 /etc/proxmox-backup.service
 /etc/proxmox-backup-proxy.service
+build/
Cargo.toml (15 changes)
@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.8.2"
+version = "0.8.9"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
@@ -14,6 +14,7 @@ name = "proxmox_backup"
 path = "src/lib.rs"
 
 [dependencies]
+apt-pkg-native = "0.3.1" # custom patched version
 base64 = "0.12"
 bitflags = "1.2.1"
 bytes = "0.5"
@@ -37,12 +38,12 @@ pam = "0.7"
 pam-sys = "0.5"
 percent-encoding = "2.1"
 pin-utils = "0.1.0"
-pathpatterns = "0.1.1"
+pathpatterns = "0.1.2"
-proxmox = { version = "0.1.42", features = [ "sortable-macro", "api-macro" ] }
+proxmox = { version = "0.2.1", features = [ "sortable-macro", "api-macro", "websocket" ] }
 #proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
-#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro" ] }
+#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
 proxmox-fuse = "0.1.0"
-pxar = { version = "0.2.0", features = [ "tokio-io", "futures-io" ] }
+pxar = { version = "0.2.1", features = [ "tokio-io", "futures-io" ] }
 #pxar = { path = "../pxar", features = [ "tokio-io", "futures-io" ] }
 regex = "1.2"
 rustyline = "6"
@@ -50,11 +51,11 @@ serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 siphasher = "0.3"
 syslog = "4.0"
-tokio = { version = "0.2.9", features = [ "blocking", "fs", "io-util", "macros", "rt-threaded", "signal", "stream", "tcp", "time", "uds" ] }
+tokio = { version = "0.2.9", features = [ "blocking", "fs", "dns", "io-util", "macros", "process", "rt-threaded", "signal", "stream", "tcp", "time", "uds" ] }
 tokio-openssl = "0.4.0"
 tokio-util = { version = "0.3", features = [ "codec" ] }
 tower-service = "0.3.0"
-udev = "0.3"
+udev = ">= 0.3, <0.5"
 url = "2.1"
 #valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
 walkdir = "2"
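The new tokio features line up with code later in this compare: "process" enables tokio::process::Command, which the termproxy endpoint in src/api2/node.rs uses to spawn its child process. A minimal standalone sketch of that API, assuming tokio 0.2 with the "process", "macros" and "rt-threaded" features enabled (the command is a placeholder):

use tokio::process::Command;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut cmd = Command::new("echo");
    cmd.arg("hello")
        .kill_on_drop(true); // reap the child if the handle is dropped early

    let child = cmd.spawn()?;  // only compiles with the "process" feature
    let status = child.await?; // in tokio 0.2, Child is itself a Future
    println!("child exited with: {}", status);
    Ok(())
}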
Makefile (13 changes)
@@ -60,7 +60,7 @@ $(SUBDIRS):
 test:
	#cargo test test_broadcast_future
	#cargo test $(CARGO_BUILD_ARGS)
-	#$(CARGO) test $(tests) $(CARGO_BUILD_ARGS)
+	$(CARGO) test $(tests) $(CARGO_BUILD_ARGS)
 
 doc:
	$(CARGO) doc --no-deps $(CARGO_BUILD_ARGS)
@@ -80,18 +80,21 @@ build:
 
 
 .PHONY: proxmox-backup-docs
-proxmox-backup-docs: $(DOC_DEB)
-$(DOC_DEB): build
+$(DOC_DEB) $(DEBS): proxmox-backup-docs
+proxmox-backup-docs: build
	cd build; dpkg-buildpackage -b -us -uc --no-pre-clean
	lintian $(DOC_DEB)
 
 # copy the local target/ dir as a build-cache
 .PHONY: deb
-deb: $(DEBS)
-$(DEBS): build
+$(DEBS): deb
+deb: build
	cd build; dpkg-buildpackage -b -us -uc --no-pre-clean --build-profiles=nodoc
	lintian $(DEBS)
 
+.PHONY: deb-all
+deb-all: $(DOC_DEB) $(DEBS)
+
 .PHONY: dsc
 dsc: $(DSC)
 $(DSC): build
debian/changelog (120 changes)
@@ -1,3 +1,123 @@
+rust-proxmox-backup (0.8.9-1) unstable; urgency=medium
+
+  * improve termproxy (console) behavior on updating proxmox-backup-server and
+    other daemon restarts
+
+  * client: improve upload log output and speed calculation
+
+  * fix #2885: client upload: bail on duplicate backup targets
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 24 Jul 2020 11:24:07 +0200
+
+rust-proxmox-backup (0.8.8-1) unstable; urgency=medium
+
+  * pxar: .pxarexclude: match behavior from absolute paths to the one described
+    in the documentation and use byte based paths
+
+  * catalog shell: add exit command
+
+  * manifest: revert signature canonicalization to old behaviour. Fallout from
+    encrypted older backups is expected and was ignored due to the beta status
+    of Proxmox Backup.
+
+  * documentation: various improvements and additions
+
+  * cached user info: print privilege path in error message
+
+  * docs: fix #2851 Add note about GC grace period
+
+  * api2/status: fix datastore full estimation bug if there was (almost) no
+    change for several days
+
+  * schedules, calendar event: support the 'weekly' special expression
+
+  * ui: sync job: group remote fields and use "Source" in labels
+
+  * ui: add calendar event selector
+
+  * ui: sync job: change default to false for "remove-vanished" for new jobs
+
+  * fix #2860: skip in-progress snapshots when syncing
+
+  * fix #2865: detect and skip vanished snapshots
+
+  * fix #2871: close FDs when scanning backup group, avoid leaking
+
+  * backup: list images: handle walkdir error, catch "lost+found" special
+    directory
+
+  * implement AsyncSeek for AsyncIndexReader
+
+  * client: rework logging upload info like size or bandwidth
+
+  * client writer: do not output chunklist for now on verbose=true
+
+  * add initial API for listing available updates and updating the APT
+    database
+
+  * ui: add xterm.js console implementation
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 23 Jul 2020 12:16:05 +0200
+
+rust-proxmox-backup (0.8.7-2) unstable; urgency=medium
+
+  * support restoring file attributes from pxar archives
+
+  * docs: additions and fixes
+
+  * ui: running tasks: update limit to 100
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 14 Jul 2020 12:05:25 +0200
+
+rust-proxmox-backup (0.8.6-1) unstable; urgency=medium
+
+  * ui: add button for easily showing the server fingerprint dashboard
+
+  * proxmox-backup-client benchmark: add --verbose flag and improve output
+    format
+
+  * docs: reference PDF variant in HTML output
+
+  * proxmox-backup-client: add simple version command
+
+  * improve keyfile and signature handling in catalog and manifest
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 10 Jul 2020 11:34:14 +0200
+
+rust-proxmox-backup (0.8.5-1) unstable; urgency=medium
+
+  * fix cross process task listing
+
+  * docs: expand datastore documentation
+
+  * docs: add remotes and sync-jobs and schedules
+
+  * bump pathpatterns to 0.1.2
+
+  * ui: align version and user-menu spacing with pve/pmg
+
+  * ui: make username a menu-button
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 15:32:39 +0200
+
+rust-proxmox-backup (0.8.4-1) unstable; urgency=medium
+
+  * add TaskButton in header
+
+  * simpler lost+found pattern
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 14:28:24 +0200
+
+rust-proxmox-backup (0.8.3-1) unstable; urgency=medium
+
+  * get_disks: don't fail on zfs_devices
+
+  * allow some more characters for zpool list
+
+  * ui: adapt for new sign-only crypt mode
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 13:55:06 +0200
+
 rust-proxmox-backup (0.8.2-1) unstable; urgency=medium
 
  * buildsys: also upload debug packages
debian/control.in (1 change)
@@ -7,6 +7,7 @@ Depends: fonts-font-awesome,
          proxmox-backup-docs,
          proxmox-mini-journalreader,
          proxmox-widget-toolkit (>= 2.2-4),
+         pve-xtermjs (>= 4.7.0-1),
          smartmontools,
          ${misc:Depends},
          ${shlibs:Depends},
debian/lintian-overrides (1 change)
@@ -1 +1,2 @@
 proxmox-backup-server: package-installs-apt-sources etc/apt/sources.list.d/pbstest-beta.list
+proxmox-backup-server: systemd-service-file-refers-to-unusual-wantedby-target lib/systemd/system/proxmox-backup-banner.service getty.target
@@ -1 +1 @@
-/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/docs/proxmox-backup.pdf
+/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/html/proxmox-backup.pdf
@@ -139,6 +139,12 @@ or ``zfs``) to store the backup data.
 Datastores are identified by a simple *ID*. You can configure it
 when setting up the backup server.
 
+.. note:: The `File Layout`_ requires the file system to support at least *65538*
+   subdirectories per directory. That number comes from the 2\ :sup:`16`
+   pre-created chunk namespace directories, and the ``.`` and ``..`` default
+   directory entries. This requirement excludes certain filesystems and
+   filesystem configurations from being supported for a datastore. For example,
+   ``ext3`` as a whole or ``ext4`` with the ``dir_nlink`` feature manually disabled.
+
 
 Datastore Configuration
@@ -146,7 +152,12 @@ Datastore Configuration
 
 You can configure multiple datastores. Minimum one datastore needs to be
 configured. The datastore is identified by a simple `name` and points to a
-directory on the filesystem.
+directory on the filesystem. Each datastore also has associated retention
+settings of how many backup snapshots for each interval of ``hourly``,
+``daily``, ``weekly``, ``monthly``, ``yearly`` as well as a time-independent
+number of backups to keep in that store. :ref:`Pruning <pruning>` and
+:ref:`garbage collection <garbage-collection>` can also be configured to run
+periodically based on a configured :term:`schedule` per datastore.
 
 The following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
 
@@ -165,6 +176,30 @@ To list existing datastores run:
   │ store1 │ /backup/disk1/store1 │ This is my default storage. │
   └────────┴──────────────────────┴─────────────────────────────┘
 
+You can change settings of a datastore, for example to set a prune and garbage
+collection schedule or retention settings, using the ``update`` subcommand, and
+view a datastore with the ``show`` subcommand:
+
+.. code-block:: console
+
+  # proxmox-backup-manager datastore update store1 --keep-last 7 --prune-schedule daily --gc-schedule 'Tue 04:27'
+  # proxmox-backup-manager datastore show store1
+  ┌────────────────┬─────────────────────────────┐
+  │ Name           │ Value                       │
+  ╞════════════════╪═════════════════════════════╡
+  │ name           │ store1                      │
+  ├────────────────┼─────────────────────────────┤
+  │ path           │ /backup/disk1/store1        │
+  ├────────────────┼─────────────────────────────┤
+  │ comment        │ This is my default storage. │
+  ├────────────────┼─────────────────────────────┤
+  │ gc-schedule    │ Tue 04:27                   │
+  ├────────────────┼─────────────────────────────┤
+  │ keep-last      │ 7                           │
+  ├────────────────┼─────────────────────────────┤
+  │ prune-schedule │ daily                       │
+  └────────────────┴─────────────────────────────┘
+
 Finally, it is possible to remove the datastore configuration:
 
 .. code-block:: console
@@ -340,14 +375,72 @@ following roles exist:
   Is allowed to read data from a remote.
 
 
+:term:`Remote`
+~~~~~~~~~~~~~~
+
+A remote refers to a separate Proxmox Backup Server installation and a user on that
+installation, from which you can `sync` datastores to a local datastore with a
+`Sync Job`.
+
+To add a remote, you need its hostname or IP, a userid and password on the
+remote, and its certificate fingerprint. To get the fingerprint, use the
+``proxmox-backup-manager cert info`` command on the remote.
+
+.. code-block:: console
+
+  # proxmox-backup-manager cert info |grep Fingerprint
+  Fingerprint (sha256): 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
+
+Using the information specified above, add the remote with:
+
+.. code-block:: console
+
+  # proxmox-backup-manager remote create pbs2 --host pbs2.mydomain.example --userid sync@pam --password 'SECRET' --fingerprint 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
+
+Use the ``list``, ``show``, ``update``, and ``remove`` subcommands of
+``proxmox-backup-manager remote`` to manage your remotes:
+
+.. code-block:: console
+
+  # proxmox-backup-manager remote update pbs2 --host pbs2.example
+  # proxmox-backup-manager remote list
+  ┌──────┬──────────────┬──────────┬────────────────────────────────────────────┬─────────┐
+  │ name │ host         │ userid   │ fingerprint                                │ comment │
+  ╞══════╪══════════════╪══════════╪════════════════════════════════════════════╪═════════╡
+  │ pbs2 │ pbs2.example │ sync@pam │ 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe │         │
+  └──────┴──────────────┴──────────┴────────────────────────────────────────────┴─────────┘
+  # proxmox-backup-manager remote remove pbs2
+
+
+Sync Jobs
+~~~~~~~~~
+
+Sync jobs are configured to pull the contents of a datastore on a `Remote` to a
+local datastore. You can either start the sync job manually on the GUI or
+provide it with a :term:`schedule` to run regularly. The
+``proxmox-backup-manager sync-job`` command is used to manage sync jobs:
+
+.. code-block:: console
+
+  # proxmox-backup-manager sync-job create pbs2-local --remote pbs2 --remote-store local --store local --schedule 'Wed 02:30'
+  # proxmox-backup-manager sync-job update pbs2-local --comment 'offsite'
+  # proxmox-backup-manager sync-job list
+  ┌────────────┬───────┬────────┬──────────────┬───────────┬─────────┐
+  │ id         │ store │ remote │ remote-store │ schedule  │ comment │
+  ╞════════════╪═══════╪════════╪══════════════╪═══════════╪═════════╡
+  │ pbs2-local │ local │ pbs2   │ local        │ Wed 02:30 │ offsite │
+  └────────────┴───────┴────────┴──────────────┴───────────┴─────────┘
+  # proxmox-backup-manager sync-job remove pbs2-local
+
+
 Backup Client usage
 -------------------
 
 The command line client is called :command:`proxmox-backup-client`.
 
 
-Respository Locations
-~~~~~~~~~~~~~~~~~~~~~
+Repository Locations
+~~~~~~~~~~~~~~~~~~~~
 
 The client uses the following notation to specify a datastore repository
 on the backup server.
@@ -454,7 +547,7 @@ environment variable ``PBS_REPOSITORY``.
 
 .. code-block:: console
 
-  # export PBS_REPOSTORY=backup-server:store1
+  # export PBS_REPOSITORY=backup-server:store1
 
 After this you can execute all commands without specifying the ``--repository``
 option.
@@ -507,17 +600,17 @@ the given patterns. It is only possible to match files in this directory and its
 all files ending in ``.tmp`` within the directory or subdirectories with the
 following pattern ``**/*.tmp``.
 ``[...]`` matches a single character from any of the provided characters within
-the brackets. ``[!...]`` does the complementary and matches any singe character
+the brackets. ``[!...]`` does the complementary and matches any single character
 not contained within the brackets. It is also possible to specify ranges with two
 characters separated by ``-``. For example, ``[a-z]`` matches any lowercase
 alphabetic character and ``[0-9]`` matches any one single digit.
 
-The order of the glob match patterns defines if a file is included or
-excluded, later entries win over previous ones.
+The order of the glob match patterns defines whether a file is included or
+excluded, that is to say later entries override previous ones.
 This is also true for match patterns encountered deeper down the directory tree,
 which can override a previous exclusion.
 Be aware that excluded directories will **not** be read by the backup client.
-A ``.pxarexclude`` file in a subdirectory will have no effect.
+Thus, a ``.pxarexclude`` file in an excluded subdirectory will have no effect.
 ``.pxarexclude`` files are treated as regular files and will be included in the
 backup archive.
@@ -569,8 +662,8 @@ Restoring this backup will result in:
 Encryption
 ^^^^^^^^^^
 
-Proxmox backup supports client side encryption with AES-256 in GCM_
-mode. First you need to create an encryption key:
+Proxmox Backup supports client-side encryption with AES-256 in GCM_
+mode. To set this up, you first need to create an encryption key:
 
 .. code-block:: console
 
@@ -602,13 +695,13 @@ variables ``PBS_PASSWORD`` and ``PBS_ENCRYPTION_PASSWORD``.
 Restoring Data
 ~~~~~~~~~~~~~~
 
-The regular creation of backups is a necessary step to avoid data
-loss. More important, however, is the restoration. It is good practice to perform
+The regular creation of backups is a necessary step to avoiding data
+loss. More importantly, however, is the restoration. It is good practice to perform
 periodic recovery tests to ensure that you can access the data in
 case of problems.
 
 First, you need to find the snapshot which you want to restore. The snapshot
-command gives a list of all snapshots on the server:
+command provides a list of all the snapshots on the server:
 
 .. code-block:: console
 
@@ -640,8 +733,8 @@ backup.
 
   # proxmox-backup-client restore host/elsa/2019-12-03T09:35:01Z root.pxar /target/path/
 
-To get the contents of any archive you can restore the ``index.json`` file in the
-repository and restore it to '-'. This will dump the content to the standard output.
+To get the contents of any archive, you can restore the ``index.json`` file in the
+repository to the target path '-'. This will dump the contents to the standard output.
 
 .. code-block:: console
 
@@ -678,7 +771,7 @@ working directory and list directory contents in the archive.
 ``pwd`` shows the full path of the current working directory with respect to the
 archive root.
 
-Being able to quickly search the contents of the archive is a often needed feature.
+Being able to quickly search the contents of the archive is a commonly needed feature.
 That's where the catalog is most valuable.
 For example:
 
@@ -727,10 +820,10 @@ file archive as a read-only filesystem to a mountpoint on your host.
   bin dev home lib32 libx32 media opt root sbin sys usr
   boot etc lib lib64 lost+found mnt proc run srv tmp var
 
-This allows you to access the full content of the archive in a seamless manner.
+This allows you to access the full contents of the archive in a seamless manner.
 
 .. note:: As the FUSE connection needs to fetch and decrypt chunks from the
-   backup servers datastore, this can cause some additional network and CPU
+   backup server's datastore, this can cause some additional network and CPU
    load on your host, depending on the operations you perform on the mounted
    filesystem.
 
@@ -764,6 +857,8 @@ To remove the ticket, issue a logout:
   # proxmox-backup-client logout
 
 
+.. _pruning:
+
 Pruning and Removing Backups
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -825,7 +920,7 @@ backup is retained.
 
 
 You can use the ``--dry-run`` option to test your settings. This only
-shows the list of existing snapshots and which action prune would take.
+shows the list of existing snapshots and what actions prune would take.
 
 .. code-block:: console
 
@@ -867,6 +962,17 @@ unused data blocks are removed.
    depending on the number of chunks and the speed of the underlying
    disks.
 
+.. note:: The garbage collection will only remove chunks that haven't been used
+   for at least one day (exactly 24h 5m). This grace period is necessary because
+   chunks in use are marked by touching the chunk, which updates the ``atime``
+   (access time) property. Filesystems are mounted with the ``relatime`` option
+   by default. This results in better performance by only updating the
+   ``atime`` property if the last access was at least 24 hours ago. The
+   downside is that touching a chunk within these 24 hours will not always
+   update its ``atime`` property.
+
+   Chunks in the grace period will be logged at the end of the garbage
+   collection task as *Pending removals*.
 
 .. code-block:: console
 
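The grace-period note above can be checked mechanically: garbage collection compares each chunk's atime against a 24h 5m window. A standalone Rust sketch of that comparison (hypothetical illustration, not part of this changeset; the chunk path is a placeholder):

use std::time::{Duration, SystemTime};

fn main() -> std::io::Result<()> {
    // grace period from the note above: one day plus five minutes
    let grace = Duration::from_secs(24 * 3600 + 5 * 60);

    let atime = std::fs::metadata("/path/to/chunk")?.accessed()?;
    let age = SystemTime::now()
        .duration_since(atime)
        .unwrap_or(Duration::from_secs(0));

    if age < grace {
        println!("within grace period -> would be logged as 'Pending removal'");
    } else {
        println!("older than the grace period -> eligible for removal");
    }
    Ok(())
}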
@@ -13,7 +13,7 @@
 .. _Proxmox: https://www.proxmox.com
 .. _Proxmox Community Forum: https://forum.proxmox.com
 .. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
-.. _Proxmox Backup: https://www.proxmox.com/proxmox-backup
+.. _Proxmox Backup: https://pbs.proxmox.com/wiki/index.php/Main_Page // FIXME
 .. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
 .. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
 .. _Rust: https://www.rust-lang.org/
@@ -16,7 +16,7 @@ Glossary
    Datastore
 
       A place to store backups. A directory which contains the backup data.
-      The current implemenation is file-system based.
+      The current implementation is file-system based.
 
    `Rust`_
 
@@ -46,3 +46,19 @@ Glossary
       kernel driver handles filesystem requests and sends them to a
      userspace application.
+
+   Remote
+
+      A remote Proxmox Backup Server installation and credentials for a user on it.
+      You can pull datastores from a remote to a local datastore in order to
+      have redundant backups.
+
+   Schedule
+
+      Certain tasks, for example pruning and garbage collection, need to be
+      performed on a regular basis. Proxmox Backup Server uses a subset of the
+      `systemd Time and Date Specification
+      <https://www.freedesktop.org/software/systemd/man/systemd.time.html#>`_.
+      The subset currently supports time of day specifications and weekdays, in
+      addition to the shorthand expressions 'minutely', 'hourly', 'daily'.
+      There is no support for specifying timezones; the tasks are run in the
+      timezone configured on the server.
@@ -12,6 +12,10 @@ Front-Cover Texts, and no Back-Cover Texts. A copy of the license is included
 in the section entitled "GNU Free Documentation License".
 
 
+.. only:: html
+
+   A `PDF` version of the documentation is `also available here <./proxmox-backup.pdf>`_
+
 .. toctree::
    :maxdepth: 3
    :caption: Table of Contents
@@ -83,6 +83,10 @@ In general this is not trivial, especially when LVM_ or ZFS_ is used.
 
 The network configuration is completely up to you as well.
 
+.. note:: You can access the webinterface of the Proxmox Backup Server with
+   your web browser, using HTTPS on port 8007. For example at
+   ``https://<ip-or-dns-name>:8007``
+
 Install Proxmox Backup server on `Proxmox VE`_
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -99,6 +103,10 @@ After configuring the
 server to store backups. Should the hypervisor server fail, you can
 still access the backups.
 
+.. note:: You can access the webinterface of the Proxmox Backup Server with
+   your web browser, using HTTPS on port 8007. For example at
+   ``https://<ip-or-dns-name>:8007``
+
 Client installation
 -------------------
 
@@ -4,17 +4,17 @@ Introduction
 What is Proxmox Backup Server
 -----------------------------
 
-Proxmox Backup Server is an enterprise-class client-server backup software that
-backups :term:`virtual machine`\ s, :term:`container`\ s, and physical hosts.
-It is specially optimized for the `Proxmox Virtual Environment`_ platform and
-allows you to backup your data securely, even between remote sites, providing
-easy management with a web-based user interface.
+Proxmox Backup Server is an enterprise-class, client-server backup software
+package that backs up :term:`virtual machine`\ s, :term:`container`\ s, and
+physical hosts. It is specially optimized for the `Proxmox Virtual Environment`_
+platform and allows you to back up your data securely, even between remote
+sites, providing easy management with a web-based user interface.
 
 Proxmox Backup Server supports deduplication, compression, and authenticated
-encryption (AE_). Using :term:`Rust` as implementation language guarantees high
-performance, low resource usage, and a safe, high quality code base.
+encryption (AE_). Using :term:`Rust` as the implementation language guarantees high
+performance, low resource usage, and a safe, high-quality codebase.
 
-It features strong encryption done on the client side. Thus, it's possible to
+It features strong client-side encryption. Thus, it's possible to
 backup data to not fully trusted targets.
 
 
@@ -63,7 +63,7 @@ Main Features
    several gigabytes of data per second.
 
 :Encryption: Backups can be encrypted on the client-side using AES-256 in
-   Galois/Counter Mode (GCM_) mode. This authenticated encryption (AE_) mde
+   Galois/Counter Mode (GCM_) mode. This authenticated encryption (AE_) mode
    provides very high performance on modern hardware.
 
 :Web interface: Manage the Proxmox Backup Server with the integrated web-based
@@ -102,8 +102,30 @@ Therefore, ensure that you perform regular backups and run restore tests.
 Software Stack
 --------------
 
-.. todo:: Eplain why we use Rust (and Flutter)
+Proxmox Backup Server consists of multiple components:
+
+* server-daemon providing, among others, a RESTful API, super-fast
+  asynchronous tasks, lightweight usage statistic collection, scheduling
+  events, strict separation of privileged and unprivileged execution
+  environments, ...
+* JavaScript management webinterface
+* management CLI tool for the server (`proxmox-backup-manager`)
+* client CLI tool (`proxmox-backup-client`) to access the server easily from
+  any `Linux amd64` environment.
+
+Everything outside of the web interface is written in the Rust programming
+language.
+
+"The Rust programming language helps you write faster, more reliable software.
+High-level ergonomics and low-level control are often at odds in programming
+language design; Rust challenges that conflict. Through balancing powerful
+technical capacity and a great developer experience, Rust gives you the option
+to control low-level details (such as memory usage) without all the hassle
+traditionally associated with such control."
+
+-- `The Rust Programming Language <https://doc.rust-lang.org/book/ch00-00-introduction.html>`_
+
+.. todo:: further explain the software stack
 
 Getting Help
 ------------
@@ -24,7 +24,6 @@ General ZFS advantages
 * Self healing
 * Continuous integrity checking
 * Designed for high storage capacities
-* Protection against data corruption
 * Asynchronous replication over network
 * Open Source
 * Encryption
@@ -33,6 +33,46 @@ During the Proxmox Backup beta phase only one repository (pbstest) will be
 available. Once released, a Enterprise repository for production use and a
 no-subscription repository will be provided.
 
+SecureApt
+~~~~~~~~~
+
+The `Release` files in the repositories are signed with GnuPG. APT is using
+these signatures to verify that all packages are from a trusted source.
+
+If you install Proxmox Backup Server from an official ISO image, the key for
+verification is already installed.
+
+If you install Proxmox Backup Server on top of Debian, download and install the
+key with the following commands:
+
+.. code-block:: console
+
+  # wget http://download.proxmox.com/debian/proxmox-ve-release-6.x.gpg -O /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
+
+Verify the SHA512 checksum afterwards with:
+
+.. code-block:: console
+
+  # sha512sum /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
+
+The output should be:
+
+.. code-block:: console
+
+  acca6f416917e8e11490a08a1e2842d500b3a5d9f322c6319db0927b2901c3eae23cfb5cd5df6facf2b57399d3cfa52ad7769ebdd75d9b204549ca147da52626 /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
+
+and the md5sum:
+
+.. code-block:: console
+
+  # md5sum /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
+
+Here, the output should be:
+
+.. code-block:: console
+
+  f3f6c5a3a67baf38ad178e5ff1ee270c /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
+
 .. comment
   `Proxmox Backup`_ Enterprise Repository
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -24,7 +24,7 @@ This daemon is normally started and managed as ``systemd`` service::
 
   systemctl status proxmox-backup-proxy
 
-For debugging, you can start the daemon in forground using::
+For debugging, you can start the daemon in foreground using::
 
   proxmox-backup-proxy
 
@@ -2,7 +2,7 @@ use anyhow::{Error};
 
 use proxmox_backup::client::*;
 
-async fn upload_speed() -> Result<usize, Error> {
+async fn upload_speed() -> Result<f64, Error> {
 
     let host = "localhost";
     let datastore = "store2";
@@ -20,7 +20,7 @@ async fn upload_speed() -> Result<f64, Error> {
     let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false).await?;
 
     println!("start upload speed test");
-    let res = client.upload_speedtest().await?;
+    let res = client.upload_speedtest(true).await?;
 
     Ok(res)
 }
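The return type change from usize to f64 fits a quantity that is naturally fractional, such as a transfer rate. A hypothetical illustration of that kind of calculation (names and figures invented, not taken from the changeset):

fn main() {
    let bytes_uploaded: u64 = 123_456_789; // invented figure
    let elapsed_secs: f64 = 2.5;           // invented figure
    let mib_per_sec = bytes_uploaded as f64 / elapsed_secs / (1024.0 * 1024.0);
    println!("upload speed: {:.2} MiB/s", mib_per_sec);
}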
@@ -4,7 +4,6 @@ pub mod backup;
 pub mod config;
 pub mod node;
 pub mod reader;
-mod subscription;
 pub mod status;
 pub mod types;
 pub mod version;
@@ -26,7 +25,6 @@ pub const SUBDIRS: SubdirMap = &[
     ("pull", &pull::ROUTER),
     ("reader", &reader::ROUTER),
     ("status", &status::ROUTER),
-    ("subscription", &subscription::ROUTER),
     ("version", &version::ROUTER),
 ];
 
@@ -13,15 +13,22 @@ use crate::auth_helpers::*;
 use crate::api2::types::*;
 
 use crate::config::cached_user_info::CachedUserInfo;
-use crate::config::acl::PRIV_PERMISSIONS_MODIFY;
+use crate::config::acl::{PRIVILEGES, PRIV_PERMISSIONS_MODIFY};
 
 pub mod user;
 pub mod domain;
 pub mod acl;
 pub mod role;
 
-fn authenticate_user(username: &str, password: &str) -> Result<(), Error> {
+/// returns Ok(true) if a ticket has to be created
+/// and Ok(false) if not
+fn authenticate_user(
+    username: &str,
+    password: &str,
+    path: Option<String>,
+    privs: Option<String>,
+    port: Option<u16>,
+) -> Result<bool, Error> {
     let user_info = CachedUserInfo::new()?;
 
     if !user_info.is_active_user(&username) {
@@ -33,14 +40,43 @@ fn authenticate_user(
     if password.starts_with("PBS:") {
         if let Ok((_age, Some(ticket_username))) = tools::ticket::verify_rsa_ticket(public_auth_key(), "PBS", password, None, -300, ticket_lifetime) {
             if ticket_username == username {
-                return Ok(());
+                return Ok(true);
             } else {
                 bail!("ticket login failed - wrong username");
             }
         }
+    } else if password.starts_with("PBSTERM:") {
+        if path.is_none() || privs.is_none() || port.is_none() {
+            bail!("cannot check terminal ticket without path, priv and port");
+        }
+
+        let path = path.unwrap();
+        let privilege_name = privs.unwrap();
+        let port = port.unwrap();
+
+        if let Ok((_age, _data)) =
+            tools::ticket::verify_term_ticket(public_auth_key(), &username, &path, port, password)
+        {
+            for (name, privilege) in PRIVILEGES {
+                if *name == privilege_name {
+                    let mut path_vec = Vec::new();
+                    for part in path.split('/') {
+                        if part != "" {
+                            path_vec.push(part);
+                        }
+                    }
+
+                    user_info.check_privs(username, &path_vec, *privilege, false)?;
+                    return Ok(false);
+                }
+            }
+
+            bail!("No such privilege");
+        }
     }
 
-    crate::auth::authenticate_user(username, password)
+    let _ = crate::auth::authenticate_user(username, password)?;
+    Ok(true)
 }
 
 #[api(
@@ -52,6 +88,21 @@ fn authenticate_user(
             password: {
                 schema: PASSWORD_SCHEMA,
             },
+            path: {
+                type: String,
+                description: "Path for verifying terminal tickets.",
+                optional: true,
+            },
+            privs: {
+                type: String,
+                description: "Privilege for verifying terminal tickets.",
+                optional: true,
+            },
+            port: {
+                type: Integer,
+                description: "Port for verifying terminal tickets.",
+                optional: true,
+            },
         },
     },
     returns: {
@@ -78,11 +129,16 @@ fn authenticate_user(
 /// Create or verify authentication ticket.
 ///
 /// Returns: An authentication ticket with additional infos.
-fn create_ticket(username: String, password: String) -> Result<Value, Error> {
-    match authenticate_user(&username, &password) {
-        Ok(_) => {
-            let ticket = assemble_rsa_ticket( private_auth_key(), "PBS", Some(&username), None)?;
+fn create_ticket(
+    username: String,
+    password: String,
+    path: Option<String>,
+    privs: Option<String>,
+    port: Option<u16>,
+) -> Result<Value, Error> {
+    match authenticate_user(&username, &password, path, privs, port) {
+        Ok(true) => {
+            let ticket = assemble_rsa_ticket(private_auth_key(), "PBS", Some(&username), None)?;
 
             let token = assemble_csrf_prevention_token(csrf_secret(), &username);
 
@@ -94,6 +150,9 @@ fn create_ticket(
                 "CSRFPreventionToken": token,
             }))
         }
+        Ok(false) => Ok(json!({
+            "username": username,
+        })),
         Err(err) => {
             let client_ip = "unknown"; // $rpcenv->get_client_ip() || '';
             log::error!("authentication failure; rhost={} user={} msg={}", client_ip, username, err.to_string());
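The Result<bool, Error> return value above encodes three outcomes for the caller: issue a ticket, succeed without issuing one (terminal-ticket logins), or fail. A reduced standalone sketch of that control flow, with the real verification stubbed out (all names and the password check here are invented for illustration):

// Stub: Ok(true) = create a ticket, Ok(false) = valid login, no ticket needed.
fn authenticate(password: &str) -> Result<bool, String> {
    if password.starts_with("PBSTERM:") {
        // a terminal ticket is already a short-lived credential
        return Ok(false);
    }
    if password == "secret" { // stand-in for the real password check
        return Ok(true);
    }
    Err("authentication failure".to_string())
}

fn main() {
    for pw in &["secret", "PBSTERM:abc", "wrong"] {
        match authenticate(pw) {
            Ok(true) => println!("{}: issue ticket + CSRF token", pw),
            Ok(false) => println!("{}: ok, no ticket created", pw),
            Err(err) => println!("{}: {}", pw, err),
        }
    }
}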
@@ -535,7 +535,7 @@ macro_rules! add_common_prune_prameters {
 
 pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
     "Returns the list of snapshots and a flag indicating if there are kept or removed.",
-    PruneListItem::API_SCHEMA
+    &PruneListItem::API_SCHEMA
 ).schema();
 
 const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
@@ -6,7 +6,12 @@ use proxmox::http_err;
 
 pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, Error> {
     let file = tokio::fs::File::open(path.clone())
-        .map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path.clone(), err)))
+        .map_err(move |err| {
+            match err.kind() {
+                std::io::ErrorKind::NotFound => http_err!(NOT_FOUND, format!("open file {:?} failed - not found", path.clone())),
+                _ => http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path.clone(), err)),
+            }
+        })
        .await?;
 
     let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
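The hunk above distinguishes a missing file from other I/O failures so that downloads can answer with 404 instead of a generic 400. The underlying pattern, sketched with plain std types (the status strings stand in for the real http_err! macro):

use std::io::ErrorKind;

fn status_for(err: &std::io::Error) -> &'static str {
    // map the error kind onto a distinct HTTP status
    match err.kind() {
        ErrorKind::NotFound => "404 Not Found",
        _ => "400 Bad Request",
    }
}

fn main() {
    let err = std::fs::File::open("/no/such/file").unwrap_err();
    println!("{}: {}", status_for(&err), err);
}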
293
src/api2/node.rs
293
src/api2/node.rs
@ -1,18 +1,289 @@
|
|||||||
use proxmox::api::router::{Router, SubdirMap};
|
use std::net::TcpListener;
|
||||||
use proxmox::list_subdirs_api_method;
|
use std::os::unix::io::AsRawFd;
|
||||||
|
|
||||||
pub mod tasks;
|
use anyhow::{bail, format_err, Error};
|
||||||
mod time;
|
use futures::{
|
||||||
pub mod network;
|
future::{FutureExt, TryFutureExt},
|
||||||
|
select,
|
||||||
|
};
|
||||||
|
use hyper::body::Body;
|
||||||
|
use hyper::http::request::Parts;
|
||||||
|
use hyper::upgrade::Upgraded;
|
||||||
|
use nix::fcntl::{fcntl, FcntlArg, FdFlag};
|
||||||
|
use serde_json::{json, Value};
|
||||||
|
use tokio::io::{AsyncBufReadExt, BufReader};
|
||||||
|
|
||||||
|
use proxmox::api::router::{Router, SubdirMap};
|
||||||
|
use proxmox::api::{
|
||||||
|
api, schema::*, ApiHandler, ApiMethod, ApiResponseFuture, Permission, RpcEnvironment,
|
||||||
|
};
|
||||||
|
use proxmox::list_subdirs_api_method;
|
||||||
|
use proxmox::tools::websocket::WebSocket;
|
||||||
|
use proxmox::{identity, sortable};
|
||||||
|
|
||||||
|
use crate::api2::types::*;
|
||||||
|
use crate::config::acl::PRIV_SYS_CONSOLE;
|
||||||
|
use crate::server::WorkerTask;
|
||||||
|
use crate::tools;
|
||||||
|
|
||||||
|
pub mod disks;
|
||||||
pub mod dns;
|
pub mod dns;
|
||||||
mod syslog;
|
|
||||||
mod journal;
|
mod journal;
|
||||||
|
pub mod network;
|
||||||
|
pub(crate) mod rrd;
|
||||||
mod services;
|
mod services;
|
||||||
mod status;
|
mod status;
|
||||||
pub(crate) mod rrd;
|
mod subscription;
|
||||||
pub mod disks;
|
mod apt;
|
||||||
|
mod syslog;
|
||||||
|
pub mod tasks;
|
||||||
|
mod time;
|
||||||
|
|
||||||
|
pub const SHELL_CMD_SCHEMA: Schema = StringSchema::new("The command to run.")
|
||||||
|
.format(&ApiStringFormat::Enum(&[
|
||||||
|
EnumEntry::new("login", "Login"),
|
||||||
|
EnumEntry::new("upgrade", "Upgrade"),
|
||||||
|
]))
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
node: {
|
||||||
|
schema: NODE_SCHEMA,
|
||||||
|
},
|
||||||
|
cmd: {
|
||||||
|
schema: SHELL_CMD_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
type: Object,
|
||||||
|
description: "Object with the user, ticket, port and upid",
|
||||||
|
properties: {
|
||||||
|
user: {
|
||||||
|
description: "",
|
||||||
|
type: String,
|
||||||
|
},
|
||||||
|
ticket: {
|
||||||
|
description: "",
|
||||||
|
type: String,
|
||||||
|
},
|
||||||
|
port: {
|
||||||
|
description: "",
|
||||||
|
type: String,
|
||||||
|
},
|
||||||
|
upid: {
|
||||||
|
description: "",
|
||||||
|
type: String,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
description: "Restricted to users on realm 'pam'",
|
||||||
|
permission: &Permission::Privilege(&["system"], PRIV_SYS_CONSOLE, false),
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
/// Call termproxy and return shell ticket
|
||||||
|
async fn termproxy(
|
||||||
|
    cmd: Option<String>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let userid = rpcenv
        .get_user()
        .ok_or_else(|| format_err!("unknown user"))?;
    let (username, realm) = crate::auth::parse_userid(&userid)?;

    if realm != "pam" {
        bail!("only pam users can use the console");
    }

    let path = "/system";

    // use port 0 and let the kernel decide which port is free
    let listener = TcpListener::bind("localhost:0")?;
    let port = listener.local_addr()?.port();

    let ticket = tools::ticket::assemble_term_ticket(
        crate::auth_helpers::private_auth_key(),
        &userid,
        &path,
        port,
    )?;

    let mut command = Vec::new();
    match cmd.as_ref().map(|x| x.as_str()) {
        Some("login") | None => {
            command.push("login");
            if userid == "root@pam" {
                command.push("-f");
                command.push("root");
            }
        }
        Some("upgrade") => {
            if userid != "root@pam" {
                bail!("only root@pam can upgrade");
            }
            // TODO: add nicer/safer wrapper like in PVE instead
            command.push("sh");
            command.push("-c");
            command.push("apt full-upgrade; bash -l");
        }
        _ => bail!("invalid command"),
    };

    let upid = WorkerTask::spawn(
        "termproxy",
        None,
        &username,
        false,
        move |worker| async move {
            // move inside the worker so that it survives and does not close the port
            // remove CLOEXEC from the listener so that we can reuse it in termproxy
            let fd = listener.as_raw_fd();
            let mut flags = match fcntl(fd, FcntlArg::F_GETFD) {
                Ok(bits) => FdFlag::from_bits_truncate(bits),
                Err(err) => bail!("could not get fd: {}", err),
            };
            flags.remove(FdFlag::FD_CLOEXEC);
            if let Err(err) = fcntl(fd, FcntlArg::F_SETFD(flags)) {
                bail!("could not set fd: {}", err);
            }

            let mut arguments: Vec<&str> = Vec::new();
            let fd_string = fd.to_string();
            arguments.push(&fd_string);
            arguments.extend_from_slice(&[
                "--path",
                &path,
                "--perm",
                "Sys.Console",
                "--authport",
                "82",
                "--port-as-fd",
                "--",
            ]);
            arguments.extend_from_slice(&command);

            let mut cmd = tokio::process::Command::new("/usr/bin/termproxy");

            cmd.args(&arguments)
                .kill_on_drop(true)
                .stdout(std::process::Stdio::piped())
                .stderr(std::process::Stdio::piped());

            let mut child = cmd.spawn().expect("error executing termproxy");

            let stdout = child.stdout.take().expect("no child stdout handle");
            let stderr = child.stderr.take().expect("no child stderr handle");

            let worker_stdout = worker.clone();
            let stdout_fut = async move {
                let mut reader = BufReader::new(stdout).lines();
                while let Some(line) = reader.next_line().await? {
                    worker_stdout.log(line);
                }
                Ok::<(), Error>(())
            };

            let worker_stderr = worker.clone();
            let stderr_fut = async move {
                let mut reader = BufReader::new(stderr).lines();
                while let Some(line) = reader.next_line().await? {
                    worker_stderr.warn(line);
                }
                Ok::<(), Error>(())
            };

            select!{
                res = child.fuse() => {
                    let exit_code = res?;
                    if !exit_code.success() {
                        match exit_code.code() {
                            Some(code) => bail!("termproxy exited with {}", code),
                            None => bail!("termproxy exited by signal"),
                        }
                    }
                    Ok(())
                },
                res = stdout_fut.fuse() => res,
                res = stderr_fut.fuse() => res,
                res = worker.abort_future().fuse() => res.map_err(Error::from),
            }
        },
    )?;

    Ok(json!({
        "user": username,
        "ticket": ticket,
        "port": port,
        "upid": upid,
    }))
}

#[sortable]
pub const API_METHOD_WEBSOCKET: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upgrade_to_websocket),
    &ObjectSchema::new(
        "Upgraded to websocket",
        &sorted!([
            ("node", false, &NODE_SCHEMA),
            (
                "vncticket",
                false,
                &StringSchema::new("Terminal ticket").schema()
            ),
            ("port", false, &IntegerSchema::new("Terminal port").schema()),
        ]),
    ),
)
.access(
    Some("The user needs Sys.Console on /system."),
    &Permission::Privilege(&["system"], PRIV_SYS_CONSOLE, false),
);

fn upgrade_to_websocket(
    parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let username = rpcenv.get_user().unwrap();
        let ticket = tools::required_string_param(&param, "vncticket")?.to_owned();
        let port: u16 = tools::required_integer_param(&param, "port")? as u16;

        // will be checked again by termproxy
        tools::ticket::verify_term_ticket(
            crate::auth_helpers::public_auth_key(),
            &username,
            &"/system",
            port,
            &ticket,
        )?;

        let (ws, response) = WebSocket::new(parts.headers)?;

        crate::server::spawn_internal_task(async move {
            let conn: Upgraded = match req_body.on_upgrade().map_err(Error::from).await {
                Ok(upgraded) => upgraded,
                _ => bail!("error"),
            };

            let local = tokio::net::TcpStream::connect(format!("localhost:{}", port)).await?;
            ws.serve_connection(conn, local).await
        });

        Ok(response)
    }
    .boxed()
}
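
A note on the listener handling above: the worker binds to port 0 so the kernel picks a free ephemeral port, then clears FD_CLOEXEC so the spawned termproxy binary can inherit the listening socket across exec(). A minimal standalone sketch of the same pattern (illustrative only, not part of the patch):

    use std::net::TcpListener;
    use std::os::unix::io::AsRawFd;
    use nix::fcntl::{fcntl, FcntlArg, FdFlag};

    fn ephemeral_listener() -> Result<(TcpListener, u16), anyhow::Error> {
        let listener = TcpListener::bind("localhost:0")?; // port 0: kernel chooses
        let port = listener.local_addr()?.port();         // the port actually bound
        let fd = listener.as_raw_fd();
        let mut flags = FdFlag::from_bits_truncate(fcntl(fd, FcntlArg::F_GETFD)?);
        flags.remove(FdFlag::FD_CLOEXEC);                 // keep the fd open across exec()
        fcntl(fd, FcntlArg::F_SETFD(flags))?;
        Ok((listener, port))
    }
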
 pub const SUBDIRS: SubdirMap = &[
+    ("apt", &apt::ROUTER),
     ("disks", &disks::ROUTER),
     ("dns", &dns::ROUTER),
     ("journal", &journal::ROUTER),
@ -20,9 +291,15 @@ pub const SUBDIRS: SubdirMap = &[
     ("rrd", &rrd::ROUTER),
     ("services", &services::ROUTER),
     ("status", &status::ROUTER),
+    ("subscription", &subscription::ROUTER),
     ("syslog", &syslog::ROUTER),
     ("tasks", &tasks::ROUTER),
+    ("termproxy", &Router::new().post(&API_METHOD_TERMPROXY)),
     ("time", &time::ROUTER),
+    (
+        "vncwebsocket",
+        &Router::new().upgrade(&API_METHOD_WEBSOCKET),
+    ),
 ];

 pub const ROUTER: Router = Router::new()
268 src/api2/node/apt.rs Normal file
@ -0,0 +1,268 @@
use apt_pkg_native::Cache;
use anyhow::{Error, bail};
use serde_json::{json, Value};

use proxmox::{list_subdirs_api_method, const_regex};
use proxmox::api::{api, RpcEnvironment, RpcEnvironmentType, Permission};
use proxmox::api::router::{Router, SubdirMap};

use crate::server::WorkerTask;

use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::api2::types::{APTUpdateInfo, NODE_SCHEMA, UPID_SCHEMA};

const_regex! {
    VERSION_EPOCH_REGEX = r"^\d+:";
    FILENAME_EXTRACT_REGEX = r"^.*/.*?_(.*)_Packages$";
}

// FIXME: Replace with call to 'apt changelog <pkg> --print-uris'. Currently
// not possible as our packages do not have a URI set in their Release file
fn get_changelog_url(
    package: &str,
    filename: &str,
    source_pkg: &str,
    version: &str,
    source_version: &str,
    origin: &str,
    component: &str,
) -> Result<String, Error> {
    if origin == "" {
        bail!("no origin available for package {}", package);
    }

    if origin == "Debian" {
        let source_version = (VERSION_EPOCH_REGEX.regex_obj)().replace_all(source_version, "");

        let prefix = if source_pkg.starts_with("lib") {
            source_pkg.get(0..4)
        } else {
            source_pkg.get(0..1)
        };

        let prefix = match prefix {
            Some(p) => p,
            None => bail!("cannot get starting characters of package name '{}'", package)
        };

        // note: security updates seem to not always upload a changelog for
        // their package version, so this only works *most* of the time
        return Ok(format!("https://metadata.ftp-master.debian.org/changelogs/main/{}/{}/{}_{}_changelog",
                          prefix, source_pkg, source_pkg, source_version));

    } else if origin == "Proxmox" {
        let version = (VERSION_EPOCH_REGEX.regex_obj)().replace_all(version, "");

        let base = match (FILENAME_EXTRACT_REGEX.regex_obj)().captures(filename) {
            Some(captures) => {
                let base_capture = captures.get(1);
                match base_capture {
                    Some(base_underscore) => base_underscore.as_str().replace("_", "/"),
                    None => bail!("incompatible filename, cannot find regex group")
                }
            },
            None => bail!("incompatible filename, doesn't match regex")
        };

        return Ok(format!("http://download.proxmox.com/{}/{}_{}.changelog",
                          base, package, version));
    }

    bail!("unknown origin ({}) or component ({})", origin, component)
}

fn list_installed_apt_packages<F: Fn(&str, &str, &str) -> bool>(filter: F)
    -> Vec<APTUpdateInfo> {

    let mut ret = Vec::new();

    // note: this is not an 'apt update', it just re-reads the cache from disk
    let mut cache = Cache::get_singleton();
    cache.reload();

    let mut cache_iter = cache.iter();

    loop {
        let view = match cache_iter.next() {
            Some(view) => view,
            None => break
        };

        let current_version = match view.current_version() {
            Some(vers) => vers,
            None => continue
        };
        let candidate_version = match view.candidate_version() {
            Some(vers) => vers,
            // if there's no candidate (i.e. no update) get info of currently
            // installed version instead
            None => current_version.clone()
        };

        let package = view.name();
        if filter(&package, &current_version, &candidate_version) {
            let mut origin_res = "unknown".to_owned();
            let mut section_res = "unknown".to_owned();
            let mut priority_res = "unknown".to_owned();
            let mut change_log_url = "".to_owned();
            let mut short_desc = package.clone();
            let mut long_desc = "".to_owned();

            // get additional information via nested APT 'iterators'
            let mut view_iter = view.versions();
            while let Some(ver) = view_iter.next() {
                if ver.version() == candidate_version {
                    if let Some(section) = ver.section() {
                        section_res = section;
                    }

                    if let Some(prio) = ver.priority_type() {
                        priority_res = prio;
                    }

                    // assume every package has only one origin file (not
                    // origin, but origin *file*, for some reason those seem to
                    // be different concepts in APT)
                    let mut origin_iter = ver.origin_iter();
                    let origin = origin_iter.next();
                    if let Some(origin) = origin {

                        if let Some(sd) = origin.short_desc() {
                            short_desc = sd;
                        }

                        if let Some(ld) = origin.long_desc() {
                            long_desc = ld;
                        }

                        // the package files appear in priority order, meaning
                        // the one for the candidate version is first
                        let mut pkg_iter = origin.file();
                        let pkg_file = pkg_iter.next();
                        if let Some(pkg_file) = pkg_file {
                            if let Some(origin_name) = pkg_file.origin() {
                                origin_res = origin_name;
                            }

                            let filename = pkg_file.file_name();
                            let source_pkg = ver.source_package();
                            let source_ver = ver.source_version();
                            let component = pkg_file.component();

                            // build changelog URL from gathered information
                            // ignore errors, use empty changelog instead
                            let url = get_changelog_url(&package, &filename, &source_pkg,
                                &candidate_version, &source_ver, &origin_res, &component);
                            if let Ok(url) = url {
                                change_log_url = url;
                            }
                        }
                    }

                    break;
                }
            }

            let info = APTUpdateInfo {
                package,
                title: short_desc,
                arch: view.arch(),
                description: long_desc,
                change_log_url,
                origin: origin_res,
                version: candidate_version,
                old_version: current_version,
                priority: priority_res,
                section: section_res,
            };
            ret.push(info);
        }
    }

    return ret;
}

#[api(
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
        },
    },
    returns: {
        description: "A list of packages with available updates.",
        type: Array,
        items: { type: APTUpdateInfo },
    },
    access: {
        permission: &Permission::Privilege(&[], PRIV_SYS_AUDIT, false),
    },
)]
/// List available APT updates
fn apt_update_available(_param: Value) -> Result<Value, Error> {
    let ret = list_installed_apt_packages(|_pkg, cur_ver, can_ver| cur_ver != can_ver);
    Ok(json!(ret))
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            quiet: {
                description: "Only produces output suitable for logging, omitting progress indicators.",
                type: bool,
                default: false,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&[], PRIV_SYS_MODIFY, false),
    },
)]
/// Update the APT database
pub fn apt_update_database(
    quiet: Option<bool>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let username = rpcenv.get_user().unwrap();
    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
    let quiet = quiet.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_QUIET);

    let upid_str = WorkerTask::new_thread("aptupdate", None, &username.clone(), to_stdout, move |worker| {
        if !quiet { worker.log("starting apt-get update") }

        // TODO: set proxy /etc/apt/apt.conf.d/76pbsproxy like PVE

        let mut command = std::process::Command::new("apt-get");
        command.arg("update");

        let output = crate::tools::run_command(command, None)?;
        if !quiet { worker.log(output) }

        // TODO: add mail notify for new updates like PVE

        Ok(())
    })?;

    Ok(upid_str)
}

const SUBDIRS: SubdirMap = &[
    ("update", &Router::new()
        .get(&API_METHOD_APT_UPDATE_AVAILABLE)
        .post(&API_METHOD_APT_UPDATE_DATABASE)
    ),
];

pub const ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SUBDIRS))
    .subdirs(SUBDIRS);
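
For illustration, the Debian branch of get_changelog_url() above reduces to the following sketch; "bash" and its version are hypothetical inputs, not values from the patch:

    fn debian_changelog_url(source_pkg: &str, source_version: &str) -> String {
        // "lib" packages are grouped under their first four characters,
        // everything else under the first character
        let prefix = if source_pkg.starts_with("lib") {
            &source_pkg[0..4]
        } else {
            &source_pkg[0..1]
        };
        format!(
            "https://metadata.ftp-master.debian.org/changelogs/main/{}/{}/{}_{}_changelog",
            prefix, source_pkg, source_pkg, source_version
        )
    }

    // debian_changelog_url("bash", "5.0-4") ==
    // "https://metadata.ftp-master.debian.org/changelogs/main/b/bash/bash_5.0-4_changelog"
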
@ -41,6 +41,9 @@ pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new(
     .default(12)
     .schema();

+pub const ZPOOL_NAME_SCHEMA: Schema = StringSchema::new("ZFS Pool Name")
+    .format(&ApiStringFormat::Pattern(&ZPOOL_NAME_REGEX))
+    .schema();
+
 #[api(
     default: "On",
@ -157,7 +160,7 @@ pub fn list_zpools() -> Result<Vec<ZpoolListItem>, Error> {
             schema: NODE_SCHEMA,
         },
         name: {
-            schema: DATASTORE_SCHEMA,
+            schema: ZPOOL_NAME_SCHEMA,
         },
     },
 },
@ -10,6 +10,7 @@ use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};

 use crate::api2::types::*;
 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT};
+use crate::tools::cert::CertInfo;

 #[api(
     input: {
@ -46,14 +47,24 @@ use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT};
             description: "Total CPU usage since last query.",
             optional: true,
         },
-    }
+        info: {
+            type: Object,
+            description: "contains node information",
+            properties: {
+                fingerprint: {
+                    description: "The SSL Fingerprint",
+                    type: String,
+                },
+            },
+        },
     },
     access: {
         permission: &Permission::Privilege(&["system", "status"], PRIV_SYS_AUDIT, false),
     },
 )]
 /// Read node memory, CPU and (root) disk usage
-fn get_usage(
+fn get_status(
     _param: Value,
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
@ -63,6 +74,10 @@ fn get_usage(
     let kstat: procfs::ProcFsStat = procfs::read_proc_stat()?;
     let disk_usage = crate::tools::disks::disk_usage(Path::new("/"))?;

+    // get fingerprint
+    let cert = CertInfo::new()?;
+    let fp = cert.fingerprint()?;
+
     Ok(json!({
         "memory": {
             "total": meminfo.memtotal,
@ -74,7 +89,10 @@ fn get_usage(
             "total": disk_usage.total,
             "used": disk_usage.used,
             "free": disk_usage.avail,
-        }
+        },
+        "info": {
+            "fingerprint": fp,
+        },
     }))
 }

@ -122,5 +140,5 @@ fn reboot_or_shutdown(command: NodePowerCommand) -> Result<(), Error> {
 }

 pub const ROUTER: Router = Router::new()
-    .get(&API_METHOD_GET_USAGE)
+    .get(&API_METHOD_GET_STATUS)
     .post(&API_METHOD_REBOOT_OR_SHUTDOWN);
@ -5,8 +5,16 @@ use proxmox::api::{api, Router, Permission};

 use crate::tools;
 use crate::config::acl::PRIV_SYS_AUDIT;
+use crate::api2::types::NODE_SCHEMA;

 #[api(
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+        },
+    },
     returns: {
         description: "Subscription status.",
         properties: {
@ -161,6 +161,8 @@ fn datastore_status(
             if b != 0.0 {
                 let estimate = (1.0 - a) / b;
                 entry["estimated-full-date"] = Value::from(estimate.floor() as u64);
+            } else {
+                entry["estimated-full-date"] = Value::from(0);
             }
         }
     }
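
Reading the estimate above: assuming (a, b) are the intercept and slope of a linear fit of the usage fraction over time (usage(t) ≈ a + b·t, an interpretation not spelled out in the hunk itself), solving a + b·t = 1.0 gives the projected "full" timestamp:

    fn estimated_full_epoch(a: f64, b: f64) -> Option<u64> {
        if b != 0.0 {
            Some(((1.0 - a) / b).floor() as u64)
        } else {
            None // flat usage never fills up; the patch stores 0 in this case
        }
    }
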
@ -78,6 +78,8 @@ const_regex!{
     pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");

     pub BLOCKDEVICE_NAME_REGEX = r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$";
+
+    pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
 }

 pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
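
Illustrative check of what the new ZPOOL_NAME_REGEX accepts (using the regex crate directly; the pool names are hypothetical):

    use regex::Regex;

    fn main() {
        let re = Regex::new(r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$").unwrap();
        assert!(re.is_match("rpool"));
        assert!(re.is_match("tank-01"));
        assert!(!re.is_match("1pool")); // must start with a letter
    }
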
@ -960,3 +962,30 @@ pub enum RRDTimeFrameResolution {
     /// 1 week => last 490 days
     Year = 60*10080,
 }
+
+#[api()]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "PascalCase")]
+/// Describes a package for which an update is available.
+pub struct APTUpdateInfo {
+    /// Package name
+    pub package: String,
+    /// Package title
+    pub title: String,
+    /// Package architecture
+    pub arch: String,
+    /// Human readable package description
+    pub description: String,
+    /// New version to be updated to
+    pub version: String,
+    /// Old version currently installed
+    pub old_version: String,
+    /// Package origin
+    pub origin: String,
+    /// Package priority in human-readable form
+    pub priority: String,
+    /// Package section
+    pub section: String,
+    /// URL under which the package's changelog can be retrieved
+    pub change_log_url: String,
+}
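
The PascalCase rename above means the JSON keys differ from the Rust field names. A minimal sketch with a stand-in struct (hypothetical values, not output from the patch):

    use serde::Serialize;

    #[derive(Serialize)]
    #[serde(rename_all = "PascalCase")]
    struct Demo { old_version: String, change_log_url: String }

    fn main() {
        let d = Demo { old_version: "1.0".into(), change_log_url: "".into() };
        // prints {"OldVersion":"1.0","ChangeLogUrl":""}
        println!("{}", serde_json::to_string(&d).unwrap());
    }
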
@ -40,21 +40,21 @@
 //!
 //! Acquire shared lock for ChunkStore (process wide).
 //!
-//! Note: When creating .idx files, we create temporary (.tmp) file,
+//! Note: When creating .idx files, we create a temporary (.tmp) file,
 //! then do an atomic rename ...
 //!
 //!
 //! * Garbage Collect:
 //!
 //! Acquire exclusive lock for ChunkStore (process wide). If we have
-//! already an shared lock for ChunkStore, try to updraged that
+//! already a shared lock for the ChunkStore, try to upgrade that
 //! lock.
 //!
 //!
 //! * Server Restart
 //!
-//! Try to abort running garbage collection to release exclusive
-//! ChunkStore lock asap. Start new service with existing listening
+//! Try to abort the running garbage collection to release exclusive
+//! ChunkStore locks ASAP. Start the new service with the existing listening
 //! socket.
 //!
 //!
@ -62,10 +62,10 @@
 //!
 //! Deleting backups is as easy as deleting the corresponding .idx
 //! files. Unfortunately, this does not free up any storage, because
-//! those files just contains references to chunks.
+//! those files just contain references to chunks.
 //!
 //! To free up some storage, we run a garbage collection process at
-//! regular intervals. The collector uses an mark and sweep
+//! regular intervals. The collector uses a mark and sweep
 //! approach. In the first phase, it scans all .idx files to mark used
 //! chunks. The second phase then removes all unmarked chunks from the
 //! store.
@ -90,12 +90,12 @@
 //! amount of time ago (by default 24h). So we may only delete chunks
 //! with `atime` older than 24 hours.
 //!
-//! Another problem arise from running backups. The mark phase does
+//! Another problem arises from running backups. The mark phase does
 //! not find any chunks from those backups, because there is no .idx
 //! file for them (created after the backup). Chunks created or
 //! touched by those backups may have an `atime` as old as the start
-//! time of those backup. Please not that the backup start time may
-//! predate the GC start time. Se we may only delete chunk older than
+//! time of those backups. Please note that the backup start time may
+//! predate the GC start time. So we may only delete chunks older than
 //! the start time of those running backup jobs.
 //!
 //!
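
The cutoff rule described in these doc comments can be summarized in a small sketch (names are illustrative; the real GC derives these values elsewhere):

    fn atime_cutoff(now: i64, oldest_running_backup_start: Option<i64>) -> i64 {
        let grace = 24 * 3600; // default 24h atime grace period
        let cutoff = now - grace;
        match oldest_running_backup_start {
            // never delete chunks a still-running backup may have touched
            Some(start) if start < cutoff => start,
            _ => cutoff,
        }
    }
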
@ -1,30 +1,35 @@
 use std::future::Future;
 use std::task::{Poll, Context};
 use std::pin::Pin;
+use std::io::SeekFrom;

 use anyhow::Error;
 use futures::future::FutureExt;
 use futures::ready;
-use tokio::io::AsyncRead;
+use tokio::io::{AsyncRead, AsyncSeek};

 use proxmox::sys::error::io_err_other;
 use proxmox::io_format_err;

 use super::IndexFile;
 use super::read_chunk::AsyncReadChunk;
+use super::index::ChunkReadInfo;

 enum AsyncIndexReaderState<S> {
     NoData,
     WaitForData(Pin<Box<dyn Future<Output = Result<(S, Vec<u8>), Error>> + Send + 'static>>),
-    HaveData(usize),
+    HaveData,
 }

 pub struct AsyncIndexReader<S, I: IndexFile> {
     store: Option<S>,
     index: I,
     read_buffer: Vec<u8>,
+    current_chunk_offset: u64,
     current_chunk_idx: usize,
-    current_chunk_digest: [u8; 32],
+    current_chunk_info: Option<ChunkReadInfo>,
+    position: u64,
+    seek_to_pos: i64,
     state: AsyncIndexReaderState<S>,
 }

@ -37,8 +42,11 @@ impl<S: AsyncReadChunk, I: IndexFile> AsyncIndexReader<S, I> {
             store: Some(store),
             index,
             read_buffer: Vec::with_capacity(1024 * 1024),
+            current_chunk_offset: 0,
             current_chunk_idx: 0,
-            current_chunk_digest: [0u8; 32],
+            current_chunk_info: None,
+            position: 0,
+            seek_to_pos: 0,
             state: AsyncIndexReaderState::NoData,
         }
     }
@ -58,23 +66,41 @@ where
         loop {
             match &mut this.state {
                 AsyncIndexReaderState::NoData => {
-                    if this.current_chunk_idx >= this.index.index_count() {
+                    let (idx, offset) = if this.current_chunk_info.is_some() &&
+                        this.position == this.current_chunk_info.as_ref().unwrap().range.end
+                    {
+                        // optimization for sequential chunk read
+                        let next_idx = this.current_chunk_idx + 1;
+                        (next_idx, 0)
+                    } else {
+                        match this.index.chunk_from_offset(this.position) {
+                            Some(res) => res,
+                            None => return Poll::Ready(Ok(0))
+                        }
+                    };
+
+                    if idx >= this.index.index_count() {
                         return Poll::Ready(Ok(0));
                     }

-                    let digest = this
+                    let info = this
                         .index
-                        .index_digest(this.current_chunk_idx)
-                        .ok_or(io_format_err!("could not get digest"))?
-                        .clone();
+                        .chunk_info(idx)
+                        .ok_or(io_format_err!("could not get digest"))?;

-                    if digest == this.current_chunk_digest {
-                        this.state = AsyncIndexReaderState::HaveData(0);
-                        continue;
+                    this.current_chunk_offset = offset;
+                    this.current_chunk_idx = idx;
+                    let old_info = this.current_chunk_info.replace(info.clone());
+
+                    if let Some(old_info) = old_info {
+                        if old_info.digest == info.digest {
+                            // hit, chunk is currently in cache
+                            this.state = AsyncIndexReaderState::HaveData;
+                            continue;
+                        }
                     }

-                    this.current_chunk_digest = digest;
+                    // miss, need to download new chunk

                     let store = match this.store.take() {
                         Some(store) => store,
                         None => {
@ -83,7 +109,7 @@ where
                     };

                     let future = async move {
-                        store.read_chunk(&digest)
+                        store.read_chunk(&info.digest)
                             .await
                             .map(move |x| (store, x))
                     };
@ -95,7 +121,7 @@ where
                         Ok((store, mut chunk_data)) => {
                             this.read_buffer.clear();
                             this.read_buffer.append(&mut chunk_data);
-                            this.state = AsyncIndexReaderState::HaveData(0);
+                            this.state = AsyncIndexReaderState::HaveData;
                             this.store = Some(store);
                         }
                         Err(err) => {
@ -103,8 +129,8 @@ where
                     }
                 };
             }
-                AsyncIndexReaderState::HaveData(offset) => {
-                    let offset = *offset;
+                AsyncIndexReaderState::HaveData => {
+                    let offset = this.current_chunk_offset as usize;
                     let len = this.read_buffer.len();
                     let n = if len - offset < buf.len() {
                         len - offset
@ -113,11 +139,13 @@ where
                     };

                     buf[0..n].copy_from_slice(&this.read_buffer[offset..(offset + n)]);
+                    this.position += n as u64;

                     if offset + n == len {
                         this.state = AsyncIndexReaderState::NoData;
-                        this.current_chunk_idx += 1;
                     } else {
-                        this.state = AsyncIndexReaderState::HaveData(offset + n);
+                        this.current_chunk_offset += n as u64;
+                        this.state = AsyncIndexReaderState::HaveData;
                     }

                     return Poll::Ready(Ok(n));
@ -126,3 +154,51 @@ where
             }
         }
     }
 }
+
+impl<S, I> AsyncSeek for AsyncIndexReader<S, I>
+where
+    S: AsyncReadChunk + Unpin + Sync + 'static,
+    I: IndexFile + Unpin,
+{
+    fn start_seek(
+        self: Pin<&mut Self>,
+        _cx: &mut Context<'_>,
+        pos: SeekFrom,
+    ) -> Poll<tokio::io::Result<()>> {
+        let this = Pin::get_mut(self);
+        this.seek_to_pos = match pos {
+            SeekFrom::Start(offset) => {
+                offset as i64
+            },
+            SeekFrom::End(offset) => {
+                this.index.index_bytes() as i64 + offset
+            },
+            SeekFrom::Current(offset) => {
+                this.position as i64 + offset
+            }
+        };
+        Poll::Ready(Ok(()))
+    }
+
+    fn poll_complete(
+        self: Pin<&mut Self>,
+        _cx: &mut Context<'_>,
+    ) -> Poll<tokio::io::Result<u64>> {
+        let this = Pin::get_mut(self);
+
+        let index_bytes = this.index.index_bytes();
+        if this.seek_to_pos < 0 {
+            return Poll::Ready(Err(io_format_err!("cannot seek to negative values")));
+        } else if this.seek_to_pos > index_bytes as i64 {
+            this.position = index_bytes;
+        } else {
+            this.position = this.seek_to_pos as u64;
+        }
+
+        // even if seeking within one chunk, we need to go to NoData to
+        // recalculate the current_chunk_offset (data is cached anyway)
+        this.state = AsyncIndexReaderState::NoData;
+
+        Poll::Ready(Ok(this.position))
+    }
+}
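
With AsyncSeek implemented, callers can combine seeking and reading through tokio's extension traits. A hedged usage sketch (assumes tokio's AsyncSeekExt / AsyncReadExt helpers; names are illustrative):

    use std::io::SeekFrom;
    use anyhow::Error;
    use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt};

    async fn read_at<R>(reader: &mut R, offset: u64, len: usize) -> Result<Vec<u8>, Error>
    where
        R: AsyncRead + AsyncSeek + Unpin,
    {
        reader.seek(SeekFrom::Start(offset)).await?; // drives start_seek + poll_complete
        let mut buf = vec![0u8; len];
        reader.read_exact(&mut buf).await?;
        Ok(buf)
    }
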
@ -106,7 +106,11 @@ impl BackupGroup {

         use nix::fcntl::{openat, OFlag};
         match openat(l2_fd, &manifest_path, OFlag::O_RDONLY, nix::sys::stat::Mode::empty()) {
-            Ok(_) => { /* manifest exists --> assume backup was successful */ },
+            Ok(rawfd) => {
+                /* manifest exists --> assume backup was successful */
+                /* close else this leaks! */
+                nix::unistd::close(rawfd)?;
+            },
             Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => { return Ok(()); }
             Err(err) => {
                 bail!("last_successful_backup: unexpected error - {}", err);
@ -89,6 +89,10 @@ pub fn catalog_shell_cli() -> CommandLineInterface {
                 "find",
                 CliCommand::new(&API_METHOD_FIND_COMMAND).arg_param(&["pattern"]),
             )
+            .insert(
+                "exit",
+                CliCommand::new(&API_METHOD_EXIT),
+            )
             .insert_help(),
     )
 }
@ -104,6 +108,14 @@ fn complete_path(complete_me: &str, _map: &HashMap<String, String>) -> Vec<Strin
     }
 }

+// just an empty wrapper so that it is displayed in help/docs; we check
+// for 'exit' again in the read loop and break there
+#[api(input: { properties: {} })]
+/// Exit the shell
+async fn exit() -> Result<(), Error> {
+    Ok(())
+}
+
 #[api(input: { properties: {} })]
 /// List the current working directory.
 async fn pwd_command() -> Result<(), Error> {
@ -439,6 +451,9 @@ impl Shell {
             SHELL = Some(this as *mut Shell as usize);
         }
         while let Ok(line) = this.rl.readline(&this.prompt) {
+            if line == "exit" {
+                break;
+            }
             let helper = this.rl.helper().unwrap();
             let args = match cli::shellword_split(&line) {
                 Ok(args) => args,
@ -80,8 +80,9 @@ impl ChunkStore {

         let default_options = CreateOptions::new();

-        if let Err(err) = create_path(&base, Some(default_options.clone()), Some(options.clone())) {
-            bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err);
+        match create_path(&base, Some(default_options.clone()), Some(options.clone())) {
+            Err(err) => bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err),
+            Ok(res) => if ! res { nix::unistd::chown(&base, Some(uid), Some(gid))? },
         }

         if let Err(err) = create_dir(&chunk_dir, options.clone()) {
@ -177,7 +178,7 @@ impl ChunkStore {
             return Ok(false);
         }

-        bail!("updata atime failed for chunk {:?} - {}", chunk_path, err);
+        bail!("update atime failed for chunk {:?} - {}", chunk_path, err);
     }

     Ok(true)
@ -5,15 +5,15 @@
 /// use hash value 0 to detect a boundary.
 const CA_CHUNKER_WINDOW_SIZE: usize = 64;

-/// Slinding window chunker (Buzhash)
+/// Sliding window chunker (Buzhash)
 ///
 /// This is a rewrite of *casync* chunker (cachunker.h) in rust.
 ///
 /// Hashing by cyclic polynomial (also called Buzhash) has the benefit
 /// of avoiding multiplications, using barrel shifts instead. For more
 /// information please take a look at the [Rolling
-/// Hash](https://en.wikipedia.org/wiki/Rolling_hash) artikel from
-/// wikipedia.
+/// Hash](https://en.wikipedia.org/wiki/Rolling_hash) article from
+/// Wikipedia.

 pub struct Chunker {
     h: u32,
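
For readers unfamiliar with Buzhash, one rolling step looks roughly like the following (illustrative, not the crate's exact code; `table` is a fixed random 256-entry table and WINDOW the window size, 64 here):

    const WINDOW: u32 = 64;

    fn buzhash_step(h: u32, table: &[u32; 256], outgoing: u8, incoming: u8) -> u32 {
        // rotate the running hash, cancel the byte leaving the window
        // (its table entry has been rotated WINDOW times by now), add the new byte
        h.rotate_left(1)
            ^ table[outgoing as usize].rotate_left(WINDOW % 32)
            ^ table[incoming as usize]
    }
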
@ -1,4 +1,4 @@
-use anyhow::{bail, Error};
+use anyhow::{bail, format_err, Error};
 use std::sync::Arc;
 use std::io::{Read, BufReader};
 use proxmox::tools::io::ReadExt;
@ -40,23 +40,25 @@ impl <R: Read> DataBlobReader<R> {
                 Ok(Self { state: BlobReaderState::Compressed { expected_crc, decompr }})
             }
             ENCRYPTED_BLOB_MAGIC_1_0 => {
+                let config = config.ok_or_else(|| format_err!("unable to read encrypted blob without key"))?;
                 let expected_crc = u32::from_le_bytes(head.crc);
                 let mut iv = [0u8; 16];
                 let mut expected_tag = [0u8; 16];
                 reader.read_exact(&mut iv)?;
                 reader.read_exact(&mut expected_tag)?;
                 let csum_reader = ChecksumReader::new(reader, None);
-                let decrypt_reader = CryptReader::new(BufReader::with_capacity(64*1024, csum_reader), iv, expected_tag, config.unwrap())?;
+                let decrypt_reader = CryptReader::new(BufReader::with_capacity(64*1024, csum_reader), iv, expected_tag, config)?;
                 Ok(Self { state: BlobReaderState::Encrypted { expected_crc, decrypt_reader }})
             }
             ENCR_COMPR_BLOB_MAGIC_1_0 => {
+                let config = config.ok_or_else(|| format_err!("unable to read encrypted blob without key"))?;
                 let expected_crc = u32::from_le_bytes(head.crc);
                 let mut iv = [0u8; 16];
                 let mut expected_tag = [0u8; 16];
                 reader.read_exact(&mut iv)?;
                 reader.read_exact(&mut expected_tag)?;
                 let csum_reader = ChecksumReader::new(reader, None);
-                let decrypt_reader = CryptReader::new(BufReader::with_capacity(64*1024, csum_reader), iv, expected_tag, config.unwrap())?;
+                let decrypt_reader = CryptReader::new(BufReader::with_capacity(64*1024, csum_reader), iv, expected_tag, config)?;
                 let decompr = zstd::stream::read::Decoder::new(decrypt_reader)?;
                 Ok(Self { state: BlobReaderState::EncryptedCompressed { expected_crc, decompr }})
             }
@ -144,7 +144,7 @@ impl DataStore {
         self.chunk_store.base_path()
     }

-    /// Clenaup a backup directory
+    /// Cleanup a backup directory
     ///
     /// Removes all files not mentioned in the manifest.
     pub fn cleanup_backup_dir(&self, backup_dir: &BackupDir, manifest: &BackupManifest
@ -340,9 +340,30 @@ impl DataStore {
                 .map(|s| s.starts_with("."))
                 .unwrap_or(false)
         }
+        let handle_entry_err = |err: walkdir::Error| {
+            if let Some(inner) = err.io_error() {
+                let path = err.path().unwrap_or(Path::new(""));
+                match inner.kind() {
+                    io::ErrorKind::PermissionDenied => {
+                        // only allow to skip ext4 fsck directory, avoid GC if, for example,
+                        // a user got file permissions wrong on datastore rsync to new server
+                        if err.depth() > 1 || !path.ends_with("lost+found") {
+                            bail!("cannot continue garbage-collection safely, permission denied on: {}", path.display())
+                        }
+                    },
+                    _ => bail!("unexpected error on datastore traversal: {} - {}", inner, path.display()),
+                }
+            }
+            Ok(())
+        };
         for entry in walker.filter_entry(|e| !is_hidden(e)) {
-            let path = entry?.into_path();
+            let path = match entry {
+                Ok(entry) => entry.into_path(),
+                Err(err) => {
+                    handle_entry_err(err)?;
+                    continue
+                },
+            };
             if let Ok(archive_type) = archive_type(&path) {
                 if archive_type == ArchiveType::FixedIndex || archive_type == ArchiveType::DynamicIndex {
                     list.push(path);
@ -216,6 +216,24 @@ impl IndexFile for DynamicIndexReader {
             digest: self.index[pos].digest.clone(),
         })
     }
+
+    fn chunk_from_offset(&self, offset: u64) -> Option<(usize, u64)> {
+        let end_idx = self.index.len() - 1;
+        let end = self.chunk_end(end_idx);
+        let found_idx = self.binary_search(0, 0, end_idx, end, offset);
+        let found_idx = match found_idx {
+            Ok(i) => i,
+            Err(_) => return None
+        };
+
+        let found_start = if found_idx == 0 {
+            0
+        } else {
+            self.chunk_end(found_idx - 1)
+        };
+
+        Some((found_idx, offset - found_start))
+    }
 }
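
The mapping implemented above, stated compactly: chunk i covers the byte range [chunk_end(i-1), chunk_end(i)). A standalone illustration over a plain slice of cumulative end offsets (not the patch's exact binary_search):

    fn chunk_from_offset(ends: &[u64], offset: u64) -> Option<(usize, u64)> {
        if offset >= *ends.last()? {
            return None;
        }
        // first chunk whose exclusive end is greater than offset
        let idx = ends.partition_point(|&end| end <= offset);
        let start = if idx == 0 { 0 } else { ends[idx - 1] };
        Some((idx, offset - start))
    }

    // chunk_from_offset(&[100, 250, 400], 120) == Some((1, 20))
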
 struct CachedChunk {
@ -13,7 +13,6 @@ use std::os::unix::io::AsRawFd;
 use std::path::{Path, PathBuf};
 use std::sync::Arc;

-use super::read_chunk::*;
 use super::ChunkInfo;

 use proxmox::tools::io::ReadExt;
@ -146,20 +145,6 @@ impl FixedIndexReader {
         Ok(())
     }

-    #[inline]
-    fn chunk_end(&self, pos: usize) -> u64 {
-        if pos >= self.index_length {
-            panic!("chunk index out of range");
-        }
-
-        let end = ((pos + 1) * self.chunk_size) as u64;
-        if end > self.size {
-            self.size
-        } else {
-            end
-        }
-    }
-
     pub fn print_info(&self) {
         println!("Size: {}", self.size);
         println!("ChunkSize: {}", self.chunk_size);
@ -219,6 +204,17 @@ impl IndexFile for FixedIndexReader {

         (csum, chunk_end)
     }
+
+    fn chunk_from_offset(&self, offset: u64) -> Option<(usize, u64)> {
+        if offset >= self.size {
+            return None;
+        }
+
+        Some((
+            (offset / self.chunk_size as u64) as usize,
+            offset & (self.chunk_size - 1) as u64 // fast modulo, valid for 2^x chunk_size
+        ))
+    }
 }
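
The "fast modulo" above works because chunk_size is a power of two: for such sizes, offset % chunk_size == offset & (chunk_size - 1). A quick check:

    fn main() {
        let chunk_size: u64 = 4096; // 2^12
        let offset: u64 = 10_000;
        assert_eq!(offset % chunk_size, offset & (chunk_size - 1)); // both 1808
        println!("chunk {}, in-chunk offset {}",
                 offset / chunk_size, offset & (chunk_size - 1));
    }
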
 pub struct FixedIndexWriter {
@ -465,142 +461,3 @@ impl FixedIndexWriter {
         Ok(())
     }
 }
-
-pub struct BufferedFixedReader<S> {
-    store: S,
-    index: FixedIndexReader,
-    archive_size: u64,
-    read_buffer: Vec<u8>,
-    buffered_chunk_idx: usize,
-    buffered_chunk_start: u64,
-    read_offset: u64,
-}
-
-impl<S: ReadChunk> BufferedFixedReader<S> {
-    pub fn new(index: FixedIndexReader, store: S) -> Self {
-        let archive_size = index.size;
-        Self {
-            store,
-            index,
-            archive_size,
-            read_buffer: Vec::with_capacity(1024 * 1024),
-            buffered_chunk_idx: 0,
-            buffered_chunk_start: 0,
-            read_offset: 0,
-        }
-    }
-
-    pub fn archive_size(&self) -> u64 {
-        self.archive_size
-    }
-
-    fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
-        let index = &self.index;
-        let info = match index.chunk_info(idx) {
-            Some(info) => info,
-            None => bail!("chunk index out of range"),
-        };
-
-        // fixme: avoid copy
-
-        let data = self.store.read_chunk(&info.digest)?;
-        let size = info.range.end - info.range.start;
-        if size != data.len() as u64 {
-            bail!("read chunk with wrong size ({} != {}", size, data.len());
-        }
-
-        self.read_buffer.clear();
-        self.read_buffer.extend_from_slice(&data);
-
-        self.buffered_chunk_idx = idx;
-
-        self.buffered_chunk_start = info.range.start as u64;
-        Ok(())
-    }
-}
-
-impl<S: ReadChunk> crate::tools::BufferedRead for BufferedFixedReader<S> {
-    fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error> {
-        if offset == self.archive_size {
-            return Ok(&self.read_buffer[0..0]);
-        }
-
-        let buffer_len = self.read_buffer.len();
-        let index = &self.index;
-
-        // optimization for sequential read
-        if buffer_len > 0
-            && ((self.buffered_chunk_idx + 1) < index.index_length)
-            && (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
-        {
-            let next_idx = self.buffered_chunk_idx + 1;
-            let next_end = index.chunk_end(next_idx);
-            if offset < next_end {
-                self.buffer_chunk(next_idx)?;
-                let buffer_offset = (offset - self.buffered_chunk_start) as usize;
-                return Ok(&self.read_buffer[buffer_offset..]);
-            }
-        }
-
-        if (buffer_len == 0)
-            || (offset < self.buffered_chunk_start)
-            || (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
-        {
-            let idx = (offset / index.chunk_size as u64) as usize;
-            self.buffer_chunk(idx)?;
-        }
-
-        let buffer_offset = (offset - self.buffered_chunk_start) as usize;
-        Ok(&self.read_buffer[buffer_offset..])
-    }
-}
-
-impl<S: ReadChunk> std::io::Read for BufferedFixedReader<S> {
-    fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
-        use crate::tools::BufferedRead;
-        use std::io::{Error, ErrorKind};
-
-        let data = match self.buffered_read(self.read_offset) {
-            Ok(v) => v,
-            Err(err) => return Err(Error::new(ErrorKind::Other, err.to_string())),
-        };
-
-        let n = if data.len() > buf.len() {
-            buf.len()
-        } else {
-            data.len()
-        };
-
-        unsafe {
-            std::ptr::copy_nonoverlapping(data.as_ptr(), buf.as_mut_ptr(), n);
-        }
-
-        self.read_offset += n as u64;
-
-        Ok(n)
-    }
-}
-
-impl<S: ReadChunk> Seek for BufferedFixedReader<S> {
-    fn seek(&mut self, pos: SeekFrom) -> Result<u64, std::io::Error> {
-        let new_offset = match pos {
-            SeekFrom::Start(start_offset) => start_offset as i64,
-            SeekFrom::End(end_offset) => (self.archive_size as i64) + end_offset,
-            SeekFrom::Current(offset) => (self.read_offset as i64) + offset,
-        };
-
-        use std::io::{Error, ErrorKind};
-        if (new_offset < 0) || (new_offset > (self.archive_size as i64)) {
-            return Err(Error::new(
-                ErrorKind::Other,
-                format!(
-                    "seek is out of range {} ([0..{}])",
-                    new_offset, self.archive_size
-                ),
-            ));
-        }
-        self.read_offset = new_offset as u64;
-
-        Ok(self.read_offset)
-    }
-}
@ -1,6 +1,7 @@
 use std::collections::HashMap;
 use std::ops::Range;

+#[derive(Clone)]
 pub struct ChunkReadInfo {
     pub range: Range<u64>,
     pub digest: [u8; 32],
@ -22,6 +23,9 @@ pub trait IndexFile {
     fn index_bytes(&self) -> u64;
     fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo>;

+    /// Get the chunk index and the relative offset within it for a byte offset
+    fn chunk_from_offset(&self, offset: u64) -> Option<(usize, u64)>;
+
     /// Compute index checksum and size
     fn compute_csum(&self) -> ([u8; 32], u64);
@ -35,10 +35,14 @@ mod hex_csum {
     }
 }

+fn crypt_mode_none() -> CryptMode { CryptMode::None }
+fn empty_value() -> Value { json!({}) }
+
 #[derive(Serialize, Deserialize)]
 #[serde(rename_all="kebab-case")]
 pub struct FileInfo {
     pub filename: String,
+    #[serde(default="crypt_mode_none")] // to be compatible with < 0.8.0 backups
     pub crypt_mode: CryptMode,
     pub size: u64,
     #[serde(with = "hex_csum")]
@ -52,6 +56,7 @@ pub struct BackupManifest {
     backup_id: String,
     backup_time: i64,
     files: Vec<FileInfo>,
+    #[serde(default="empty_value")] // to be compatible with < 0.8.0 backups
     pub unprotected: Value,
 }

@ -99,7 +104,7 @@ impl BackupManifest {
         &self.files[..]
     }

-    fn lookup_file_info(&self, name: &str) -> Result<&FileInfo, Error> {
+    pub fn lookup_file_info(&self, name: &str) -> Result<&FileInfo, Error> {

         let info = self.files.iter().find(|item| item.filename == name);

@ -125,39 +130,47 @@ impl BackupManifest {
     }

     // Generate canonical json
-    fn to_canonical_json(value: &Value, output: &mut String) -> Result<(), Error> {
+    fn to_canonical_json(value: &Value) -> Result<Vec<u8>, Error> {
+        let mut data = Vec::new();
+        Self::write_canonical_json(value, &mut data)?;
+        Ok(data)
+    }
+
+    fn write_canonical_json(value: &Value, output: &mut Vec<u8>) -> Result<(), Error> {
         match value {
             Value::Null => bail!("got unexpected null value"),
-            Value::String(_) => {
-                output.push_str(&serde_json::to_string(value)?);
-            },
-            Value::Number(_) => {
-                output.push_str(&serde_json::to_string(value)?);
-            }
-            Value::Bool(_) => {
-                output.push_str(&serde_json::to_string(value)?);
-            },
+            Value::String(_) | Value::Number(_) | Value::Bool(_) => {
+                serde_json::to_writer(output, &value)?;
+            }
             Value::Array(list) => {
-                output.push('[');
-                for (i, item) in list.iter().enumerate() {
-                    if i != 0 { output.push(','); }
-                    Self::to_canonical_json(item, output)?;
+                output.push(b'[');
+                let mut iter = list.iter();
+                if let Some(item) = iter.next() {
+                    Self::write_canonical_json(item, output)?;
+                    for item in iter {
+                        output.push(b',');
+                        Self::write_canonical_json(item, output)?;
+                    }
                 }
-                output.push(']');
+                output.push(b']');
             }
             Value::Object(map) => {
-                output.push('{');
-                let mut keys: Vec<String> = map.keys().map(|s| s.clone()).collect();
+                output.push(b'{');
+                let mut keys: Vec<&str> = map.keys().map(String::as_str).collect();
                 keys.sort();
-                for (i, key) in keys.iter().enumerate() {
-                    let item = map.get(key).unwrap();
-                    if i != 0 { output.push(','); }
-                    output.push_str(&serde_json::to_string(&Value::String(key.clone()))?);
-                    output.push(':');
-                    Self::to_canonical_json(item, output)?;
+                let mut iter = keys.into_iter();
+                if let Some(key) = iter.next() {
+                    Self::write_canonical_json(&key.into(), output)?;
+                    output.push(b':');
+                    Self::write_canonical_json(&map[key], output)?;
+                    for key in iter {
+                        output.push(b',');
+                        Self::write_canonical_json(&key.into(), output)?;
+                        output.push(b':');
+                        Self::write_canonical_json(&map[key], output)?;
+                    }
                 }
-                output.push('}');
+                output.push(b'}');
             }
         }
         Ok(())
@ -176,11 +189,11 @@ impl BackupManifest {
         let mut signed_data = data.clone();

         signed_data.as_object_mut().unwrap().remove("unprotected"); // exclude
+        signed_data.as_object_mut().unwrap().remove("signature"); // exclude

-        let mut canonical = String::new();
-        Self::to_canonical_json(&signed_data, &mut canonical)?;
+        let canonical = Self::to_canonical_json(&signed_data)?;

-        let sig = crypt_config.compute_auth_tag(canonical.as_bytes());
+        let sig = crypt_config.compute_auth_tag(&canonical);

         Ok(sig)
     }
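
Why a hand-rolled canonical writer: the signature must be computed over a stable byte sequence, so object keys are emitted sorted and without whitespace. A small sketch of the idea using a BTreeMap, which iterates in key order (illustrative, not the manifest code):

    use std::collections::BTreeMap;

    fn main() {
        let mut m = BTreeMap::new();
        m.insert("b", 2);
        m.insert("a", 1);
        let fields: Vec<String> = m.iter().map(|(k, v)| format!("\"{}\":{}", k, v)).collect();
        // same logical object always yields the same bytes: {"a":1,"b":2}
        assert_eq!(format!("{{{}}}", fields.join(",")), r#"{"a":1,"b":2}"#);
    }
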
|
@ -25,6 +25,7 @@ use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
|
|||||||
|
|
||||||
use proxmox_backup::tools;
|
use proxmox_backup::tools;
|
||||||
use proxmox_backup::api2::types::*;
|
use proxmox_backup::api2::types::*;
|
||||||
|
use proxmox_backup::api2::version;
|
||||||
use proxmox_backup::client::*;
|
use proxmox_backup::client::*;
|
||||||
use proxmox_backup::pxar::catalog::*;
|
use proxmox_backup::pxar::catalog::*;
|
||||||
use proxmox_backup::backup::{
|
use proxmox_backup::backup::{
|
||||||
@ -552,6 +553,56 @@ fn api_logout(param: Value) -> Result<Value, Error> {
|
|||||||
Ok(Value::Null)
|
Ok(Value::Null)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
repository: {
|
||||||
|
schema: REPO_URL_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"output-format": {
|
||||||
|
schema: OUTPUT_FORMAT,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
/// Show client and optional server version
|
||||||
|
async fn api_version(param: Value) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let output_format = get_output_format(¶m);
|
||||||
|
|
||||||
|
let mut version_info = json!({
|
||||||
|
"client": {
|
||||||
|
"version": version::PROXMOX_PKG_VERSION,
|
||||||
|
"release": version::PROXMOX_PKG_RELEASE,
|
||||||
|
"repoid": version::PROXMOX_PKG_REPOID,
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let repo = extract_repository_from_value(¶m);
|
||||||
|
if let Ok(repo) = repo {
|
||||||
|
let client = connect(repo.host(), repo.user())?;
|
||||||
|
|
||||||
|
match client.get("api2/json/version", None).await {
|
||||||
|
Ok(mut result) => version_info["server"] = result["data"].take(),
|
||||||
|
Err(e) => eprintln!("could not connect to server - {}", e),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if output_format == "text" {
|
||||||
|
println!("client version: {}.{}", version::PROXMOX_PKG_VERSION, version::PROXMOX_PKG_RELEASE);
|
||||||
|
if let Some(server) = version_info["server"].as_object() {
|
||||||
|
let server_version = server["version"].as_str().unwrap();
|
||||||
|
let server_release = server["release"].as_str().unwrap();
|
||||||
|
println!("server version: {}.{}", server_version, server_release);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
format_and_print_result(&version_info, &output_format);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
input: {
|
input: {
|
||||||
@ -884,12 +935,18 @@ async fn create_backup(
|
|||||||
}
|
}
|
||||||
|
|
||||||
let mut upload_list = vec![];
|
let mut upload_list = vec![];
|
||||||
|
let mut target_set = HashSet::new();
|
||||||
|
|
||||||
for backupspec in backupspec_list {
|
for backupspec in backupspec_list {
|
||||||
let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
|
let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
|
||||||
let filename = &spec.config_string;
|
let filename = &spec.config_string;
|
||||||
let target = &spec.archive_name;
|
let target = &spec.archive_name;
|
||||||
|
|
||||||
|
if target_set.contains(target) {
|
||||||
|
bail!("got target twice: '{}'", target);
|
||||||
|
}
|
||||||
|
target_set.insert(target.to_string());
|
||||||
|
|
||||||
use std::os::unix::fs::FileTypeExt;
|
use std::os::unix::fs::FileTypeExt;
|
||||||
|
|
||||||
let metadata = std::fs::metadata(filename)
|
let metadata = std::fs::metadata(filename)
|
||||||
@ -986,14 +1043,14 @@ async fn create_backup(
|
|||||||
for (backup_type, filename, target, size) in upload_list {
|
for (backup_type, filename, target, size) in upload_list {
|
||||||
match backup_type {
|
match backup_type {
|
||||||
BackupSpecificationType::CONFIG => {
|
BackupSpecificationType::CONFIG => {
|
||||||
println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
|
println!("Upload config file '{}' to '{}' as {}", filename, repo, target);
|
||||||
let stats = client
|
let stats = client
|
||||||
.upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
|
.upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
|
||||||
.await?;
|
.await?;
|
||||||
manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
|
manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
|
||||||
}
|
}
|
||||||
BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
|
BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
|
||||||
println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
|
println!("Upload log file '{}' to '{}' as {}", filename, repo, target);
|
||||||
let stats = client
|
let stats = client
|
||||||
.upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
|
.upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
|
||||||
.await?;
|
.await?;
|
||||||
@ -1008,7 +1065,7 @@ async fn create_backup(
|
|||||||
}
|
}
|
||||||
let catalog = catalog.as_ref().unwrap();
|
let catalog = catalog.as_ref().unwrap();
|
||||||
|
|
||||||
println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
|
println!("Upload directory '{}' to '{}' as {}", filename, repo, target);
|
||||||
catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
|
catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
|
||||||
let stats = backup_directory(
|
let stats = backup_directory(
|
||||||
&client,
|
&client,
|
||||||
@ -1086,7 +1143,7 @@ async fn create_backup(
|
|||||||
.map_err(|err| format_err!("unable to format manifest - {}", err))?;
|
.map_err(|err| format_err!("unable to format manifest - {}", err))?;
|
||||||
|
|
||||||
|
|
||||||
println!("Upload index.json to '{:?}'", repo);
|
if verbose { println!("Upload index.json to '{}'", repo) };
|
||||||
client
|
client
|
||||||
.upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, true, false)
|
.upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, true, false)
|
||||||
.await?;
|
.await?;
|
||||||
@ -1878,6 +1935,9 @@ fn main() {
|
|||||||
let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
|
let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
|
||||||
.completion_cb("repository", complete_repository);
|
.completion_cb("repository", complete_repository);
|
||||||
|
|
||||||
|
let version_cmd_def = CliCommand::new(&API_METHOD_API_VERSION)
|
||||||
|
.completion_cb("repository", complete_repository);
|
||||||
|
|
||||||
let cmd_def = CliCommandMap::new()
|
let cmd_def = CliCommandMap::new()
|
||||||
.insert("backup", backup_cmd_def)
|
.insert("backup", backup_cmd_def)
|
||||||
.insert("upload-log", upload_log_cmd_def)
|
.insert("upload-log", upload_log_cmd_def)
|
||||||
@ -1895,6 +1955,7 @@ fn main() {
|
|||||||
.insert("mount", mount_cmd_def())
|
.insert("mount", mount_cmd_def())
|
||||||
.insert("catalog", catalog_mgmt_cli())
|
.insert("catalog", catalog_mgmt_cli())
|
||||||
.insert("task", task_mgmt_cli())
|
.insert("task", task_mgmt_cli())
|
||||||
|
.insert("version", version_cmd_def)
|
||||||
.insert("benchmark", benchmark_cmd_def);
|
.insert("benchmark", benchmark_cmd_def);
|
||||||
|
|
||||||
let rpcenv = CliEnvironment::new();
|
let rpcenv = CliEnvironment::new();
|
||||||
|
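One detail in api_version worth calling out: the server block is spliced in with Value::take(), which moves the JSON sub-tree out of the response and leaves Null behind, avoiding a clone. A tiny self-contained demonstration of that pattern (the version strings here are made up):

    use serde_json::json;

    fn main() {
        let mut result = json!({ "data": { "version": "0.8", "release": "9" } });
        let mut version_info = json!({ "client": { "version": "0.8.9" } });

        // move the sub-tree out of `result` without cloning
        version_info["server"] = result["data"].take();

        assert!(result["data"].is_null());
        assert_eq!(version_info["server"]["release"], "9");
    }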
@@ -127,7 +127,7 @@ async fn garbage_collection_status(param: Value) -> Result<Value, Error> {

     let mut result = client.get(&path, None).await?;
     let mut data = result["data"].take();
-    let schema = api2::admin::datastore::API_RETURN_SCHEMA_GARBAGE_COLLECTION_STATUS;
+    let schema = &api2::admin::datastore::API_RETURN_SCHEMA_GARBAGE_COLLECTION_STATUS;

     let options = default_table_format_options();

@@ -193,7 +193,7 @@ async fn task_list(param: Value) -> Result<Value, Error> {
     let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;

     let mut data = result["data"].take();
-    let schema = api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;
+    let schema = &api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;

     let options = default_table_format_options()
         .column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch))
@@ -1,5 +1,5 @@
 use std::sync::Arc;
-use std::path::Path;
+use std::path::{Path, PathBuf};

 use anyhow::{bail, format_err, Error};
 use futures::*;
@@ -53,6 +53,11 @@ async fn run() -> Result<(), Error> {
     config.add_alias("css", "/usr/share/javascript/proxmox-backup/css");
     config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");

+    let mut indexpath = PathBuf::from(buildcfg::JS_DIR);
+    indexpath.push("index.hbs");
+    config.register_template("index", &indexpath)?;
+    config.register_template("console", "/usr/share/pve-xtermjs/index.html.hbs")?;
+
     let rest_server = RestServer::new(config);

     //openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
@@ -4,14 +4,24 @@ use std::sync::Arc;
 use anyhow::{Error};
 use serde_json::Value;
 use chrono::{TimeZone, Utc};
+use serde::Serialize;

 use proxmox::api::{ApiMethod, RpcEnvironment};
-use proxmox::api::api;
+use proxmox::api::{
+    api,
+    cli::{
+        OUTPUT_FORMAT,
+        ColumnConfig,
+        get_output_format,
+        format_and_print_result_full,
+        default_table_format_options,
+    },
+};

 use proxmox_backup::backup::{
     load_and_decrypt_key,
     CryptConfig,
+    KeyDerivationConfig,
 };

 use proxmox_backup::client::*;
@@ -23,6 +33,75 @@ use crate::{
     connect,
 };

+#[api()]
+#[derive(Copy, Clone, Serialize)]
+/// Speed test result
+struct Speed {
+    /// The measured speed in Bytes/second
+    #[serde(skip_serializing_if="Option::is_none")]
+    speed: Option<f64>,
+    /// Top result we want to compare with
+    top: f64,
+}
+
+#[api(
+    properties: {
+        "tls": {
+            type: Speed,
+        },
+        "sha256": {
+            type: Speed,
+        },
+        "compress": {
+            type: Speed,
+        },
+        "decompress": {
+            type: Speed,
+        },
+        "aes256_gcm": {
+            type: Speed,
+        },
+    },
+)]
+#[derive(Copy, Clone, Serialize)]
+/// Benchmark Results
+struct BenchmarkResult {
+    /// TLS upload speed
+    tls: Speed,
+    /// SHA256 checksum computation speed
+    sha256: Speed,
+    /// ZStd level 1 compression speed
+    compress: Speed,
+    /// ZStd level 1 decompression speed
+    decompress: Speed,
+    /// AES256 GCM encryption speed
+    aes256_gcm: Speed,
+}
+
+static BENCHMARK_RESULT_2020_TOP: BenchmarkResult = BenchmarkResult {
+    tls: Speed {
+        speed: None,
+        top: 1_000_000.0 * 590.0, // TLS to localhost, AMD Ryzen 7 2700X
+    },
+    sha256: Speed {
+        speed: None,
+        top: 1_000_000.0 * 2120.0, // AMD Ryzen 7 2700X
+    },
+    compress: Speed {
+        speed: None,
+        top: 1_000_000.0 * 2158.0, // AMD Ryzen 7 2700X
+    },
+    decompress: Speed {
+        speed: None,
+        top: 1_000_000.0 * 8062.0, // AMD Ryzen 7 2700X
+    },
+    aes256_gcm: Speed {
+        speed: None,
+        top: 1_000_000.0 * 3803.0, // AMD Ryzen 7 2700X
+    },
+};
+
 #[api(
     input: {
         properties: {
|
|||||||
schema: REPO_URL_SCHEMA,
|
schema: REPO_URL_SCHEMA,
|
||||||
optional: true,
|
optional: true,
|
||||||
},
|
},
|
||||||
|
verbose: {
|
||||||
|
description: "Verbose output.",
|
||||||
|
type: bool,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
keyfile: {
|
keyfile: {
|
||||||
schema: KEYFILE_SCHEMA,
|
schema: KEYFILE_SCHEMA,
|
||||||
optional: true,
|
optional: true,
|
||||||
},
|
},
|
||||||
|
"output-format": {
|
||||||
|
schema: OUTPUT_FORMAT,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
)]
|
)]
|
||||||
@ -44,10 +132,14 @@ pub async fn benchmark(
|
|||||||
_rpcenv: &mut dyn RpcEnvironment,
|
_rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
let repo = extract_repository_from_value(¶m)?;
|
let repo = extract_repository_from_value(¶m).ok();
|
||||||
|
|
||||||
let keyfile = param["keyfile"].as_str().map(PathBuf::from);
|
let keyfile = param["keyfile"].as_str().map(PathBuf::from);
|
||||||
|
|
||||||
|
let verbose = param["verbose"].as_bool().unwrap_or(false);
|
||||||
|
|
||||||
|
let output_format = get_output_format(¶m);
|
||||||
|
|
||||||
let crypt_config = match keyfile {
|
let crypt_config = match keyfile {
|
||||||
None => None,
|
None => None,
|
||||||
Some(path) => {
|
Some(path) => {
|
||||||
@ -57,25 +149,178 @@ pub async fn benchmark(
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
let mut benchmark_result = BENCHMARK_RESULT_2020_TOP;
|
||||||
|
|
||||||
|
// do repo tests first, because this may prompt for a password
|
||||||
|
if let Some(repo) = repo {
|
||||||
|
test_upload_speed(&mut benchmark_result, repo, crypt_config.clone(), verbose).await?;
|
||||||
|
}
|
||||||
|
|
||||||
|
test_crypt_speed(&mut benchmark_result, verbose)?;
|
||||||
|
|
||||||
|
render_result(&output_format, &benchmark_result)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
// print comparison table
|
||||||
|
fn render_result(
|
||||||
|
output_format: &str,
|
||||||
|
benchmark_result: &BenchmarkResult,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let mut data = serde_json::to_value(benchmark_result)?;
|
||||||
|
let schema = &BenchmarkResult::API_SCHEMA;
|
||||||
|
|
||||||
|
let render_speed = |value: &Value, _record: &Value| -> Result<String, Error> {
|
||||||
|
match value["speed"].as_f64() {
|
||||||
|
None => Ok(String::from("not tested")),
|
||||||
|
Some(speed) => {
|
||||||
|
let top = value["top"].as_f64().unwrap();
|
||||||
|
Ok(format!("{:.2} MB/s ({:.0}%)", speed/1_000_000.0, (speed*100.0)/top))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let options = default_table_format_options()
|
||||||
|
.column(ColumnConfig::new("tls")
|
||||||
|
.header("TLS (maximal backup upload speed)")
|
||||||
|
.right_align(false).renderer(render_speed))
|
||||||
|
.column(ColumnConfig::new("sha256")
|
||||||
|
.header("SHA256 checksum comptation speed")
|
||||||
|
.right_align(false).renderer(render_speed))
|
||||||
|
.column(ColumnConfig::new("compress")
|
||||||
|
.header("ZStd level 1 compression speed")
|
||||||
|
.right_align(false).renderer(render_speed))
|
||||||
|
.column(ColumnConfig::new("decompress")
|
||||||
|
.header("ZStd level 1 decompression speed")
|
||||||
|
.right_align(false).renderer(render_speed))
|
||||||
|
.column(ColumnConfig::new("aes256_gcm")
|
||||||
|
.header("AES256 GCM encryption speed")
|
||||||
|
.right_align(false).renderer(render_speed));
|
||||||
|
|
||||||
|
|
||||||
|
format_and_print_result_full(&mut data, schema, output_format, &options);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn test_upload_speed(
|
||||||
|
benchmark_result: &mut BenchmarkResult,
|
||||||
|
repo: BackupRepository,
|
||||||
|
crypt_config: Option<Arc<CryptConfig>>,
|
||||||
|
verbose: bool,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
let backup_time = Utc.timestamp(Utc::now().timestamp(), 0);
|
let backup_time = Utc.timestamp(Utc::now().timestamp(), 0);
|
||||||
|
|
||||||
let client = connect(repo.host(), repo.user())?;
|
let client = connect(repo.host(), repo.user())?;
|
||||||
record_repository(&repo);
|
record_repository(&repo);
|
||||||
|
|
||||||
|
if verbose { eprintln!("Connecting to backup server"); }
|
||||||
let client = BackupWriter::start(
|
let client = BackupWriter::start(
|
||||||
client,
|
client,
|
||||||
crypt_config.clone(),
|
crypt_config.clone(),
|
||||||
repo.store(),
|
repo.store(),
|
||||||
"host",
|
"host",
|
||||||
"benshmark",
|
"benchmark",
|
||||||
backup_time,
|
backup_time,
|
||||||
false,
|
false,
|
||||||
).await?;
|
).await?;
|
||||||
|
|
||||||
println!("Start upload speed test");
|
if verbose { eprintln!("Start TLS speed test"); }
|
||||||
let speed = client.upload_speedtest().await?;
|
let speed = client.upload_speedtest(verbose).await?;
|
||||||
|
|
||||||
println!("Upload speed: {} MiB/s", speed);
|
eprintln!("TLS speed: {:.2} MB/s", speed/1_000_000.0);
|
||||||
|
|
||||||
|
benchmark_result.tls.speed = Some(speed);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
// test hash/crypt/compress speed
|
||||||
|
fn test_crypt_speed(
|
||||||
|
benchmark_result: &mut BenchmarkResult,
|
||||||
|
_verbose: bool,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let pw = b"test";
|
||||||
|
|
||||||
|
let kdf = KeyDerivationConfig::Scrypt {
|
||||||
|
n: 65536,
|
||||||
|
r: 8,
|
||||||
|
p: 1,
|
||||||
|
salt: Vec::new(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let testkey = kdf.derive_key(pw)?;
|
||||||
|
|
||||||
|
let crypt_config = CryptConfig::new(testkey)?;
|
||||||
|
|
||||||
|
let random_data = proxmox::sys::linux::random_data(1024*1024)?;
|
||||||
|
|
||||||
|
let start_time = std::time::Instant::now();
|
||||||
|
|
||||||
|
let mut bytes = 0;
|
||||||
|
loop {
|
||||||
|
openssl::sha::sha256(&random_data);
|
||||||
|
bytes += random_data.len();
|
||||||
|
if start_time.elapsed().as_micros() > 1_000_000 { break; }
|
||||||
|
}
|
||||||
|
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
|
||||||
|
benchmark_result.sha256.speed = Some(speed);
|
||||||
|
|
||||||
|
eprintln!("SHA256 speed: {:.2} MB/s", speed/1_000_000_.0);
|
||||||
|
|
||||||
|
|
||||||
|
let start_time = std::time::Instant::now();
|
||||||
|
|
||||||
|
let mut bytes = 0;
|
||||||
|
loop {
|
||||||
|
let mut reader = &random_data[..];
|
||||||
|
zstd::stream::encode_all(&mut reader, 1)?;
|
||||||
|
bytes += random_data.len();
|
||||||
|
if start_time.elapsed().as_micros() > 3_000_000 { break; }
|
||||||
|
}
|
||||||
|
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
|
||||||
|
benchmark_result.compress.speed = Some(speed);
|
||||||
|
|
||||||
|
eprintln!("Compression speed: {:.2} MB/s", speed/1_000_000_.0);
|
||||||
|
|
||||||
|
|
||||||
|
let start_time = std::time::Instant::now();
|
||||||
|
|
||||||
|
let compressed_data = {
|
||||||
|
let mut reader = &random_data[..];
|
||||||
|
zstd::stream::encode_all(&mut reader, 1)?
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut bytes = 0;
|
||||||
|
loop {
|
||||||
|
let mut reader = &compressed_data[..];
|
||||||
|
let data = zstd::stream::decode_all(&mut reader)?;
|
||||||
|
bytes += data.len();
|
||||||
|
if start_time.elapsed().as_micros() > 1_000_000 { break; }
|
||||||
|
}
|
||||||
|
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
|
||||||
|
benchmark_result.decompress.speed = Some(speed);
|
||||||
|
|
||||||
|
eprintln!("Decompress speed: {:.2} MB/s", speed/1_000_000_.0);
|
||||||
|
|
||||||
|
|
||||||
|
let start_time = std::time::Instant::now();
|
||||||
|
|
||||||
|
let mut bytes = 0;
|
||||||
|
loop {
|
||||||
|
let mut out = Vec::new();
|
||||||
|
crypt_config.encrypt_to(&random_data, &mut out)?;
|
||||||
|
bytes += random_data.len();
|
||||||
|
if start_time.elapsed().as_micros() > 1_000_000 { break; }
|
||||||
|
}
|
||||||
|
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
|
||||||
|
benchmark_result.aes256_gcm.speed = Some(speed);
|
||||||
|
|
||||||
|
eprintln!("AES256/GCM speed: {:.2} MB/s", speed/1_000_000_.0);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
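The three crypt/compress tests above all share one timing pattern: run the operation on a fixed buffer until roughly a second has elapsed, then divide bytes processed by wall-clock time. Factored out, it looks roughly like this (illustrative sketch, not part of the patch):

    use std::time::Instant;

    // run `op` on `buf` repeatedly for ~1s, return bytes/second
    fn measure_throughput<F: FnMut(&[u8])>(buf: &[u8], mut op: F) -> f64 {
        let start = Instant::now();
        let mut bytes = 0usize;
        loop {
            op(buf);
            bytes += buf.len();
            if start.elapsed().as_micros() > 1_000_000 { break; }
        }
        bytes as f64 / start.elapsed().as_secs_f64()
    }

    fn main() {
        let data = vec![0u8; 1024 * 1024];
        // stand-in workload; any byte-consuming operation works here
        let speed = measure_throughput(&data, |b| { let _ = b.iter().fold(0u8, |a, &x| a ^ x); });
        println!("{:.2} MB/s", speed / 1_000_000.0);
    }

Timing the whole elapsed interval rather than a fixed iteration count keeps each test's runtime bounded regardless of how fast the machine is.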
@@ -1,6 +1,5 @@
 use std::os::unix::fs::OpenOptionsExt;
 use std::io::{Seek, SeekFrom};
-use std::path::PathBuf;
 use std::sync::Arc;

 use anyhow::{bail, format_err, Error};
@@ -14,8 +13,12 @@ use proxmox_backup::client::*;

 use crate::{
     REPO_URL_SCHEMA,
+    KEYFD_SCHEMA,
     extract_repository_from_value,
     record_repository,
+    keyfile_parameters,
+    key::get_encryption_key_password,
+    decrypt_key,
     api_datastore_latest_snapshot,
     complete_repository,
     complete_backup_snapshot,
@@ -34,10 +37,6 @@ use crate::{
     Shell,
 };

-use proxmox_backup::backup::load_and_decrypt_key;
-
-use crate::key::get_encryption_key_password;
-
 #[api(
     input: {
         properties: {
@@ -49,6 +48,15 @@ use crate::key::get_encryption_key_password;
                 type: String,
                 description: "Snapshot path.",
             },
+            "keyfile": {
+                optional: true,
+                type: String,
+                description: "Path to encryption key.",
+            },
+            "keyfd": {
+                schema: KEYFD_SCHEMA,
+                optional: true,
+            },
         }
     }
 )]
@@ -60,13 +68,14 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
     let path = tools::required_string_param(&param, "snapshot")?;
     let snapshot: BackupDir = path.parse()?;

-    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
+    let (keydata, _) = keyfile_parameters(&param)?;

-    let crypt_config = match keyfile {
+    let crypt_config = match keydata {
         None => None,
-        Some(path) => {
-            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
-            Some(Arc::new(CryptConfig::new(key)?))
+        Some(key) => {
+            let (key, _created) = decrypt_key(&key, &get_encryption_key_password)?;
+            let crypt_config = CryptConfig::new(key)?;
+            Some(Arc::new(crypt_config))
         }
     };

@@ -132,7 +141,11 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
                 type: String,
                 description: "Path to encryption key.",
             },
+            "keyfd": {
+                schema: KEYFD_SCHEMA,
+                optional: true,
+            },
         },
     },
 )]
 /// Shell to interactively inspect and restore snapshots.
@@ -150,12 +163,14 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
         (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
     };

-    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
-    let crypt_config = match keyfile {
+    let (keydata, _) = keyfile_parameters(&param)?;
+
+    let crypt_config = match keydata {
         None => None,
-        Some(path) => {
-            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
-            Some(Arc::new(CryptConfig::new(key)?))
+        Some(key) => {
+            let (key, _created) = decrypt_key(&key, &get_encryption_key_password)?;
+            let crypt_config = CryptConfig::new(key)?;
+            Some(Arc::new(crypt_config))
         }
     };
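keyfile_parameters and KEYFD_SCHEMA are crate-internal; the gist of the keyfile/keyfd split is that key material may come either from a path or from an already-open file descriptor inherited from the caller, which avoids ever placing the key on disk. A rough sketch of that selection logic, using a hypothetical helper with the same two parameters:

    use std::fs::File;
    use std::io::Read;
    use std::os::unix::io::{FromRawFd, RawFd};

    use anyhow::{bail, Error};
    use serde_json::Value;

    // hypothetical stand-in for what keyfile_parameters() has to do:
    // at most one of "keyfile" (a path) or "keyfd" (an inherited fd)
    // may be given; either way the raw key bytes are returned
    fn key_bytes(param: &Value) -> Result<Option<Vec<u8>>, Error> {
        match (param["keyfile"].as_str(), param["keyfd"].as_u64()) {
            (Some(_), Some(_)) => bail!("both keyfile and keyfd given"),
            (Some(path), None) => Ok(Some(std::fs::read(path)?)),
            (None, Some(fd)) => {
                // safety: we assume the caller really handed us this fd
                let mut file = unsafe { File::from_raw_fd(fd as RawFd) };
                let mut data = Vec::new();
                file.read_to_end(&mut data)?;
                Ok(Some(data))
            }
            (None, None) => Ok(None),
        }
    }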
@@ -99,7 +99,11 @@ impl Default for Kdf {
 fn create(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
     let path = match path {
         Some(path) => PathBuf::from(path),
-        None => place_default_encryption_key()?,
+        None => {
+            let path = place_default_encryption_key()?;
+            println!("creating default key at: {:?}", path);
+            path
+        }
     };

     let kdf = kdf.unwrap_or_default();
@@ -156,8 +160,14 @@ fn create(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
 fn change_passphrase(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
     let path = match path {
         Some(path) => PathBuf::from(path),
-        None => find_default_encryption_key()?
-            .ok_or_else(|| format_err!("no encryption file provided and no default file found"))?,
+        None => {
+            let path = find_default_encryption_key()?
+                .ok_or_else(|| {
+                    format_err!("no encryption file provided and no default file found")
+                })?;
+            println!("updating default key at: {:?}", path);
+            path
+        }
     };

     let kdf = kdf.unwrap_or_default();
@@ -1,32 +1,18 @@
-use std::path::PathBuf;
-
 use anyhow::{bail, Error};

 use proxmox::api::{api, cli::*};

 use proxmox_backup::config;
-use proxmox_backup::configdir;
 use proxmox_backup::auth_helpers::*;
+use proxmox_backup::tools::cert::CertInfo;

-fn x509name_to_string(name: &openssl::x509::X509NameRef) -> Result<String, Error> {
-    let mut parts = Vec::new();
-    for entry in name.entries() {
-        parts.push(format!("{} = {}", entry.object().nid().short_name()?, entry.data().as_utf8()?));
-    }
-    Ok(parts.join(", "))
-}
-
 #[api]
 /// Display node certificate information.
 fn cert_info() -> Result<(), Error> {

-    let cert_path = PathBuf::from(configdir!("/proxy.pem"));
-
-    let cert_pem = proxmox::tools::fs::file_get_contents(&cert_path)?;
-
-    let cert = openssl::x509::X509::from_pem(&cert_pem)?;
-
-    println!("Subject: {}", x509name_to_string(cert.subject_name())?);
+    let cert = CertInfo::new()?;
+
+    println!("Subject: {}", cert.subject_name()?);

     if let Some(san) = cert.subject_alt_names() {
         for name in san.iter() {
@@ -42,17 +28,12 @@ fn cert_info() -> Result<(), Error> {
         }
     }

-    println!("Issuer: {}", x509name_to_string(cert.issuer_name())?);
+    println!("Issuer: {}", cert.issuer_name()?);
     println!("Validity:");
     println!("    Not Before: {}", cert.not_before());
     println!("    Not After : {}", cert.not_after());

-    let fp = cert.digest(openssl::hash::MessageDigest::sha256())?;
-    let fp_string = proxmox::tools::digest_to_hex(&fp);
-    let fp_string = fp_string.as_bytes().chunks(2).map(|v| std::str::from_utf8(v).unwrap())
-        .collect::<Vec<&str>>().join(":");
-
-    println!("Fingerprint (sha256): {}", fp_string);
+    println!("Fingerprint (sha256): {}", cert.fingerprint()?);

     let pubkey = cert.public_key()?;
     println!("Public key type: {}", openssl::nid::Nid::from_raw(pubkey.id().as_raw()).long_name()?);
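The deleted inline code gives a good idea of what CertInfo::fingerprint() presumably wraps: a SHA-256 digest of the certificate rendered as colon-separated hex pairs. As a standalone sketch (the exact case and format of the helper's output is an assumption):

    // format a raw digest as the usual colon-separated fingerprint,
    // mirroring the deleted chunks(2).join(":") logic
    fn fingerprint_string(digest: &[u8]) -> String {
        digest
            .iter()
            .map(|b| format!("{:02x}", b))
            .collect::<Vec<_>>()
            .join(":")
    }

    fn main() {
        assert_eq!(fingerprint_string(&[0xde, 0xad, 0xbe, 0xef]), "de:ad:be:ef");
    }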
@@ -16,6 +16,7 @@ use proxmox::tools::digest_to_hex;

 use super::merge_known_chunks::{MergedChunkInfo, MergeKnownChunks};
 use crate::backup::*;
+use crate::tools::format::HumanByte;

 use super::{HttpClient, H2Client};

@@ -242,7 +243,7 @@ impl BackupWriter {

         let wid = self.h2.post(&index_path, Some(param)).await?.as_u64().unwrap();

-        let (chunk_count, size, duration, speed, csum) =
+        let (chunk_count, chunk_reused, size, size_reused, duration, csum) =
             Self::upload_chunk_info_stream(
                 self.h2.clone(),
                 wid,
@@ -255,10 +256,30 @@ impl BackupWriter {
             )
             .await?;

-        println!("{}: Uploaded {} bytes as {} chunks in {} seconds ({} MB/s).", archive_name, size, chunk_count, duration.as_secs(), speed);
-        if chunk_count > 0 {
-            println!("{}: Average chunk size was {} bytes.", archive_name, size/chunk_count);
-            println!("{}: Time per request: {} microseconds.", archive_name, (duration.as_micros())/(chunk_count as u128));
+        let uploaded = size - size_reused;
+        let vsize_h: HumanByte = size.into();
+        let archive = if self.verbose {
+            archive_name.to_string()
+        } else {
+            crate::tools::format::strip_server_file_expenstion(archive_name.clone())
+        };
+        if archive_name != CATALOG_NAME {
+            let speed: HumanByte = ((uploaded * 1_000_000) / (duration.as_micros() as usize)).into();
+            let uploaded: HumanByte = uploaded.into();
+            println!("{}: had to upload {} of {} in {:.2}s, average speed {}/s.", archive, uploaded, vsize_h, duration.as_secs_f64(), speed);
+        } else {
+            println!("Uploaded backup catalog ({})", vsize_h);
+        }
+
+        if size_reused > 0 && size > 1024*1024 {
+            let reused_percent = size_reused as f64 * 100. / size as f64;
+            let reused: HumanByte = size_reused.into();
+            println!("{}: backup was done incrementally, reused {} ({:.1}%)", archive, reused, reused_percent);
+        }
+        if self.verbose && chunk_count > 0 {
+            println!("{}: Reused {} from {} chunks.", archive, chunk_reused, chunk_count);
+            println!("{}: Average chunk size was {}.", archive, HumanByte::from(size/chunk_count));
+            println!("{}: Average time per request: {} microseconds.", archive, (duration.as_micros())/(chunk_count as u128));
         }

         let param = json!({
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn response_queue() -> (
|
fn response_queue(verbose: bool) -> (
|
||||||
mpsc::Sender<h2::client::ResponseFuture>,
|
mpsc::Sender<h2::client::ResponseFuture>,
|
||||||
oneshot::Receiver<Result<(), Error>>
|
oneshot::Receiver<Result<(), Error>>
|
||||||
) {
|
) {
|
||||||
@ -298,11 +319,11 @@ impl BackupWriter {
|
|||||||
tokio::spawn(
|
tokio::spawn(
|
||||||
verify_queue_rx
|
verify_queue_rx
|
||||||
.map(Ok::<_, Error>)
|
.map(Ok::<_, Error>)
|
||||||
.try_for_each(|response: h2::client::ResponseFuture| {
|
.try_for_each(move |response: h2::client::ResponseFuture| {
|
||||||
response
|
response
|
||||||
.map_err(Error::from)
|
.map_err(Error::from)
|
||||||
.and_then(H2Client::h2api_response)
|
.and_then(H2Client::h2api_response)
|
||||||
.map_ok(|result| println!("RESPONSE: {:?}", result))
|
.map_ok(move |result| if verbose { println!("RESPONSE: {:?}", result) })
|
||||||
.map_err(|err| format_err!("pipelined request failed: {}", err))
|
.map_err(|err| format_err!("pipelined request failed: {}", err))
|
||||||
})
|
})
|
||||||
.map(|result| {
|
.map(|result| {
|
||||||
@ -476,13 +497,17 @@ impl BackupWriter {
|
|||||||
crypt_config: Option<Arc<CryptConfig>>,
|
crypt_config: Option<Arc<CryptConfig>>,
|
||||||
compress: bool,
|
compress: bool,
|
||||||
verbose: bool,
|
verbose: bool,
|
||||||
) -> impl Future<Output = Result<(usize, usize, std::time::Duration, usize, [u8; 32]), Error>> {
|
) -> impl Future<Output = Result<(usize, usize, usize, usize, std::time::Duration, [u8; 32]), Error>> {
|
||||||
|
|
||||||
let repeat = Arc::new(AtomicUsize::new(0));
|
let total_chunks = Arc::new(AtomicUsize::new(0));
|
||||||
let repeat2 = repeat.clone();
|
let total_chunks2 = total_chunks.clone();
|
||||||
|
let known_chunk_count = Arc::new(AtomicUsize::new(0));
|
||||||
|
let known_chunk_count2 = known_chunk_count.clone();
|
||||||
|
|
||||||
let stream_len = Arc::new(AtomicUsize::new(0));
|
let stream_len = Arc::new(AtomicUsize::new(0));
|
||||||
let stream_len2 = stream_len.clone();
|
let stream_len2 = stream_len.clone();
|
||||||
|
let reused_len = Arc::new(AtomicUsize::new(0));
|
||||||
|
let reused_len2 = reused_len.clone();
|
||||||
|
|
||||||
let append_chunk_path = format!("{}_index", prefix);
|
let append_chunk_path = format!("{}_index", prefix);
|
||||||
let upload_chunk_path = format!("{}_chunk", prefix);
|
let upload_chunk_path = format!("{}_chunk", prefix);
|
||||||
@ -501,7 +526,7 @@ impl BackupWriter {
|
|||||||
|
|
||||||
let chunk_len = data.len();
|
let chunk_len = data.len();
|
||||||
|
|
||||||
repeat.fetch_add(1, Ordering::SeqCst);
|
total_chunks.fetch_add(1, Ordering::SeqCst);
|
||||||
let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;
|
let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;
|
||||||
|
|
||||||
let mut chunk_builder = DataChunkBuilder::new(data.as_ref())
|
let mut chunk_builder = DataChunkBuilder::new(data.as_ref())
|
||||||
@ -524,6 +549,8 @@ impl BackupWriter {
|
|||||||
|
|
||||||
let chunk_is_known = known_chunks.contains(digest);
|
let chunk_is_known = known_chunks.contains(digest);
|
||||||
if chunk_is_known {
|
if chunk_is_known {
|
||||||
|
known_chunk_count.fetch_add(1, Ordering::SeqCst);
|
||||||
|
reused_len.fetch_add(chunk_len, Ordering::SeqCst);
|
||||||
future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
|
future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
|
||||||
} else {
|
} else {
|
||||||
known_chunks.insert(*digest);
|
known_chunks.insert(*digest);
|
||||||
@ -546,7 +573,7 @@ impl BackupWriter {
|
|||||||
let digest = chunk_info.digest;
|
let digest = chunk_info.digest;
|
||||||
let digest_str = digest_to_hex(&digest);
|
let digest_str = digest_to_hex(&digest);
|
||||||
|
|
||||||
if verbose {
|
if false && verbose { // TO verbose, needs finer verbosity setting granularity
|
||||||
println!("upload new chunk {} ({} bytes, offset {})", digest_str,
|
println!("upload new chunk {} ({} bytes, offset {})", digest_str,
|
||||||
chunk_info.chunk_len, offset);
|
chunk_info.chunk_len, offset);
|
||||||
}
|
}
|
||||||
@ -589,18 +616,21 @@ impl BackupWriter {
|
|||||||
upload_result.await?.and(result)
|
upload_result.await?.and(result)
|
||||||
}.boxed())
|
}.boxed())
|
||||||
.and_then(move |_| {
|
.and_then(move |_| {
|
||||||
let repeat = repeat2.load(Ordering::SeqCst);
|
let duration = start_time.elapsed();
|
||||||
|
let total_chunks = total_chunks2.load(Ordering::SeqCst);
|
||||||
|
let known_chunk_count = known_chunk_count2.load(Ordering::SeqCst);
|
||||||
let stream_len = stream_len2.load(Ordering::SeqCst);
|
let stream_len = stream_len2.load(Ordering::SeqCst);
|
||||||
let speed = ((stream_len*1_000_000)/(1024*1024))/(start_time.elapsed().as_micros() as usize);
|
let reused_len = reused_len2.load(Ordering::SeqCst);
|
||||||
|
|
||||||
let mut guard = index_csum_2.lock().unwrap();
|
let mut guard = index_csum_2.lock().unwrap();
|
||||||
let csum = guard.take().unwrap().finish();
|
let csum = guard.take().unwrap().finish();
|
||||||
|
|
||||||
futures::future::ok((repeat, stream_len, start_time.elapsed(), speed, csum))
|
futures::future::ok((total_chunks, known_chunk_count, stream_len, reused_len, duration, csum))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn upload_speedtest(&self) -> Result<usize, Error> {
|
/// Upload speed test - prints result ot stderr
|
||||||
|
pub async fn upload_speedtest(&self, verbose: bool) -> Result<f64, Error> {
|
||||||
|
|
||||||
let mut data = vec![];
|
let mut data = vec![];
|
||||||
// generate pseudo random byte sequence
|
// generate pseudo random byte sequence
|
||||||
@ -615,7 +645,7 @@ impl BackupWriter {
|
|||||||
|
|
||||||
let mut repeat = 0;
|
let mut repeat = 0;
|
||||||
|
|
||||||
let (upload_queue, upload_result) = Self::response_queue();
|
let (upload_queue, upload_result) = Self::response_queue(verbose);
|
||||||
|
|
||||||
let start_time = std::time::Instant::now();
|
let start_time = std::time::Instant::now();
|
||||||
|
|
||||||
@ -627,7 +657,7 @@ impl BackupWriter {
|
|||||||
|
|
||||||
let mut upload_queue = upload_queue.clone();
|
let mut upload_queue = upload_queue.clone();
|
||||||
|
|
||||||
println!("send test data ({} bytes)", data.len());
|
if verbose { eprintln!("send test data ({} bytes)", data.len()); }
|
||||||
let request = H2Client::request_builder("localhost", "POST", "speedtest", None, None).unwrap();
|
let request = H2Client::request_builder("localhost", "POST", "speedtest", None, None).unwrap();
|
||||||
let request_future = self.h2.send_request(request, Some(bytes::Bytes::from(data.clone()))).await?;
|
let request_future = self.h2.send_request(request, Some(bytes::Bytes::from(data.clone()))).await?;
|
||||||
|
|
||||||
@ -638,9 +668,9 @@ impl BackupWriter {
|
|||||||
|
|
||||||
let _ = upload_result.await?;
|
let _ = upload_result.await?;
|
||||||
|
|
||||||
println!("Uploaded {} chunks in {} seconds.", repeat, start_time.elapsed().as_secs());
|
eprintln!("Uploaded {} chunks in {} seconds.", repeat, start_time.elapsed().as_secs());
|
||||||
let speed = ((item_len*1_000_000*(repeat as usize))/(1024*1024))/(start_time.elapsed().as_micros() as usize);
|
let speed = ((item_len*(repeat as usize)) as f64)/start_time.elapsed().as_secs_f64();
|
||||||
println!("Time per request: {} microseconds.", (start_time.elapsed().as_micros())/(repeat as u128));
|
eprintln!("Time per request: {} microseconds.", (start_time.elapsed().as_micros())/(repeat as u128));
|
||||||
|
|
||||||
Ok(speed)
|
Ok(speed)
|
||||||
}
|
}
|
||||||
|
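The reused/total accounting above works because each counter is an Arc<AtomicUsize> cloned into the stream's per-chunk closure, so the final .and_then() can read what the closure accumulated. A condensed, self-contained sketch of that pattern:

    use std::sync::Arc;
    use std::sync::atomic::{AtomicUsize, Ordering};

    fn main() {
        let total = Arc::new(AtomicUsize::new(0));
        let total2 = total.clone(); // handle kept for the consumer side

        // stand-in for the upload stream's per-chunk closure
        let count_chunk = move |len: usize| {
            total.fetch_add(len, Ordering::SeqCst);
        };

        for len in [4096, 8192, 1024] {
            count_chunk(len);
        }

        // stand-in for the final .and_then() that reports the totals
        println!("stream length: {} bytes", total2.load(Ordering::SeqCst));
    }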
@@ -16,6 +16,7 @@ use percent_encoding::percent_encode;
 use xdg::BaseDirectories;

 use proxmox::{
+    api::error::HttpError,
     sys::linux::tty,
     tools::{
         fs::{file_get_json, replace_file, CreateOptions},
@@ -606,7 +607,7 @@ impl HttpClient {
             Ok(value)
         }
     } else {
-        bail!("HTTP Error {}: {}", status, text);
+        Err(Error::from(HttpError::new(status, text)))
     }
 }

@@ -819,7 +820,7 @@ impl H2Client {
             bail!("got result without data property");
         }
     } else {
-        bail!("HTTP Error {}: {}", status, text);
+        Err(Error::from(HttpError::new(status, text)))
     }
 }

@@ -6,8 +6,8 @@ use std::convert::TryFrom;
 use std::sync::Arc;
 use std::collections::HashMap;
 use std::io::{Seek, SeekFrom};
-use chrono::{Utc, TimeZone};

+use proxmox::api::error::{StatusCode, HttpError};
 use crate::server::{WorkerTask};
 use crate::backup::*;
 use crate::api2::types::*;
@@ -152,7 +152,28 @@ async fn pull_snapshot(
     let mut tmp_manifest_name = manifest_name.clone();
     tmp_manifest_name.set_extension("tmp");

-    let mut tmp_manifest_file = download_manifest(&reader, &tmp_manifest_name).await?;
+    let download_res = download_manifest(&reader, &tmp_manifest_name).await;
+    let mut tmp_manifest_file = match download_res {
+        Ok(manifest_file) => manifest_file,
+        Err(err) => {
+            match err.downcast_ref::<HttpError>() {
+                Some(HttpError { code, message }) => {
+                    match code {
+                        &StatusCode::NOT_FOUND => {
+                            worker.log(format!("skipping snapshot {} - vanished since start of sync", snapshot));
+                            return Ok(());
+                        },
+                        _ => {
+                            bail!("HTTP error {} - {}", code, message);
+                        },
+                    }
+                },
+                None => {
+                    return Err(err);
+                },
+            };
+        },
+    };
     let tmp_manifest_blob = DataBlob::load(&mut tmp_manifest_file)?;
     tmp_manifest_blob.verify_crc()?;

@@ -302,7 +323,16 @@ pub async fn pull_group(
     let mut remote_snapshots = std::collections::HashSet::new();

     for item in list {
-        let backup_time = Utc.timestamp(item.backup_time, 0);
+        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
+
+        // in-progress backups can't be synced
+        if let None = item.size {
+            worker.log(format!("skipping snapshot {} - in-progress backup", snapshot));
+            continue;
+        }
+
+        let backup_time = snapshot.backup_time();
+
         remote_snapshots.insert(backup_time);

         if let Some(last_sync_time) = last_sync {
@@ -319,14 +349,12 @@ pub async fn pull_group(
             new_client,
             None,
             src_repo.store(),
-            &item.backup_type,
-            &item.backup_id,
+            snapshot.group().backup_type(),
+            snapshot.group().backup_id(),
             backup_time,
             true,
         ).await?;

-        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
-
         pull_snapshot_from(worker, reader, tgt_store.clone(), &snapshot).await?;
     }

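Because the client now returns a typed HttpError wrapped in anyhow::Error, callers like pull_snapshot can recover the status code with downcast_ref instead of parsing a message string. A minimal sketch of the mechanism, using a hypothetical stand-in error type rather than proxmox's own:

    use anyhow::{anyhow, Error};

    #[derive(Debug)]
    struct HttpError { code: u16, message: String }

    impl std::fmt::Display for HttpError {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(f, "HTTP Error {}: {}", self.code, self.message)
        }
    }
    impl std::error::Error for HttpError {}

    fn handle(err: Error) -> &'static str {
        // recover the concrete type from the anyhow wrapper
        match err.downcast_ref::<HttpError>() {
            Some(HttpError { code: 404, .. }) => "not found - skip",
            Some(_) => "other HTTP error - fail",
            None => "not an HTTP error - propagate",
        }
    }

    fn main() {
        let err = anyhow!(HttpError { code: 404, message: "no such manifest".into() });
        assert_eq!(handle(err), "not found - skip");
    }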
@@ -39,6 +39,8 @@ constnamemap! {
     PRIV_REMOTE_MODIFY("Remote.Modify") = 1 << 10;
     PRIV_REMOTE_READ("Remote.Read") = 1 << 11;
     PRIV_REMOTE_PRUNE("Remote.Prune") = 1 << 12;
+
+    PRIV_SYS_CONSOLE("Sys.Console") = 1 << 13;
 }
 }

@@ -89,7 +89,9 @@ impl CachedUserInfo {
         (user_privs & required_privs) == required_privs
     };
     if !allowed {
-        bail!("no permissions");
+        // printing the path doesn't leak any information as long as we
+        // always check privilege before resource existence
+        bail!("no permissions on '/{}'", path.join("/"));
     }
     Ok(())
 }
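Privileges here are single bits in an integer, so the check above is plain mask arithmetic: a user is allowed if and only if every required bit is present in their privilege mask. Reduced to a sketch:

    const PRIV_REMOTE_READ: u64 = 1 << 11;
    const PRIV_REMOTE_PRUNE: u64 = 1 << 12;
    const PRIV_SYS_CONSOLE: u64 = 1 << 13;

    // allowed iff all required bits are set in the user's mask
    fn has_privs(user_privs: u64, required: u64) -> bool {
        (user_privs & required) == required
    }

    fn main() {
        let user = PRIV_REMOTE_READ | PRIV_SYS_CONSOLE;
        assert!(has_privs(user, PRIV_REMOTE_READ));
        assert!(!has_privs(user, PRIV_REMOTE_READ | PRIV_REMOTE_PRUNE));
    }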
@@ -23,6 +23,7 @@ use proxmox::tools::fd::RawFdNum;
 use proxmox::tools::vec;

 use crate::pxar::catalog::BackupCatalogWriter;
+use crate::pxar::metadata::errno_is_unsupported;
 use crate::pxar::Flags;
 use crate::pxar::tools::assert_single_path_component;
 use crate::tools::{acl, fs, xattr, Fd};
@@ -161,7 +162,7 @@

     if skip_lost_and_found {
         patterns.push(MatchEntry::parse_pattern(
-            "**/lost+found",
+            "lost+found",
             PatternFlag::PATH_NAME,
             MatchType::Exclude,
         )?);
@@ -289,11 +290,13 @@ impl<'a, 'b> Archiver<'a, 'b> {

         let old_pattern_count = self.patterns.len();

+        let path_bytes = self.path.as_os_str().as_bytes();
+
         if let Some(fd) = fd {
             let file = unsafe { std::fs::File::from_raw_fd(fd.into_raw_fd()) };

             use io::BufRead;
-            for line in io::BufReader::new(file).lines() {
+            for line in io::BufReader::new(file).split(b'\n') {
                 let line = match line {
                     Ok(line) => line,
                     Err(err) => {
@@ -308,13 +311,29 @@ impl<'a, 'b> Archiver<'a, 'b> {
                     }
                 };

-                let line = line.trim();
+                let line = crate::tools::strip_ascii_whitespace(&line);

-                if line.is_empty() || line.starts_with('#') {
+                if line.is_empty() || line[0] == b'#' {
                     continue;
                 }

-                match MatchEntry::parse_pattern(line, PatternFlag::PATH_NAME, MatchType::Exclude) {
+                let mut buf;
+                let (line, mode) = if line[0] == b'/' {
+                    buf = Vec::with_capacity(path_bytes.len() + 1 + line.len());
+                    buf.extend(path_bytes);
+                    buf.extend(line);
+                    (&buf[..], MatchType::Exclude)
+                } else if line.starts_with(b"!/") {
+                    // inverted case with absolute path
+                    buf = Vec::with_capacity(path_bytes.len() + line.len());
+                    buf.extend(path_bytes);
+                    buf.extend(&line[1..]); // without the '!'
+                    (&buf[..], MatchType::Include)
+                } else {
+                    (line, MatchType::Exclude)
+                };
+
+                match MatchEntry::parse_pattern(line, PatternFlag::PATH_NAME, mode) {
                     Ok(pattern) => self.patterns.push(pattern),
                     Err(err) => {
                         let _ = writeln!(self.errors, "bad pattern in {:?}: {}", self.path, err);
@@ -698,13 +717,6 @@ fn get_metadata(fd: RawFd, stat: &FileStat, flags: Flags, fs_magic: i64) -> Resu
     Ok(meta)
 }

-fn errno_is_unsupported(errno: Errno) -> bool {
-    match errno {
-        Errno::ENOTTY | Errno::ENOSYS | Errno::EBADF | Errno::EOPNOTSUPP | Errno::EINVAL => true,
-        _ => false,
-    }
-}
-
 fn get_fcaps(meta: &mut Metadata, fd: RawFd, flags: Flags) -> Result<(), Error> {
     if flags.contains(Flags::WITH_FCAPS) {
         return Ok(());
@@ -769,7 +781,7 @@ fn get_xattr_fcaps_acl(
 }

 fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result<(), Error> {
-    let mut attr: usize = 0;
+    let mut attr: libc::c_long = 0;

     match unsafe { fs::read_attr_fd(fd, &mut attr) } {
         Ok(_) => (),
@@ -779,7 +791,7 @@ fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result<(), Error> {
         Err(err) => bail!("failed to read file attributes: {}", err),
     }

-    metadata.stat.flags |= Flags::from_chattr(attr as u32).bits();
+    metadata.stat.flags |= Flags::from_chattr(attr).bits();

     Ok(())
 }
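The net effect of the new pattern parsing, shown on a hypothetical .pxarexclude file: a leading '/' anchors the pattern to the directory containing the file, a leading '!/' turns it into an anchored re-include, and everything else stays a relative exclude that can match at any depth below:

    # lines starting with '#' are comments; blank lines are skipped
    *.tmp
    /cache
    !/cache/keep

Here *.tmp is excluded anywhere below the directory, /cache only matches the cache entry directly next to this .pxarexclude file, and /cache/keep is re-included again.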
@@ -230,7 +230,8 @@ impl Extractor {
             dir.metadata(),
             fd,
             &CString::new(dir.file_name().as_bytes())?,
-        )?;
+        )
+        .map_err(|err| format_err!("failed to apply directory metadata: {}", err))?;
     }

     Ok(())
@@ -241,7 +242,9 @@ impl Extractor {
     }

     fn parent_fd(&mut self) -> Result<RawFd, Error> {
-        self.dir_stack.last_dir_fd(self.allow_existing_dirs)
+        self.dir_stack
+            .last_dir_fd(self.allow_existing_dirs)
+            .map_err(|err| format_err!("failed to get parent directory file descriptor: {}", err))
     }

     pub fn extract_symlink(
@@ -320,10 +323,14 @@ impl Extractor {
             file_name,
             OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
             Mode::from_bits(0o600).unwrap(),
-        )?)
+        )
+        .map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?)
     };

-    let extracted = io::copy(&mut *contents, &mut file)?;
+    metadata::apply_initial_flags(self.feature_flags, metadata, file.as_raw_fd())?;
+
+    let extracted = io::copy(&mut *contents, &mut file)
+        .map_err(|err| format_err!("failed to copy file contents: {}", err))?;
     if size != extracted {
         bail!("extracted {} bytes of a file of {} bytes", extracted, size);
     }
@@ -345,10 +352,15 @@ impl Extractor {
             file_name,
             OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
             Mode::from_bits(0o600).unwrap(),
-        )?)
+        )
+        .map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?)
     });

-    let extracted = tokio::io::copy(&mut *contents, &mut file).await?;
+    metadata::apply_initial_flags(self.feature_flags, metadata, file.as_raw_fd())?;
+
+    let extracted = tokio::io::copy(&mut *contents, &mut file)
+        .await
+        .map_err(|err| format_err!("failed to copy file contents: {}", err))?;
     if size != extracted {
         bail!("extracted {} bytes of a file of {} bytes", extracted, size);
     }
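Note that apply_initial_flags runs after the file is created but before any contents are copied; flags like NOCOW only take effect on an empty file on some filesystems, btrfs being the classic example. Under the hood this boils down to the FS_IOC_GETFLAGS/SETFLAGS ioctls; a minimal sketch of that mechanism (request numbers below assume 64-bit Linux and the libc crate):

    use std::fs::File;
    use std::os::unix::io::AsRawFd;

    const FS_IOC_GETFLAGS: libc::c_ulong = 0x8008_6601;
    const FS_IOC_SETFLAGS: libc::c_ulong = 0x4008_6602;
    const FS_NOCOW_FL: libc::c_long = 0x0080_0000;

    fn main() -> std::io::Result<()> {
        let file = File::create("/tmp/nocow-demo")?;
        let mut attr: libc::c_long = 0;
        unsafe {
            if libc::ioctl(file.as_raw_fd(), FS_IOC_GETFLAGS, &mut attr) == 0 {
                attr |= FS_NOCOW_FL; // must be set while the file is still empty
                libc::ioctl(file.as_raw_fd(), FS_IOC_SETFLAGS, &attr);
            }
        }
        Ok(())
    }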
@ -3,6 +3,8 @@
|
|||||||
//! Flags for known supported features for a given filesystem can be derived
|
//! Flags for known supported features for a given filesystem can be derived
|
||||||
//! from the superblocks magic number.
|
//! from the superblocks magic number.
|
||||||
|
|
||||||
|
use libc::c_long;
|
||||||
|
|
||||||
use bitflags::bitflags;
|
use bitflags::bitflags;
|
||||||
|
|
||||||
bitflags! {
|
bitflags! {
|
||||||
@ -149,34 +151,54 @@ impl Default for Flags {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// form /usr/include/linux/fs.h
|
||||||
|
const FS_APPEND_FL: c_long = 0x0000_0020;
|
||||||
|
const FS_NOATIME_FL: c_long = 0x0000_0080;
|
||||||
|
const FS_COMPR_FL: c_long = 0x0000_0004;
|
||||||
|
const FS_NOCOW_FL: c_long = 0x0080_0000;
|
||||||
|
const FS_NODUMP_FL: c_long = 0x0000_0040;
|
||||||
|
const FS_DIRSYNC_FL: c_long = 0x0001_0000;
|
||||||
|
+const FS_IMMUTABLE_FL: c_long = 0x0000_0010;
+const FS_SYNC_FL: c_long = 0x0000_0008;
+const FS_NOCOMP_FL: c_long = 0x0000_0400;
+const FS_PROJINHERIT_FL: c_long = 0x2000_0000;
+
+pub(crate) const INITIAL_FS_FLAGS: c_long =
+    FS_NOATIME_FL
+    | FS_COMPR_FL
+    | FS_NOCOW_FL
+    | FS_NOCOMP_FL
+    | FS_PROJINHERIT_FL;
+
+#[rustfmt::skip]
+const CHATTR_MAP: [(Flags, c_long); 10] = [
+    ( Flags::WITH_FLAG_APPEND,      FS_APPEND_FL      ),
+    ( Flags::WITH_FLAG_NOATIME,     FS_NOATIME_FL     ),
+    ( Flags::WITH_FLAG_COMPR,       FS_COMPR_FL       ),
+    ( Flags::WITH_FLAG_NOCOW,       FS_NOCOW_FL       ),
+    ( Flags::WITH_FLAG_NODUMP,      FS_NODUMP_FL      ),
+    ( Flags::WITH_FLAG_DIRSYNC,     FS_DIRSYNC_FL     ),
+    ( Flags::WITH_FLAG_IMMUTABLE,   FS_IMMUTABLE_FL   ),
+    ( Flags::WITH_FLAG_SYNC,        FS_SYNC_FL        ),
+    ( Flags::WITH_FLAG_NOCOMP,      FS_NOCOMP_FL      ),
+    ( Flags::WITH_FLAG_PROJINHERIT, FS_PROJINHERIT_FL ),
+];
+
+// from /usr/include/linux/msdos_fs.h
+const ATTR_HIDDEN: u32 = 2;
+const ATTR_SYS: u32 = 4;
+const ATTR_ARCH: u32 = 32;
+
+#[rustfmt::skip]
+const FAT_ATTR_MAP: [(Flags, u32); 3] = [
+    ( Flags::WITH_FLAG_HIDDEN,  ATTR_HIDDEN ),
+    ( Flags::WITH_FLAG_SYSTEM,  ATTR_SYS    ),
+    ( Flags::WITH_FLAG_ARCHIVE, ATTR_ARCH   ),
+];
+
 impl Flags {
     /// Get a set of feature flags from file attributes.
-    pub fn from_chattr(attr: u32) -> Flags {
-        // form /usr/include/linux/fs.h
-        const FS_APPEND_FL: u32 = 0x0000_0020;
-        const FS_NOATIME_FL: u32 = 0x0000_0080;
-        const FS_COMPR_FL: u32 = 0x0000_0004;
-        const FS_NOCOW_FL: u32 = 0x0080_0000;
-        const FS_NODUMP_FL: u32 = 0x0000_0040;
-        const FS_DIRSYNC_FL: u32 = 0x0001_0000;
-        const FS_IMMUTABLE_FL: u32 = 0x0000_0010;
-        const FS_SYNC_FL: u32 = 0x0000_0008;
-        const FS_NOCOMP_FL: u32 = 0x0000_0400;
-        const FS_PROJINHERIT_FL: u32 = 0x2000_0000;
-
-        const CHATTR_MAP: [(Flags, u32); 10] = [
-            ( Flags::WITH_FLAG_APPEND,      FS_APPEND_FL      ),
-            ( Flags::WITH_FLAG_NOATIME,     FS_NOATIME_FL     ),
-            ( Flags::WITH_FLAG_COMPR,       FS_COMPR_FL       ),
-            ( Flags::WITH_FLAG_NOCOW,       FS_NOCOW_FL       ),
-            ( Flags::WITH_FLAG_NODUMP,      FS_NODUMP_FL      ),
-            ( Flags::WITH_FLAG_DIRSYNC,     FS_DIRSYNC_FL     ),
-            ( Flags::WITH_FLAG_IMMUTABLE,   FS_IMMUTABLE_FL   ),
-            ( Flags::WITH_FLAG_SYNC,        FS_SYNC_FL        ),
-            ( Flags::WITH_FLAG_NOCOMP,      FS_NOCOMP_FL      ),
-            ( Flags::WITH_FLAG_PROJINHERIT, FS_PROJINHERIT_FL ),
-        ];
-
+    pub fn from_chattr(attr: c_long) -> Flags {
         let mut flags = Flags::empty();

         for (fe_flag, fs_flag) in &CHATTR_MAP {
@@ -188,19 +210,25 @@ impl Flags {
         flags
     }

+    /// Get the chattr bit representation of these feature flags.
+    pub fn to_chattr(self) -> c_long {
+        let mut flags: c_long = 0;
+
+        for (fe_flag, fs_flag) in &CHATTR_MAP {
+            if self.contains(*fe_flag) {
+                flags |= *fs_flag;
+            }
+        }
+
+        flags
+    }
+
+    pub fn to_initial_chattr(self) -> c_long {
+        self.to_chattr() & INITIAL_FS_FLAGS
+    }
+
     /// Get a set of feature flags from FAT attributes.
     pub fn from_fat_attr(attr: u32) -> Flags {
-        // from /usr/include/linux/msdos_fs.h
-        const ATTR_HIDDEN: u32 = 2;
-        const ATTR_SYS: u32 = 4;
-        const ATTR_ARCH: u32 = 32;
-
-        const FAT_ATTR_MAP: [(Flags, u32); 3] = [
-            ( Flags::WITH_FLAG_HIDDEN,  ATTR_HIDDEN ),
-            ( Flags::WITH_FLAG_SYSTEM,  ATTR_SYS    ),
-            ( Flags::WITH_FLAG_ARCHIVE, ATTR_ARCH   ),
-        ];
-
         let mut flags = Flags::empty();

         for (fe_flag, fs_flag) in &FAT_ATTR_MAP {
@@ -212,6 +240,19 @@ impl Flags {
         flags
     }

+    /// Get the fat attribute bit representation of these feature flags.
+    pub fn to_fat_attr(self) -> u32 {
+        let mut flags = 0u32;
+
+        for (fe_flag, fs_flag) in &FAT_ATTR_MAP {
+            if self.contains(*fe_flag) {
+                flags |= *fs_flag;
+            }
+        }
+
+        flags
+    }
+
     /// Return the supported *pxar* feature flags based on the magic number of the filesystem.
     pub fn from_magic(magic: i64) -> Flags {
         use proxmox::sys::linux::magic::*;
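The module-level CHATTR_MAP now drives both conversion directions, so a feature set survives a round trip through its chattr bit representation. A minimal standalone sketch of the same table-driven idea (plain integers stand in for the pxar Flags bitflags type, and the feature-bit values 0x1/0x2 are made up for illustration):

use std::os::raw::c_long;

// Bit values copied from linux/fs.h; the left column is a hypothetical
// feature bit, not the real pxar flag constant.
const FS_APPEND_FL: c_long = 0x0000_0020;
const FS_NOATIME_FL: c_long = 0x0000_0080;

const CHATTR_MAP: [(u64, c_long); 2] = [
    (0x1, FS_APPEND_FL),  // stand-in for WITH_FLAG_APPEND
    (0x2, FS_NOATIME_FL), // stand-in for WITH_FLAG_NOATIME
];

fn to_chattr(feature_flags: u64) -> c_long {
    let mut chattr: c_long = 0;
    for (fe_flag, fs_flag) in &CHATTR_MAP {
        if (feature_flags & fe_flag) != 0 {
            chattr |= fs_flag;
        }
    }
    chattr
}

fn from_chattr(attr: c_long) -> u64 {
    let mut flags: u64 = 0;
    for (fe_flag, fs_flag) in &CHATTR_MAP {
        if (attr & fs_flag) != 0 {
            flags |= fe_flag;
        }
    }
    flags
}

fn main() {
    let flags = 0x1 | 0x2;
    // Mapping through the same table in both directions round-trips.
    assert_eq!(from_chattr(to_chattr(flags)), flags);
    println!("chattr bits: {:#x}", to_chattr(flags));
}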
@@ -79,13 +79,19 @@ pub fn apply_at(
     apply(flags, metadata, fd.as_raw_fd(), file_name)
 }

+pub fn apply_initial_flags(
+    flags: Flags,
+    metadata: &Metadata,
+    fd: RawFd,
+) -> Result<(), Error> {
+    let entry_flags = Flags::from_bits_truncate(metadata.stat.flags);
+    apply_chattr(fd, entry_flags.to_initial_chattr(), flags.to_initial_chattr())?;
+    Ok(())
+}
+
 pub fn apply(flags: Flags, metadata: &Metadata, fd: RawFd, file_name: &CStr) -> Result<(), Error> {
     let c_proc_path = CString::new(format!("/proc/self/fd/{}", fd)).unwrap();

-    if metadata.stat.flags != 0 {
-        todo!("apply flags!");
-    }
-
     unsafe {
         // UID and GID first, as this fails if we lose access anyway.
         c_result!(libc::chown(
@@ -94,13 +100,15 @@ pub fn apply(flags: Flags, metadata: &Metadata, fd: RawFd, file_name: &CStr) ->
             metadata.stat.gid
         ))
         .map(drop)
-        .or_else(allow_notsupp)?;
+        .or_else(allow_notsupp)
+        .map_err(|err| format_err!("failed to set ownership: {}", err))?;
     }

     let mut skip_xattrs = false;
     apply_xattrs(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs)?;
     add_fcaps(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs)?;
-    apply_acls(flags, &c_proc_path, metadata)?;
+    apply_acls(flags, &c_proc_path, metadata)
+        .map_err(|err| format_err!("failed to apply acls: {}", err))?;
     apply_quota_project_id(flags, fd, metadata)?;

     // Finally mode and time. We may lose access with mode, but the changing the mode also
@@ -110,7 +118,12 @@ pub fn apply(flags: Flags, metadata: &Metadata, fd: RawFd, file_name: &CStr) ->
             libc::chmod(c_proc_path.as_ptr(), perms_from_metadata(metadata)?.bits())
         })
         .map(drop)
-        .or_else(allow_notsupp)?;
+        .or_else(allow_notsupp)
+        .map_err(|err| format_err!("failed to change file mode: {}", err))?;
+    }
+
+    if metadata.stat.flags != 0 {
+        apply_flags(flags, fd, metadata.stat.flags)?;
     }

     let res = c_result!(unsafe {
@@ -160,7 +173,8 @@ fn add_fcaps(
         )
     })
     .map(drop)
-    .or_else(|err| allow_notsupp_remember(err, skip_xattrs))?;
+    .or_else(|err| allow_notsupp_remember(err, skip_xattrs))
+    .map_err(|err| format_err!("failed to apply file capabilities: {}", err))?;

     Ok(())
 }
@@ -195,7 +209,8 @@ fn apply_xattrs(
             )
         })
         .map(drop)
-        .or_else(|err| allow_notsupp_remember(err, &mut *skip_xattrs))?;
+        .or_else(|err| allow_notsupp_remember(err, &mut *skip_xattrs))
+        .map_err(|err| format_err!("failed to apply extended attributes: {}", err))?;
     }

     Ok(())
@@ -317,3 +332,49 @@ fn apply_quota_project_id(flags: Flags, fd: RawFd, metadata: &Metadata) -> Resul

     Ok(())
 }
+
+pub(crate) fn errno_is_unsupported(errno: Errno) -> bool {
+    match errno {
+        Errno::ENOTTY | Errno::ENOSYS | Errno::EBADF | Errno::EOPNOTSUPP | Errno::EINVAL => true,
+        _ => false,
+    }
+}
+
+fn apply_chattr(fd: RawFd, chattr: libc::c_long, mask: libc::c_long) -> Result<(), Error> {
+    if chattr == 0 {
+        return Ok(());
+    }
+
+    let mut fattr: libc::c_long = 0;
+    match unsafe { fs::read_attr_fd(fd, &mut fattr) } {
+        Ok(_) => (),
+        Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
+            return Ok(());
+        }
+        Err(err) => bail!("failed to read file attributes: {}", err),
+    }
+
+    let attr = (chattr & mask) | (fattr & !mask);
+    match unsafe { fs::write_attr_fd(fd, &attr) } {
+        Ok(_) => Ok(()),
+        Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => Ok(()),
+        Err(err) => bail!("failed to set file attributes: {}", err),
+    }
+}
+
+fn apply_flags(flags: Flags, fd: RawFd, entry_flags: u64) -> Result<(), Error> {
+    let entry_flags = Flags::from_bits_truncate(entry_flags);
+
+    apply_chattr(fd, entry_flags.to_chattr(), flags.to_chattr())?;
+
+    let fatattr = (flags & entry_flags).to_fat_attr();
+    if fatattr != 0 {
+        match unsafe { fs::write_fat_attr_fd(fd, &fatattr) } {
+            Ok(_) => (),
+            Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => (),
+            Err(err) => bail!("failed to set file attributes: {}", err),
+        }
+    }
+
+    Ok(())
+}
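The key line in apply_chattr is the read-modify-write mask: (chattr & mask) | (fattr & !mask) takes the masked bits from the archive entry and keeps every bit outside the mask at its current on-disk value. A small self-contained check of that arithmetic (plain integers, no ioctl involved):

fn merge_attrs(chattr: i64, mask: i64, fattr: i64) -> i64 {
    // Bits inside `mask` come from the archive entry (`chattr`);
    // bits outside `mask` keep the current on-disk value (`fattr`).
    (chattr & mask) | (fattr & !mask)
}

fn main() {
    let mask = 0b1111;        // only the low four attribute bits are managed
    let fattr = 0b1010_0101;  // current attributes, as read via FS_IOC_GETFLAGS
    let chattr = 0b0000_0011; // attributes recorded in the archive

    let merged = merge_attrs(chattr, mask, fattr);
    assert_eq!(merged, 0b1010_0011); // high nibble untouched, low nibble replaced
    println!("{:#010b}", merged);
}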
@@ -1,9 +1,13 @@
 use std::collections::HashMap;
-use std::path::{PathBuf};
+use std::path::PathBuf;
+use std::time::SystemTime;
+use std::fs::metadata;
+use std::sync::RwLock;

-use anyhow::Error;
+use anyhow::{bail, Error, format_err};
 use hyper::Method;
 use handlebars::Handlebars;
+use serde::Serialize;

 use proxmox::api::{ApiMethod, Router, RpcEnvironmentType};

@@ -12,21 +16,20 @@ pub struct ApiConfig {
     router: &'static Router,
     aliases: HashMap<String, PathBuf>,
     env_type: RpcEnvironmentType,
-    pub templates: Handlebars<'static>,
+    templates: RwLock<Handlebars<'static>>,
+    template_files: RwLock<HashMap<String, (SystemTime, PathBuf)>>,
 }

 impl ApiConfig {

     pub fn new<B: Into<PathBuf>>(basedir: B, router: &'static Router, env_type: RpcEnvironmentType) -> Result<Self, Error> {
-        let mut templates = Handlebars::new();
-        let basedir = basedir.into();
-        templates.register_template_file("index", basedir.join("index.hbs"))?;
         Ok(Self {
-            basedir,
+            basedir: basedir.into(),
             router,
             aliases: HashMap::new(),
             env_type,
-            templates
+            templates: RwLock::new(Handlebars::new()),
+            template_files: RwLock::new(HashMap::new()),
         })
     }

@@ -67,4 +70,52 @@ impl ApiConfig {
     pub fn env_type(&self) -> RpcEnvironmentType {
         self.env_type
     }
+
+    pub fn register_template<P>(&self, name: &str, path: P) -> Result<(), Error>
+    where
+        P: Into<PathBuf>
+    {
+        if self.template_files.read().unwrap().contains_key(name) {
+            bail!("template already registered");
+        }
+
+        let path: PathBuf = path.into();
+        let metadata = metadata(&path)?;
+        let mtime = metadata.modified()?;
+
+        self.templates.write().unwrap().register_template_file(name, &path)?;
+        self.template_files.write().unwrap().insert(name.to_string(), (mtime, path));
+
+        Ok(())
+    }
+
+    /// Checks if the template was modified since the last rendering;
+    /// if yes, it loads the new version of the template
+    pub fn render_template<T>(&self, name: &str, data: &T) -> Result<String, Error>
+    where
+        T: Serialize,
+    {
+        let path;
+        let mtime;
+        {
+            let template_files = self.template_files.read().unwrap();
+            let (old_mtime, old_path) = template_files.get(name).ok_or_else(|| format_err!("template not found"))?;
+
+            mtime = metadata(old_path)?.modified()?;
+            if mtime <= *old_mtime {
+                return self.templates.read().unwrap().render(name, data).map_err(|err| format_err!("{}", err));
+            }
+            path = old_path.to_path_buf();
+        }
+
+        {
+            let mut template_files = self.template_files.write().unwrap();
+            let mut templates = self.templates.write().unwrap();
+
+            templates.register_template_file(name, &path)?;
+            template_files.insert(name.to_string(), (mtime, path));
+
+            templates.render(name, data).map_err(|err| format_err!("{}", err))
+        }
+    }
 }
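render_template re-checks the template file's mtime on every call and re-registers the template when the file changed, so edits to index.hbs take effect without a daemon restart. A reduced sketch of that reload check, assuming the handlebars and serde_json crates are available (this is not the real ApiConfig, just the same locking and mtime logic):

use std::collections::HashMap;
use std::fs::metadata;
use std::path::PathBuf;
use std::sync::RwLock;
use std::time::SystemTime;

use handlebars::Handlebars;

struct Templates {
    registry: RwLock<Handlebars<'static>>,
    files: RwLock<HashMap<String, (SystemTime, PathBuf)>>,
}

impl Templates {
    fn render(&self, name: &str, data: &serde_json::Value) -> Result<String, Box<dyn std::error::Error>> {
        // Read lock only: look up the registered path and the mtime we saw last.
        let (old_mtime, path) = {
            let files = self.files.read().unwrap();
            let (mtime, path) = files.get(name).ok_or("template not found")?;
            (*mtime, path.clone())
        };

        let mtime = metadata(&path)?.modified()?;
        if mtime > old_mtime {
            // The file changed on disk: re-register before rendering.
            self.registry.write().unwrap().register_template_file(name, &path)?;
            self.files.write().unwrap().insert(name.to_string(), (mtime, path));
        }
        Ok(self.registry.read().unwrap().render(name, data)?)
    }
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    std::fs::write("index.hbs", "hello {{name}}")?; // illustrative file
    let t = Templates {
        registry: RwLock::new(Handlebars::new()),
        files: RwLock::new(HashMap::new()),
    };
    t.registry.write().unwrap().register_template_file("index", "index.hbs")?;
    t.files.write().unwrap().insert(
        "index".to_string(),
        (metadata("index.hbs")?.modified()?, PathBuf::from("index.hbs")),
    );
    println!("{}", t.render("index", &serde_json::json!({"name": "world"}))?);
    Ok(())
}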
@@ -55,7 +55,7 @@ impl <E: RpcEnvironment + Clone> H2Service<E> {

         match self.router.find_method(&components, method, &mut uri_param) {
             None => {
-                let err = http_err!(NOT_FOUND, "Path not found.".to_string());
+                let err = http_err!(NOT_FOUND, format!("Path '{}' not found.", path).to_string());
                 future::ok((formatter.format_error)(err)).boxed()
             }
             Some(api_method) => {
@@ -16,7 +16,6 @@ use serde_json::{json, Value};
 use tokio::fs::File;
 use tokio::time::Instant;
 use url::form_urlencoded;
-use handlebars::Handlebars;

 use proxmox::http_err;
 use proxmox::api::{ApiHandler, ApiMethod, HttpError};
@@ -312,7 +311,7 @@ pub async fn handle_api_request<Env: RpcEnvironment, S: 'static + BuildHasher +
     Ok(resp)
 }

-fn get_index(username: Option<String>, token: Option<String>, template: &Handlebars, parts: Parts) -> Response<Body> {
+fn get_index(username: Option<String>, token: Option<String>, api: &Arc<ApiConfig>, parts: Parts) -> Response<Body> {

     let nodename = proxmox::tools::nodename();
     let username = username.unwrap_or_else(|| String::from(""));
@@ -320,11 +319,14 @@ fn get_index(username: Option<String>, token: Option<String>, template: &Handleb
     let token = token.unwrap_or_else(|| String::from(""));

     let mut debug = false;
+    let mut template_file = "index";
+
     if let Some(query_str) = parts.uri.query() {
         for (k, v) in form_urlencoded::parse(query_str.as_bytes()).into_owned() {
             if k == "debug" && v != "0" && v != "false" {
                 debug = true;
+            } else if k == "console" {
+                template_file = "console";
             }
         }
     }
@@ -338,12 +340,12 @@ fn get_index(username: Option<String>, token: Option<String>, template: &Handleb

     let mut ct = "text/html";

-    let index = match template.render("index", &data) {
+    let index = match api.render_template(template_file, &data) {
         Ok(index) => index,
         Err(err) => {
             ct = "text/plain";
-            format!("Error rendering template: {}", err.desc)
-        },
+            format!("Error rendering template: {}", err)
+        }
     };

     Response::builder()
@@ -497,8 +499,8 @@ pub async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<R

     let comp_len = components.len();

-    println!("REQUEST {} {}", method, path);
-    println!("COMPO {:?}", components);
+    //println!("REQUEST {} {}", method, path);
+    //println!("COMPO {:?}", components);

     let env_type = api.env_type();
     let mut rpcenv = RestEnvironment::new(env_type);
@@ -542,7 +544,7 @@ pub async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<R

         match api.find_method(&components[2..], method, &mut uri_param) {
             None => {
-                let err = http_err!(NOT_FOUND, "Path not found.".to_string());
+                let err = http_err!(NOT_FOUND, format!("Path '{}' not found.", path).to_string());
                 return Ok((formatter.format_error)(err));
             }
             Some(api_method) => {
@@ -580,15 +582,15 @@ pub async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<R
                 match check_auth(&method, &ticket, &token, &user_info) {
                     Ok(username) => {
                         let new_token = assemble_csrf_prevention_token(csrf_secret(), &username);
-                        return Ok(get_index(Some(username), Some(new_token), &api.templates, parts));
+                        return Ok(get_index(Some(username), Some(new_token), &api, parts));
                     }
                     _ => {
                         tokio::time::delay_until(Instant::from_std(delay_unauth_time)).await;
-                        return Ok(get_index(None, None, &api.templates, parts));
+                        return Ok(get_index(None, None, &api, parts));
                     }
                 }
             } else {
-                return Ok(get_index(None, None, &api.templates, parts));
+                return Ok(get_index(None, None, &api, parts));
             }
         } else {
             let filename = api.find_alias(&components);
@@ -596,5 +598,5 @@ pub async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<R
         }
     }

-    Err(http_err!(NOT_FOUND, "Path not found.".to_string()))
+    Err(http_err!(NOT_FOUND, format!("Path '{}' not found.", path).to_string()))
 }
@@ -19,6 +19,7 @@ pub struct ServerState {
     pub shutdown_listeners: BroadcastData<()>,
     pub last_worker_listeners: BroadcastData<()>,
     pub worker_count: usize,
+    pub task_count: usize,
     pub reload_request: bool,
 }

@@ -28,6 +29,7 @@ lazy_static! {
         shutdown_listeners: BroadcastData::new(),
         last_worker_listeners: BroadcastData::new(),
         worker_count: 0,
+        task_count: 0,
         reload_request: false,
     });
 }
@@ -101,20 +103,40 @@ pub fn last_worker_future() -> impl Future<Output = Result<(), Error>> {
 }

 pub fn set_worker_count(count: usize) {
-    let mut data = SERVER_STATE.lock().unwrap();
-    data.worker_count = count;
-
-    if !(data.mode == ServerMode::Shutdown && data.worker_count == 0) { return; }
-
-    data.last_worker_listeners.notify_listeners(Ok(()));
+    SERVER_STATE.lock().unwrap().worker_count = count;
+
+    check_last_worker();
 }

 pub fn check_last_worker() {
     let mut data = SERVER_STATE.lock().unwrap();

-    if !(data.mode == ServerMode::Shutdown && data.worker_count == 0) { return; }
+    if !(data.mode == ServerMode::Shutdown && data.worker_count == 0 && data.task_count == 0) { return; }

     data.last_worker_listeners.notify_listeners(Ok(()));
 }
+
+/// Spawns a tokio task that will be tracked for reload
+/// and if it is finished, notify the last_worker_listener if we
+/// are in shutdown mode
+pub fn spawn_internal_task<T>(task: T)
+where
+    T: Future + Send + 'static,
+    T::Output: Send + 'static,
+{
+    let mut data = SERVER_STATE.lock().unwrap();
+    data.task_count += 1;
+
+    tokio::spawn(async move {
+        let _ = tokio::spawn(task).await; // ignore errors
+
+        { // drop mutex
+            let mut data = SERVER_STATE.lock().unwrap();
+            if data.task_count > 0 {
+                data.task_count -= 1;
+            }
+        }
+
+        check_last_worker();
+    });
+}
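spawn_internal_task awaits the payload through a second tokio::spawn, so a panic inside the task surfaces as a JoinError on the wrapper instead of skipping the bookkeeping, and the counter is decremented either way. A reduced model of that pattern, assuming tokio 0.2 with the rt-threaded and macros features (matching the delay_until API used elsewhere in this changeset); a plain AtomicUsize stands in for the SERVER_STATE mutex:

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

fn spawn_tracked<T>(count: Arc<AtomicUsize>, task: T)
where
    T: std::future::Future + Send + 'static,
    T::Output: Send + 'static,
{
    count.fetch_add(1, Ordering::SeqCst);
    tokio::spawn(async move {
        // Awaiting a nested spawn turns a panic in `task` into a JoinError
        // here, so the decrement below always runs.
        let _ = tokio::spawn(task).await;
        count.fetch_sub(1, Ordering::SeqCst);
        // The real code calls check_last_worker() at this point.
    });
}

#[tokio::main]
async fn main() {
    let count = Arc::new(AtomicUsize::new(0));
    spawn_tracked(count.clone(), async { panic!("boom") });
    tokio::time::delay_for(std::time::Duration::from_millis(50)).await;
    // Even though the task panicked, the count went back to zero.
    assert_eq!(count.load(Ordering::SeqCst), 0);
}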
@@ -270,28 +270,22 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<Vec<TaskListInfo>, E
         let line = line?;
         match parse_worker_status_line(&line) {
             Err(err) => bail!("unable to parse active worker status '{}' - {}", line, err),
-            Ok((upid_str, upid, state)) => {
-
-                let running = worker_is_active_local(&upid);
-
-                if running {
-                    active_list.push(TaskListInfo { upid, upid_str, state: None });
-                } else {
-                    match state {
-                        None => {
-                            println!("Detected stopped UPID {}", upid_str);
-                            let status = upid_read_status(&upid)
-                                .unwrap_or_else(|_| String::from("unknown"));
-                            finish_list.push(TaskListInfo {
-                                upid, upid_str, state: Some((Local::now().timestamp(), status))
-                            });
-                        },
-                        Some((endtime, status)) => {
-                            finish_list.push(TaskListInfo {
-                                upid, upid_str, state: Some((endtime, status))
-                            })
-                        }
-                    }
-                }
-            }
+            Ok((upid_str, upid, state)) => match state {
+                None if worker_is_active_local(&upid) => {
+                    active_list.push(TaskListInfo { upid, upid_str, state: None });
+                },
+                None => {
+                    println!("Detected stopped UPID {}", upid_str);
+                    let status = upid_read_status(&upid)
+                        .unwrap_or_else(|_| String::from("unknown"));
+                    finish_list.push(TaskListInfo {
+                        upid, upid_str, state: Some((Local::now().timestamp(), status))
+                    });
+                },
+                Some((endtime, status)) => {
+                    finish_list.push(TaskListInfo {
+                        upid, upid_str, state: Some((endtime, status))
+                    })
+                }
+            }
         }
     }
src/tools.rs | 12

@@ -23,6 +23,7 @@ pub use proxmox::tools::fd::Fd;
 pub mod acl;
 pub mod async_io;
 pub mod borrow;
+pub mod cert;
 pub mod daemon;
 pub mod disks;
 pub mod fs;
@@ -646,3 +647,14 @@ pub fn setup_safe_path_env() {
         std::env::remove_var(name);
     }
 }
+
+pub fn strip_ascii_whitespace(line: &[u8]) -> &[u8] {
+    let line = match line.iter().position(|&b| !b.is_ascii_whitespace()) {
+        Some(n) => &line[n..],
+        None => return &[],
+    };
+    match line.iter().rev().position(|&b| !b.is_ascii_whitespace()) {
+        Some(n) => &line[..(line.len() - n)],
+        None => &[],
+    }
+}
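strip_ascii_whitespace is a byte-slice trim, useful where the input is not guaranteed to be valid UTF-8, so str::trim does not apply. Its expected behavior, as a quick self-contained check (the helper is copied verbatim from the diff above):

pub fn strip_ascii_whitespace(line: &[u8]) -> &[u8] {
    let line = match line.iter().position(|&b| !b.is_ascii_whitespace()) {
        Some(n) => &line[n..],
        None => return &[],
    };
    match line.iter().rev().position(|&b| !b.is_ascii_whitespace()) {
        Some(n) => &line[..(line.len() - n)],
        None => &[],
    }
}

fn main() {
    // Leading and trailing ASCII whitespace is removed, inner bytes kept.
    assert_eq!(strip_ascii_whitespace(b"  hello \t\n"), &b"hello"[..]);
    // An all-whitespace input collapses to the empty slice.
    assert_eq!(strip_ascii_whitespace(b"\n\t "), &b""[..]);
    // Inputs without surrounding whitespace pass through unchanged.
    assert_eq!(strip_ascii_whitespace(b"no-trim"), &b"no-trim"[..]);
}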
src/tools/cert.rs | 67 (new file)

@@ -0,0 +1,67 @@
+use std::path::PathBuf;
+
+use anyhow::Error;
+use openssl::x509::{X509, GeneralName};
+use openssl::stack::Stack;
+use openssl::pkey::{Public, PKey};
+
+use crate::configdir;
+
+pub struct CertInfo {
+    x509: X509,
+}
+
+fn x509name_to_string(name: &openssl::x509::X509NameRef) -> Result<String, Error> {
+    let mut parts = Vec::new();
+    for entry in name.entries() {
+        parts.push(format!("{} = {}", entry.object().nid().short_name()?, entry.data().as_utf8()?));
+    }
+    Ok(parts.join(", "))
+}
+
+impl CertInfo {
+    pub fn new() -> Result<Self, Error> {
+        Self::from_path(PathBuf::from(configdir!("/proxy.pem")))
+    }
+
+    pub fn from_path(path: PathBuf) -> Result<Self, Error> {
+        let cert_pem = proxmox::tools::fs::file_get_contents(&path)?;
+        let x509 = openssl::x509::X509::from_pem(&cert_pem)?;
+        Ok(Self{
+            x509
+        })
+    }
+
+    pub fn subject_alt_names(&self) -> Option<Stack<GeneralName>> {
+        self.x509.subject_alt_names()
+    }
+
+    pub fn subject_name(&self) -> Result<String, Error> {
+        Ok(x509name_to_string(self.x509.subject_name())?)
+    }
+
+    pub fn issuer_name(&self) -> Result<String, Error> {
+        Ok(x509name_to_string(self.x509.issuer_name())?)
+    }
+
+    pub fn fingerprint(&self) -> Result<String, Error> {
+        let fp = self.x509.digest(openssl::hash::MessageDigest::sha256())?;
+        let fp_string = proxmox::tools::digest_to_hex(&fp);
+        let fp_string = fp_string.as_bytes().chunks(2).map(|v| std::str::from_utf8(v).unwrap())
+            .collect::<Vec<&str>>().join(":");
+        Ok(fp_string)
+    }
+
+    pub fn public_key(&self) -> Result<PKey<Public>, Error> {
+        let pubkey = self.x509.public_key()?;
+        Ok(pubkey)
+    }
+
+    pub fn not_before(&self) -> &openssl::asn1::Asn1TimeRef {
+        self.x509.not_before()
+    }
+
+    pub fn not_after(&self) -> &openssl::asn1::Asn1TimeRef {
+        self.x509.not_after()
+    }
+}
@@ -743,7 +743,10 @@ pub fn get_disks(

     let partition_type_map = get_partition_type_info()?;

-    let zfs_devices = zfs_devices(&partition_type_map, None)?;
+    let zfs_devices = zfs_devices(&partition_type_map, None).or_else(|err| -> Result<HashSet<u64>, Error> {
+        eprintln!("error getting zfs devices: {}", err);
+        Ok(HashSet::new())
+    })?;

     let lvm_devices = get_lvm_devices(&partition_type_map)?;
@@ -64,7 +64,7 @@ fn parse_zpool_list_header(i: &str) -> IResult<&str, ZFSPoolInfo> {
     let (i, (text, size, alloc, free, _, _,
         frag, _, dedup, health,
         _altroot, _eol)) = tuple((
-        take_while1(|c| char::is_alphanumeric(c)), // name
+        take_while1(|c| char::is_alphanumeric(c) || c == '-' || c == ':' || c == '_' || c == '.'), // name
        preceded(multispace1, parse_optional_u64), // size
        preceded(multispace1, parse_optional_u64), // allocated
        preceded(multispace1, parse_optional_u64), // free
@@ -221,7 +221,7 @@ logs
     assert_eq!(data, expect);

     let output = "\
-btest 427349245952 761856 427348484096 - - 0 0 1.00 ONLINE -
+b-test 427349245952 761856 427348484096 - - 0 0 1.00 ONLINE -
     mirror 213674622976 438272 213674184704 - - 0 0 - ONLINE
     /dev/sda1 - - - - - - - - ONLINE
     /dev/sda2 - - - - - - - - ONLINE
@@ -235,7 +235,7 @@ logs - - - - - - - - -
     let data = parse_zpool_list(&output)?;
     let expect = vec![
         ZFSPoolInfo {
-            name: String::from("btest"),
+            name: String::from("b-test"),
             health: String::from("ONLINE"),
             usage: Some(ZFSPoolUsage {
                 size: 427349245952,
@@ -261,5 +261,31 @@ logs - - - - - - - - -

     assert_eq!(data, expect);

+    let output = "\
+b.test 427349245952 761856 427348484096 - - 0 0 1.00 ONLINE -
+    mirror 213674622976 438272 213674184704 - - 0 0 - ONLINE
+    /dev/sda1 - - - - - - - - ONLINE
+";
+
+    let data = parse_zpool_list(&output)?;
+    let expect = vec![
+        ZFSPoolInfo {
+            name: String::from("b.test"),
+            health: String::from("ONLINE"),
+            usage: Some(ZFSPoolUsage {
+                size: 427349245952,
+                alloc: 761856,
+                free: 427348484096,
+                dedup: 1.0,
+                frag: 0,
+            }),
+            devices: vec![
+                String::from("/dev/sda1"),
+            ]
+        },
+    ];
+
+    assert_eq!(data, expect);
+
     Ok(())
 }
@@ -430,3 +430,38 @@ errors: No known data errors

     Ok(())
 }
+
+#[test]
+fn test_zpool_status_parser3() -> Result<(), Error> {
+
+    let output = r###"  pool: bt-est
+ state: ONLINE
+  scan: none requested
+config:
+
+        NAME        STATE     READ WRITE CKSUM
+        bt-est      ONLINE       0     0     0
+          mirror-0  ONLINE       0     0     0
+            /dev/sda1    ONLINE       0     0     0
+            /dev/sda2    ONLINE       0     0     0
+          mirror-1  ONLINE       0     0     0
+            /dev/sda3    ONLINE       0     0     0
+            /dev/sda4    ONLINE       0     0     0
+        logs
+          /dev/sda5    ONLINE       0     0     0
+
+errors: No known data errors
+"###;
+
+    let key_value_list = parse_zpool_status(&output)?;
+    for (k, v) in key_value_list {
+        println!("{} => {}", k, v);
+        if k == "config" {
+            let vdev_list = parse_zpool_status_config_tree(&v)?;
+            let _tree = vdev_list_to_tree(&vdev_list);
+            //println!("TREE1 {}", serde_json::to_string_pretty(&tree)?);
+        }
+    }
+
+    Ok(())
+}
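The widened take_while1 predicate is what lets pool names such as b-test and b.test (and names containing : or _) through the parser; only the first column of the zpool list output is affected. The accepted character set, stated as a plain predicate with a few checks:

// Mirrors the take_while1 predicate in the updated parse_zpool_list_header.
fn is_zpool_name_char(c: char) -> bool {
    c.is_alphanumeric() || c == '-' || c == ':' || c == '_' || c == '.'
}

fn main() {
    for name in &["btest", "b-test", "b.test", "rpool:backup", "tank_1"] {
        assert!(name.chars().all(is_zpool_name_char));
    }
    // Whitespace still terminates the name field, as before.
    assert!(!is_zpool_name_char(' '));
}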
@@ -46,3 +46,49 @@ pub fn render_bool_with_default_true(value: &Value, _record: &Value) -> Result<S
     let value = value.as_bool().unwrap_or(true);
     Ok((if value { "1" } else { "0" }).to_string())
 }
+
+pub struct HumanByte {
+    b: usize,
+}
+impl std::fmt::Display for HumanByte {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        if self.b < 1024 {
+            return write!(f, "{} B", self.b);
+        }
+        let kb: f64 = self.b as f64 / 1024.0;
+        if kb < 1024.0 {
+            return write!(f, "{:.2} KiB", kb);
+        }
+        let mb: f64 = kb / 1024.0;
+        if mb < 1024.0 {
+            return write!(f, "{:.2} MiB", mb);
+        }
+        let gb: f64 = mb / 1024.0;
+        if gb < 1024.0 {
+            return write!(f, "{:.2} GiB", gb);
+        }
+        let tb: f64 = gb / 1024.0;
+        if tb < 1024.0 {
+            return write!(f, "{:.2} TiB", tb);
+        }
+        let pb: f64 = tb / 1024.0;
+        return write!(f, "{:.2} PiB", pb);
+    }
+}
+impl From<usize> for HumanByte {
+    fn from(v: usize) -> Self {
+        HumanByte { b: v }
+    }
+}
+
+#[test]
+fn correct_byte_convert() {
+    fn convert(b: usize) -> String {
+        HumanByte::from(b).to_string()
+    }
+    assert_eq!(convert(1023), "1023 B");
+    assert_eq!(convert(1<<10), "1.00 KiB");
+    assert_eq!(convert(1<<20), "1.00 MiB");
+    assert_eq!(convert((1<<30) + (103 * 1<<20)), "1.10 GiB");
+    assert_eq!(convert((2<<50) + (500 * 1<<40)), "2.49 PiB");
+}
@@ -222,11 +222,13 @@ where

 // /usr/include/linux/fs.h: #define FS_IOC_GETFLAGS _IOR('f', 1, long)
 // read Linux file system attributes (see man chattr)
-nix::ioctl_read!(read_attr_fd, b'f', 1, usize);
+nix::ioctl_read!(read_attr_fd, b'f', 1, libc::c_long);
+nix::ioctl_write_ptr!(write_attr_fd, b'f', 2, libc::c_long);

 // /usr/include/linux/msdos_fs.h: #define FAT_IOCTL_GET_ATTRIBUTES _IOR('r', 0x10, __u32)
 // read FAT file system attributes
 nix::ioctl_read!(read_fat_attr_fd, b'r', 0x10, u32);
+nix::ioctl_write_ptr!(write_fat_attr_fd, b'r', 0x11, u32);

 // From /usr/include/linux/fs.h
 // #define FS_IOC_FSGETXATTR _IOR('X', 31, struct fsxattr)
@@ -219,7 +219,16 @@ fn parse_calendar_event_incomplete(mut i: &str) -> IResult<&str, CalendarEvent>
                 ..Default::default()
             }));
         }
-        "monthly" | "weekly" | "yearly" | "quarterly" | "semiannually" => {
+        "weekly" => {
+            return Ok(("", CalendarEvent {
+                hour: vec![DateTimeValue::Single(0)],
+                minute: vec![DateTimeValue::Single(0)],
+                second: vec![DateTimeValue::Single(0)],
+                days: WeekDays::MONDAY,
+                ..Default::default()
+            }));
+        }
+        "monthly" | "yearly" | "quarterly" | "semiannually" => {
             return Err(parse_error(i, "unimplemented date or time specification"));
         }
         _ => { /* continue */ }
@@ -88,12 +88,27 @@ impl DateTimeValue {
     }
 }

+/// Calendar events may be used to refer to one or more points in time in a
+/// single expression. They are designed after the systemd.time Calendar Events
+/// specification, but are not guaranteed to be 100% compatible.
 #[derive(Default, Debug)]
 pub struct CalendarEvent {
+    /// the days in a week this event should trigger
     pub days: WeekDays,
+    /// the second(s) this event should trigger
     pub second: Vec<DateTimeValue>, // todo: support float values
+    /// the minute(s) this event should trigger
     pub minute: Vec<DateTimeValue>,
+    /// the hour(s) this event should trigger
     pub hour: Vec<DateTimeValue>,
+    /* FIXME: TODO
+    /// the day(s) in a month this event should trigger
+    pub day: Vec<DateTimeValue>,
+    /// the month(s) in a year this event should trigger
+    pub month: Vec<DateTimeValue>,
+    /// the year(s) this event should trigger
+    pub year: Vec<DateTimeValue>,
+    */
 }

 #[derive(Default)]
@@ -11,6 +11,38 @@ use crate::tools::epoch_now_u64;

 pub const TICKET_LIFETIME: i64 = 3600*2; // 2 hours

+const TERM_PREFIX: &str = "PBSTERM";
+
+pub fn assemble_term_ticket(
+    keypair: &PKey<Private>,
+    username: &str,
+    path: &str,
+    port: u16,
+) -> Result<String, Error> {
+    assemble_rsa_ticket(
+        keypair,
+        TERM_PREFIX,
+        None,
+        Some(&format!("{}{}{}", username, path, port)),
+    )
+}
+
+pub fn verify_term_ticket(
+    keypair: &PKey<Public>,
+    username: &str,
+    path: &str,
+    port: u16,
+    ticket: &str,
+) -> Result<(i64, Option<String>), Error> {
+    verify_rsa_ticket(
+        keypair,
+        TERM_PREFIX,
+        ticket,
+        Some(&format!("{}{}{}", username, path, port)),
+        -300,
+        TICKET_LIFETIME,
+    )
+}
+
 pub fn assemble_rsa_ticket(
     keypair: &PKey<Private>,
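The terminal ticket binds username, path, and port into the signed aux data, so a ticket issued for one console target fails verification against any other. The binding itself is just string concatenation, which the verifier recomputes; a schematic check of that property (this only models the aux data, not the RSA signing done by assemble_rsa_ticket):

// The aux data signed into a PBSTERM ticket: any change in a component
// yields different data, so verification fails for other targets.
fn term_aux_data(username: &str, path: &str, port: u16) -> String {
    format!("{}{}{}", username, path, port)
}

fn main() {
    let issued = term_aux_data("root@pam", "/system", 5900);
    assert_eq!(issued, "root@pam/system5900");
    // Same user and path, different port: different aux data.
    assert_ne!(issued, term_aux_data("root@pam", "/system", 5901));
}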
@@ -82,7 +82,7 @@ pub fn flistxattr(fd: RawFd) -> Result<ListXAttr, nix::errno::Errno> {
     let mut size = 256;
     let mut buffer = vec::undefined(size);
     let mut bytes = unsafe {
-        libc::flistxattr(fd, buffer.as_mut_ptr() as *mut i8, buffer.len())
+        libc::flistxattr(fd, buffer.as_mut_ptr() as *mut libc::c_char, buffer.len())
     };
     while bytes < 0 {
         let err = Errno::last();
@@ -96,7 +96,7 @@ pub fn flistxattr(fd: RawFd) -> Result<ListXAttr, nix::errno::Errno> {
         // Retry to read the list with new buffer
         buffer.resize(size, 0);
         bytes = unsafe {
-            libc::flistxattr(fd, buffer.as_mut_ptr() as *mut i8, buffer.len())
+            libc::flistxattr(fd, buffer.as_mut_ptr() as *mut libc::c_char, buffer.len())
         };
     }
     buffer.truncate(bytes as usize);
@@ -125,7 +125,7 @@ pub fn fgetxattr(fd: RawFd, name: &CStr) -> Result<Vec<u8>, nix::errno::Errno> {
         }
         buffer.resize(size, 0);
         bytes = unsafe {
-            libc::fgetxattr(fd, name.as_ptr() as *const i8, buffer.as_mut_ptr() as *mut core::ffi::c_void, buffer.len())
+            libc::fgetxattr(fd, name.as_ptr() as *const libc::c_char, buffer.as_mut_ptr() as *mut core::ffi::c_void, buffer.len())
         };
     }
     buffer.resize(bytes as usize, 0);
@@ -76,6 +76,7 @@ Ext.define('PBS.Dashboard', {
        let viewmodel = me.getViewModel();

        let res = records[0].data;
+       viewmodel.set('fingerprint', res.info.fingerprint || Proxmox.Utils.unknownText);

        let cpu = res.cpu,
            mem = res.memory,
@@ -91,6 +92,45 @@ Ext.define('PBS.Dashboard', {
            hdPanel.updateValue(root.used / root.total);
        },

+       showFingerPrint: function() {
+           let me = this;
+           let vm = me.getViewModel();
+           let fingerprint = vm.get('fingerprint');
+           Ext.create('Ext.window.Window', {
+               modal: true,
+               width: 600,
+               title: gettext('Fingerprint'),
+               layout: 'form',
+               bodyPadding: '10 0',
+               items: [
+                   {
+                       xtype: 'textfield',
+                       inputId: 'fingerprintField',
+                       value: fingerprint,
+                       editable: false,
+                   },
+               ],
+               buttons: [
+                   {
+                       xtype: 'button',
+                       iconCls: 'fa fa-clipboard',
+                       handler: function(b) {
+                           var el = document.getElementById('fingerprintField');
+                           el.select();
+                           document.execCommand("copy");
+                       },
+                       text: gettext('Copy')
+                   },
+                   {
+                       text: gettext('Ok'),
+                       handler: function() {
+                           this.up('window').close();
+                       },
+                   },
+               ],
+           }).show();
+       },
+
        updateTasks: function(store, records, success) {
            if (!success) return;
            let me = this;
@@ -134,11 +174,16 @@ Ext.define('PBS.Dashboard', {
            timespan: 300, // in seconds
            hours: 12, // in hours
            error_shown: false,
+           fingerprint: "",
            'bytes_in': 0,
            'bytes_out': 0,
            'avg_ptime': 0.0
        },

+       formulas: {
+           disableFPButton: (get) => get('fingerprint') === "",
+       },
+
        stores: {
            usage: {
                storeid: 'dash-usage',
@@ -164,7 +209,7 @@ Ext.define('PBS.Dashboard', {
            autoDestroy: true,
            proxy: {
                type: 'proxmox',
-               url: '/api2/json/subscription'
+               url: '/api2/json/nodes/localhost/subscription'
            },
            listeners: {
                load: 'updateSubscription'
@@ -211,6 +256,16 @@ Ext.define('PBS.Dashboard', {
            iconCls: 'fa fa-tasks',
            title: gettext('Server Resources'),
            bodyPadding: '0 20 0 20',
+           tools: [
+               {
+                   xtype: 'button',
+                   text: gettext('Show Fingerprint'),
+                   handler: 'showFingerPrint',
+                   bind: {
+                       disabled: '{disableFPButton}',
+                   },
+               },
+           ],
            layout: {
                type: 'hbox',
                align: 'center'
@@ -12,26 +12,28 @@ Ext.define('pbs-data-store-snapshots', {
        'owner',
        { name: 'size', type: 'int', allowNull: true, },
        {
-           name: 'encrypted',
+           name: 'crypt-mode',
            type: 'boolean',
            calculate: function(data) {
-               let encrypted = 0;
-               let files = 0;
+               let crypt = {
+                   none: 0,
+                   mixed: 0,
+                   'sign-only': 0,
+                   encrypt: 0,
+                   count: 0,
+               };
+               let signed = 0;
                data.files.forEach(file => {
                    if (file.filename === 'index.json.blob') return; // is never encrypted
-                   if (file.encrypted) {
-                       encrypted++;
+                   let mode = PBS.Utils.cryptmap.indexOf(file['crypt-mode']);
+                   if (mode !== -1) {
+                       crypt[file['crypt-mode']]++;
                    }
-                   files++;
+                   crypt.count++;
                });

-               if (encrypted === 0) {
-                   return 0;
-               } else if (encrypted < files) {
-                   return 1;
-               } else {
-                   return 2;
-               }
+               return PBS.Utils.calculateCryptMode(crypt);
            }
        }
    ]
@@ -149,11 +151,14 @@ Ext.define('PBS.DataStoreContent', {
            let children = [];
            for (const [_key, group] of Object.entries(groups)) {
                let last_backup = 0;
-               let encrypted = 0;
+               let crypt = {
+                   none: 0,
+                   mixed: 0,
+                   'sign-only': 0,
+                   encrypt: 0,
+               };
                for (const item of group.children) {
-                   if (item.encrypted > 0) {
-                       encrypted++;
-                   }
+                   crypt[PBS.Utils.cryptmap[item['crypt-mode']]]++;
                    if (item["backup-time"] > last_backup && item.size !== null) {
                        last_backup = item["backup-time"];
                        group["backup-time"] = last_backup;
@@ -163,14 +168,9 @@ Ext.define('PBS.DataStoreContent', {
                    }

                }
-               if (encrypted === 0) {
-                   group.encrypted = 0;
-               } else if (encrypted < group.children.length) {
-                   group.encrypted = 1;
-               } else {
-                   group.encrypted = 2;
-               }
                group.count = group.children.length;
+               crypt.count = group.count;
+               group['crypt-mode'] = PBS.Utils.calculateCryptMode(crypt);
                children.push(group);
            }

@@ -296,7 +296,7 @@ Ext.define('PBS.DataStoreContent', {

            let encrypted = false;
            data.files.forEach(file => {
-               if (file.filename === 'catalog.pcat1.didx' && file.encrypted) {
+               if (file.filename === 'catalog.pcat1.didx' && file['crypt-mode'] === 'encrypt') {
                    encrypted = true;
                }
            });
@@ -365,15 +365,8 @@ Ext.define('PBS.DataStoreContent', {
        },
        {
            header: gettext('Encrypted'),
-           dataIndex: 'encrypted',
-           renderer: function(value) {
-               switch (value) {
-                   case 0: return Proxmox.Utils.noText;
-                   case 1: return gettext('Mixed');
-                   case 2: return Proxmox.Utils.yesText;
-                   default: Proxmox.Utils.unknownText;
-               }
-           }
+           dataIndex: 'crypt-mode',
+           renderer: value => PBS.Utils.cryptText[value] || Proxmox.Utils.unknownText,
        },
        {
            header: gettext("Files"),
@@ -383,8 +376,10 @@ Ext.define('PBS.DataStoreContent', {
                return files.map((file) => {
                    let icon = '';
                    let size = '';
-                   if (file.encrypted) {
-                       icon = '<i class="fa fa-lock"></i> ';
+                   let mode = PBS.Utils.cryptmap.indexOf(file['crypt-mode']);
+                   let iconCls = PBS.Utils.cryptIconCls[mode] || '';
+                   if (iconCls !== '') {
+                       icon = `<i class="fa fa-${iconCls}"></i> `;
                    }
                    if (file.size) {
                        size = ` (${Proxmox.Utils.format_size(file.size)})`;
@@ -125,7 +125,7 @@ Ext.define('PBS.MainView', {
    },

    control: {
-       'button[reference=logoutButton]': {
+       '[reference=logoutButton]': {
            click: 'logout'
        }
    },
@@ -133,7 +133,8 @@ Ext.define('PBS.MainView', {
    init: function(view) {
        var me = this;

-       me.lookupReference('usernameinfo').update({username:Proxmox.UserName});
+       PBS.data.RunningTasksStore.startUpdate();
+       me.lookupReference('usernameinfo').setText(Proxmox.UserName);

        // show login on requestexception
        // fixme: what about other errors
@@ -189,7 +190,7 @@ Ext.define('PBS.MainView', {
                type: 'hbox',
                align: 'middle'
            },
-           margin: '2 5 2 5',
+           margin: '2 0 2 5',
            height: 38,
            items: [
                {
@@ -197,7 +198,8 @@ Ext.define('PBS.MainView', {
                    prefix: '',
                },
                {
-                   xtype: 'versioninfo'
+                   padding: '0 0 0 5',
+                   xtype: 'versioninfo',
                },
                {
                    padding: 5,
@@ -208,12 +210,6 @@ Ext.define('PBS.MainView', {
                    flex: 1,
                    baseCls: 'x-plain',
                },
-               {
-                   baseCls: 'x-plain',
-                   reference: 'usernameinfo',
-                   padding: '0 5',
-                   tpl: Ext.String.format(gettext("You are logged in as {0}"), "'{username}'")
-               },
                {
                    xtype: 'button',
                    baseCls: 'x-btn',
@@ -224,11 +220,27 @@ Ext.define('PBS.MainView', {
                    margin: '0 5 0 0',
                },
                {
-                   reference: 'logoutButton',
-                   xtype: 'button',
-                   iconCls: 'fa fa-sign-out',
-                   text: gettext('Logout')
-               }
+                   xtype: 'pbsTaskButton',
+                   margin: '0 5 0 0',
+               },
+               {
+                   xtype: 'button',
+                   reference: 'usernameinfo',
+                   style: {
+                       // proxmox dark grey p light grey as border
+                       backgroundColor: '#464d4d',
+                       borderColor: '#ABBABA'
+                   },
+                   margin: '0 5 0 0',
+                   iconCls: 'fa fa-user',
+                   menu: [
+                       {
+                           reference: 'logoutButton',
+                           iconCls: 'fa fa-sign-out',
+                           text: gettext('Logout'),
+                       },
+                   ],
+               },
            ]
        },
        {
@@ -8,6 +8,9 @@ JSSRC= \
    form/UserSelector.js \
    form/RemoteSelector.js \
    form/DataStoreSelector.js \
+   form/CalendarEvent.js \
+   data/RunningTasksStore.js \
+   button/TaskButton.js \
    config/UserView.js \
    config/RemoteView.js \
    config/ACLView.js \
@@ -53,6 +56,10 @@ js/proxmox-backup-gui.js: js OnlineHelpInfo.js ${JSSRC}
    cat OnlineHelpInfo.js ${JSSRC} >$@.tmp
    mv $@.tmp $@

+.PHONY: lint
+lint: ${JSSRC}
+   eslint ${JSSRC}
+
 .PHONY: clean
 clean:
    find . -name '*~' -exec rm {} ';'
@@ -86,7 +86,15 @@ Ext.define('PBS.ServerStatus', {
        iconCls: 'fa fa-power-off'
    });

-   me.tbar = [ restartBtn, shutdownBtn, '->', { xtype: 'proxmoxRRDTypeSelector' } ];
+   var consoleBtn = Ext.create('Proxmox.button.Button', {
+       text: gettext('Console'),
+       iconCls: 'fa fa-terminal',
+       handler: function() {
+           Proxmox.Utils.openXtermJsViewer('shell', 0, Proxmox.NodeName);
+       }
+   });
+
+   me.tbar = [ consoleBtn, restartBtn, shutdownBtn, '->', { xtype: 'proxmoxRRDTypeSelector' } ];

    var rrdstore = Ext.create('Proxmox.data.RRDStore', {
        rrdurl: "/api2/json/nodes/localhost/rrd",
@@ -37,7 +37,7 @@ Ext.define('PBS.Subscription', {
        me.rstore.load();
    };

-   var baseurl = '/subscription';
+   var baseurl = '/nodes/localhost/subscription';

    var render_status = function(value) {
www/Utils.js | 39
@@ -13,6 +13,45 @@ Ext.define('PBS.Utils', {

    dataStorePrefix: 'DataStore-',

+   cryptmap: [
+       'none',
+       'mixed',
+       'sign-only',
+       'encrypt',
+   ],
+
+   cryptText: [
+       Proxmox.Utils.noText,
+       gettext('Mixed'),
+       gettext('Signed'),
+       gettext('Encrypted'),
+   ],
+
+   cryptIconCls: [
+       '',
+       '',
+       'certificate',
+       'lock',
+   ],
+
+   calculateCryptMode: function(data) {
+       let mixed = data.mixed;
+       let encrypted = data.encrypt;
+       let signed = data['sign-only'];
+       let files = data.count;
+       if (mixed > 0) {
+           return PBS.Utils.cryptmap.indexOf('mixed');
+       } else if (files === encrypted) {
+           return PBS.Utils.cryptmap.indexOf('encrypt');
+       } else if (files === signed) {
+           return PBS.Utils.cryptmap.indexOf('sign-only');
+       } else if ((signed+encrypted) === 0) {
+           return PBS.Utils.cryptmap.indexOf('none');
+       } else {
+           return PBS.Utils.cryptmap.indexOf('mixed');
+       }
+   },
+
    getDataStoreFromPath: function(path) {
        return path.slice(PBS.Utils.dataStorePrefix.length);
    },
www/button/TaskButton.js | 92 (new file)

@@ -0,0 +1,92 @@
+Ext.define('PBS.TaskButton', {
+    extend: 'Ext.button.Button',
+    alias: 'widget.pbsTaskButton',
+
+    config: {
+       badgeText: '0',
+       badgeCls: '',
+    },
+
+    iconCls: 'fa fa-list',
+    userCls: 'pmx-has-badge',
+    text: gettext('Tasks'),
+
+    setText: function(value) {
+       let me = this;
+       me.realText = value;
+       let badgeText = me.getBadgeText();
+       let badgeCls = me.getBadgeCls();
+       let text = `${value} <span class="pmx-button-badge ${badgeCls}">${badgeText}</span>`;
+       return me.callParent([text]);
+    },
+
+    getText: function() {
+       let me = this;
+       return me.realText;
+    },
+
+    setBadgeText: function(value) {
+       let me = this;
+       me.badgeText = value.toString();
+       return me.setText(me.getText());
+    },
+
+    setBadgeCls: function(value) {
+       let me = this;
+       let res = me.callParent([value]);
+       let badgeText = me.getBadgeText();
+       me.setBadgeText(badgeText);
+       return res;
+    },
+
+    handler: function() {
+       let me = this;
+       if (me.grid.isVisible()) {
+           me.grid.setVisible(false);
+       } else {
+           me.grid.showBy(me, 'tr-br');
+       }
+    },
+
+    initComponent: function() {
+       let me = this;
+
+       me.grid = Ext.create({
+           xtype: 'pbsRunningTasks',
+           title: '',
+           hideHeaders: false,
+           floating: true,
+
+           width: 600,
+
+           bbar: [
+               '->',
+               {
+                   xtype: 'button',
+                   text: gettext('Show All Tasks'),
+                   handler: function() {
+                       var mainview = me.up('mainview');
+                       mainview.getController().redirectTo('pbsServerAdministration:tasks');
+                       me.grid.hide();
+                   },
+               },
+           ],
+
+           listeners: {
+               'taskopened': function() {
+                   me.grid.hide();
+               },
+           },
+       });
+       me.callParent();
+       me.mon(me.grid.getStore().rstore, 'load', function(store, records, success) {
+           if (!success) return;
+
+           let count = records.length;
+           let text = count > 99 ? '99+' : count.toString();
+           let cls = count > 0 ? 'active': '';
+           me.setBadgeText(text);
+           me.setBadgeCls(cls);
+       });
+    },
+});
@@ -190,3 +190,21 @@ p.logs {
    visibility: hidden;
    width: 5px;
 }
+
+.pmx-has-badge .x-btn-inner {
+    padding: 0 0 0 5px;
+    min-width: 24px;
+}
+
+.pmx-button-badge {
+    display: inline-block;
+    font-weight: bold;
+    border-radius: 4px;
+    padding: 2px 3px;
+    min-width: 24px;
+    line-height: 1em;
+}
+
+.pmx-button-badge.active {
+    background-color: #464d4d;
+}
@ -18,6 +18,8 @@ Ext.define('PBS.RunningTasks', {
                 upid: record.data.upid,
                 endtime: record.data.endtime,
             }).show();
+
+            view.fireEvent('taskopened', view, record.data.upid);
         },
 
         openTaskItemDblClick: function(grid, record) {
@ -54,20 +56,8 @@ Ext.define('PBS.RunningTasks', {
     store: {
         type: 'diff',
         autoDestroy: true,
-        autoDestroyRstore: true,
         sorters: 'starttime',
-        rstore: {
-            type: 'update',
-            autoStart: true,
-            interval: 3000,
-            storeid: 'pbs-running-tasks-dash',
-            model: 'proxmox-tasks',
-            proxy: {
-                type: 'proxmox',
-                // maybe separate api call?
-                url: '/api2/json/nodes/localhost/tasks?running=1'
-            },
-        },
+        rstore: PBS.data.RunningTasksStore,
    },
 
     columns: [
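The inline polling store is replaced by the shared PBS.data.RunningTasksStore singleton (added below), which is why autoDestroyRstore is dropped: the grid's 'diff' store only mirrors the shared rstore and must not destroy it. The new taskopened event lets whoever opened the floating grid react once a task viewer is opened; a minimal consumer sketch:

    // any embedder of the running-tasks grid can subscribe the same way
    let grid = Ext.create({ xtype: 'pbsRunningTasks', floating: true });
    grid.on('taskopened', function(view, upid) {
        grid.hide(); // collapse the popup once the task viewer is open
    });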
@ -9,12 +9,27 @@ Ext.define('PBS.TaskSummary', {
 
     render_count: function(value, md, record, rowindex, colindex) {
         let cls = 'question';
+        let color = 'faded';
         switch (colindex) {
-            case 1: cls = "times-circle critical"; break;
-            case 2: cls = "exclamation-circle warning"; break;
-            case 3: cls = "check-circle good"; break;
+            case 1:
+                cls = "times-circle";
+                color = "critical";
+                break;
+            case 2:
+                cls = "exclamation-circle";
+                color = "warning";
+                break;
+            case 3:
+                cls = "check-circle";
+                color = "good";
+                break;
             default: break;
         }
+
+        if (value < 1) {
+            color = "faded";
+        }
+        cls += " " + color;
         return `<i class="fa fa-${cls}"></i> ${value}`;
     },
 },
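Worked examples of the refactored renderer, evaluated by hand:

    // colindex 1, value 0 -> '<i class="fa fa-times-circle faded"></i> 0'
    // colindex 1, value 3 -> '<i class="fa fa-times-circle critical"></i> 3'
    // colindex 3, value 5 -> '<i class="fa fa-check-circle good"></i> 5'

i.e. zero counts now render faded instead of in their status color.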
21 www/data/RunningTasksStore.js Normal file
@ -0,0 +1,21 @@
+Ext.define('PBS.data.RunningTasksStore', {
+    extend: 'Proxmox.data.UpdateStore',
+
+    singleton: true,
+
+    constructor: function(config) {
+        let me = this;
+        config = config || {};
+        Ext.apply(config, {
+            interval: 3000,
+            storeid: 'pbs-running-tasks-dash',
+            model: 'proxmox-tasks',
+            proxy: {
+                type: 'proxmox',
+                // maybe separate api call?
+                url: '/api2/json/nodes/localhost/tasks?running=1&limit=100',
+            },
+        });
+        me.callParent([config]);
+    },
+});
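With singleton: true, Ext instantiates this store once at load time and all consumers share one 3-second poll cycle. A consumption sketch; startUpdate() is assumed here from the Proxmox.data.UpdateStore base class in proxmox-widget-toolkit:

    // share one poller across views instead of one request loop per view
    PBS.data.RunningTasksStore.startUpdate(); // assumed UpdateStore API
    PBS.data.RunningTasksStore.on('load', function(store, records) {
        console.log(`${records.length} running task(s)`);
    });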
64 www/form/CalendarEvent.js Normal file
@ -0,0 +1,64 @@
+Ext.define('PBS.data.CalendarEventExamples', {
+    extend: 'Ext.data.Store',
+    alias: 'store.calendarEventExamples',
+
+    fields: ['value', 'text'],
+    data: [
+        //FIXME { value: '*/30', text: Ext.String.format(gettext("Every {0} minutes"), 30) },
+        { value: 'hourly', text: gettext("Every hour") },
+        //FIXME { value: '*/2:00', text: gettext("Every two hours") },
+        { value: '2,22:30', text: gettext("Every day") + " 02:30, 22:30" },
+        { value: 'daily', text: gettext("Every day") + " 00:00" },
+        { value: 'mon..fri', text: gettext("Monday to Friday") + " 00:00" },
+        //FIXME{ value: 'mon..fri */1:00', text: gettext("Monday to Friday") + ': ' + gettext("hourly") },
+        { value: 'sat 18:15', text: gettext("Every Saturday") + " 18:15" },
+        //FIXME{ value: 'monthly', text: gettext("Every 1st of Month") + " 00:00" }, // not yet possible..
+    ],
+});
+
+Ext.define('PBS.form.CalendarEvent', {
+    extend: 'Ext.form.field.ComboBox',
+    xtype: 'pbsCalendarEvent',
+
+    editable: true,
+
+    valueField: 'value',
+    displayField: 'text',
+    queryMode: 'local',
+
+    config: {
+        deleteEmpty: true,
+    },
+    // override framework function to implement deleteEmpty behaviour
+    getSubmitData: function() {
+        let me = this, data = null;
+        if (!me.disabled && me.submitValue) {
+            let val = me.getSubmitValue();
+            if (val !== null && val !== '' && val !== '__default__') {
+                data = {};
+                data[me.getName()] = val;
+            } else if (me.getDeleteEmpty()) {
+                data = {};
+                data.delete = me.getName();
+            }
+        }
+        return data;
+    },
+
+
+    store: {
+        type: 'calendarEventExamples',
+    },
+
+    tpl: [
+        '<ul class="x-list-plain"><tpl for=".">',
+        '<li role="option" class="x-boundlist-item">{text}</li>',
+        '</tpl></ul>',
+    ],
+
+    displayTpl: [
+        '<tpl for=".">',
+        '{value}',
+        '</tpl>',
+    ],
+});
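A worked example of the deleteEmpty behaviour implemented by getSubmitData() above, for a field named 'gc-schedule':

    // value picked or typed:
    //   getSubmitData() -> { 'gc-schedule': 'daily' }
    // field cleared while editing an existing object (deleteEmpty true):
    //   getSubmitData() -> { delete: 'gc-schedule' }

Clearing the field thus removes the option server-side instead of submitting an empty string.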
@ -46,8 +46,9 @@ Ext.define('PBS.window.BackupFileDownloader', {
             let me = this;
             let combo = me.lookup('file');
             let rec = combo.getStore().findRecord('filename', value, 0, false, true, true);
-            let canDownload = !rec.data.encrypted;
+            let canDownload = rec.data['crypt-mode'] !== 'encrypt';
             me.lookup('encryptedHint').setVisible(!canDownload);
+            me.lookup('signedHint').setVisible(rec.data['crypt-mode'] === 'sign-only');
             me.lookup('downloadBtn').setDisabled(!canDownload);
         },
 
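The download gating that results from the two crypt-mode checks above:

    // 'encrypt'        -> download disabled, encryptedHint shown
    // 'sign-only'      -> download allowed, signedHint shown
    // any other value  -> download allowed, no hint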
@ -88,7 +89,7 @@ Ext.define('PBS.window.BackupFileDownloader', {
             emptyText: gettext('No file selected'),
             fieldLabel: gettext('File'),
             store: {
-                fields: ['filename', 'size', 'encrypted',],
+                fields: ['filename', 'size', 'crypt-mode',],
                 idProperty: ['filename'],
             },
             listConfig: {
@ -107,12 +108,25 @@ Ext.define('PBS.window.BackupFileDownloader', {
                     },
                     {
                         text: gettext('Encrypted'),
-                        dataIndex: 'encrypted',
-                        renderer: Proxmox.Utils.format_boolean,
+                        dataIndex: 'crypt-mode',
+                        renderer: function(value) {
+                            let mode = -1;
+                            if (value !== undefined) {
+                                mode = PBS.Utils.cryptmap.indexOf(value);
+                            }
+                            return PBS.Utils.cryptText[mode] || Proxmox.Utils.unknownText;
+                        }
                     },
                 ],
             },
         },
+        {
+            xtype: 'displayfield',
+            userCls: 'pmx-hint',
+            reference: 'signedHint',
+            hidden: true,
+            value: gettext('Note: Signatures of signed files will not be verified on the server. Please use the client to do this.'),
+        },
         {
             xtype: 'displayfield',
             userCls: 'pmx-hint',
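The renderer maps the 'crypt-mode' string through two lookup tables defined elsewhere in PBS.Utils; they are not part of this diff, but the indexOf pattern implies a shape roughly like this (an assumption, for illustration only):

    // assumed shape of the lookup tables (not shown in this diff):
    PBS.Utils.cryptmap = ['none', 'mixed', 'sign-only', 'encrypt'];
    PBS.Utils.cryptText = [gettext('No'), gettext('Mixed'),
        gettext('Signed'), gettext('Encrypted')];
    // unknown values yield index -1 and fall back to Proxmox.Utils.unknownText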
@ -15,140 +15,139 @@ Ext.define('PBS.DataStoreEdit', {
         let baseurl = '/api2/extjs/config/datastore';
 
         me.isCreate = !name;
+        if (!me.isCreate) {
+            me.defaultFocus = 'textfield[name=comment]';
+        }
         me.url = name ? baseurl + '/' + name : baseurl;
         me.method = name ? 'PUT' : 'POST';
         me.autoLoad = !!name;
         return {};
     },
 
-    items: [
-        {
-            xtype: 'tabpanel',
+    items: {
+        xtype: 'tabpanel',
         bodyPadding: 10,
         items: [
             {
                 title: gettext('General'),
                 xtype: 'inputpanel',
                 column1: [
                     {
                         xtype: 'pmxDisplayEditField',
                         cbind: {
                             editable: '{isCreate}',
                         },
                         name: 'name',
                         allowBlank: false,
                         fieldLabel: gettext('Name'),
                     },
                     {
                         xtype: 'pmxDisplayEditField',
                         cbind: {
                             editable: '{isCreate}',
                         },
                         name: 'path',
                         allowBlank: false,
                         fieldLabel: gettext('Backing Path'),
                         emptyText: gettext('An absolute path'),
                     },
                 ],
                 column2: [
                     {
-                        xtype: 'proxmoxtextfield',
+                        xtype: 'pbsCalendarEvent',
                         name: 'gc-schedule',
                         fieldLabel: gettext("GC Schedule"),
+                        emptyText: gettext('none'),
                         cbind: {
                             deleteEmpty: '{!isCreate}',
                         },
                     },
                     {
-                        xtype: 'proxmoxtextfield',
+                        xtype: 'pbsCalendarEvent',
                         name: 'prune-schedule',
                         fieldLabel: gettext("Prune Schedule"),
+                        emptyText: gettext('none'),
                         cbind: {
                             deleteEmpty: '{!isCreate}',
                         },
                     },
                 ],
                 columnB: [
                     {
                         xtype: 'textfield',
                         name: 'comment',
                         fieldLabel: gettext('Comment'),
                     },
                 ],
             },
             {
                 title: gettext('Prune Options'),
                 xtype: 'inputpanel',
                 column1: [
                     {
                         xtype: 'proxmoxintegerfield',
                         fieldLabel: gettext('Keep Last'),
                         name: 'keep-last',
                         cbind: {
                             deleteEmpty: '{!isCreate}',
                         },
                         minValue: 1,
                         allowBlank: true,
                     },
                     {
                         xtype: 'proxmoxintegerfield',
                         fieldLabel: gettext('Keep Daily'),
                         name: 'keep-daily',
                         cbind: {
                             deleteEmpty: '{!isCreate}',
                         },
                         minValue: 1,
                         allowBlank: true,
                     },
                     {
                         xtype: 'proxmoxintegerfield',
                         fieldLabel: gettext('Keep Monthly'),
                         name: 'keep-monthly',
                         cbind: {
                             deleteEmpty: '{!isCreate}',
                         },
                         minValue: 1,
                         allowBlank: true,
                     },
                 ],
                 column2: [
                     {
                         xtype: 'proxmoxintegerfield',
                         fieldLabel: gettext('Keep Hourly'),
                         name: 'keep-hourly',
                         cbind: {
                             deleteEmpty: '{!isCreate}',
                         },
                         minValue: 1,
                         allowBlank: true,
                     },
                     {
                         xtype: 'proxmoxintegerfield',
                         fieldLabel: gettext('Keep Weekly'),
                         name: 'keep-weekly',
                         cbind: {
                             deleteEmpty: '{!isCreate}',
                         },
                         minValue: 1,
                         allowBlank: true,
                     },
                     {
                         xtype: 'proxmoxintegerfield',
                         fieldLabel: gettext('Keep Yearly'),
                         name: 'keep-yearly',
                         cbind: {
                             deleteEmpty: '{!isCreate}',
                         },
                         minValue: 1,
                         allowBlank: true,
                     },
                 ],
-            }
-        ]
-    }
-],
+            },
+        ],
+    },
 });
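Both schedule fields now use the pbsCalendarEvent selector from www/form/CalendarEvent.js, so they accept the calendar-event strings suggested by PBS.data.CalendarEventExamples while remaining free-form editable, e.g.:

    'hourly'      // every hour
    'daily'       // every day at 00:00
    'mon..fri'    // Monday to Friday at 00:00
    'sat 18:15'   // every Saturday at 18:15
    '2,22:30'     // every day at 02:30 and 22:30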
@ -28,7 +28,7 @@ Ext.define('PBS.window.SyncJobEdit', {
         xtype: 'inputpanel',
         column1: [
             {
-                fieldLabel: gettext('Sync Job'),
+                fieldLabel: gettext('Sync Job ID'),
                 xtype: 'pmxDisplayEditField',
                 name: 'id',
                 renderer: Ext.htmlEncode,
@ -39,23 +39,23 @@ Ext.define('PBS.window.SyncJobEdit', {
                 },
             },
             {
-                fieldLabel: gettext('Remote'),
+                fieldLabel: gettext('Source Remote'),
                 xtype: 'pbsRemoteSelector',
                 allowBlank: false,
                 name: 'remote',
             },
+            {
+                fieldLabel: gettext('Source Datastore'),
+                xtype: 'proxmoxtextfield',
+                allowBlank: false,
+                name: 'remote-store',
+            },
             {
                 fieldLabel: gettext('Local Datastore'),
                 xtype: 'pbsDataStoreSelector',
                 allowBlank: false,
                 name: 'store',
             },
-            {
-                fieldLabel: gettext('Remote Datastore'),
-                xtype: 'proxmoxtextfield',
-                allowBlank: false,
-                name: 'remote-store',
-            },
         ],
 
         column2: [
@ -64,12 +64,13 @@ Ext.define('PBS.window.SyncJobEdit', {
                 xtype: 'proxmoxcheckbox',
                 name: 'remove-vanished',
                 uncheckedValue: false,
-                value: true,
+                value: false,
             },
             {
                 fieldLabel: gettext('Schedule'),
-                xtype: 'proxmoxtextfield',
+                xtype: 'pbsCalendarEvent',
                 name: 'schedule',
+                emptyText: gettext('none'),
                 cbind: {
                     deleteEmpty: '{!isCreate}',
                 },
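Taken together, a newly created sync job with untouched defaults would submit roughly the following (all values are placeholders; only the keys come from the panel above):

    // sketch of the submitted form data under the new defaults:
    // {
    //     id: 'sync-job-1',
    //     remote: 'my-remote',
    //     'remote-store': 'source-ds',
    //     store: 'local-ds',
    //     'remove-vanished': false,   // previously defaulted to true
    // }
    // 'schedule' is omitted while empty (and deleted on edit via deleteEmpty).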