Compare commits
131 Commits
Author | SHA1 | Date | |
---|---|---|---|
39cd81de92 | |||
62c74d7749 | |||
254ec19412 | |||
97bbd1bf9e | |||
54aec2fa8b | |||
e1dfcddc79 | |||
344add3885 | |||
752dfc4bda | |||
72be0eb189 | |||
fdc00811ce | |||
6c5bdef567 | |||
ea545b395b | |||
f6b1d1cc66 | |||
d1993187b6 | |||
adfcfb6788 | |||
07995a3ca3 | |||
dd76eba73e | |||
b13da548b9 | |||
fe0efb25e8 | |||
b0b00c4a47 | |||
19ca962b15 | |||
d479f0c810 | |||
1d5dac1b1d | |||
96c3d98256 | |||
0b3dc8ed8c | |||
9a75eb11cb | |||
92dd02aaf6 | |||
41bfd24919 | |||
fddc8aa410 | |||
735ee5206a | |||
a86bf52390 | |||
2deee0e01f | |||
2d7d6e61be | |||
4ec17f7eb5 | |||
fcad02e1de | |||
708fab3082 | |||
3bbb70b3d3 | |||
0c80f4fa87 | |||
21486225c8 | |||
a2920c3757 | |||
6e0f58e7a9 | |||
dee74aa440 | |||
4acd7229d3 | |||
9608ac3486 | |||
ad9d1625a6 | |||
1a558edd0b | |||
5976c392ad | |||
a92b2d6a00 | |||
7d4bf881f7 | |||
05be0984b4 | |||
cdbc18fc4e | |||
2995aedf1d | |||
45f9b32e0f | |||
1d0b662b42 | |||
38f5cb5b71 | |||
476328b302 | |||
4c3efb532d | |||
dafe3197ab | |||
90d7425afe | |||
2d81f7b0c0 | |||
04e24b14f0 | |||
a2bf852818 | |||
0ac612476a | |||
0c6b83d656 | |||
4e6dc58727 | |||
66bbd4200c | |||
326c835e60 | |||
1a48cbf164 | |||
3480777d89 | |||
a71bc08ff4 | |||
df766e668f | |||
0a8f3ae0b3 | |||
da6e67b321 | |||
dec00364b3 | |||
5637087cc9 | |||
5ad4bdc482 | |||
823867f5b7 | |||
c6772c92b8 | |||
79f6a79cfc | |||
4c7f100d22 | |||
9070d11f4c | |||
124b93f31c | |||
0f22f53b36 | |||
3784dbf029 | |||
4c95d58c41 | |||
38d4675921 | |||
7b8aa893fa | |||
fb2678f96e | |||
486ed27299 | |||
df4827f2c0 | |||
ef1b436350 | |||
b19b4bfcb0 | |||
e64b9f9204 | |||
9c33683c25 | |||
ba20987ae7 | |||
729d41fe6a | |||
905147a5ee | |||
0c41e0d06b | |||
b37b59b726 | |||
60b9b48e71 | |||
abf8b5d475 | |||
7eebe1483e | |||
9a76091785 | |||
c386b06fc6 | |||
6bcfc5c1a4 | |||
768e10d0b3 | |||
e7244387c7 | |||
5ade6c25f3 | |||
784fa1c2e3 | |||
66f4e6a809 | |||
8074d2b0c3 | |||
b02d49ab26 | |||
82a0cd2ad4 | |||
ee1a9c3230 | |||
db24c01106 | |||
ae3cfa8f0d | |||
b56c111e93 | |||
bbeb0256f1 | |||
005a5b9677 | |||
55bee04856 | |||
42fd40a124 | |||
f21508b9e1 | |||
ee7a308de4 | |||
636e674ee7 | |||
b02b374b46 | |||
1c13afa8f9 | |||
69b92fab7e | |||
6ab77df3f5 | |||
264c19582b | |||
8acd4d9afc | |||
65b0cea6bd |
@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "proxmox-backup"
|
name = "proxmox-backup"
|
||||||
version = "0.8.21"
|
version = "0.9.1"
|
||||||
authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
|
authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
license = "AGPL-3"
|
license = "AGPL-3"
|
||||||
@ -38,8 +38,8 @@ pam-sys = "0.5"
|
|||||||
percent-encoding = "2.1"
|
percent-encoding = "2.1"
|
||||||
pin-utils = "0.1.0"
|
pin-utils = "0.1.0"
|
||||||
pathpatterns = "0.1.2"
|
pathpatterns = "0.1.2"
|
||||||
proxmox = { version = "0.4.1", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
proxmox = { version = "0.4.3", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
||||||
#proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
|
#proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
|
||||||
#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
||||||
proxmox-fuse = "0.1.0"
|
proxmox-fuse = "0.1.0"
|
||||||
pxar = { version = "0.6.1", features = [ "tokio-io", "futures-io" ] }
|
pxar = { version = "0.6.1", features = [ "tokio-io", "futures-io" ] }
|
||||||
|
20
README.rst
20
README.rst
@ -13,7 +13,7 @@ Versioning of proxmox helper crates
|
|||||||
|
|
||||||
To use current git master code of the proxmox* helper crates, add::
|
To use current git master code of the proxmox* helper crates, add::
|
||||||
|
|
||||||
git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox"
|
git = "git://git.proxmox.com/git/proxmox"
|
||||||
|
|
||||||
or::
|
or::
|
||||||
|
|
||||||
@ -22,6 +22,7 @@ or::
|
|||||||
to the proxmox dependency, and update the version to reflect the current,
|
to the proxmox dependency, and update the version to reflect the current,
|
||||||
pre-release version number (e.g., "0.1.1-dev.1" instead of "0.1.0").
|
pre-release version number (e.g., "0.1.1-dev.1" instead of "0.1.0").
|
||||||
|
|
||||||
|
|
||||||
Local cargo config
|
Local cargo config
|
||||||
==================
|
==================
|
||||||
|
|
||||||
@ -35,3 +36,20 @@ checksums are not compatible.
|
|||||||
To reference new dependencies (or updated versions) that are not yet packaged,
|
To reference new dependencies (or updated versions) that are not yet packaged,
|
||||||
the dependency needs to point directly to a path or git source (e.g., see
|
the dependency needs to point directly to a path or git source (e.g., see
|
||||||
example for proxmox crate above).
|
example for proxmox crate above).
|
||||||
|
|
||||||
|
|
||||||
|
Build
|
||||||
|
=====
|
||||||
|
on Debian Buster
|
||||||
|
|
||||||
|
Setup:
|
||||||
|
1. # echo 'deb http://download.proxmox.com/debian/devel/ buster main' >> /etc/apt/sources.list.d/proxmox-devel.list
|
||||||
|
2. # sudo wget http://download.proxmox.com/debian/proxmox-ve-release-6.x.gpg -O /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
|
||||||
|
3. # sudo apt update
|
||||||
|
4. # sudo apt install devscripts debcargo clang
|
||||||
|
5. # git clone git://git.proxmox.com/git/proxmox-backup.git
|
||||||
|
6. # sudo mk-build-deps -ir
|
||||||
|
|
||||||
|
Note: 2. may be skipped if you already added the PVE or PBS package repository
|
||||||
|
|
||||||
|
You are now able to build using the Makefile or cargo itself.
|
||||||
|
85
debian/changelog
vendored
85
debian/changelog
vendored
@ -1,3 +1,88 @@
|
|||||||
|
rust-proxmox-backup (0.9.1-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* TLS speedups (use SslAcceptor::mozilla_intermediate_v5)
|
||||||
|
|
||||||
|
* introduction.rst: add History
|
||||||
|
|
||||||
|
* fix #2847: proxmox-backup-client: add change-owner cmd
|
||||||
|
|
||||||
|
* proxmox-backup-client key: rename 'paper-key' command to 'paperkey'
|
||||||
|
|
||||||
|
* don't require WorkerTask in backup/ (introduce TaskState trait)
|
||||||
|
|
||||||
|
* fix #3070: replace internal with public URLs
|
||||||
|
|
||||||
|
* backup: index readers: drop useless shared lock
|
||||||
|
|
||||||
|
* add "Build" section to README.rst
|
||||||
|
|
||||||
|
* reader: actually allow users to downlod their own backups
|
||||||
|
|
||||||
|
* reader: track index chunks and limit access
|
||||||
|
|
||||||
|
* Userid: fix borrow/deref recursion
|
||||||
|
|
||||||
|
* depend on proxmox 0.4.3
|
||||||
|
|
||||||
|
* api: datastore: require allocate privilege for deletion
|
||||||
|
|
||||||
|
* fuse_loop: handle unmap on crashed instance
|
||||||
|
|
||||||
|
* fuse_loop: wait for instance to close after killing
|
||||||
|
|
||||||
|
* fuse_loop: add automatic cleanup of run files and dangling instances
|
||||||
|
|
||||||
|
* mount/map: use names for map/unmap for easier use
|
||||||
|
|
||||||
|
* ui: network: remove create VLAN option
|
||||||
|
|
||||||
|
* ui: Dashboard/TaskSummary: add Verifies to the Summary
|
||||||
|
|
||||||
|
* ui: implment task history limit and make it configurable
|
||||||
|
|
||||||
|
* docs: installation: add system requirements section
|
||||||
|
|
||||||
|
* client: implement map/unmap commands for .img backups
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 14 Oct 2020 13:42:12 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (0.9.0-2) unstable; urgency=medium
|
||||||
|
|
||||||
|
* ui: RemoteEdit: only send delete on update
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Fri, 02 Oct 2020 15:37:45 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (0.9.0-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* use ParallelHandler to verify chunks
|
||||||
|
|
||||||
|
* client: add new paper-key command to CLI tool
|
||||||
|
|
||||||
|
* server: split task list in active and archived
|
||||||
|
|
||||||
|
* tools: add logrotate module and use it for archived tasks, allowing to save
|
||||||
|
more than 100 thousands of tasks efficiently in the archive
|
||||||
|
|
||||||
|
* require square [brackets] for ipv6 addresses and fix ipv6 handling for
|
||||||
|
remotes/sync jobs
|
||||||
|
|
||||||
|
* ui: RemoteEdit: make comment and fingerprint deletable
|
||||||
|
|
||||||
|
* api/disks: create zfs: enable import systemd service unit for newly created
|
||||||
|
ZFS pools
|
||||||
|
|
||||||
|
* client and remotes: add support to specify a custom port number. The server
|
||||||
|
is still always listening on 8007, but you can now use things like reverse
|
||||||
|
proxies or port mapping.
|
||||||
|
|
||||||
|
* ui: RemoteEdit: allow to specify a port in the host field
|
||||||
|
|
||||||
|
* client pull: log progress
|
||||||
|
|
||||||
|
* various fixes and improvements
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 01 Oct 2020 16:19:40 +0200
|
||||||
|
|
||||||
rust-proxmox-backup (0.8.21-1) unstable; urgency=medium
|
rust-proxmox-backup (0.8.21-1) unstable; urgency=medium
|
||||||
|
|
||||||
* depend on crossbeam-channel
|
* depend on crossbeam-channel
|
||||||
|
15
debian/control
vendored
15
debian/control
vendored
@ -34,10 +34,10 @@ Build-Depends: debhelper (>= 11),
|
|||||||
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
|
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
|
||||||
librust-percent-encoding-2+default-dev (>= 2.1-~~),
|
librust-percent-encoding-2+default-dev (>= 2.1-~~),
|
||||||
librust-pin-utils-0.1+default-dev,
|
librust-pin-utils-0.1+default-dev,
|
||||||
librust-proxmox-0.4+api-macro-dev (>= 0.4.1-~~),
|
librust-proxmox-0.4+api-macro-dev (>= 0.4.3-~~),
|
||||||
librust-proxmox-0.4+default-dev (>= 0.4.1-~~),
|
librust-proxmox-0.4+default-dev (>= 0.4.3-~~),
|
||||||
librust-proxmox-0.4+sortable-macro-dev (>= 0.4.1-~~),
|
librust-proxmox-0.4+sortable-macro-dev (>= 0.4.3-~~),
|
||||||
librust-proxmox-0.4+websocket-dev (>= 0.4.1-~~),
|
librust-proxmox-0.4+websocket-dev (>= 0.4.3-~~),
|
||||||
librust-proxmox-fuse-0.1+default-dev,
|
librust-proxmox-fuse-0.1+default-dev,
|
||||||
librust-pxar-0.6+default-dev (>= 0.6.1-~~),
|
librust-pxar-0.6+default-dev (>= 0.6.1-~~),
|
||||||
librust-pxar-0.6+futures-io-dev (>= 0.6.1-~~),
|
librust-pxar-0.6+futures-io-dev (>= 0.6.1-~~),
|
||||||
@ -78,6 +78,7 @@ Build-Depends: debhelper (>= 11),
|
|||||||
uuid-dev,
|
uuid-dev,
|
||||||
debhelper (>= 12~),
|
debhelper (>= 12~),
|
||||||
bash-completion,
|
bash-completion,
|
||||||
|
pve-eslint,
|
||||||
python3-docutils,
|
python3-docutils,
|
||||||
python3-pygments,
|
python3-pygments,
|
||||||
rsync,
|
rsync,
|
||||||
@ -106,7 +107,7 @@ Depends: fonts-font-awesome,
|
|||||||
pbs-i18n,
|
pbs-i18n,
|
||||||
proxmox-backup-docs,
|
proxmox-backup-docs,
|
||||||
proxmox-mini-journalreader,
|
proxmox-mini-journalreader,
|
||||||
proxmox-widget-toolkit (>= 2.2-4),
|
proxmox-widget-toolkit (>= 2.3-1),
|
||||||
pve-xtermjs (>= 4.7.0-1),
|
pve-xtermjs (>= 4.7.0-1),
|
||||||
smartmontools,
|
smartmontools,
|
||||||
${misc:Depends},
|
${misc:Depends},
|
||||||
@ -118,7 +119,9 @@ Description: Proxmox Backup Server daemon with tools and GUI
|
|||||||
|
|
||||||
Package: proxmox-backup-client
|
Package: proxmox-backup-client
|
||||||
Architecture: any
|
Architecture: any
|
||||||
Depends: ${misc:Depends}, ${shlibs:Depends}
|
Depends: qrencode,
|
||||||
|
${misc:Depends},
|
||||||
|
${shlibs:Depends},
|
||||||
Description: Proxmox Backup Client tools
|
Description: Proxmox Backup Client tools
|
||||||
This package contains the Proxmox Backup client, which provides a
|
This package contains the Proxmox Backup client, which provides a
|
||||||
simple command line tool to create and restore backups.
|
simple command line tool to create and restore backups.
|
||||||
|
6
debian/control.in
vendored
6
debian/control.in
vendored
@ -7,7 +7,7 @@ Depends: fonts-font-awesome,
|
|||||||
pbs-i18n,
|
pbs-i18n,
|
||||||
proxmox-backup-docs,
|
proxmox-backup-docs,
|
||||||
proxmox-mini-journalreader,
|
proxmox-mini-journalreader,
|
||||||
proxmox-widget-toolkit (>= 2.2-4),
|
proxmox-widget-toolkit (>= 2.3-1),
|
||||||
pve-xtermjs (>= 4.7.0-1),
|
pve-xtermjs (>= 4.7.0-1),
|
||||||
smartmontools,
|
smartmontools,
|
||||||
${misc:Depends},
|
${misc:Depends},
|
||||||
@ -19,7 +19,9 @@ Description: Proxmox Backup Server daemon with tools and GUI
|
|||||||
|
|
||||||
Package: proxmox-backup-client
|
Package: proxmox-backup-client
|
||||||
Architecture: any
|
Architecture: any
|
||||||
Depends: ${misc:Depends}, ${shlibs:Depends}
|
Depends: qrencode,
|
||||||
|
${misc:Depends},
|
||||||
|
${shlibs:Depends},
|
||||||
Description: Proxmox Backup Client tools
|
Description: Proxmox Backup Client tools
|
||||||
This package contains the Proxmox Backup client, which provides a
|
This package contains the Proxmox Backup client, which provides a
|
||||||
simple command line tool to create and restore backups.
|
simple command line tool to create and restore backups.
|
||||||
|
1
debian/debcargo.toml
vendored
1
debian/debcargo.toml
vendored
@ -14,6 +14,7 @@ section = "admin"
|
|||||||
build_depends = [
|
build_depends = [
|
||||||
"debhelper (>= 12~)",
|
"debhelper (>= 12~)",
|
||||||
"bash-completion",
|
"bash-completion",
|
||||||
|
"pve-eslint",
|
||||||
"python3-docutils",
|
"python3-docutils",
|
||||||
"python3-pygments",
|
"python3-pygments",
|
||||||
"rsync",
|
"rsync",
|
||||||
|
@ -44,12 +44,13 @@ def scan_extjs_files(wwwdir="../www"): # a bit rough i know, but we can optimize
|
|||||||
js_files.append(os.path.join(root, filename))
|
js_files.append(os.path.join(root, filename))
|
||||||
for js_file in js_files:
|
for js_file in js_files:
|
||||||
fd = open(js_file).read()
|
fd = open(js_file).read()
|
||||||
match = re.search("onlineHelp:\s*[\'\"](.*?)[\'\"]", fd) # match object is tuple
|
allmatch = re.findall("onlineHelp:\s*[\'\"](.*?)[\'\"]", fd, re.M)
|
||||||
if match:
|
for match in allmatch:
|
||||||
anchor = match.groups()[0]
|
anchor = match
|
||||||
anchor = re.sub('_', '-', anchor) # normalize labels
|
anchor = re.sub('_', '-', anchor) # normalize labels
|
||||||
logger.info("found onlineHelp: {} in {}".format(anchor, js_file))
|
logger.info("found onlineHelp: {} in {}".format(anchor, js_file))
|
||||||
used_anchors.append(anchor)
|
used_anchors.append(anchor)
|
||||||
|
|
||||||
return used_anchors
|
return used_anchors
|
||||||
|
|
||||||
|
|
||||||
|
File diff suppressed because it is too large
Load Diff
712
docs/backup-client.rst
Normal file
712
docs/backup-client.rst
Normal file
@ -0,0 +1,712 @@
|
|||||||
|
Backup Client Usage
|
||||||
|
===================
|
||||||
|
|
||||||
|
The command line client is called :command:`proxmox-backup-client`.
|
||||||
|
|
||||||
|
|
||||||
|
Repository Locations
|
||||||
|
--------------------
|
||||||
|
|
||||||
|
The client uses the following notation to specify a datastore repository
|
||||||
|
on the backup server.
|
||||||
|
|
||||||
|
[[username@]server[:port]:]datastore
|
||||||
|
|
||||||
|
The default value for ``username`` is ``root@pam``. If no server is specified,
|
||||||
|
the default is the local host (``localhost``).
|
||||||
|
|
||||||
|
You can specify a port if your backup server is only reachable on a different
|
||||||
|
port (e.g. with NAT and port forwarding).
|
||||||
|
|
||||||
|
Note that if the server is an IPv6 address, you have to write it with
|
||||||
|
square brackets (e.g. [fe80::01]).
|
||||||
|
|
||||||
|
You can pass the repository with the ``--repository`` command
|
||||||
|
line option, or by setting the ``PBS_REPOSITORY`` environment
|
||||||
|
variable.
|
||||||
|
|
||||||
|
Here some examples of valid repositories and the real values
|
||||||
|
|
||||||
|
================================ ============ ================== ===========
|
||||||
|
Example User Host:Port Datastore
|
||||||
|
================================ ============ ================== ===========
|
||||||
|
mydatastore ``root@pam`` localhost:8007 mydatastore
|
||||||
|
myhostname:mydatastore ``root@pam`` myhostname:8007 mydatastore
|
||||||
|
user@pbs@myhostname:mydatastore ``user@pbs`` myhostname:8007 mydatastore
|
||||||
|
192.168.55.55:1234:mydatastore ``root@pam`` 192.168.55.55:1234 mydatastore
|
||||||
|
[ff80::51]:mydatastore ``root@pam`` [ff80::51]:8007 mydatastore
|
||||||
|
[ff80::51]:1234:mydatastore ``root@pam`` [ff80::51]:1234 mydatastore
|
||||||
|
================================ ============ ================== ===========
|
||||||
|
|
||||||
|
Environment Variables
|
||||||
|
---------------------
|
||||||
|
|
||||||
|
``PBS_REPOSITORY``
|
||||||
|
The default backup repository.
|
||||||
|
|
||||||
|
``PBS_PASSWORD``
|
||||||
|
When set, this value is used for the password required for the
|
||||||
|
backup server.
|
||||||
|
|
||||||
|
``PBS_ENCRYPTION_PASSWORD``
|
||||||
|
When set, this value is used to access the secret encryption key (if
|
||||||
|
protected by password).
|
||||||
|
|
||||||
|
``PBS_FINGERPRINT`` When set, this value is used to verify the server
|
||||||
|
certificate (only used if the system CA certificates cannot
|
||||||
|
validate the certificate).
|
||||||
|
|
||||||
|
|
||||||
|
Output Format
|
||||||
|
-------------
|
||||||
|
|
||||||
|
Most commands support the ``--output-format`` parameter. It accepts
|
||||||
|
the following values:
|
||||||
|
|
||||||
|
:``text``: Text format (default). Structured data is rendered as a table.
|
||||||
|
|
||||||
|
:``json``: JSON (single line).
|
||||||
|
|
||||||
|
:``json-pretty``: JSON (multiple lines, nicely formatted).
|
||||||
|
|
||||||
|
|
||||||
|
Please use the following environment variables to modify output behavior:
|
||||||
|
|
||||||
|
``PROXMOX_OUTPUT_FORMAT``
|
||||||
|
Defines the default output format.
|
||||||
|
|
||||||
|
``PROXMOX_OUTPUT_NO_BORDER``
|
||||||
|
If set (to any value), do not render table borders.
|
||||||
|
|
||||||
|
``PROXMOX_OUTPUT_NO_HEADER``
|
||||||
|
If set (to any value), do not render table headers.
|
||||||
|
|
||||||
|
.. note:: The ``text`` format is designed to be human readable, and
|
||||||
|
not meant to be parsed by automation tools. Please use the ``json``
|
||||||
|
format if you need to process the output.
|
||||||
|
|
||||||
|
|
||||||
|
.. _creating-backups:
|
||||||
|
|
||||||
|
Creating Backups
|
||||||
|
----------------
|
||||||
|
|
||||||
|
This section explains how to create a backup from within the machine. This can
|
||||||
|
be a physical host, a virtual machine, or a container. Such backups may contain file
|
||||||
|
and image archives. There are no restrictions in this case.
|
||||||
|
|
||||||
|
.. note:: If you want to backup virtual machines or containers on Proxmox VE, see :ref:`pve-integration`.
|
||||||
|
|
||||||
|
For the following example you need to have a backup server set up, working
|
||||||
|
credentials and need to know the repository name.
|
||||||
|
In the following examples we use ``backup-server:store1``.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client backup root.pxar:/ --repository backup-server:store1
|
||||||
|
Starting backup: host/elsa/2019-12-03T09:35:01Z
|
||||||
|
Client name: elsa
|
||||||
|
skip mount point: "/boot/efi"
|
||||||
|
skip mount point: "/dev"
|
||||||
|
skip mount point: "/run"
|
||||||
|
skip mount point: "/sys"
|
||||||
|
Uploaded 12129 chunks in 87 seconds (564 MB/s).
|
||||||
|
End Time: 2019-12-03T10:36:29+01:00
|
||||||
|
|
||||||
|
This will prompt you for a password and then uploads a file archive named
|
||||||
|
``root.pxar`` containing all the files in the ``/`` directory.
|
||||||
|
|
||||||
|
.. Caution:: Please note that the proxmox-backup-client does not
|
||||||
|
automatically include mount points. Instead, you will see a short
|
||||||
|
``skip mount point`` notice for each of them. The idea is to
|
||||||
|
create a separate file archive for each mounted disk. You can
|
||||||
|
explicitly include them using the ``--include-dev`` option
|
||||||
|
(i.e. ``--include-dev /boot/efi``). You can use this option
|
||||||
|
multiple times for each mount point that should be included.
|
||||||
|
|
||||||
|
The ``--repository`` option can get quite long and is used by all
|
||||||
|
commands. You can avoid having to enter this value by setting the
|
||||||
|
environment variable ``PBS_REPOSITORY``. Note that if you would like this to remain set
|
||||||
|
over multiple sessions, you should instead add the below line to your
|
||||||
|
``.bashrc`` file.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# export PBS_REPOSITORY=backup-server:store1
|
||||||
|
|
||||||
|
After this you can execute all commands without specifying the ``--repository``
|
||||||
|
option.
|
||||||
|
|
||||||
|
One single backup is allowed to contain more than one archive. For example, if
|
||||||
|
you want to backup two disks mounted at ``/mnt/disk1`` and ``/mnt/disk2``:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client backup disk1.pxar:/mnt/disk1 disk2.pxar:/mnt/disk2
|
||||||
|
|
||||||
|
This creates a backup of both disks.
|
||||||
|
|
||||||
|
The backup command takes a list of backup specifications, which
|
||||||
|
include the archive name on the server, the type of the archive, and the
|
||||||
|
archive source at the client. The format is:
|
||||||
|
|
||||||
|
<archive-name>.<type>:<source-path>
|
||||||
|
|
||||||
|
Common types are ``.pxar`` for file archives, and ``.img`` for block
|
||||||
|
device images. To create a backup of a block device run the following command:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client backup mydata.img:/dev/mylvm/mydata
|
||||||
|
|
||||||
|
|
||||||
|
Excluding files/folders from a backup
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Sometimes it is desired to exclude certain files or folders from a backup archive.
|
||||||
|
To tell the Proxmox Backup client when and how to ignore files and directories,
|
||||||
|
place a text file called ``.pxarexclude`` in the filesystem hierarchy.
|
||||||
|
Whenever the backup client encounters such a file in a directory, it interprets
|
||||||
|
each line as glob match patterns for files and directories that are to be excluded
|
||||||
|
from the backup.
|
||||||
|
|
||||||
|
The file must contain a single glob pattern per line. Empty lines are ignored.
|
||||||
|
The same is true for lines starting with ``#``, which indicates a comment.
|
||||||
|
A ``!`` at the beginning of a line reverses the glob match pattern from an exclusion
|
||||||
|
to an explicit inclusion. This makes it possible to exclude all entries in a
|
||||||
|
directory except for a few single files/subdirectories.
|
||||||
|
Lines ending in ``/`` match only on directories.
|
||||||
|
The directory containing the ``.pxarexclude`` file is considered to be the root of
|
||||||
|
the given patterns. It is only possible to match files in this directory and its subdirectories.
|
||||||
|
|
||||||
|
``\`` is used to escape special glob characters.
|
||||||
|
``?`` matches any single character.
|
||||||
|
``*`` matches any character, including an empty string.
|
||||||
|
``**`` is used to match subdirectories. It can be used to, for example, exclude
|
||||||
|
all files ending in ``.tmp`` within the directory or subdirectories with the
|
||||||
|
following pattern ``**/*.tmp``.
|
||||||
|
``[...]`` matches a single character from any of the provided characters within
|
||||||
|
the brackets. ``[!...]`` does the complementary and matches any single character
|
||||||
|
not contained within the brackets. It is also possible to specify ranges with two
|
||||||
|
characters separated by ``-``. For example, ``[a-z]`` matches any lowercase
|
||||||
|
alphabetic character and ``[0-9]`` matches any one single digit.
|
||||||
|
|
||||||
|
The order of the glob match patterns defines whether a file is included or
|
||||||
|
excluded, that is to say later entries override previous ones.
|
||||||
|
This is also true for match patterns encountered deeper down the directory tree,
|
||||||
|
which can override a previous exclusion.
|
||||||
|
Be aware that excluded directories will **not** be read by the backup client.
|
||||||
|
Thus, a ``.pxarexclude`` file in an excluded subdirectory will have no effect.
|
||||||
|
``.pxarexclude`` files are treated as regular files and will be included in the
|
||||||
|
backup archive.
|
||||||
|
|
||||||
|
For example, consider the following directory structure:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# ls -aR folder
|
||||||
|
folder/:
|
||||||
|
. .. .pxarexclude subfolder0 subfolder1
|
||||||
|
|
||||||
|
folder/subfolder0:
|
||||||
|
. .. file0 file1 file2 file3 .pxarexclude
|
||||||
|
|
||||||
|
folder/subfolder1:
|
||||||
|
. .. file0 file1 file2 file3
|
||||||
|
|
||||||
|
The different ``.pxarexclude`` files contain the following:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# cat folder/.pxarexclude
|
||||||
|
/subfolder0/file1
|
||||||
|
/subfolder1/*
|
||||||
|
!/subfolder1/file2
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# cat folder/subfolder0/.pxarexclude
|
||||||
|
file3
|
||||||
|
|
||||||
|
This would exclude ``file1`` and ``file3`` in ``subfolder0`` and all of
|
||||||
|
``subfolder1`` except ``file2``.
|
||||||
|
|
||||||
|
Restoring this backup will result in:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
ls -aR restored
|
||||||
|
restored/:
|
||||||
|
. .. .pxarexclude subfolder0 subfolder1
|
||||||
|
|
||||||
|
restored/subfolder0:
|
||||||
|
. .. file0 file2 .pxarexclude
|
||||||
|
|
||||||
|
restored/subfolder1:
|
||||||
|
. .. file2
|
||||||
|
|
||||||
|
|
||||||
|
Encryption
|
||||||
|
----------
|
||||||
|
|
||||||
|
Proxmox Backup supports client-side encryption with AES-256 in GCM_
|
||||||
|
mode. To set this up, you first need to create an encryption key:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client key create my-backup.key
|
||||||
|
Encryption Key Password: **************
|
||||||
|
|
||||||
|
The key is password protected by default. If you do not need this
|
||||||
|
extra protection, you can also create it without a password:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client key create /path/to/my-backup.key --kdf none
|
||||||
|
|
||||||
|
Having created this key, it is now possible to create an encrypted backup, by
|
||||||
|
passing the ``--keyfile`` parameter, with the path to the key file.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client backup etc.pxar:/etc --keyfile /path/to/my-backup.key
|
||||||
|
Password: *********
|
||||||
|
Encryption Key Password: **************
|
||||||
|
...
|
||||||
|
|
||||||
|
.. Note:: If you do not specify the name of the backup key, the key will be
|
||||||
|
created in the default location
|
||||||
|
``~/.config/proxmox-backup/encryption-key.json``. ``proxmox-backup-client``
|
||||||
|
will also search this location by default, in case the ``--keyfile``
|
||||||
|
parameter is not specified.
|
||||||
|
|
||||||
|
You can avoid entering the passwords by setting the environment
|
||||||
|
variables ``PBS_PASSWORD`` and ``PBS_ENCRYPTION_PASSWORD``.
|
||||||
|
|
||||||
|
|
||||||
|
Using a master key to store and recover encryption keys
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
You can also use ``proxmox-backup-client key`` to create an RSA public/private
|
||||||
|
key pair, which can be used to store an encrypted version of the symmetric
|
||||||
|
backup encryption key alongside each backup and recover it later.
|
||||||
|
|
||||||
|
To set up a master key:
|
||||||
|
|
||||||
|
1. Create an encryption key for the backup:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client key create
|
||||||
|
creating default key at: "~/.config/proxmox-backup/encryption-key.json"
|
||||||
|
Encryption Key Password: **********
|
||||||
|
...
|
||||||
|
|
||||||
|
The resulting file will be saved to ``~/.config/proxmox-backup/encryption-key.json``.
|
||||||
|
|
||||||
|
2. Create an RSA public/private key pair:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client key create-master-key
|
||||||
|
Master Key Password: *********
|
||||||
|
...
|
||||||
|
|
||||||
|
This will create two files in your current directory, ``master-public.pem``
|
||||||
|
and ``master-private.pem``.
|
||||||
|
|
||||||
|
3. Import the newly created ``master-public.pem`` public certificate, so that
|
||||||
|
``proxmox-backup-client`` can find and use it upon backup.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client key import-master-pubkey /path/to/master-public.pem
|
||||||
|
Imported public master key to "~/.config/proxmox-backup/master-public.pem"
|
||||||
|
|
||||||
|
4. With all these files in place, run a backup job:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client backup etc.pxar:/etc
|
||||||
|
|
||||||
|
The key will be stored in your backup, under the name ``rsa-encrypted.key``.
|
||||||
|
|
||||||
|
.. Note:: The ``--keyfile`` parameter can be excluded, if the encryption key
|
||||||
|
is in the default path. If you specified another path upon creation, you
|
||||||
|
must pass the ``--keyfile`` parameter.
|
||||||
|
|
||||||
|
5. To test that everything worked, you can restore the key from the backup:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client restore /path/to/backup/ rsa-encrypted.key /path/to/target
|
||||||
|
|
||||||
|
.. Note:: You should not need an encryption key to extract this file. However, if
|
||||||
|
a key exists at the default location
|
||||||
|
(``~/.config/proxmox-backup/encryption-key.json``) the program will prompt
|
||||||
|
you for an encryption key password. Simply moving ``encryption-key.json``
|
||||||
|
out of this directory will fix this issue.
|
||||||
|
|
||||||
|
6. Then, use the previously generated master key to decrypt the file:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out /path/to/target
|
||||||
|
Enter pass phrase for ./master-private.pem: *********
|
||||||
|
|
||||||
|
7. The target file will now contain the encryption key information in plain
|
||||||
|
text. The success of this can be confirmed by passing the resulting ``json``
|
||||||
|
file, with the ``--keyfile`` parameter, when decrypting files from the backup.
|
||||||
|
|
||||||
|
.. warning:: Without their key, backed up files will be inaccessible. Thus, you should
|
||||||
|
keep keys ordered and in a place that is separate from the contents being
|
||||||
|
backed up. It can happen, for example, that you back up an entire system, using
|
||||||
|
a key on that system. If the system then becomes inaccessible for any reason
|
||||||
|
and needs to be restored, this will not be possible as the encryption key will be
|
||||||
|
lost along with the broken system. In preparation for the worst case scenario,
|
||||||
|
you should consider keeping a paper copy of this key locked away in
|
||||||
|
a safe place.
|
||||||
|
|
||||||
|
|
||||||
|
Restoring Data
|
||||||
|
--------------
|
||||||
|
|
||||||
|
The regular creation of backups is a necessary step to avoiding data
|
||||||
|
loss. More importantly, however, is the restoration. It is good practice to perform
|
||||||
|
periodic recovery tests to ensure that you can access the data in
|
||||||
|
case of problems.
|
||||||
|
|
||||||
|
First, you need to find the snapshot which you want to restore. The snapshot
|
||||||
|
command provides a list of all the snapshots on the server:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client snapshots
|
||||||
|
┌────────────────────────────────┬─────────────┬────────────────────────────────────┐
|
||||||
|
│ snapshot │ size │ files │
|
||||||
|
╞════════════════════════════════╪═════════════╪════════════════════════════════════╡
|
||||||
|
│ host/elsa/2019-12-03T09:30:15Z │ 51788646825 │ root.pxar catalog.pcat1 index.json │
|
||||||
|
├────────────────────────────────┼─────────────┼────────────────────────────────────┤
|
||||||
|
│ host/elsa/2019-12-03T09:35:01Z │ 51790622048 │ root.pxar catalog.pcat1 index.json │
|
||||||
|
├────────────────────────────────┼─────────────┼────────────────────────────────────┤
|
||||||
|
...
|
||||||
|
|
||||||
|
You can inspect the catalog to find specific files.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client catalog dump host/elsa/2019-12-03T09:35:01Z
|
||||||
|
...
|
||||||
|
d "./root.pxar.didx/etc/cifs-utils"
|
||||||
|
l "./root.pxar.didx/etc/cifs-utils/idmap-plugin"
|
||||||
|
d "./root.pxar.didx/etc/console-setup"
|
||||||
|
...
|
||||||
|
|
||||||
|
The restore command lets you restore a single archive from the
|
||||||
|
backup.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client restore host/elsa/2019-12-03T09:35:01Z root.pxar /target/path/
|
||||||
|
|
||||||
|
To get the contents of any archive, you can restore the ``index.json`` file in the
|
||||||
|
repository to the target path '-'. This will dump the contents to the standard output.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client restore host/elsa/2019-12-03T09:35:01Z index.json -
|
||||||
|
|
||||||
|
|
||||||
|
Interactive Restores
|
||||||
|
~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
If you only want to restore a few individual files, it is often easier
|
||||||
|
to use the interactive recovery shell.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client catalog shell host/elsa/2019-12-03T09:35:01Z root.pxar
|
||||||
|
Starting interactive shell
|
||||||
|
pxar:/ > ls
|
||||||
|
bin boot dev etc home lib lib32
|
||||||
|
...
|
||||||
|
|
||||||
|
The interactive recovery shell is a minimal command line interface that
|
||||||
|
utilizes the metadata stored in the catalog to quickly list, navigate and
|
||||||
|
search files in a file archive.
|
||||||
|
To restore files, you can select them individually or match them with a glob
|
||||||
|
pattern.
|
||||||
|
|
||||||
|
Using the catalog for navigation reduces the overhead considerably because only
|
||||||
|
the catalog needs to be downloaded and, optionally, decrypted.
|
||||||
|
The actual chunks are only accessed if the metadata in the catalog is not enough
|
||||||
|
or for the actual restore.
|
||||||
|
|
||||||
|
Similar to common UNIX shells ``cd`` and ``ls`` are the commands used to change
|
||||||
|
working directory and list directory contents in the archive.
|
||||||
|
``pwd`` shows the full path of the current working directory with respect to the
|
||||||
|
archive root.
|
||||||
|
|
||||||
|
Being able to quickly search the contents of the archive is a commonly needed feature.
|
||||||
|
That's where the catalog is most valuable.
|
||||||
|
For example:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
pxar:/ > find etc/**/*.txt --select
|
||||||
|
"/etc/X11/rgb.txt"
|
||||||
|
pxar:/ > list-selected
|
||||||
|
etc/**/*.txt
|
||||||
|
pxar:/ > restore-selected /target/path
|
||||||
|
...
|
||||||
|
|
||||||
|
This will find and print all files ending in ``.txt`` located in ``etc/`` or a
|
||||||
|
subdirectory and add the corresponding pattern to the list for subsequent restores.
|
||||||
|
``list-selected`` shows these patterns and ``restore-selected`` finally restores
|
||||||
|
all files in the archive matching the patterns to ``/target/path`` on the local
|
||||||
|
host. This will scan the whole archive.
|
||||||
|
|
||||||
|
With ``restore /target/path`` you can restore the sub-archive given by the current
|
||||||
|
working directory to the local target path ``/target/path`` on your host.
|
||||||
|
By additionally passing a glob pattern with ``--pattern <glob>``, the restore is
|
||||||
|
further limited to files matching the pattern.
|
||||||
|
For example:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
pxar:/ > cd /etc/
|
||||||
|
pxar:/etc/ > restore /target/ --pattern **/*.conf
|
||||||
|
...
|
||||||
|
|
||||||
|
The above will scan trough all the directories below ``/etc`` and restore all
|
||||||
|
files ending in ``.conf``.
|
||||||
|
|
||||||
|
.. todo:: Explain interactive restore in more detail
|
||||||
|
|
||||||
|
Mounting of Archives via FUSE
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
The :term:`FUSE` implementation for the pxar archive allows you to mount a
|
||||||
|
file archive as a read-only filesystem to a mountpoint on your host.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client mount host/backup-client/2020-01-29T11:29:22Z root.pxar /mnt/mountpoint
|
||||||
|
# ls /mnt/mountpoint
|
||||||
|
bin dev home lib32 libx32 media opt root sbin sys usr
|
||||||
|
boot etc lib lib64 lost+found mnt proc run srv tmp var
|
||||||
|
|
||||||
|
This allows you to access the full contents of the archive in a seamless manner.
|
||||||
|
|
||||||
|
.. note:: As the FUSE connection needs to fetch and decrypt chunks from the
|
||||||
|
backup server's datastore, this can cause some additional network and CPU
|
||||||
|
load on your host, depending on the operations you perform on the mounted
|
||||||
|
filesystem.
|
||||||
|
|
||||||
|
To unmount the filesystem use the ``umount`` command on the mountpoint:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# umount /mnt/mountpoint
|
||||||
|
|
||||||
|
Login and Logout
|
||||||
|
----------------
|
||||||
|
|
||||||
|
The client tool prompts you to enter the logon password as soon as you
|
||||||
|
want to access the backup server. The server checks your credentials
|
||||||
|
and responds with a ticket that is valid for two hours. The client
|
||||||
|
tool automatically stores that ticket and uses it for further requests
|
||||||
|
to this server.
|
||||||
|
|
||||||
|
You can also manually trigger this login/logout using the login and
|
||||||
|
logout commands:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client login
|
||||||
|
Password: **********
|
||||||
|
|
||||||
|
To remove the ticket, issue a logout:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client logout
|
||||||
|
|
||||||
|
|
||||||
|
.. _backup-pruning:
|
||||||
|
|
||||||
|
Pruning and Removing Backups
|
||||||
|
----------------------------
|
||||||
|
|
||||||
|
You can manually delete a backup snapshot using the ``forget``
|
||||||
|
command:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client forget <snapshot>
|
||||||
|
|
||||||
|
|
||||||
|
.. caution:: This command removes all archives in this backup
|
||||||
|
snapshot. They will be inaccessible and unrecoverable.
|
||||||
|
|
||||||
|
|
||||||
|
Although manual removal is sometimes required, the ``prune``
|
||||||
|
command is normally used to systematically delete older backups. Prune lets
|
||||||
|
you specify which backup snapshots you want to keep. The
|
||||||
|
following retention options are available:
|
||||||
|
|
||||||
|
``--keep-last <N>``
|
||||||
|
Keep the last ``<N>`` backup snapshots.
|
||||||
|
|
||||||
|
``--keep-hourly <N>``
|
||||||
|
Keep backups for the last ``<N>`` hours. If there is more than one
|
||||||
|
backup for a single hour, only the latest is kept.
|
||||||
|
|
||||||
|
``--keep-daily <N>``
|
||||||
|
Keep backups for the last ``<N>`` days. If there is more than one
|
||||||
|
backup for a single day, only the latest is kept.
|
||||||
|
|
||||||
|
``--keep-weekly <N>``
|
||||||
|
Keep backups for the last ``<N>`` weeks. If there is more than one
|
||||||
|
backup for a single week, only the latest is kept.
|
||||||
|
|
||||||
|
.. note:: Weeks start on Monday and end on Sunday. The software
|
||||||
|
uses the `ISO week date`_ system and handles weeks at
|
||||||
|
the end of the year correctly.
|
||||||
|
|
||||||
|
``--keep-monthly <N>``
|
||||||
|
Keep backups for the last ``<N>`` months. If there is more than one
|
||||||
|
backup for a single month, only the latest is kept.
|
||||||
|
|
||||||
|
``--keep-yearly <N>``
|
||||||
|
Keep backups for the last ``<N>`` years. If there is more than one
|
||||||
|
backup for a single year, only the latest is kept.
|
||||||
|
|
||||||
|
The retention options are processed in the order given above. Each option
|
||||||
|
only covers backups within its time period. The next option does not take care
|
||||||
|
of already covered backups. It will only consider older backups.
|
||||||
|
|
||||||
|
Unfinished and incomplete backups will be removed by the prune command unless
|
||||||
|
they are newer than the last successful backup. In this case, the last failed
|
||||||
|
backup is retained.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client prune <group> --keep-daily 7 --keep-weekly 4 --keep-monthly 3
|
||||||
|
|
||||||
|
|
||||||
|
You can use the ``--dry-run`` option to test your settings. This only
|
||||||
|
shows the list of existing snapshots and what actions prune would take.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client prune host/elsa --dry-run --keep-daily 1 --keep-weekly 3
|
||||||
|
┌────────────────────────────────┬──────┐
|
||||||
|
│ snapshot │ keep │
|
||||||
|
╞════════════════════════════════╪══════╡
|
||||||
|
│ host/elsa/2019-12-04T13:20:37Z │ 1 │
|
||||||
|
├────────────────────────────────┼──────┤
|
||||||
|
│ host/elsa/2019-12-03T09:35:01Z │ 0 │
|
||||||
|
├────────────────────────────────┼──────┤
|
||||||
|
│ host/elsa/2019-11-22T11:54:47Z │ 1 │
|
||||||
|
├────────────────────────────────┼──────┤
|
||||||
|
│ host/elsa/2019-11-21T12:36:25Z │ 0 │
|
||||||
|
├────────────────────────────────┼──────┤
|
||||||
|
│ host/elsa/2019-11-10T10:42:20Z │ 1 │
|
||||||
|
└────────────────────────────────┴──────┘
|
||||||
|
|
||||||
|
.. note:: Neither the ``prune`` command nor the ``forget`` command free space
|
||||||
|
in the chunk-store. The chunk-store still contains the data blocks. To free
|
||||||
|
space you need to perform :ref:`garbage-collection`.
|
||||||
|
|
||||||
|
|
||||||
|
.. _garbage-collection:
|
||||||
|
|
||||||
|
Garbage Collection
|
||||||
|
------------------
|
||||||
|
|
||||||
|
The ``prune`` command removes only the backup index files, not the data
|
||||||
|
from the datastore. This task is left to the garbage collection
|
||||||
|
command. It is recommended to carry out garbage collection on a regular basis.
|
||||||
|
|
||||||
|
The garbage collection works in two phases. In the first phase, all
|
||||||
|
data blocks that are still in use are marked. In the second phase,
|
||||||
|
unused data blocks are removed.
|
||||||
|
|
||||||
|
.. note:: This command needs to read all existing backup index files
|
||||||
|
and touches the complete chunk-store. This can take a long time
|
||||||
|
depending on the number of chunks and the speed of the underlying
|
||||||
|
disks.
|
||||||
|
|
||||||
|
.. note:: The garbage collection will only remove chunks that haven't been used
|
||||||
|
for at least one day (exactly 24h 5m). This grace period is necessary because
|
||||||
|
chunks in use are marked by touching the chunk which updates the ``atime``
|
||||||
|
(access time) property. Filesystems are mounted with the ``relatime`` option
|
||||||
|
by default. This results in a better performance by only updating the
|
||||||
|
``atime`` property if the last access has been at least 24 hours ago. The
|
||||||
|
downside is, that touching a chunk within these 24 hours will not always
|
||||||
|
update its ``atime`` property.
|
||||||
|
|
||||||
|
Chunks in the grace period will be logged at the end of the garbage
|
||||||
|
collection task as *Pending removals*.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client garbage-collect
|
||||||
|
starting garbage collection on store store2
|
||||||
|
Start GC phase1 (mark used chunks)
|
||||||
|
Start GC phase2 (sweep unused chunks)
|
||||||
|
percentage done: 1, chunk count: 219
|
||||||
|
percentage done: 2, chunk count: 453
|
||||||
|
...
|
||||||
|
percentage done: 99, chunk count: 21188
|
||||||
|
Removed bytes: 411368505
|
||||||
|
Removed chunks: 203
|
||||||
|
Original data bytes: 327160886391
|
||||||
|
Disk bytes: 52767414743 (16 %)
|
||||||
|
Disk chunks: 21221
|
||||||
|
Average chunk size: 2486565
|
||||||
|
TASK OK
|
||||||
|
|
||||||
|
|
||||||
|
.. todo:: howto run garbage-collection at regular intervals (cron)
|
||||||
|
|
||||||
|
Benchmarking
|
||||||
|
------------
|
||||||
|
|
||||||
|
The backup client also comes with a benchmarking tool. This tool measures
|
||||||
|
various metrics relating to compression and encryption speeds. You can run a
|
||||||
|
benchmark using the ``benchmark`` subcommand of ``proxmox-backup-client``:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client benchmark
|
||||||
|
Uploaded 656 chunks in 5 seconds.
|
||||||
|
Time per request: 7659 microseconds.
|
||||||
|
TLS speed: 547.60 MB/s
|
||||||
|
SHA256 speed: 585.76 MB/s
|
||||||
|
Compression speed: 1923.96 MB/s
|
||||||
|
Decompress speed: 7885.24 MB/s
|
||||||
|
AES256/GCM speed: 3974.03 MB/s
|
||||||
|
┌───────────────────────────────────┬─────────────────────┐
|
||||||
|
│ Name │ Value │
|
||||||
|
╞═══════════════════════════════════╪═════════════════════╡
|
||||||
|
│ TLS (maximal backup upload speed) │ 547.60 MB/s (93%) │
|
||||||
|
├───────────────────────────────────┼─────────────────────┤
|
||||||
|
│ SHA256 checksum computation speed │ 585.76 MB/s (28%) │
|
||||||
|
├───────────────────────────────────┼─────────────────────┤
|
||||||
|
│ ZStd level 1 compression speed │ 1923.96 MB/s (89%) │
|
||||||
|
├───────────────────────────────────┼─────────────────────┤
|
||||||
|
│ ZStd level 1 decompression speed │ 7885.24 MB/s (98%) │
|
||||||
|
├───────────────────────────────────┼─────────────────────┤
|
||||||
|
│ AES256 GCM encryption speed │ 3974.03 MB/s (104%) │
|
||||||
|
└───────────────────────────────────┴─────────────────────┘
|
||||||
|
|
||||||
|
.. note:: The percentages given in the output table correspond to a
|
||||||
|
comparison against a Ryzen 7 2700X. The TLS test connects to the
|
||||||
|
local host, so there is no network involved.
|
||||||
|
|
||||||
|
You can also pass the ``--output-format`` parameter to output stats in ``json``,
|
||||||
|
rather than the default table format.
|
||||||
|
|
||||||
|
|
@ -13,7 +13,7 @@ by the systemd Time and Date Specification (see `systemd.time manpage`_)
|
|||||||
called `calendar events` for its schedules.
|
called `calendar events` for its schedules.
|
||||||
|
|
||||||
`Calendar events` are expressions to specify one or more points in time.
|
`Calendar events` are expressions to specify one or more points in time.
|
||||||
They are mostly compatible with systemds calendar events.
|
They are mostly compatible with systemd's calendar events.
|
||||||
|
|
||||||
The general format is as follows:
|
The general format is as follows:
|
||||||
|
|
||||||
@ -27,7 +27,7 @@ If the weekday or date part is omitted, all (week)days are included.
|
|||||||
If the time part is omitted, the time 00:00:00 is implied.
|
If the time part is omitted, the time 00:00:00 is implied.
|
||||||
(e.g. '2020-01-01' refers to '2020-01-01 00:00:00')
|
(e.g. '2020-01-01' refers to '2020-01-01 00:00:00')
|
||||||
|
|
||||||
Weekdays are specified with the abbreviated english version:
|
Weekdays are specified with the abbreviated English version:
|
||||||
`mon, tue, wed, thu, fri, sat, sun`.
|
`mon, tue, wed, thu, fri, sat, sun`.
|
||||||
|
|
||||||
Each field can contain multiple values in the following formats:
|
Each field can contain multiple values in the following formats:
|
||||||
@ -48,7 +48,7 @@ Value Syntax
|
|||||||
`daily` `*-*-* 00:00:00`
|
`daily` `*-*-* 00:00:00`
|
||||||
`weekly` `mon *-*-* 00:00:00`
|
`weekly` `mon *-*-* 00:00:00`
|
||||||
`monthly` `*-*-01 00:00:00`
|
`monthly` `*-*-01 00:00:00`
|
||||||
`yearly` or `annualy` `*-01-01 00:00:00`
|
`yearly` or `annually` `*-01-01 00:00:00`
|
||||||
`quarterly` `*-01,04,07,10-01 00:00:00`
|
`quarterly` `*-01,04,07,10-01 00:00:00`
|
||||||
`semiannually` or `semi-annually` `*-01,07-01 00:00:00`
|
`semiannually` or `semi-annually` `*-01,07-01 00:00:00`
|
||||||
================================= ==============================
|
================================= ==============================
|
||||||
@ -80,7 +80,7 @@ Differences to systemd
|
|||||||
|
|
||||||
Not all features of systemd calendar events are implemented:
|
Not all features of systemd calendar events are implemented:
|
||||||
|
|
||||||
* no unix timestamps (e.g. `@12345`): instead use date and time to specify
|
* no Unix timestamps (e.g. `@12345`): instead use date and time to specify
|
||||||
a specific point in time
|
a specific point in time
|
||||||
* no timezone: all schedules use the set timezone on the server
|
* no timezone: all schedules use the set timezone on the server
|
||||||
* no sub-second resolution
|
* no sub-second resolution
|
||||||
|
@ -12,7 +12,7 @@ Command Line Tools
|
|||||||
.. include:: proxmox-backup-manager/description.rst
|
.. include:: proxmox-backup-manager/description.rst
|
||||||
|
|
||||||
``pxar``
|
``pxar``
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~
|
||||||
|
|
||||||
.. include:: pxar/description.rst
|
.. include:: pxar/description.rst
|
||||||
|
|
||||||
|
@ -10,7 +10,7 @@ Command Syntax
|
|||||||
Catalog Shell Commands
|
Catalog Shell Commands
|
||||||
~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Those command are available when you start an intercative restore shell:
|
Those command are available when you start an interactive restore shell:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
.. Epilog (included at top of each file)
|
.. Epilog (included at top of each file)
|
||||||
|
|
||||||
We use this file to define external links and commone replacement
|
We use this file to define external links and common replacement
|
||||||
patterns.
|
patterns.
|
||||||
|
|
||||||
.. |VERSION| replace:: 1.0
|
.. |VERSION| replace:: 1.0
|
||||||
|
@ -34,7 +34,7 @@ How long will my Proxmox Backup Server version be supported?
|
|||||||
Can I copy or synchronize my datastore to another location?
|
Can I copy or synchronize my datastore to another location?
|
||||||
-----------------------------------------------------------
|
-----------------------------------------------------------
|
||||||
|
|
||||||
Proxmox Backup Server allows you to copy or synchroize datastores to other
|
Proxmox Backup Server allows you to copy or synchronize datastores to other
|
||||||
locations, through the use of *Remotes* and *Sync Jobs*. *Remote* is the term
|
locations, through the use of *Remotes* and *Sync Jobs*. *Remote* is the term
|
||||||
given to a separate server, which has a datastore that can be synced to a local store.
|
given to a separate server, which has a datastore that can be synced to a local store.
|
||||||
A *Sync Job* is the process which is used to pull the contents of a datastore from
|
A *Sync Job* is the process which is used to pull the contents of a datastore from
|
||||||
|
135
docs/gui.rst
Normal file
135
docs/gui.rst
Normal file
@ -0,0 +1,135 @@
|
|||||||
|
Graphical User Interface
|
||||||
|
========================
|
||||||
|
|
||||||
|
Proxmox Backup Server offers an integrated, web-based interface to manage the
|
||||||
|
server. This means that you can carry out all administration tasks through your
|
||||||
|
web browser, and that you don't have to worry about installing extra management
|
||||||
|
tools. The web interface also provides a built in console, so if you prefer the
|
||||||
|
command line or need some extra control, you have this option.
|
||||||
|
|
||||||
|
The web interface can be accessed via https://youripaddress:8007. The default
|
||||||
|
login is `root`, and the password is the one specified during the installation
|
||||||
|
process.
|
||||||
|
|
||||||
|
|
||||||
|
Features
|
||||||
|
--------
|
||||||
|
|
||||||
|
* Simple management interface for Proxmox Backup Server
|
||||||
|
* Monitoring of tasks, logs and resource usage
|
||||||
|
* Management of users, permissions, datastores, etc.
* Secure HTML5 console
* Support for multiple authentication sources
* Support for multiple languages
* Based on ExtJS 6.x JavaScript framework


Login
-----

.. image:: images/screenshots/pbs-gui-login-window.png
  :width: 250
  :align: right
  :alt: PBS login window

When you connect to the web interface, you will first see the login window.
Proxmox Backup Server supports various languages and authentication back ends
(*Realms*), both of which can be selected here.

.. note:: For convenience, you can save the username on the client side, by
  selecting the "Save User name" checkbox at the bottom of the window.


GUI Overview
------------

.. image:: images/screenshots/pbs-gui-dashboard.png
  :width: 250
  :align: right
  :alt: PBS GUI Dashboard

The Proxmox Backup Server web interface consists of 3 main sections:

* **Header**: At the top. This shows version information, and contains buttons to view
  documentation, monitor running tasks, and log out.
* **Sidebar**: On the left. This contains the configuration options for
  the server.
* **Configuration Panel**: In the center. This contains the control interface for the
  configuration options in the *Sidebar*.


Sidebar
-------

In the sidebar, on the left side of the page, you can see various items relating
to specific management activities.


Dashboard
^^^^^^^^^

The Dashboard shows a summary of activity and resource usage on the server.
Specifically, this displays hardware usage, a summary of
previous and currently running tasks, and subscription information.


Configuration
^^^^^^^^^^^^^

The Configuration section contains some system configuration options, such as
time and network configuration. It also contains the following subsections:

* **User Management**: Add users and manage accounts
* **Permissions**: Manage permissions for various users
* **Remotes**: Add, edit and remove remotes (see :term:`Remote`)
* **Sync Jobs**: Manage and run sync jobs to remotes
* **Subscription**: Upload a subscription key and view subscription status


Administration
^^^^^^^^^^^^^^

.. image:: images/screenshots/pbs-gui-administration-serverstatus.png
  :width: 250
  :align: right
  :alt: Administration: Server Status overview

The Administration section contains a top panel, with further administration
tasks and information. These are:

* **ServerStatus**: Provides access to the console, power options, and various
  resource usage statistics
* **Services**: Manage and monitor system services
* **Updates**: An interface for upgrading packages
* **Syslog**: View log messages from the server
* **Tasks**: Task history with multiple filter options

.. image:: images/screenshots/pbs-gui-disks.png
  :width: 250
  :align: right
  :alt: Administration: Disks

The administration menu item also contains a disk management subsection:

* **Disks**: View information on available disks
* **Directory**: Create and view information on *ext4* and *xfs* disks
* **ZFS**: Create and view information on *ZFS* disks


Datastore
^^^^^^^^^

.. image:: images/screenshots/pbs-gui-datastore.png
  :width: 250
  :align: right
  :alt: Datastore Configuration

The Datastore section provides an interface for creating and managing
datastores. It contains a subsection for each datastore on the system, in
which you can use the top panel to view:

* **Content**: Information on the datastore's backup groups and their respective
  contents
* **Statistics**: Usage statistics for the datastore
* **Permissions**: View and manage permissions for the datastore
New screenshot images added (binary files, not shown):

* docs/images/screenshots/pbs-gui-administration-serverstatus.png (127 KiB)
* docs/images/screenshots/pbs-gui-dashboard.png (119 KiB)
* docs/images/screenshots/pbs-gui-login-window.png (16 KiB)
@@ -22,7 +22,16 @@ in the section entitled "GNU Free Documentation License".

 introduction.rst
 installation.rst
-administration-guide.rst
+terminology.rst
+gui.rst
+storage.rst
+network-management.rst
+user-management.rst
+managing-remotes.rst
+maintenance.rst
+backup-client.rst
+pve-integration.rst
+pxar-tool.rst
 sysadmin.rst
 faq.rst
@@ -5,6 +5,8 @@ Installation
 can either be installed with a graphical installer or on top of
 Debian_ from the provided package repository.

+.. include:: system-requirements.rst
+
 .. include:: package-repositories.rst

 Server installation

@@ -82,7 +84,7 @@ support, and a set of common and useful packages.
 when LVM_ or ZFS_ is used. The network configuration is completely up to you
 as well.

-.. note:: You can access the webinterface of the Proxmox Backup Server with
+.. note:: You can access the web interface of the Proxmox Backup Server with
   your web browser, using HTTPS on port 8007. For example at
   ``https://<ip-or-dns-name>:8007``

@@ -103,7 +105,7 @@ After configuring the
 still access the backups.

 .. note::
-  You can access the webinterface of the Proxmox Backup Server with your web
+  You can access the web interface of the Proxmox Backup Server with your web
   browser, using HTTPS on port 8007. For example at ``https://<ip-or-dns-name>:8007``

 Client installation
@@ -104,7 +104,7 @@ Software Stack

 Proxmox Backup Server consists of multiple components:

-* A server-daemon providing, among other things, a RESTfull API, super-fast
+* A server-daemon providing, among other things, a RESTful API, super-fast
   asynchronous tasks, lightweight usage statistic collection, scheduling
   events, strict separation of privileged and unprivileged execution
   environments

@@ -127,6 +127,7 @@ language.

 .. todo:: further explain the software stack

+
 Getting Help
 ------------
|
|||||||
History
|
History
|
||||||
-------
|
-------
|
||||||
|
|
||||||
.. todo:: Add development History of the product
|
Backup is, and always was, as central aspect of IT administration.
|
||||||
|
The need to recover from data loss is fundamental and increases with
|
||||||
|
virtualization.
|
||||||
|
|
||||||
|
Not surprisingly, we shipped a backup tool with Proxmox VE from the
|
||||||
|
beginning. The tool is called ``vzdump`` and is able to make
|
||||||
|
consistent snapshots of running LXC containers and KVM virtual
|
||||||
|
machines.
|
||||||
|
|
||||||
|
But ``vzdump`` only allowed for full backups. While this is perfect
|
||||||
|
for small backups, it becomes a burden for users with large VMs. Both
|
||||||
|
backup time and space usage was too large for this case, specially
|
||||||
|
when Users want to keep many backups of the same VMs. We need
|
||||||
|
deduplication and incremental backups to solve those problems.
|
||||||
|
|
||||||
|
Back in October 2018 development started. We had been looking into
|
||||||
|
several technologies and frameworks and finally decided to use
|
||||||
|
:term:`Rust` as implementation language to provide high speed and
|
||||||
|
memory efficiency. The 2018-edition of Rust seemed to be promising and
|
||||||
|
useful for our requirements.
|
||||||
|
|
||||||
|
In July 2020 we released the first beta version of Proxmox Backup
|
||||||
|
Server, followed by a first stable version in November 2020. With the
|
||||||
|
support of incremental, fully deduplicated backups, Proxmox Backup
|
||||||
|
significantly reduces the network load and saves valuable storage
|
||||||
|
space.
|
||||||
|
@@ -220,7 +220,7 @@ and you can install it using `apt-get`:
   # apt-get install zfs-zed

 To activate the daemon it is necessary to edit `/etc/zfs/zed.d/zed.rc` with your
-favourite editor, and uncomment the `ZED_EMAIL_ADDR` setting:
+favorite editor, and uncomment the `ZED_EMAIL_ADDR` setting:

 .. code-block:: console
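For reference, the setting being uncommented typically looks like the following
in ``/etc/zfs/zed.d/zed.rc`` (the address shown is only an assumption; use a
mailbox you actually read):

.. code-block:: console

    ZED_EMAIL_ADDR="root"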
@@ -262,7 +262,7 @@ to an external Storage.

 We strongly recommend to use enough memory, so that you normally do not
 run into low memory situations. Should you need or want to add swap, it is
-preferred to create a partition on a physical disk and use it as swapdevice.
+preferred to create a partition on a physical disk and use it as swap device.
 You can leave some space free for this purpose in the advanced options of the
 installer. Additionally, you can lower the `swappiness` value.
 A good value for servers is 10:
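A minimal sketch of how that value can be applied at runtime and persisted
(standard Linux sysctl handling, not something taken from this diff):

.. code-block:: console

    # sysctl -w vm.swappiness=10
    # echo 'vm.swappiness = 10' >> /etc/sysctl.conf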
@@ -312,6 +312,8 @@ You can disable compression at any time with:

 Only new blocks will be affected by this change.

+.. _local_zfs_special_device:
+
 ZFS Special Device
 ^^^^^^^^^^^^^^^^^^
docs/maintenance.rst (new file, 13 lines)
@@ -0,0 +1,13 @@
Maintenance Tasks
=================

Garbage Collection
------------------

You can monitor and run :ref:`garbage collection <garbage-collection>` on the
Proxmox Backup Server using the ``garbage-collection`` subcommand of
``proxmox-backup-manager``. You can use the ``start`` subcommand to manually start garbage
collection on an entire datastore and the ``status`` subcommand to see
attributes relating to the :ref:`garbage collection <garbage-collection>`.
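For example, a session on a hypothetical datastore named ``store1`` might look
like this (the datastore name is an assumption, and the commands print
task/status output that depends on your setup):

.. code-block:: console

    # proxmox-backup-manager garbage-collection start store1
    # proxmox-backup-manager garbage-collection status store1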

.. todo:: Add section on verification
docs/managing-remotes.rst (new file, 82 lines)
@@ -0,0 +1,82 @@
Managing Remotes
================

.. _backup_remote:

:term:`Remote`
--------------

A remote refers to a separate Proxmox Backup Server installation and a user on that
installation, from which you can `sync` datastores to a local datastore with a
`Sync Job`. You can configure remotes in the web interface, under **Configuration
-> Remotes**. Alternatively, you can use the ``remote`` subcommand. The
configuration information for remotes is stored in the file
``/etc/proxmox-backup/remote.cfg``.

.. image:: images/screenshots/pbs-gui-remote-add.png
  :align: right
  :alt: Add a remote

To add a remote, you need its hostname or IP, a userid and password on the
remote, and its certificate fingerprint. To get the fingerprint, use the
``proxmox-backup-manager cert info`` command on the remote, or navigate to
**Dashboard** in the remote's web interface and select **Show Fingerprint**.

.. code-block:: console

    # proxmox-backup-manager cert info |grep Fingerprint
    Fingerprint (sha256): 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe

Using the information specified above, you can add a remote from the **Remotes**
configuration panel, or by using the command:

.. code-block:: console

    # proxmox-backup-manager remote create pbs2 --host pbs2.mydomain.example --userid sync@pam --password 'SECRET' --fingerprint 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe

Use the ``list``, ``show``, ``update``, ``remove`` subcommands of
``proxmox-backup-manager remote`` to manage your remotes:

.. code-block:: console

    # proxmox-backup-manager remote update pbs2 --host pbs2.example
    # proxmox-backup-manager remote list
    ┌──────┬──────────────┬──────────┬───────────────────────────────────────────┬─────────┐
    │ name │ host │ userid │ fingerprint │ comment │
    ╞══════╪══════════════╪══════════╪═══════════════════════════════════════════╪═════════╡
    │ pbs2 │ pbs2.example │ sync@pam │64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe │ │
    └──────┴──────────────┴──────────┴───────────────────────────────────────────┴─────────┘
    # proxmox-backup-manager remote remove pbs2


.. _syncjobs:

Sync Jobs
---------

.. image:: images/screenshots/pbs-gui-syncjob-add.png
  :align: right
  :alt: Add a Sync Job

Sync jobs are configured to pull the contents of a datastore on a **Remote** to
a local datastore. You can manage sync jobs under **Configuration -> Sync Jobs**
in the web interface, or using the ``proxmox-backup-manager sync-job`` command.
The configuration information for sync jobs is stored at
``/etc/proxmox-backup/sync.cfg``. To create a new sync job, click the add button
in the GUI, or use the ``create`` subcommand. After creating a sync job, you can
either start it manually on the GUI or provide it with a schedule (see
:ref:`calendar-events`) to run regularly.

.. code-block:: console

    # proxmox-backup-manager sync-job create pbs2-local --remote pbs2 --remote-store local --store local --schedule 'Wed 02:30'
    # proxmox-backup-manager sync-job update pbs2-local --comment 'offsite'
    # proxmox-backup-manager sync-job list
    ┌────────────┬───────┬────────┬──────────────┬───────────┬─────────┐
    │ id │ store │ remote │ remote-store │ schedule │ comment │
    ╞════════════╪═══════╪════════╪══════════════╪═══════════╪═════════╡
    │ pbs2-local │ local │ pbs2 │ local │ Wed 02:30 │ offsite │
    └────────────┴───────┴────────┴──────────────┴───────────┴─────────┘
    # proxmox-backup-manager sync-job remove pbs2-local
docs/network-management.rst (new file, 88 lines)
@@ -0,0 +1,88 @@
Network Management
==================

Proxmox Backup Server provides both a web interface and a command line tool for
network configuration. You can find the configuration options in the web
interface under the **Network Interfaces** section of the **Configuration** menu
tree item. The command line tool is accessed via the ``network`` subcommand.
These interfaces allow you to carry out some basic network management tasks,
such as adding, configuring, and removing network interfaces.

.. note:: Any changes made to the network configuration are not
  applied, until you click on **Apply Configuration** or enter the ``network
  reload`` command. This allows you to make many changes at once. It also allows
  you to ensure that your changes are correct before applying them, as making a
  mistake here can render the server inaccessible over the network.

To get a list of available interfaces, use the following command:

.. code-block:: console

    # proxmox-backup-manager network list
    ┌───────┬────────┬───────────┬────────┬─────────────┬──────────────┬──────────────┐
    │ name │ type │ autostart │ method │ address │ gateway │ ports/slaves │
    ╞═══════╪════════╪═══════════╪════════╪═════════════╪══════════════╪══════════════╡
    │ bond0 │ bond │ 1 │ static │ x.x.x.x/x │ x.x.x.x │ ens18 ens19 │
    ├───────┼────────┼───────────┼────────┼─────────────┼──────────────┼──────────────┤
    │ ens18 │ eth │ 1 │ manual │ │ │ │
    ├───────┼────────┼───────────┼────────┼─────────────┼──────────────┼──────────────┤
    │ ens19 │ eth │ 1 │ manual │ │ │ │
    └───────┴────────┴───────────┴────────┴─────────────┴──────────────┴──────────────┘

.. image:: images/screenshots/pbs-gui-network-create-bond.png
  :align: right
  :alt: Add a network interface

To add a new network interface, use the ``create`` subcommand with the relevant
parameters. For example, you may want to set up a bond, for the purpose of
network redundancy. The following command shows a template for creating the bond shown
in the list above:

.. code-block:: console

    # proxmox-backup-manager network create bond0 --type bond --bond_mode active-backup --slaves ens18,ens19 --autostart true --cidr x.x.x.x/x --gateway x.x.x.x

You can make changes to the configuration of a network interface with the
``update`` subcommand:

.. code-block:: console

    # proxmox-backup-manager network update bond0 --cidr y.y.y.y/y

You can also remove a network interface:

.. code-block:: console

    # proxmox-backup-manager network remove bond0

The pending changes for the network configuration file will appear at the bottom of the
web interface. You can also view these changes, by using the command:

.. code-block:: console

    # proxmox-backup-manager network changes

If you would like to cancel all changes at this point, you can either click on
the **Revert** button or use the following command:

.. code-block:: console

    # proxmox-backup-manager network revert

If you are happy with the changes and would like to write them into the
configuration file, select **Apply Configuration**. The corresponding command
is:

.. code-block:: console

    # proxmox-backup-manager network reload

.. note:: This command and corresponding GUI button rely on the ``ifreload``
  command, from the package ``ifupdown2``. This package is included within the
  Proxmox Backup Server installation, however, you may have to install it yourself,
  if you have installed Proxmox Backup Server on top of Debian or Proxmox VE.

You can also configure DNS settings, from the **DNS** section
of **Configuration** or by using the ``dns`` subcommand of
``proxmox-backup-manager``.
docs/pve-integration.rst (new file, 49 lines)
@@ -0,0 +1,49 @@
.. _pve-integration:

`Proxmox VE`_ Integration
-------------------------

You need to define a new storage with type 'pbs' on your `Proxmox VE`_
node. The following example uses ``store2`` as storage name, and
assumes the server address is ``localhost``, and you want to connect
as ``user1@pbs``.

.. code-block:: console

    # pvesm add pbs store2 --server localhost --datastore store2
    # pvesm set store2 --username user1@pbs --password <secret>

.. note:: If you would rather not pass your password as plain text, you can pass
  the ``--password`` parameter, without any arguments. This will cause the
  program to prompt you for a password upon entering the command.

If your backup server uses a self-signed certificate, you need to add
the certificate fingerprint to the configuration. You can get the
fingerprint by running the following command on the backup server:

.. code-block:: console

    # proxmox-backup-manager cert info | grep Fingerprint
    Fingerprint (sha256): 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe

Please add that fingerprint to your configuration to establish a trust
relationship:

.. code-block:: console

    # pvesm set store2 --fingerprint 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe

After that you should be able to see storage status with:

.. code-block:: console

    # pvesm status --storage store2
    Name Type Status Total Used Available %
    store2 pbs active 3905109820 1336687816 2568422004 34.23%

Having added the PBS datastore to `Proxmox VE`_, you can back up VMs and
containers in the same way you would for any other storage device within the
environment (see `PVE Admin Guide: Backup and Restore
<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_vzdump>`_).
docs/pxar-tool.rst (new file, 5 lines)
@@ -0,0 +1,5 @@
pxar Command Line Tool
======================

.. include:: pxar/description.rst
@@ -47,7 +47,7 @@ by running:
 Be aware that the shell itself will try to expand all of the glob patterns before
 invoking ``pxar``.
 In order to avoid this, all globs have to be quoted correctly.
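As an illustration (the archive name and paths below are made up), each pattern
is single-quoted so the shell passes it to ``pxar`` unmodified:

.. code-block:: console

    # pxar create backup.pxar /path/to/source --exclude '**/*.tmp' --exclude '**/*.log'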

 It is possible to pass the ``--exclude`` parameter multiple times, in order to
 match more than one pattern. This allows you to use more complex
 file exclusion/inclusion behavior. However, it is recommended to use
docs/storage.rst (new file, 244 lines)
@@ -0,0 +1,244 @@
Storage
=======

Disk Management
---------------

.. image:: images/screenshots/pbs-gui-disks.png
  :align: right
  :alt: List of disks

Proxmox Backup Server comes with a set of disk utilities, which are
accessed using the ``disk`` subcommand. This subcommand allows you to initialize
disks, create various filesystems, and get information about the disks.

To view the disks connected to the system, navigate to **Administration ->
Disks** in the web interface or use the ``list`` subcommand of
``disk``:

.. code-block:: console

    # proxmox-backup-manager disk list
    ┌──────┬────────┬─────┬───────────┬─────────────┬───────────────┬─────────┬────────┐
    │ name │ used │ gpt │ disk-type │ size │ model │ wearout │ status │
    ╞══════╪════════╪═════╪═══════════╪═════════════╪═══════════════╪═════════╪════════╡
    │ sda │ lvm │ 1 │ hdd │ 34359738368 │ QEMU_HARDDISK │ - │ passed │
    ├──────┼────────┼─────┼───────────┼─────────────┼───────────────┼─────────┼────────┤
    │ sdb │ unused │ 1 │ hdd │ 68719476736 │ QEMU_HARDDISK │ - │ passed │
    ├──────┼────────┼─────┼───────────┼─────────────┼───────────────┼─────────┼────────┤
    │ sdc │ unused │ 1 │ hdd │ 68719476736 │ QEMU_HARDDISK │ - │ passed │
    └──────┴────────┴─────┴───────────┴─────────────┴───────────────┴─────────┴────────┘

To initialize a disk with a new GPT, use the ``initialize`` subcommand:

.. code-block:: console

    # proxmox-backup-manager disk initialize sdX

.. image:: images/screenshots/pbs-gui-disks-dir-create.png
  :align: right
  :alt: Create a directory

You can create an ``ext4`` or ``xfs`` filesystem on a disk using ``fs
create``, or by navigating to **Administration -> Disks -> Directory** in the
web interface and creating one from there. The following command creates an
``ext4`` filesystem and passes the ``--add-datastore`` parameter, in order to
automatically create a datastore on the disk (in this case ``sdd``). This will
create a datastore at the location ``/mnt/datastore/store1``:

.. code-block:: console

    # proxmox-backup-manager disk fs create store1 --disk sdd --filesystem ext4 --add-datastore true

.. image:: images/screenshots/pbs-gui-disks-zfs-create.png
  :align: right
  :alt: Create ZFS

You can also create a ``zpool`` with various raid levels from **Administration
-> Disks -> Zpool** in the web interface, or by using ``zpool create``. The command
below creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and
mounts it on the root directory (default):

.. code-block:: console

    # proxmox-backup-manager disk zpool create zpool1 --devices sdb,sdc --raidlevel mirror

.. note:: You can also pass the ``--add-datastore`` parameter here, to automatically
  create a datastore from the disk.

You can use ``disk fs list`` and ``disk zpool list`` to keep track of your
filesystems and zpools respectively.

Proxmox Backup Server uses the package smartmontools. This is a set of tools
used to monitor and control the S.M.A.R.T. system for local hard disks. If a
disk supports S.M.A.R.T. capability, and you have this enabled, you can
display S.M.A.R.T. attributes from the web interface or by using the command:

.. code-block:: console

    # proxmox-backup-manager disk smart-attributes sdX

.. note:: This functionality may also be accessed directly through the use of
  the ``smartctl`` command, which comes as part of the smartmontools package
  (see ``man smartctl`` for more details).


.. _datastore_intro:

:term:`DataStore`
-----------------

A datastore refers to a location at which backups are stored. The current
implementation uses a directory inside a standard Unix file system (``ext4``,
``xfs`` or ``zfs``) to store the backup data.

Datastores are identified by a simple *ID*. You can configure this
when setting up the datastore. The configuration information for datastores
is stored in the file ``/etc/proxmox-backup/datastore.cfg``.

.. note:: The `File Layout`_ requires the file system to support at least *65538*
  subdirectories per directory. That number comes from the 2\ :sup:`16`
  pre-created chunk namespace directories, and the ``.`` and ``..`` default
  directory entries. This requirement excludes certain filesystems and
  filesystem configurations from being supported for a datastore. For example,
  ``ext3`` as a whole or ``ext4`` with the ``dir_nlink`` feature manually disabled.


Datastore Configuration
~~~~~~~~~~~~~~~~~~~~~~~

.. image:: images/screenshots/pbs-gui-datastore.png
  :align: right
  :alt: Datastore Overview

You can configure multiple datastores. At least one datastore needs to be
configured. The datastore is identified by a simple *name* and points to a
directory on the filesystem. Each datastore also has associated retention
settings of how many backup snapshots for each interval of ``hourly``,
``daily``, ``weekly``, ``monthly``, ``yearly`` as well as a time-independent
number of backups to keep in that store. :ref:`backup-pruning` and
:ref:`garbage collection <garbage-collection>` can also be configured to run
periodically based on a configured schedule (see :ref:`calendar-events`) per datastore.


Creating a Datastore
^^^^^^^^^^^^^^^^^^^^

.. image:: images/screenshots/pbs-gui-datastore-create-general.png
  :align: right
  :alt: Create a datastore

You can create a new datastore from the web GUI, by navigating to **Datastore** in
the menu tree and clicking **Create**. Here:

* *Name* refers to the name of the datastore
* *Backing Path* is the path to the directory upon which you want to create the
  datastore
* *GC Schedule* refers to the time and intervals at which garbage collection
  runs
* *Prune Schedule* refers to the frequency at which pruning takes place
* *Prune Options* set the amount of backups which you would like to keep (see :ref:`backup-pruning`).

Alternatively you can create a new datastore from the command line. The
following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`:

.. code-block:: console

    # proxmox-backup-manager datastore create store1 /backup/disk1/store1


Managing Datastores
^^^^^^^^^^^^^^^^^^^

To list existing datastores from the command line, run:

.. code-block:: console

    # proxmox-backup-manager datastore list
    ┌────────┬──────────────────────┬─────────────────────────────┐
    │ name │ path │ comment │
    ╞════════╪══════════════════════╪═════════════════════════════╡
    │ store1 │ /backup/disk1/store1 │ This is my default storage. │
    └────────┴──────────────────────┴─────────────────────────────┘

You can change the garbage collection and prune settings of a datastore, by
editing the datastore from the GUI or by using the ``update`` subcommand. For
example, the below command changes the garbage collection schedule using the
``update`` subcommand and prints the properties of the datastore with the
``show`` subcommand:

.. code-block:: console

    # proxmox-backup-manager datastore update store1 --gc-schedule 'Tue 04:27'
    # proxmox-backup-manager datastore show store1
    ┌────────────────┬─────────────────────────────┐
    │ Name │ Value │
    ╞════════════════╪═════════════════════════════╡
    │ name │ store1 │
    ├────────────────┼─────────────────────────────┤
    │ path │ /backup/disk1/store1 │
    ├────────────────┼─────────────────────────────┤
    │ comment │ This is my default storage. │
    ├────────────────┼─────────────────────────────┤
    │ gc-schedule │ Tue 04:27 │
    ├────────────────┼─────────────────────────────┤
    │ keep-last │ 7 │
    ├────────────────┼─────────────────────────────┤
    │ prune-schedule │ daily │
    └────────────────┴─────────────────────────────┘

Finally, it is possible to remove the datastore configuration:

.. code-block:: console

    # proxmox-backup-manager datastore remove store1

.. note:: The above command removes only the datastore configuration. It does
  not delete any data from the underlying directory.


File Layout
^^^^^^^^^^^

After creating a datastore, the following default layout will appear:

.. code-block:: console

    # ls -arilh /backup/disk1/store1
    276493 -rw-r--r-- 1 backup backup 0 Jul 8 12:35 .lock
    276490 drwxr-x--- 1 backup backup 1064960 Jul 8 12:35 .chunks

`.lock` is an empty file used for process locking.

The `.chunks` directory contains folders, starting from `0000` and taking hexadecimal values until `ffff`. These
directories will store the chunked data after a backup operation has been executed.

.. code-block:: console

    # ls -arilh /backup/disk1/store1/.chunks
    545824 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 ffff
    545823 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffe
    415621 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffd
    415620 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffc
    353187 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffb
    344995 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffa
    144079 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fff9
    144078 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fff8
    144077 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fff7
    ...
    403180 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 000c
    403179 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 000b
    403177 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 000a
    402530 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0009
    402513 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0008
    402509 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0007
    276509 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0006
    276508 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0005
    276507 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0004
    276501 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0003
    276499 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0002
    276498 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0001
    276494 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0000
    276489 drwxr-xr-x 3 backup backup 4.0K Jul 8 12:35 ..
    276490 drwxr-x--- 1 backup backup 1.1M Jul 8 12:35 .
docs/system-requirements.rst (new file, 57 lines)
@@ -0,0 +1,57 @@
System Requirements
-------------------

We recommend using high quality server hardware when running Proxmox Backup in
production. To further decrease the impact of a failed host, you can set up
periodic, efficient, incremental :ref:`datastore synchronization <syncjobs>`
from other Proxmox Backup Server instances.

Minimum Server Requirements, for Evaluation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

These minimum requirements are for evaluation purposes only and should not be
used in production.

* CPU: 64bit (*x86-64* or *AMD64*), 2+ Cores

* Memory (RAM): 2 GB RAM

* Hard drive: more than 8GB of space.

* Network card (NIC)


Recommended Server System Requirements
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* CPU: Modern AMD or Intel 64-bit based CPU, with at least 4 cores

* Memory: minimum 4 GiB for the OS, filesystem cache and Proxmox Backup Server
  daemons. Add at least another GiB per TiB storage space.

* OS storage:

  * 32 GiB, or more, free storage space
  * Use a hardware RAID with battery protected write cache (*BBU*) or a
    redundant ZFS setup (ZFS is not compatible with a hardware RAID
    controller).

* Backup storage:

  * Use only SSDs, for best results
  * If HDDs are used: Using a metadata cache is highly recommended, for example,
    add a ZFS :ref:`special device mirror <local_zfs_special_device>`.

* Redundant Multi-GBit/s network interface cards (NICs)


Supported Web Browsers for Accessing the Web Interface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

To access the server's web-based user interface, we recommend using one of the
following browsers:

* Firefox, a release from the current year, or the latest Extended Support Release
* Chrome, a release from the current year
* Microsoft's currently supported version of Edge
* Safari, a release from the current year
docs/terminology.rst (new file, 118 lines)
@@ -0,0 +1,118 @@
Terminology
===========

Backup Content
--------------

When doing deduplication, there are different strategies to get
optimal results in terms of performance and/or deduplication rates.
Depending on the type of data, it can be split into *fixed* or *variable*
sized chunks.

Fixed sized chunking requires minimal CPU power, and is used to
back up virtual machine images.

Variable sized chunking needs more CPU power, but is essential to get
good deduplication rates for file archives.

The Proxmox Backup Server supports both strategies.


Image Archives: ``<name>.img``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This is used for virtual machine images and other large binary
data. Content is split into fixed-sized chunks.


File Archives: ``<name>.pxar``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. see https://moinakg.wordpress.com/2013/06/22/high-performance-content-defined-chunking/

A file archive stores a full directory tree. Content is stored using
the :ref:`pxar-format`, split into variable-sized chunks. The format
is optimized to achieve good deduplication rates.


Binary Data (BLOBs)
~~~~~~~~~~~~~~~~~~~

This type is used to store smaller (< 16MB) binary data such as
configuration files. Larger files should be stored as an image archive.

.. caution:: Please do not store all files as BLOBs. Instead, use the
  file archive to store whole directory trees.


Catalog File: ``catalog.pcat1``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The catalog file is an index for file archives. It contains
the list of files and is used to speed up search operations.


The Manifest: ``index.json``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The manifest contains the list of all backup files, their
sizes and checksums. It is used to verify the consistency of a
backup.


Backup Type
-----------

The backup server groups backups by *type*, where *type* is one of:

``vm``
  This type is used for :term:`virtual machine`\ s. Typically
  consists of the virtual machine's configuration file and an image archive
  for each disk.

``ct``
  This type is used for :term:`container`\ s. Consists of the container's
  configuration and a single file archive for the filesystem content.

``host``
  This type is used for backups created from within the backed up machine.
  Typically this would be a physical host, but it could also be a virtual machine
  or container. Such backups may contain file and image archives; there are no
  restrictions in this regard.


Backup ID
---------

A unique ID. Usually the virtual machine or container ID. ``host``
type backups normally use the hostname.


Backup Time
-----------

The time when the backup was made.


Backup Group
------------

The tuple ``<type>/<ID>`` is called a backup group. Such a group
may contain one or more backup snapshots.


Backup Snapshot
---------------

The triplet ``<type>/<ID>/<time>`` is called a backup snapshot. It
uniquely identifies a specific backup within a datastore.

.. code-block:: console
  :caption: Backup Snapshot Examples

  vm/104/2019-10-09T08:01:06Z
  host/elsa/2019-11-08T09:48:14Z

As you can see, the time format is RFC3339_ with Coordinated
Universal Time (UTC_, identified by the trailing *Z*).
docs/user-management.rst (new file, 186 lines)
@@ -0,0 +1,186 @@
.. _user_mgmt:

User Management
===============


User Configuration
------------------

.. image:: images/screenshots/pbs-gui-user-management.png
  :align: right
  :alt: User management

Proxmox Backup Server supports several authentication realms, and you need to
choose the realm when you add a new user. Possible realms are:

:pam: Linux PAM standard authentication. Use this if you want to
  authenticate as a Linux system user (users need to exist on the
  system).

:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
  ``/etc/proxmox-backup/shadow.json``.

After installation, there is a single user ``root@pam``, which
corresponds to the Unix superuser. User configuration information is stored in the file
``/etc/proxmox-backup/user.cfg``. You can use the
``proxmox-backup-manager`` command line tool to list or manipulate
users:

.. code-block:: console

    # proxmox-backup-manager user list
    ┌─────────────┬────────┬────────┬───────────┬──────────┬────────────────┬────────────────────┐
    │ userid │ enable │ expire │ firstname │ lastname │ email │ comment │
    ╞═════════════╪════════╪════════╪═══════════╪══════════╪════════════════╪════════════════════╡
    │ root@pam │ 1 │ │ │ │ │ Superuser │
    └─────────────┴────────┴────────┴───────────┴──────────┴────────────────┴────────────────────┘

.. image:: images/screenshots/pbs-gui-user-management-add-user.png
  :align: right
  :alt: Add a new user

The superuser has full administration rights on everything, so you
normally want to add other users with fewer privileges. You can create a new
user with the ``user create`` subcommand or through the web interface, under
**Configuration -> User Management**. The ``create`` subcommand lets you specify
many options like ``--email`` or ``--password``. You can update or change any
user properties using the ``update`` subcommand later (**Edit** in the GUI):

.. code-block:: console

    # proxmox-backup-manager user create john@pbs --email john@example.com
    # proxmox-backup-manager user update john@pbs --firstname John --lastname Smith
    # proxmox-backup-manager user update john@pbs --comment "An example user."

.. todo:: Mention how to set password without passing plaintext password as cli argument.

The resulting user list looks like this:

.. code-block:: console

    # proxmox-backup-manager user list
    ┌──────────┬────────┬────────┬───────────┬──────────┬──────────────────┬──────────────────┐
    │ userid │ enable │ expire │ firstname │ lastname │ email │ comment │
    ╞══════════╪════════╪════════╪═══════════╪══════════╪══════════════════╪══════════════════╡
    │ john@pbs │ 1 │ │ John │ Smith │ john@example.com │ An example user. │
    ├──────────┼────────┼────────┼───────────┼──────────┼──────────────────┼──────────────────┤
    │ root@pam │ 1 │ │ │ │ │ Superuser │
    └──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘

Newly created users do not have any permissions. Please read the next
section to learn how to set access permissions.

If you want to disable a user account, you can do that by setting ``--enable`` to ``0``:

.. code-block:: console

    # proxmox-backup-manager user update john@pbs --enable 0

Or completely remove the user with:

.. code-block:: console

    # proxmox-backup-manager user remove john@pbs


.. _user_acl:

Access Control
--------------

By default, new users do not have any permissions. Instead, you need to
specify what is allowed and what is not. You can do this by assigning
roles to users on specific objects, like datastores or remotes. The
following roles exist:

**NoAccess**
  Disable Access - nothing is allowed.

**Admin**
  Can do anything.

**Audit**
  Can view things, but is not allowed to change settings.

**DatastoreAdmin**
  Can do anything on datastores.

**DatastoreAudit**
  Can view datastore settings and list content. But
  is not allowed to read the actual data.

**DatastoreReader**
  Can inspect datastore content and do restores.

**DatastoreBackup**
  Can backup and restore owned backups.

**DatastorePowerUser**
  Can backup, restore, and prune owned backups.

**RemoteAdmin**
  Can do anything on remotes.

**RemoteAudit**
  Can view remote settings.

**RemoteSyncOperator**
  Is allowed to read data from a remote.

.. image:: images/screenshots/pbs-gui-permissions-add.png
  :align: right
  :alt: Add permissions for user

Access permission information is stored in ``/etc/proxmox-backup/acl.cfg``. The
file contains 5 fields, separated using a colon (':') as a delimiter. A typical
entry takes the form:

``acl:1:/datastore:john@pbs:DatastoreBackup``

The data represented in each field is as follows:

#. ``acl`` identifier
#. A ``1`` or ``0``, representing whether propagation is enabled or disabled,
   respectively
#. The object on which the permission is set. This can be a specific object
   (single datastore, remote, etc.) or a top level object, which, with
   propagation enabled, also represents all children of the object.
#. The user for which the permission is set
#. The role being set

You can manage datastore permissions from **Configuration -> Permissions** in the
web interface. Likewise, you can use the ``acl`` subcommand to manage and
monitor user permissions from the command line. For example, the command below
will add the user ``john@pbs`` as a **DatastoreAdmin** for the datastore
``store1``, located at ``/backup/disk1/store1``:

.. code-block:: console

    # proxmox-backup-manager acl update /datastore/store1 DatastoreAdmin --userid john@pbs

You can monitor the roles of each user using the following command:

.. code-block:: console

    # proxmox-backup-manager acl list
    ┌──────────┬──────────────────┬───────────┬────────────────┐
    │ ugid │ path │ propagate │ roleid │
    ╞══════════╪══════════════════╪═══════════╪════════════════╡
    │ john@pbs │ /datastore/disk1 │ 1 │ DatastoreAdmin │
    └──────────┴──────────────────┴───────────┴────────────────┘

A single user can be assigned multiple permission sets for different datastores.

.. Note::
  Naming convention is important here. For datastores on the host,
  you must use the convention ``/datastore/{storename}``. For example, to set
  permissions for a datastore mounted at ``/mnt/backup/disk4/store2``, you would use
  ``/datastore/store2`` for the path. For remote stores, use the convention
  ``/remote/{remote}/{storename}``, where ``{remote}`` signifies the name of the
  remote (see `Remote` below) and ``{storename}`` is the name of the datastore on
  the remote.
@@ -32,7 +32,7 @@ async fn run() -> Result<(), Error> {
        .interactive(true)
        .ticket_cache(true);

-   let client = HttpClient::new(host, username, options)?;
+   let client = HttpClient::new(host, 8007, username, options)?;

    let backup_time = proxmox::tools::time::parse_rfc3339("2019-06-28T10:49:48Z")?;

@@ -14,7 +14,7 @@ async fn upload_speed() -> Result<f64, Error> {
        .interactive(true)
        .ticket_cache(true);

-   let client = HttpClient::new(host, username, options)?;
+   let client = HttpClient::new(host, 8007, username, options)?;

    let backup_time = proxmox::tools::time::epoch_i64();

@@ -175,7 +175,7 @@ pub fn update_acl(
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

-   let _lock = open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+   let _lock = open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

    let (mut tree, expected_digest) = acl::config()?;

@@ -100,7 +100,7 @@ pub fn list_users(
/// Create new user.
pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error> {

-   let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+   let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

    let user: user::User = serde_json::from_value(param)?;

@@ -211,7 +211,7 @@ pub fn update_user(
    digest: Option<String>,
) -> Result<(), Error> {

-   let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+   let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

    let (mut config, expected_digest) = user::config()?;

@@ -285,7 +285,7 @@ pub fn update_user(
/// Remove a user from the configuration file.
pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error> {

-   let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+   let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

    let (mut config, expected_digest) = user::config()?;

@@ -518,7 +518,14 @@ pub fn verify(
    let failed_dirs = if let Some(backup_dir) = backup_dir {
        let mut res = Vec::new();
-       if !verify_backup_dir(datastore, &backup_dir, verified_chunks, corrupt_chunks, worker.clone())? {
+       if !verify_backup_dir(
+           datastore,
+           &backup_dir,
+           verified_chunks,
+           corrupt_chunks,
+           worker.clone(),
+           worker.upid().clone(),
+       )? {
            res.push(backup_dir.to_string());
        }
        res

@@ -530,10 +537,11 @@ pub fn verify(
            corrupt_chunks,
            None,
            worker.clone(),
+           worker.upid(),
        )?;
        failed_dirs
    } else {
-       verify_all_backups(datastore, worker.clone())?
+       verify_all_backups(datastore, worker.clone(), worker.upid())?
    };
    if failed_dirs.len() > 0 {
        worker.log("Failed to verify following snapshots:");

@@ -770,7 +778,7 @@ fn start_garbage_collection(
        to_stdout,
        move |worker| {
            worker.log(format!("starting garbage collection on store {}", store));
-           datastore.garbage_collection(&worker)
+           datastore.garbage_collection(&*worker, worker.upid())
        },
    )?;

@@ -1484,6 +1492,51 @@ fn set_notes(
    Ok(())
}

+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "backup-type": {
+                schema: BACKUP_TYPE_SCHEMA,
+            },
+            "backup-id": {
+                schema: BACKUP_ID_SCHEMA,
+            },
+            "new-owner": {
+                type: Userid,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
+    },
+)]
+/// Change owner of a backup group
+fn set_backup_owner(
+    store: String,
+    backup_type: String,
+    backup_id: String,
+    new_owner: Userid,
+    _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+
+    let datastore = DataStore::lookup_datastore(&store)?;
+
+    let backup_group = BackupGroup::new(backup_type, backup_id);
+
+    let user_info = CachedUserInfo::new()?;
+
+    if !user_info.is_active_user(&new_owner) {
+        bail!("user '{}' is inactive or non-existent", new_owner);
+    }
+
+    datastore.set_owner(&backup_group, &new_owner, true)?;
+
+    Ok(())
+}
+
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
@@ -1491,6 +1544,11 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
+   (
+       "change-owner",
+       &Router::new()
+           .post(&API_METHOD_SET_BACKUP_OWNER)
+   ),
    (
        "download",
        &Router::new()
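The parameter names above come directly from the ``#[api]`` schema of
``set_backup_owner``. As a rough, hypothetical sketch only (the URL prefix,
authentication cookie/token handling, and the store, group and user values are
assumptions, not part of this diff), the new route could be exercised like this:

    # POST to the new change-owner endpoint (all values are placeholders)
    # curl -k -b "PBSAuthCookie=..." -H "CSRFPreventionToken: ..." \
    #     -X POST https://localhost:8007/api2/json/admin/datastore/store1/change-owner \
    #     -d backup-type=vm -d backup-id=104 -d new-owner=john@pbs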
@@ -200,7 +200,7 @@ async move {
    };
    if benchmark {
        env.log("benchmark finished successfully");
-       env.remove_backup()?;
+       tools::runtime::block_in_place(|| env.remove_backup())?;
        return Ok(());
    }
    match (res, env.ensure_finished()) {

@@ -222,7 +222,7 @@ async move {
    (Err(err), Err(_)) => {
        env.log(format!("backup failed: {}", err));
        env.log("removing failed backup");
-       env.remove_backup()?;
+       tools::runtime::block_in_place(|| env.remove_backup())?;
        Err(err)
    },
}

@@ -9,7 +9,7 @@ use proxmox::tools::digest_to_hex;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::api::{RpcEnvironment, RpcEnvironmentType};

-use crate::api2::types::{Userid, SnapshotVerifyState, VerifyState};
+use crate::api2::types::Userid;
use crate::backup::*;
use crate::server::WorkerTask;
use crate::server::formatter::*;

@@ -66,8 +66,8 @@ struct FixedWriterState {
    incremental: bool,
}

-// key=digest, value=(length, existance checked)
-type KnownChunksMap = HashMap<[u8;32], (u32, bool)>;
+// key=digest, value=length
+type KnownChunksMap = HashMap<[u8;32], u32>;

struct SharedBackupState {
    finished: bool,

@@ -156,7 +156,7 @@ impl BackupEnvironment {
        state.ensure_unfinished()?;

-       state.known_chunks.insert(digest, (length, false));
+       state.known_chunks.insert(digest, length);

        Ok(())
    }

@@ -198,7 +198,7 @@ impl BackupEnvironment {
        if is_duplicate { data.upload_stat.duplicates += 1; }

        // register chunk
-       state.known_chunks.insert(digest, (size, true));
+       state.known_chunks.insert(digest, size);

        Ok(())
    }

@@ -231,7 +231,7 @@ impl BackupEnvironment {
        if is_duplicate { data.upload_stat.duplicates += 1; }

        // register chunk
-       state.known_chunks.insert(digest, (size, true));
+       state.known_chunks.insert(digest, size);

        Ok(())
    }

@@ -240,7 +240,7 @@ impl BackupEnvironment {
        let state = self.state.lock().unwrap();

        match state.known_chunks.get(digest) {
-           Some((len, _)) => Some(*len),
+           Some(len) => Some(*len),
            None => None,
        }
    }

@@ -457,47 +457,6 @@ impl BackupEnvironment {
        Ok(())
    }

-   /// Ensure all chunks referenced in this backup actually exist.
-   /// Only call *after* all writers have been closed, to avoid race with GC.
-   /// In case of error, mark the previous backup as 'verify failed'.
-   fn verify_chunk_existance(&self, known_chunks: &KnownChunksMap) -> Result<(), Error> {
-       for (digest, (_, checked)) in known_chunks.iter() {
-           if !checked && !self.datastore.chunk_path(digest).0.exists() {
|
|
||||||
let mark_msg = if let Some(ref last_backup) = self.last_backup {
|
|
||||||
let last_dir = &last_backup.backup_dir;
|
|
||||||
let verify_state = SnapshotVerifyState {
|
|
||||||
state: VerifyState::Failed,
|
|
||||||
upid: self.worker.upid().clone(),
|
|
||||||
};
|
|
||||||
|
|
||||||
let res = proxmox::try_block!{
|
|
||||||
let (mut manifest, _) = self.datastore.load_manifest(last_dir)?;
|
|
||||||
manifest.unprotected["verify_state"] = serde_json::to_value(verify_state)?;
|
|
||||||
self.datastore.store_manifest(last_dir, serde_json::to_value(manifest)?)
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Err(err) = res {
|
|
||||||
format!("tried marking previous snapshot as bad, \
|
|
||||||
but got error accessing manifest: {}", err)
|
|
||||||
} else {
|
|
||||||
"marked previous snapshot as bad, please use \
|
|
||||||
'verify' for a detailed check".to_owned()
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
"internal error: no base backup registered to mark invalid".to_owned()
|
|
||||||
};
|
|
||||||
|
|
||||||
bail!(
|
|
||||||
"chunk '{}' was attempted to be reused but doesn't exist - {}",
|
|
||||||
digest_to_hex(digest),
|
|
||||||
mark_msg
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Mark backup as finished
|
/// Mark backup as finished
|
||||||
pub fn finish_backup(&self) -> Result<(), Error> {
|
pub fn finish_backup(&self) -> Result<(), Error> {
|
||||||
let mut state = self.state.lock().unwrap();
|
let mut state = self.state.lock().unwrap();
|
||||||
@ -534,8 +493,6 @@ impl BackupEnvironment {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
self.verify_chunk_existance(&state.known_chunks)?;
|
|
||||||
|
|
||||||
// marks the backup as successful
|
// marks the backup as successful
|
||||||
state.finished = true;
|
state.finished = true;
|
||||||
|
|
||||||
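The KnownChunksMap change and the removal of verify_chunk_existance() above go together: the per-chunk boolean only recorded whether that finish-time existence check had already been performed, so once the check is gone the map can store plain chunk lengths. A minimal sketch of the simplified map (the helper names here are illustrative, not the actual BackupEnvironment API):

    use std::collections::HashMap;

    // digest -> chunk length, matching the simplified KnownChunksMap above
    type KnownChunksMap = HashMap<[u8; 32], u32>;

    fn register_chunk(map: &mut KnownChunksMap, digest: [u8; 32], len: u32) {
        // re-registering a digest simply overwrites the stored length
        map.insert(digest, len);
    }

    fn lookup_chunk_len(map: &KnownChunksMap, digest: &[u8; 32]) -> Option<u32> {
        // no "existence checked" flag left to strip off
        map.get(digest).copied()
    }

    fn main() {
        let mut known = KnownChunksMap::new();
        register_chunk(&mut known, [0u8; 32], 4096);
        assert_eq!(lookup_chunk_len(&known, &[0u8; 32]), Some(4096));
    }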
@@ -61,12 +61,15 @@ impl Future for UploadChunk {
 let (is_duplicate, compressed_size) = match proxmox::try_block! {
 let mut chunk = DataBlob::from_raw(raw_data)?;

-chunk.verify_unencrypted(this.size as usize, &this.digest)?;
+tools::runtime::block_in_place(|| {
+chunk.verify_unencrypted(this.size as usize, &this.digest)?;

 // always comput CRC at server side
 chunk.set_crc(chunk.compute_crc());

+this.store.insert_chunk(&chunk, &this.digest)
+})
+
-this.store.insert_chunk(&chunk, &this.digest)
 } {
 Ok(res) => res,
 Err(err) => break err,
@@ -11,7 +11,7 @@ use crate::api2::types::*;
 use crate::backup::*;
 use crate::config::cached_user_info::CachedUserInfo;
 use crate::config::datastore::{self, DataStoreConfig, DIR_NAME_SCHEMA};
-use crate::config::acl::{PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY};
+use crate::config::acl::{PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY};

 #[api(
 input: {
@@ -106,13 +106,13 @@ pub fn list_datastores(
 },
 },
 access: {
-permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_MODIFY, false),
+permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_ALLOCATE, false),
 },
 )]
 /// Create new datastore config.
 pub fn create_datastore(param: Value) -> Result<(), Error> {

-let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

 let datastore: datastore::DataStoreConfig = serde_json::from_value(param.clone())?;

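From here on, every open_file_locked() call in the config API handlers gains a third boolean argument (always true at these call sites), in line with the bumped proxmox dependency. Assuming that flag selects an exclusive rather than shared lock, the call-site pattern is roughly the sketch below; the helper and the lock-file path are stand-ins, not the real proxmox::tools::fs implementation:

    use std::fs::{File, OpenOptions};
    use std::time::Duration;
    use anyhow::Error;

    // Assumed shape of the updated helper: the third parameter requests an
    // exclusive lock instead of a shared one. A real implementation would
    // flock() the file (LOCK_EX vs LOCK_SH) and retry until the timeout.
    fn open_file_locked(path: &str, _timeout: Duration, _exclusive: bool) -> Result<File, Error> {
        Ok(OpenOptions::new().read(true).write(true).create(true).open(path)?)
    }

    fn update_some_config() -> Result<(), Error> {
        // the pattern used by the create/update/delete handlers above
        let _lock = open_file_locked("example-config.lck", Duration::new(10, 0), true)?;
        // read, modify and write the config while the guard is held
        Ok(())
    }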
@@ -277,7 +277,7 @@ pub fn update_datastore(
 digest: Option<String>,
 ) -> Result<(), Error> {

-let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

 // pass/compare digest
 let (mut config, expected_digest) = datastore::config()?;
@@ -375,13 +375,13 @@ pub fn update_datastore(
 },
 },
 access: {
-permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_MODIFY, false),
+permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_ALLOCATE, false),
 },
 )]
 /// Remove a datastore configuration.
 pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> {

-let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

 let (mut config, expected_digest) = datastore::config()?;

@@ -60,6 +60,12 @@ pub fn list_remotes(
 host: {
 schema: DNS_NAME_OR_IP_SCHEMA,
 },
+port: {
+description: "The (optional) port.",
+type: u16,
+optional: true,
+default: 8007,
+},
 userid: {
 type: Userid,
 },
@@ -79,7 +85,7 @@ pub fn list_remotes(
 /// Create new remote.
 pub fn create_remote(password: String, param: Value) -> Result<(), Error> {

-let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

 let mut data = param.clone();
 data["password"] = Value::from(base64::encode(password.as_bytes()));
@@ -136,6 +142,8 @@ pub enum DeletableProperty {
 comment,
 /// Delete the fingerprint property.
 fingerprint,
+/// Delete the port property.
+port,
 }

 #[api(
@@ -153,6 +161,11 @@ pub enum DeletableProperty {
 optional: true,
 schema: DNS_NAME_OR_IP_SCHEMA,
 },
+port: {
+description: "The (optional) port.",
+type: u16,
+optional: true,
+},
 userid: {
 optional: true,
 type: Userid,
@@ -188,6 +201,7 @@ pub fn update_remote(
 name: String,
 comment: Option<String>,
 host: Option<String>,
+port: Option<u16>,
 userid: Option<Userid>,
 password: Option<String>,
 fingerprint: Option<String>,
@@ -195,7 +209,7 @@ pub fn update_remote(
 digest: Option<String>,
 ) -> Result<(), Error> {

-let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

 let (mut config, expected_digest) = remote::config()?;

@@ -211,6 +225,7 @@ pub fn update_remote(
 match delete_prop {
 DeletableProperty::comment => { data.comment = None; },
 DeletableProperty::fingerprint => { data.fingerprint = None; },
+DeletableProperty::port => { data.port = None; },
 }
 }
 }
@@ -224,6 +239,7 @@ pub fn update_remote(
 }
 }
 if let Some(host) = host { data.host = host; }
+if port.is_some() { data.port = port; }
 if let Some(userid) = userid { data.userid = userid; }
 if let Some(password) = password { data.password = password; }

@@ -256,7 +272,7 @@ pub fn update_remote(
 /// Remove a remote from the configuration file.
 pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error> {

-let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

 let (mut config, expected_digest) = remote::config()?;

@@ -69,7 +69,7 @@ pub fn list_sync_jobs(
 /// Create a new sync job.
 pub fn create_sync_job(param: Value) -> Result<(), Error> {

-let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

 let sync_job: sync::SyncJobConfig = serde_json::from_value(param.clone())?;

@@ -187,7 +187,7 @@ pub fn update_sync_job(
 digest: Option<String>,
 ) -> Result<(), Error> {

-let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

 // pass/compare digest
 let (mut config, expected_digest) = sync::config()?;
@@ -250,7 +250,7 @@ pub fn update_sync_job(
 /// Remove a sync job configuration
 pub fn delete_sync_job(id: String, digest: Option<String>) -> Result<(), Error> {

-let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

 let (mut config, expected_digest) = sync::config()?;

@@ -25,6 +25,8 @@ use crate::server::WorkerTask;

 use crate::api2::types::*;

+use crate::tools::systemd;
+
 pub const DISK_ARRAY_SCHEMA: Schema = ArraySchema::new(
 "Disk name list.", &BLOCKDEVICE_NAME_SCHEMA)
 .schema();
@@ -355,6 +357,11 @@ pub fn create_zpool(
 let output = crate::tools::run_command(command, None)?;
 worker.log(output);

+if std::path::Path::new("/lib/systemd/system/zfs-import@.service").exists() {
+let import_unit = format!("zfs-import@{}.service", systemd::escape_unit(&name, false));
+systemd::enable_unit(&import_unit)?;
+}
+
 if let Some(compression) = compression {
 let mut command = std::process::Command::new("zfs");
 command.args(&["set", &format!("compression={}", compression), &name]);
@@ -241,7 +241,7 @@ pub fn create_interface(
 let interface_type = crate::tools::required_string_param(&param, "type")?;
 let interface_type: NetworkInterfaceType = serde_json::from_value(interface_type.into())?;

-let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
+let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0), true)?;

 let (mut config, _digest) = network::config()?;

@@ -505,7 +505,7 @@ pub fn update_interface(
 param: Value,
 ) -> Result<(), Error> {

-let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
+let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0), true)?;

 let (mut config, expected_digest) = network::config()?;

@@ -646,7 +646,7 @@ pub fn update_interface(
 /// Remove network interface configuration.
 pub fn delete_interface(iface: String, digest: Option<String>) -> Result<(), Error> {

-let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
+let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0), true)?;

 let (mut config, expected_digest) = network::config()?;

@@ -10,7 +10,7 @@ use proxmox::{identity, list_subdirs_api_method, sortable};

 use crate::tools;
 use crate::api2::types::*;
-use crate::server::{self, UPID, TaskState};
+use crate::server::{self, UPID, TaskState, TaskListInfoIterator};
 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
 use crate::config::cached_user_info::CachedUserInfo;

@@ -303,6 +303,7 @@ pub fn list_tasks(
 limit: u64,
 errors: bool,
 running: bool,
+userfilter: Option<String>,
 param: Value,
 mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<TaskListItem>, Error> {
@@ -315,57 +316,55 @@ pub fn list_tasks(

 let store = param["store"].as_str();

-let userfilter = param["userfilter"].as_str();
+let list = TaskListInfoIterator::new(running)?;

-let list = server::read_task_list()?;
+let result: Vec<TaskListItem> = list
+.take_while(|info| !info.is_err())
+.filter_map(|info| {
+let info = match info {
+Ok(info) => info,
+Err(_) => return None,
+};

-let mut result = vec![];
+if !list_all && info.upid.userid != userid { return None; }

-let mut count = 0;
+if let Some(userid) = &userfilter {
+if !info.upid.userid.as_str().contains(userid) { return None; }
-for info in list {
-if !list_all && info.upid.userid != userid { continue; }


-if let Some(userid) = userfilter {
-if !info.upid.userid.as_str().contains(userid) { continue; }
 }

 if let Some(store) = store {
 // Note: useful to select all tasks spawned by proxmox-backup-client
 let worker_id = match &info.upid.worker_id {
 Some(w) => w,
-None => continue, // skip
+None => return None, // skip
 };

 if info.upid.worker_type == "backup" || info.upid.worker_type == "restore" ||
 info.upid.worker_type == "prune"
 {
 let prefix = format!("{}_", store);
-if !worker_id.starts_with(&prefix) { continue; }
+if !worker_id.starts_with(&prefix) { return None; }
 } else if info.upid.worker_type == "garbage_collection" {
-if worker_id != store { continue; }
+if worker_id != store { return None; }
 } else {
-continue; // skip
+return None; // skip
 }
 }

-if let Some(ref state) = info.state {
+match info.state {
-if running { continue; }
+Some(_) if running => return None,
-match state {
+Some(crate::server::TaskState::OK { .. }) if errors => return None,
-crate::server::TaskState::OK { .. } if errors => continue,
+_ => {},
-_ => {},
-}
 }

-if (count as u64) < start {
+Some(info.into())
-count += 1;
+}).skip(start as usize)
-continue;
+.take(limit as usize)
-} else {
+.collect();
-count += 1;
-}

-if (result.len() as u64) < limit { result.push(info.into()); };
+let mut count = result.len() + start as usize;
+if result.len() > 0 && result.len() >= limit as usize { // we have a 'virtual' entry as long as we have any new
+count += 1;
 }

 rpcenv["total"] = Value::from(count);
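With the iterator-based rework of list_tasks() above, the handler no longer counts every task on disk: it returns at most limit entries after skipping start, and reports result.len() + start as the total, plus one extra "virtual" entry whenever a full page came back so paging clients know more data may follow. The counting rule, pulled out as a plain function for illustration:

    // Mirrors the total computed at the end of the new list_tasks():
    // returned items plus the skipped offset, plus a "virtual" entry
    // when the page is full (there may be more tasks).
    fn reported_total(result_len: usize, start: usize, limit: usize) -> usize {
        let mut count = result_len + start;
        if result_len > 0 && result_len >= limit {
            count += 1;
        }
        count
    }

    fn main() {
        assert_eq!(reported_total(50, 0, 50), 51);    // full page: signal "probably more"
        assert_eq!(reported_total(17, 100, 50), 117); // short page: exact total
    }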
@@ -55,12 +55,13 @@ pub async fn get_pull_parameters(
 .password(Some(remote.password.clone()))
 .fingerprint(remote.fingerprint.clone());

-let client = HttpClient::new(&remote.host, &remote.userid, options)?;
+let src_repo = BackupRepository::new(Some(remote.userid.clone()), Some(remote.host.clone()), remote.port, remote_store.to_string());
+
+let client = HttpClient::new(&src_repo.host(), src_repo.port(), &src_repo.user(), options)?;
 let _auth_info = client.login() // make sure we can auth
 .await
 .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;

-let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), remote_store.to_string());
-
 Ok((client, src_repo, tgt_store))
 }
@@ -14,7 +14,7 @@ use crate::api2::types::*;
 use crate::backup::*;
 use crate::server::{WorkerTask, H2Service};
 use crate::tools;
-use crate::config::acl::PRIV_DATASTORE_READ;
+use crate::config::acl::{PRIV_DATASTORE_READ, PRIV_DATASTORE_BACKUP};
 use crate::config::cached_user_info::CachedUserInfo;
 use crate::api2::helpers;

@@ -58,7 +58,15 @@ fn upgrade_to_backup_reader_protocol(
 let store = tools::required_string_param(&param, "store")?.to_owned();

 let user_info = CachedUserInfo::new()?;
-user_info.check_privs(&userid, &["datastore", &store], PRIV_DATASTORE_READ, false)?;
+let privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+
+let priv_read = privs & PRIV_DATASTORE_READ != 0;
+let priv_backup = privs & PRIV_DATASTORE_BACKUP != 0;
+
+// priv_backup needs owner check further down below!
+if !priv_read && !priv_backup {
+bail!("no permissions on /datastore/{}", store);
+}

 let datastore = DataStore::lookup_datastore(&store)?;

@@ -83,6 +91,13 @@ fn upgrade_to_backup_reader_protocol(
 let env_type = rpcenv.env_type();

 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+if !priv_read {
+let owner = datastore.get_owner(backup_dir.group())?;
+if owner != userid {
+bail!("backup owner check failed!");
+}
+}
+
 let path = datastore.base_path();

 //let files = BackupInfo::list_files(&path, &backup_dir)?;
@@ -194,6 +209,27 @@ fn download_file(
 path.push(&file_name);

 env.log(format!("download {:?}", path.clone()));
+
+let index: Option<Box<dyn IndexFile + Send>> = match archive_type(&file_name)? {
+ArchiveType::FixedIndex => {
+let index = env.datastore.open_fixed_reader(&path)?;
+Some(Box::new(index))
+}
+ArchiveType::DynamicIndex => {
+let index = env.datastore.open_dynamic_reader(&path)?;
+Some(Box::new(index))
+}
+_ => { None }
+};
+
+if let Some(index) = index {
+env.log(format!("register chunks in '{}' as downloadable.", file_name));
+
+for pos in 0..index.index_count() {
+let info = index.chunk_info(pos).unwrap();
+env.register_chunk(info.digest);
+}
+}
+
 helpers::create_download_response(path).await
 }.boxed()
@@ -224,6 +260,11 @@ fn download_chunk(
 let digest_str = tools::required_string_param(&param, "digest")?;
 let digest = proxmox::tools::hex_to_digest(digest_str)?;
+
+if !env.check_chunk_access(digest) {
+env.log(format!("attempted to download chunk {} which is not in registered chunk list", digest_str));
+return Err(http_err!(UNAUTHORIZED, "download chunk {} not allowed", digest_str));
+}

 let (path, _) = env.datastore.chunk_path(&digest);
 let path2 = path.clone();

@@ -286,7 +327,7 @@ fn download_chunk_old(

 pub const API_METHOD_SPEEDTEST: ApiMethod = ApiMethod::new(
 &ApiHandler::AsyncHttp(&speedtest),
-&ObjectSchema::new("Test 4M block download speed.", &[])
+&ObjectSchema::new("Test 1M block download speed.", &[])
 );

 fn speedtest(
@@ -1,5 +1,5 @@
-//use anyhow::{bail, format_err, Error};
+use std::sync::{Arc,RwLock};
-use std::sync::Arc;
+use std::collections::HashSet;

 use serde_json::{json, Value};

@@ -23,7 +23,7 @@ pub struct ReaderEnvironment {
 pub worker: Arc<WorkerTask>,
 pub datastore: Arc<DataStore>,
 pub backup_dir: BackupDir,
-// state: Arc<Mutex<SharedBackupState>>
+allowed_chunks: Arc<RwLock<HashSet<[u8;32]>>>,
 }

 impl ReaderEnvironment {
@@ -45,7 +45,7 @@ impl ReaderEnvironment {
 debug: false,
 formatter: &JSON_FORMATTER,
 backup_dir,
-//state: Arc::new(Mutex::new(state)),
+allowed_chunks: Arc::new(RwLock::new(HashSet::new())),
 }
 }

@@ -57,6 +57,15 @@ impl ReaderEnvironment {
 if self.debug { self.worker.log(msg); }
 }

+
+pub fn register_chunk(&self, digest: [u8;32]) {
+let mut allowed_chunks = self.allowed_chunks.write().unwrap();
+allowed_chunks.insert(digest);
+}
+
+pub fn check_chunk_access(&self, digest: [u8;32]) -> bool {
+self.allowed_chunks.read().unwrap().contains(&digest)
+}
 }

 impl RpcEnvironment for ReaderEnvironment {
@@ -17,6 +17,7 @@ use crate::api2::types::{
 RRDMode,
 RRDTimeFrameResolution,
 TaskListItem,
+TaskStateType,
 Userid,
 };

@@ -182,10 +183,23 @@ fn datastore_status(
 input: {
 properties: {
 since: {
-type: u64,
+type: i64,
 description: "Only list tasks since this UNIX epoch.",
 optional: true,
 },
+typefilter: {
+optional: true,
+type: String,
+description: "Only list tasks, whose type contains this string.",
+},
+statusfilter: {
+optional: true,
+type: Array,
+description: "Only list tasks which have any one of the listed status.",
+items: {
+type: TaskStateType,
+},
+},
 },
 },
 returns: {
@@ -200,6 +214,9 @@ fn datastore_status(
 )]
 /// List tasks.
 pub fn list_tasks(
+since: Option<i64>,
+typefilter: Option<String>,
+statusfilter: Option<Vec<TaskStateType>>,
 _param: Value,
 rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<TaskListItem>, Error> {
@@ -209,13 +226,49 @@ pub fn list_tasks(
 let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);

 let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;
+let since = since.unwrap_or_else(|| 0);

-// TODO: replace with call that gets all task since 'since' epoch
+let list: Vec<TaskListItem> = server::TaskListInfoIterator::new(false)?
-let list: Vec<TaskListItem> = server::read_task_list()?
+.take_while(|info| {
-.into_iter()
+match info {
-.map(TaskListItem::from)
+Ok(info) => info.upid.starttime > since,
-.filter(|entry| list_all || entry.user == userid)
+Err(_) => false
-.collect();
+}
+})
+.filter_map(|info| {
+match info {
+Ok(info) => {
+if list_all || info.upid.userid == userid {
+if let Some(filter) = &typefilter {
+if !info.upid.worker_type.contains(filter) {
+return None;
+}
+}
+
+if let Some(filters) = &statusfilter {
+if let Some(state) = &info.state {
+let statetype = match state {
+server::TaskState::OK { .. } => TaskStateType::OK,
+server::TaskState::Unknown { .. } => TaskStateType::Unknown,
+server::TaskState::Error { .. } => TaskStateType::Error,
+server::TaskState::Warning { .. } => TaskStateType::Warning,
+};
+
+if !filters.contains(&statetype) {
+return None;
+}
+}
+}
+
+Some(Ok(TaskListItem::from(info)))
+} else {
+None
+}
+}
+Err(err) => Some(Err(err))
+}
+})
+.collect::<Result<Vec<TaskListItem>, Error>>()?;

 Ok(list.into())
 }
@@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize};

 use proxmox::api::{api, schema::*};
 use proxmox::const_regex;
-use proxmox::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};
+use proxmox::{IPRE, IPRE_BRACKET, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};

 use crate::backup::CryptMode;
 use crate::server::UPID;
@@ -30,7 +30,7 @@ pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
 });

 macro_rules! DNS_LABEL { () => (r"(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
-macro_rules! DNS_NAME { () => (concat!(r"(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!())) }
+macro_rules! DNS_NAME { () => (concat!(r"(?:(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!(), ")")) }

 macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) }
 macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) }
@@ -63,9 +63,9 @@ const_regex!{

 pub DNS_NAME_REGEX = concat!(r"^", DNS_NAME!(), r"$");

-pub DNS_NAME_OR_IP_REGEX = concat!(r"^", DNS_NAME!(), "|", IPRE!(), r"$");
+pub DNS_NAME_OR_IP_REGEX = concat!(r"^(?:", DNS_NAME!(), "|", IPRE!(), r")$");

-pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE!() ,"):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");
+pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE_BRACKET!() ,"):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");

 pub CERT_FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";

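The DNS_NAME_OR_IP_REGEX fix above is a precedence issue: `|` binds more loosely than concatenation, so ^NAME|IP$ means "starts with NAME" or "ends with IP" rather than a fully anchored alternative; wrapping the alternation in a non-capturing group restores whole-string matching. A quick check with the regex crate, using simplified stand-ins for the DNS_NAME!()/IPRE!() sub-patterns:

    use regex::Regex;

    fn main() {
        // simplified stand-ins for the real DNS name / IP sub-patterns
        let broken = Regex::new(r"^[a-z]+|\d+\.\d+\.\d+\.\d+$").unwrap();
        let fixed = Regex::new(r"^(?:[a-z]+|\d+\.\d+\.\d+\.\d+)$").unwrap();

        // the ungrouped pattern accepts trailing junk: only "^[a-z]+" has to match
        assert!(broken.is_match("host with trailing junk"));
        // the grouped pattern anchors the whole alternation
        assert!(!fixed.is_match("host with trailing junk"));
        assert!(fixed.is_match("192.168.0.1"));
    }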
@@ -662,6 +662,20 @@ impl From<crate::server::TaskListInfo> for TaskListItem {
 }
 }

+#[api()]
+#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
+#[serde(rename_all = "lowercase")]
+pub enum TaskStateType {
+/// Ok
+OK,
+/// Warning
+Warning,
+/// Error
+Error,
+/// Unknown
+Unknown,
+}
+
 #[api()]
 #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "lowercase")]
@@ -131,13 +131,13 @@ impl std::ops::Deref for Username {

 impl Borrow<UsernameRef> for Username {
 fn borrow(&self) -> &UsernameRef {
-UsernameRef::new(self.as_str())
+UsernameRef::new(self.0.as_str())
 }
 }

 impl AsRef<UsernameRef> for Username {
 fn as_ref(&self) -> &UsernameRef {
-UsernameRef::new(self.as_str())
+self.borrow()
 }
 }

@@ -204,13 +204,13 @@ impl std::ops::Deref for Realm {

 impl Borrow<RealmRef> for Realm {
 fn borrow(&self) -> &RealmRef {
-RealmRef::new(self.as_str())
+RealmRef::new(self.0.as_str())
 }
 }

 impl AsRef<RealmRef> for Realm {
 fn as_ref(&self) -> &RealmRef {
-RealmRef::new(self.as_str())
+self.borrow()
 }
 }

@@ -397,10 +397,7 @@ impl TryFrom<String> for Userid {

 impl PartialEq<str> for Userid {
 fn eq(&self, rhs: &str) -> bool {
-rhs.len() > self.name_len + 2 // make sure range access below is allowed
+self.data == *rhs
-&& rhs.starts_with(self.name().as_str())
-&& rhs.as_bytes()[self.name_len] == b'@'
-&& &rhs[(self.name_len + 1)..] == self.realm().as_str()
 }
 }

@@ -6,7 +6,6 @@ use std::process::{Command, Stdio};
 use std::io::Write;
 use std::ffi::{CString, CStr};

-use base64;
 use anyhow::{bail, format_err, Error};
 use serde_json::json;

@@ -25,8 +24,7 @@ impl ProxmoxAuthenticator for PAM {
 let mut auth = pam::Authenticator::with_password("proxmox-backup-auth").unwrap();
 auth.get_handler().set_credentials(username.as_str(), password);
 auth.authenticate()?;
-return Ok(());
+Ok(())
-
 }

 fn store_password(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
@@ -99,7 +97,7 @@ pub fn encrypt_pw(password: &str) -> Result<String, Error> {

 pub fn verify_crypt_pw(password: &str, enc_password: &str) -> Result<(), Error> {
 let verify = crypt(password.as_bytes(), enc_password)?;
-if &verify != enc_password {
+if verify != enc_password {
 bail!("invalid credentials");
 }
 Ok(())
@@ -11,7 +11,7 @@ use crate::tools;
 use crate::api2::types::GarbageCollectionStatus;

 use super::DataBlob;
-use crate::server::WorkerTask;
+use crate::task::TaskState;

 /// File system based chunk store
 pub struct ChunkStore {
@@ -278,7 +278,7 @@ impl ChunkStore {
 oldest_writer: i64,
 phase1_start_time: i64,
 status: &mut GarbageCollectionStatus,
-worker: &WorkerTask,
+worker: &dyn TaskState,
 ) -> Result<(), Error> {
 use nix::sys::stat::fstatat;
 use nix::unistd::{unlinkat, UnlinkatFlags};
@@ -297,10 +297,15 @@ impl ChunkStore {
 for (entry, percentage, bad) in self.get_chunk_iterator()? {
 if last_percentage != percentage {
 last_percentage = percentage;
-worker.log(format!("percentage done: phase2 {}% (processed {} chunks)", percentage, chunk_count));
+crate::task_log!(
+worker,
+"percentage done: phase2 {}% (processed {} chunks)",
+percentage,
+chunk_count,
+);
 }

-worker.fail_on_abort()?;
+worker.check_abort()?;
 tools::fail_on_shutdown()?;

 let (dirfd, entry) = match entry {
@@ -334,12 +339,13 @@ impl ChunkStore {
 Ok(_) => {
 match unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir) {
 Err(err) =>
-worker.warn(format!(
+crate::task_warn!(
+worker,
 "unlinking corrupt chunk {:?} failed on store '{}' - {}",
 filename,
 self.name,
 err,
-)),
+),
 Ok(_) => {
 status.removed_bad += 1;
 status.removed_bytes += stat.st_size as u64;
@@ -351,11 +357,12 @@ impl ChunkStore {
 },
 Err(err) => {
 // some other error, warn user and keep .bad file around too
-worker.warn(format!(
+crate::task_warn!(
+worker,
 "error during stat on '{:?}' - {}",
 orig_filename,
 err,
-));
+);
 }
 }
 } else if stat.st_atime < min_atime {
@@ -18,11 +18,12 @@ use super::manifest::{MANIFEST_BLOB_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
 use super::index::*;
 use super::{DataBlob, ArchiveType, archive_type};
 use crate::config::datastore;
-use crate::server::WorkerTask;
+use crate::task::TaskState;
 use crate::tools;
 use crate::tools::format::HumanByte;
 use crate::tools::fs::{lock_dir_noblock, DirLockGuard};
 use crate::api2::types::{GarbageCollectionStatus, Userid};
+use crate::server::UPID;

 lazy_static! {
 static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
@@ -411,25 +412,34 @@ impl DataStore {
 index: I,
 file_name: &Path, // only used for error reporting
 status: &mut GarbageCollectionStatus,
-worker: &WorkerTask,
+worker: &dyn TaskState,
 ) -> Result<(), Error> {

 status.index_file_count += 1;
 status.index_data_bytes += index.index_bytes();

 for pos in 0..index.index_count() {
-worker.fail_on_abort()?;
+worker.check_abort()?;
 tools::fail_on_shutdown()?;
 let digest = index.index_digest(pos).unwrap();
 if let Err(err) = self.chunk_store.touch_chunk(digest) {
-worker.warn(&format!("warning: unable to access chunk {}, required by {:?} - {}",
+crate::task_warn!(
-proxmox::tools::digest_to_hex(digest), file_name, err));
+worker,
+"warning: unable to access chunk {}, required by {:?} - {}",
+proxmox::tools::digest_to_hex(digest),
+file_name,
+err,
+);
 }
 }
 Ok(())
 }

-fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus, worker: &WorkerTask) -> Result<(), Error> {
+fn mark_used_chunks(
+&self,
+status: &mut GarbageCollectionStatus,
+worker: &dyn TaskState,
+) -> Result<(), Error> {

 let image_list = self.list_images()?;

@@ -441,7 +451,7 @@ impl DataStore {

 for path in image_list {

-worker.fail_on_abort()?;
+worker.check_abort()?;
 tools::fail_on_shutdown()?;

 if let Ok(archive_type) = archive_type(&path) {
@@ -457,8 +467,13 @@ impl DataStore {

 let percentage = done*100/image_count;
 if percentage > last_percentage {
-worker.log(format!("percentage done: phase1 {}% ({} of {} index files)",
+crate::task_log!(
-percentage, done, image_count));
+worker,
+"percentage done: phase1 {}% ({} of {} index files)",
+percentage,
+done,
+image_count,
+);
 last_percentage = percentage;
 }
 }
@@ -474,46 +489,72 @@ impl DataStore {
 if let Ok(_) = self.gc_mutex.try_lock() { false } else { true }
 }

-pub fn garbage_collection(&self, worker: &WorkerTask) -> Result<(), Error> {
+pub fn garbage_collection(&self, worker: &dyn TaskState, upid: &UPID) -> Result<(), Error> {

 if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {

+// avoids that we run GC if an old daemon process has still a
+// running backup writer, which is not save as we have no "oldest
+// writer" information and thus no safe atime cutoff
 let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;

-let phase1_start_time = unsafe { libc::time(std::ptr::null_mut()) };
+let phase1_start_time = proxmox::tools::time::epoch_i64();
 let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(phase1_start_time);

 let mut gc_status = GarbageCollectionStatus::default();
-gc_status.upid = Some(worker.to_string());
+gc_status.upid = Some(upid.to_string());

-worker.log("Start GC phase1 (mark used chunks)");
+crate::task_log!(worker, "Start GC phase1 (mark used chunks)");

-self.mark_used_chunks(&mut gc_status, &worker)?;
+self.mark_used_chunks(&mut gc_status, worker)?;

-worker.log("Start GC phase2 (sweep unused chunks)");
+crate::task_log!(worker, "Start GC phase2 (sweep unused chunks)");
-self.chunk_store.sweep_unused_chunks(oldest_writer, phase1_start_time, &mut gc_status, &worker)?;
+self.chunk_store.sweep_unused_chunks(
+oldest_writer,
+phase1_start_time,
+&mut gc_status,
+worker,
+)?;
+
-worker.log(&format!("Removed garbage: {}", HumanByte::from(gc_status.removed_bytes)));
+crate::task_log!(
-worker.log(&format!("Removed chunks: {}", gc_status.removed_chunks));
+worker,
+"Removed garbage: {}",
+HumanByte::from(gc_status.removed_bytes),
+);
+crate::task_log!(worker, "Removed chunks: {}", gc_status.removed_chunks);
 if gc_status.pending_bytes > 0 {
-worker.log(&format!("Pending removals: {} (in {} chunks)", HumanByte::from(gc_status.pending_bytes), gc_status.pending_chunks));
+crate::task_log!(
+worker,
+"Pending removals: {} (in {} chunks)",
+HumanByte::from(gc_status.pending_bytes),
+gc_status.pending_chunks,
+);
 }
 if gc_status.removed_bad > 0 {
-worker.log(&format!("Removed bad files: {}", gc_status.removed_bad));
+crate::task_log!(worker, "Removed bad files: {}", gc_status.removed_bad);
 }

-worker.log(&format!("Original data usage: {}", HumanByte::from(gc_status.index_data_bytes)));
+crate::task_log!(
+worker,
+"Original data usage: {}",
+HumanByte::from(gc_status.index_data_bytes),
+);
+
 if gc_status.index_data_bytes > 0 {
 let comp_per = (gc_status.disk_bytes as f64 * 100.)/gc_status.index_data_bytes as f64;
-worker.log(&format!("On-Disk usage: {} ({:.2}%)", HumanByte::from(gc_status.disk_bytes), comp_per));
+crate::task_log!(
+worker,
+"On-Disk usage: {} ({:.2}%)",
+HumanByte::from(gc_status.disk_bytes),
+comp_per,
+);
 }

-worker.log(&format!("On-Disk chunks: {}", gc_status.disk_chunks));
+crate::task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);

 if gc_status.disk_chunks > 0 {
 let avg_chunk = gc_status.disk_bytes/(gc_status.disk_chunks as u64);
-worker.log(&format!("Average chunk size: {}", HumanByte::from(avg_chunk)));
+crate::task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
 }

 *self.last_gc_status.lock().unwrap() = gc_status;
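The chunk store and datastore hunks above replace concrete &WorkerTask parameters with &dyn TaskState and route output through crate::task_log!/task_warn!, so the GC helpers only depend on a small abort/logging interface. A rough, self-contained sketch of that pattern; the trait and macro here are simplified assumptions, not the exact definitions from proxmox-backup's task module:

    use anyhow::{bail, Error};

    // Minimal stand-in for the TaskState interface the GC helpers now depend on.
    trait TaskState {
        fn check_abort(&self) -> Result<(), Error>;
        fn log(&self, message: String);
    }

    // Stand-in for crate::task_log!: forwards formatted text to the trait object.
    macro_rules! task_log {
        ($task:expr, $($fmt:tt)+) => {
            $task.log(format!($($fmt)+))
        };
    }

    struct PrintLogger {
        abort_requested: bool,
    }

    impl TaskState for PrintLogger {
        fn check_abort(&self) -> Result<(), Error> {
            if self.abort_requested {
                bail!("abort requested");
            }
            Ok(())
        }
        fn log(&self, message: String) {
            println!("{}", message);
        }
    }

    // A GC-style helper only needs the trait object, not a concrete worker type.
    fn sweep(worker: &dyn TaskState, chunks: usize) -> Result<(), Error> {
        worker.check_abort()?;
        task_log!(worker, "sweeping {} chunks", chunks);
        Ok(())
    }

    fn main() -> Result<(), Error> {
        let worker = PrintLogger { abort_requested: false };
        sweep(&worker, 42)
    }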
@ -90,12 +90,6 @@ impl DynamicIndexReader {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn new(mut file: std::fs::File) -> Result<Self, Error> {
|
pub fn new(mut file: std::fs::File) -> Result<Self, Error> {
|
||||||
if let Err(err) =
|
|
||||||
nix::fcntl::flock(file.as_raw_fd(), nix::fcntl::FlockArg::LockSharedNonblock)
|
|
||||||
{
|
|
||||||
bail!("unable to get shared lock - {}", err);
|
|
||||||
}
|
|
||||||
|
|
||||||
// FIXME: This is NOT OUR job! Check the callers of this method and remove this!
|
// FIXME: This is NOT OUR job! Check the callers of this method and remove this!
|
||||||
file.seek(SeekFrom::Start(0))?;
|
file.seek(SeekFrom::Start(0))?;
|
||||||
|
|
||||||
|
@ -65,12 +65,6 @@ impl FixedIndexReader {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn new(mut file: std::fs::File) -> Result<Self, Error> {
|
pub fn new(mut file: std::fs::File) -> Result<Self, Error> {
|
||||||
if let Err(err) =
|
|
||||||
nix::fcntl::flock(file.as_raw_fd(), nix::fcntl::FlockArg::LockSharedNonblock)
|
|
||||||
{
|
|
||||||
bail!("unable to get shared lock - {}", err);
|
|
||||||
}
|
|
||||||
|
|
||||||
file.seek(SeekFrom::Start(0))?;
|
file.seek(SeekFrom::Start(0))?;
|
||||||
|
|
||||||
let header_size = std::mem::size_of::<FixedIndexHeader>();
|
let header_size = std::mem::size_of::<FixedIndexHeader>();
|
||||||
|
@@ -5,13 +5,24 @@ use std::time::Instant;

 use anyhow::{bail, format_err, Error};

-use crate::server::WorkerTask;
-use crate::api2::types::*;
-
-use super::{
-    DataStore, DataBlob, BackupGroup, BackupDir, BackupInfo, IndexFile,
-    CryptMode,
-    FileInfo, ArchiveType, archive_type,
+use crate::{
+    api2::types::*,
+    backup::{
+        DataStore,
+        DataBlob,
+        BackupGroup,
+        BackupDir,
+        BackupInfo,
+        IndexFile,
+        CryptMode,
+        FileInfo,
+        ArchiveType,
+        archive_type,
+    },
+    server::UPID,
+    task::TaskState,
+    task_log,
+    tools::ParallelHandler,
 };

 fn verify_blob(datastore: Arc<DataStore>, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
@@ -42,7 +53,7 @@ fn verify_blob(datastore: Arc<DataStore>, backup_dir: &BackupDir, info: &FileInf
 fn rename_corrupted_chunk(
     datastore: Arc<DataStore>,
     digest: &[u8;32],
-    worker: Arc<WorkerTask>,
+    worker: &dyn TaskState,
 ) {
     let (path, digest_str) = datastore.chunk_path(digest);

@@ -55,133 +66,112 @@ fn rename_corrupted_chunk(

     match std::fs::rename(&path, &new_path) {
         Ok(_) => {
-            worker.log(format!("corrupted chunk renamed to {:?}", &new_path));
+            task_log!(worker, "corrupted chunk renamed to {:?}", &new_path);
         },
         Err(err) => {
             match err.kind() {
                 std::io::ErrorKind::NotFound => { /* ignored */ },
-                _ => worker.log(format!("could not rename corrupted chunk {:?} - {}", &path, err))
+                _ => task_log!(worker, "could not rename corrupted chunk {:?} - {}", &path, err)
             }
         }
     };
 }

-// We use a separate thread to read/load chunks, so that we can do
-// load and verify in parallel to increase performance.
-fn chunk_reader_thread(
-    datastore: Arc<DataStore>,
-    index: Box<dyn IndexFile + Send>,
-    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
-    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
-    errors: Arc<AtomicUsize>,
-    worker: Arc<WorkerTask>,
-) -> std::sync::mpsc::Receiver<(DataBlob, [u8;32], u64)> {
-
-    let (sender, receiver) = std::sync::mpsc::sync_channel(3); // buffer up to 3 chunks
-
-    std::thread::spawn(move|| {
-        for pos in 0..index.index_count() {
-            let info = index.chunk_info(pos).unwrap();
-            let size = info.range.end - info.range.start;
-
-            if verified_chunks.lock().unwrap().contains(&info.digest) {
-                continue; // already verified
-            }
-
-            if corrupt_chunks.lock().unwrap().contains(&info.digest) {
-                let digest_str = proxmox::tools::digest_to_hex(&info.digest);
-                worker.log(format!("chunk {} was marked as corrupt", digest_str));
-                errors.fetch_add(1, Ordering::SeqCst);
-                continue;
-            }
-
-            match datastore.load_chunk(&info.digest) {
-                Err(err) => {
-                    corrupt_chunks.lock().unwrap().insert(info.digest);
-                    worker.log(format!("can't verify chunk, load failed - {}", err));
-                    errors.fetch_add(1, Ordering::SeqCst);
-                    rename_corrupted_chunk(datastore.clone(), &info.digest, worker.clone());
-                    continue;
-                }
-                Ok(chunk) => {
-                    if sender.send((chunk, info.digest, size)).is_err() {
-                        break; // receiver gone - simply stop
-                    }
-                }
-            }
-        }
-    });
-
-    receiver
-}
-
 fn verify_index_chunks(
     datastore: Arc<DataStore>,
     index: Box<dyn IndexFile + Send>,
     verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
     corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
     crypt_mode: CryptMode,
-    worker: Arc<WorkerTask>,
+    worker: Arc<dyn TaskState + Send + Sync>,
 ) -> Result<(), Error> {

     let errors = Arc::new(AtomicUsize::new(0));

     let start_time = Instant::now();

-    let chunk_channel = chunk_reader_thread(
-        datastore.clone(),
-        index,
-        verified_chunks.clone(),
-        corrupt_chunks.clone(),
-        errors.clone(),
-        worker.clone(),
-    );
-
     let mut read_bytes = 0;
     let mut decoded_bytes = 0;

-    loop {
-
-        worker.fail_on_abort()?;
-        crate::tools::fail_on_shutdown()?;
-
-        let (chunk, digest, size) = match chunk_channel.recv() {
-            Ok(tuple) => tuple,
-            Err(std::sync::mpsc::RecvError) => break,
-        };
-
-        read_bytes += chunk.raw_size();
-        decoded_bytes += size;
-
-        let chunk_crypt_mode = match chunk.crypt_mode() {
-            Err(err) => {
-                corrupt_chunks.lock().unwrap().insert(digest);
-                worker.log(format!("can't verify chunk, unknown CryptMode - {}", err));
-                errors.fetch_add(1, Ordering::SeqCst);
-                continue;
-            },
-            Ok(mode) => mode,
-        };
-
-        if chunk_crypt_mode != crypt_mode {
-            worker.log(format!(
-                "chunk CryptMode {:?} does not match index CryptMode {:?}",
-                chunk_crypt_mode,
-                crypt_mode
-            ));
-            errors.fetch_add(1, Ordering::SeqCst);
-        }
-
-        if let Err(err) = chunk.verify_unencrypted(size as usize, &digest) {
-            corrupt_chunks.lock().unwrap().insert(digest);
-            worker.log(format!("{}", err));
-            errors.fetch_add(1, Ordering::SeqCst);
-            rename_corrupted_chunk(datastore.clone(), &digest, worker.clone());
-        } else {
-            verified_chunks.lock().unwrap().insert(digest);
-        }
-    }
+    let worker2 = Arc::clone(&worker);
+    let datastore2 = Arc::clone(&datastore);
+    let corrupt_chunks2 = Arc::clone(&corrupt_chunks);
+    let verified_chunks2 = Arc::clone(&verified_chunks);
+    let errors2 = Arc::clone(&errors);
+
+    let decoder_pool = ParallelHandler::new(
+        "verify chunk decoder", 4,
+        move |(chunk, digest, size): (DataBlob, [u8;32], u64)| {
+            let chunk_crypt_mode = match chunk.crypt_mode() {
+                Err(err) => {
+                    corrupt_chunks2.lock().unwrap().insert(digest);
+                    task_log!(worker2, "can't verify chunk, unknown CryptMode - {}", err);
+                    errors2.fetch_add(1, Ordering::SeqCst);
+                    return Ok(());
+                },
+                Ok(mode) => mode,
+            };
+
+            if chunk_crypt_mode != crypt_mode {
+                task_log!(
+                    worker2,
+                    "chunk CryptMode {:?} does not match index CryptMode {:?}",
+                    chunk_crypt_mode,
+                    crypt_mode
+                );
+                errors2.fetch_add(1, Ordering::SeqCst);
+            }
+
+            if let Err(err) = chunk.verify_unencrypted(size as usize, &digest) {
+                corrupt_chunks2.lock().unwrap().insert(digest);
+                task_log!(worker2, "{}", err);
+                errors2.fetch_add(1, Ordering::SeqCst);
+                rename_corrupted_chunk(datastore2.clone(), &digest, &worker2);
+            } else {
+                verified_chunks2.lock().unwrap().insert(digest);
+            }
+
+            Ok(())
+        }
+    );
+
+    for pos in 0..index.index_count() {
+        worker.check_abort()?;
+        crate::tools::fail_on_shutdown()?;
+
+        let info = index.chunk_info(pos).unwrap();
+        let size = info.size();
+
+        if verified_chunks.lock().unwrap().contains(&info.digest) {
+            continue; // already verified
+        }
+
+        if corrupt_chunks.lock().unwrap().contains(&info.digest) {
+            let digest_str = proxmox::tools::digest_to_hex(&info.digest);
+            task_log!(worker, "chunk {} was marked as corrupt", digest_str);
+            errors.fetch_add(1, Ordering::SeqCst);
+            continue;
+        }
+
+        match datastore.load_chunk(&info.digest) {
+            Err(err) => {
+                corrupt_chunks.lock().unwrap().insert(info.digest);
+                task_log!(worker, "can't verify chunk, load failed - {}", err);
+                errors.fetch_add(1, Ordering::SeqCst);
+                rename_corrupted_chunk(datastore.clone(), &info.digest, &worker);
+                continue;
+            }
+            Ok(chunk) => {
+                read_bytes += chunk.raw_size();
+                decoder_pool.send((chunk, info.digest, size))?;
+                decoded_bytes += size;
+            }
+        }
+    }
+
+    decoder_pool.complete()?;

     let elapsed = start_time.elapsed().as_secs_f64();

     let read_bytes_mib = (read_bytes as f64)/(1024.0*1024.0);
@@ -192,8 +182,16 @@ fn verify_index_chunks(

     let error_count = errors.load(Ordering::SeqCst);

-    worker.log(format!(" verified {:.2}/{:.2} MiB in {:.2} seconds, speed {:.2}/{:.2} MiB/s ({} errors)",
-        read_bytes_mib, decoded_bytes_mib, elapsed, read_speed, decode_speed, error_count));
+    task_log!(
+        worker,
+        " verified {:.2}/{:.2} MiB in {:.2} seconds, speed {:.2}/{:.2} MiB/s ({} errors)",
+        read_bytes_mib,
+        decoded_bytes_mib,
+        elapsed,
+        read_speed,
+        decode_speed,
+        error_count,
+    );

     if errors.load(Ordering::SeqCst) > 0 {
         bail!("chunks could not be verified");
@@ -208,7 +206,7 @@ fn verify_fixed_index(
     info: &FileInfo,
     verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
     corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
-    worker: Arc<WorkerTask>,
+    worker: Arc<dyn TaskState + Send + Sync>,
 ) -> Result<(), Error> {

     let mut path = backup_dir.relative_path();
@@ -225,7 +223,14 @@ fn verify_fixed_index(
         bail!("wrong index checksum");
     }

-    verify_index_chunks(datastore, Box::new(index), verified_chunks, corrupt_chunks, info.chunk_crypt_mode(), worker)
+    verify_index_chunks(
+        datastore,
+        Box::new(index),
+        verified_chunks,
+        corrupt_chunks,
+        info.chunk_crypt_mode(),
+        worker,
+    )
 }

 fn verify_dynamic_index(
@@ -234,7 +239,7 @@ fn verify_dynamic_index(
     info: &FileInfo,
     verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
     corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
-    worker: Arc<WorkerTask>,
+    worker: Arc<dyn TaskState + Send + Sync>,
 ) -> Result<(), Error> {

     let mut path = backup_dir.relative_path();
@@ -251,7 +256,14 @@ fn verify_dynamic_index(
         bail!("wrong index checksum");
     }

-    verify_index_chunks(datastore, Box::new(index), verified_chunks, corrupt_chunks, info.chunk_crypt_mode(), worker)
+    verify_index_chunks(
+        datastore,
+        Box::new(index),
+        verified_chunks,
+        corrupt_chunks,
+        info.chunk_crypt_mode(),
+        worker,
+    )
 }

 /// Verify a single backup snapshot
@@ -268,25 +280,32 @@ pub fn verify_backup_dir(
     backup_dir: &BackupDir,
     verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
     corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
-    worker: Arc<WorkerTask>
+    worker: Arc<dyn TaskState + Send + Sync>,
+    upid: UPID,
 ) -> Result<bool, Error> {

     let mut manifest = match datastore.load_manifest(&backup_dir) {
         Ok((manifest, _)) => manifest,
         Err(err) => {
-            worker.log(format!("verify {}:{} - manifest load error: {}", datastore.name(), backup_dir, err));
+            task_log!(
+                worker,
+                "verify {}:{} - manifest load error: {}",
+                datastore.name(),
+                backup_dir,
+                err,
+            );
             return Ok(false);
         }
     };

-    worker.log(format!("verify {}:{}", datastore.name(), backup_dir));
+    task_log!(worker, "verify {}:{}", datastore.name(), backup_dir);

     let mut error_count = 0;

     let mut verify_result = VerifyState::Ok;
     for info in manifest.files() {
         let result = proxmox::try_block!({
-            worker.log(format!(" check {}", info.filename));
+            task_log!(worker, " check {}", info.filename);
             match archive_type(&info.filename)? {
                 ArchiveType::FixedIndex =>
                     verify_fixed_index(
@@ -310,11 +329,18 @@ pub fn verify_backup_dir(
             }
         });

-        worker.fail_on_abort()?;
+        worker.check_abort()?;
         crate::tools::fail_on_shutdown()?;

         if let Err(err) = result {
-            worker.log(format!("verify {}:{}/{} failed: {}", datastore.name(), backup_dir, info.filename, err));
+            task_log!(
+                worker,
+                "verify {}:{}/{} failed: {}",
+                datastore.name(),
+                backup_dir,
+                info.filename,
+                err,
+            );
             error_count += 1;
             verify_result = VerifyState::Failed;
         }
@@ -323,7 +349,7 @@ pub fn verify_backup_dir(

     let verify_state = SnapshotVerifyState {
         state: verify_result,
-        upid: worker.upid().clone(),
+        upid,
     };
     manifest.unprotected["verify_state"] = serde_json::to_value(verify_state)?;
     datastore.store_manifest(&backup_dir, serde_json::to_value(manifest)?)
@@ -345,19 +371,26 @@ pub fn verify_backup_group(
     verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
     corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
     progress: Option<(usize, usize)>, // (done, snapshot_count)
-    worker: Arc<WorkerTask>,
+    worker: Arc<dyn TaskState + Send + Sync>,
+    upid: &UPID,
 ) -> Result<(usize, Vec<String>), Error> {

     let mut errors = Vec::new();
     let mut list = match group.list_backups(&datastore.base_path()) {
         Ok(list) => list,
         Err(err) => {
-            worker.log(format!("verify group {}:{} - unable to list backups: {}", datastore.name(), group, err));
+            task_log!(
+                worker,
+                "verify group {}:{} - unable to list backups: {}",
+                datastore.name(),
+                group,
+                err,
+            );
             return Ok((0, errors));
         }
     };

-    worker.log(format!("verify group {}:{}", datastore.name(), group));
+    task_log!(worker, "verify group {}:{}", datastore.name(), group);

     let (done, snapshot_count) = progress.unwrap_or((0, list.len()));

@@ -365,13 +398,26 @@ pub fn verify_backup_group(
     BackupInfo::sort_list(&mut list, false); // newest first
     for info in list {
         count += 1;
-        if !verify_backup_dir(datastore.clone(), &info.backup_dir, verified_chunks.clone(), corrupt_chunks.clone(), worker.clone())?{
+        if !verify_backup_dir(
+            datastore.clone(),
+            &info.backup_dir,
+            verified_chunks.clone(),
+            corrupt_chunks.clone(),
+            worker.clone(),
+            upid.clone(),
+        )? {
             errors.push(info.backup_dir.to_string());
         }
         if snapshot_count != 0 {
             let pos = done + count;
             let percentage = ((pos as f64) * 100.0)/(snapshot_count as f64);
-            worker.log(format!("percentage done: {:.2}% ({} of {} snapshots)", percentage, pos, snapshot_count));
+            task_log!(
+                worker,
+                "percentage done: {:.2}% ({} of {} snapshots)",
+                percentage,
+                pos,
+                snapshot_count,
+            );
         }
     }

@@ -385,8 +431,11 @@ pub fn verify_backup_group(
 /// Returns
 /// - Ok(failed_dirs) where failed_dirs had verification errors
 /// - Err(_) if task was aborted
-pub fn verify_all_backups(datastore: Arc<DataStore>, worker: Arc<WorkerTask>) -> Result<Vec<String>, Error> {
+pub fn verify_all_backups(
+    datastore: Arc<DataStore>,
+    worker: Arc<dyn TaskState + Send + Sync>,
+    upid: &UPID,
+) -> Result<Vec<String>, Error> {
     let mut errors = Vec::new();

     let mut list = match BackupGroup::list_groups(&datastore.base_path()) {
@@ -395,7 +444,12 @@ pub fn verify_all_backups(
             .filter(|group| !(group.backup_type() == "host" && group.backup_id() == "benchmark"))
             .collect::<Vec<BackupGroup>>(),
         Err(err) => {
-            worker.log(format!("verify datastore {} - unable to list backups: {}", datastore.name(), err));
+            task_log!(
+                worker,
+                "verify datastore {} - unable to list backups: {}",
+                datastore.name(),
+                err,
+            );
             return Ok(errors);
         }
     };
@@ -413,7 +467,7 @@ pub fn verify_all_backups(
     // start with 64 chunks since we assume there are few corrupt ones
     let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));

-    worker.log(format!("verify datastore {} ({} snapshots)", datastore.name(), snapshot_count));
+    task_log!(worker, "verify datastore {} ({} snapshots)", datastore.name(), snapshot_count);

     let mut done = 0;
     for group in list {
@@ -424,6 +478,7 @@ pub fn verify_all_backups(
             corrupt_chunks.clone(),
             Some((done, snapshot_count)),
             worker.clone(),
+            upid,
         )?;
         errors.append(&mut group_errors);
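Editor's note: the verification hunks above drop the single reader thread and instead keep chunk loading in the main loop while a ParallelHandler pool of four threads does the decode/digest work (ParallelHandler::new, .send(), .complete() as shown). The following is an illustrative sketch of that same load-serially/verify-in-parallel split using only std threads and a bounded channel; it is not the ParallelHandler implementation, and the checksum is a stand-in for verify_unencrypted.

use std::sync::{mpsc::sync_channel, Arc, Mutex};
use std::thread;

fn main() {
    // bounded channel, like the 3-chunk buffer the old reader thread used
    let (tx, rx) = sync_channel::<(usize, Vec<u8>)>(3);
    let rx = Arc::new(Mutex::new(rx));

    // small verification pool, mirroring the 4 "verify chunk decoder" threads
    let workers: Vec<_> = (0..4)
        .map(|_| {
            let rx = Arc::clone(&rx);
            thread::spawn(move || loop {
                // only the receive is serialized; the work below runs in parallel
                let item = rx.lock().unwrap().recv();
                match item {
                    Ok((pos, data)) => {
                        // stand-in for chunk.verify_unencrypted(size, &digest)
                        let checksum: u32 = data.iter().map(|b| *b as u32).sum();
                        println!("verified chunk {} ({} bytes, checksum {})", pos, data.len(), checksum);
                    }
                    Err(_) => break, // sender dropped: pool is complete
                }
            })
        })
        .collect();

    // the "main loop": load chunks serially and feed the pool
    for pos in 0..8 {
        tx.send((pos, vec![0u8; 1024])).unwrap(); // stand-in for load_chunk()
    }
    drop(tx); // equivalent of decoder_pool.complete()

    for w in workers {
        w.join().unwrap();
    }
}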
@@ -36,6 +36,7 @@ use proxmox_backup::api2::types::*;
 use proxmox_backup::api2::version;
 use proxmox_backup::client::*;
 use proxmox_backup::pxar::catalog::*;
+use proxmox_backup::config::user::complete_user_name;
 use proxmox_backup::backup::{
     archive_type,
     decrypt_key,
@@ -192,7 +193,7 @@ pub fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<
     result
 }

-fn connect(server: &str, userid: &Userid) -> Result<HttpClient, Error> {
+fn connect(server: &str, port: u16, userid: &Userid) -> Result<HttpClient, Error> {

     let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();

@@ -211,7 +212,7 @@ fn connect(server: &str, port: u16, userid: &Userid) -> Result<HttpClient, Error> {
         .fingerprint_cache(true)
         .ticket_cache(true);

-    HttpClient::new(server, userid, options)
+    HttpClient::new(server, port, userid, options)
 }

 async fn view_task_result(
@@ -365,7 +366,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {

     let repo = extract_repository_from_value(&param)?;

-    let client = connect(repo.host(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.user())?;

     let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

@@ -412,6 +413,45 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
     Ok(Value::Null)
 }

+#[api(
+    input: {
+        properties: {
+            repository: {
+                schema: REPO_URL_SCHEMA,
+                optional: true,
+            },
+            group: {
+                type: String,
+                description: "Backup group.",
+            },
+            "new-owner": {
+                type: Userid,
+            },
+        }
+    }
+)]
+/// Change owner of a backup group
+async fn change_backup_owner(group: String, mut param: Value) -> Result<(), Error> {
+
+    let repo = extract_repository_from_value(&param)?;
+
+    let mut client = connect(repo.host(), repo.port(), repo.user())?;
+
+    param.as_object_mut().unwrap().remove("repository");
+
+    let group: BackupGroup = group.parse()?;
+
+    param["backup-type"] = group.backup_type().into();
+    param["backup-id"] = group.backup_id().into();
+
+    let path = format!("api2/json/admin/datastore/{}/change-owner", repo.store());
+    client.post(&path, Some(param)).await?;
+
+    record_repository(&repo);
+
+    Ok(())
+}
+
 #[api(
     input: {
         properties: {
@@ -438,7 +478,7 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {

     let output_format = get_output_format(&param);

-    let client = connect(repo.host(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.user())?;

     let group: Option<BackupGroup> = if let Some(path) = param["group"].as_str() {
         Some(path.parse()?)
@@ -503,7 +543,7 @@ async fn forget_snapshots(param: Value) -> Result<Value, Error> {
     let path = tools::required_string_param(&param, "snapshot")?;
     let snapshot: BackupDir = path.parse()?;

-    let mut client = connect(repo.host(), repo.user())?;
+    let mut client = connect(repo.host(), repo.port(), repo.user())?;

     let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

@@ -533,7 +573,7 @@ async fn api_login(param: Value) -> Result<Value, Error> {

     let repo = extract_repository_from_value(&param)?;

-    let client = connect(repo.host(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.user())?;
     client.login().await?;

     record_repository(&repo);
@@ -590,7 +630,7 @@ async fn api_version(param: Value) -> Result<(), Error> {

     let repo = extract_repository_from_value(&param);
     if let Ok(repo) = repo {
-        let client = connect(repo.host(), repo.user())?;
+        let client = connect(repo.host(), repo.port(), repo.user())?;

         match client.get("api2/json/version", None).await {
             Ok(mut result) => version_info["server"] = result["data"].take(),
@@ -640,7 +680,7 @@ async fn list_snapshot_files(param: Value) -> Result<Value, Error> {

     let output_format = get_output_format(&param);

-    let client = connect(repo.host(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.user())?;

     let path = format!("api2/json/admin/datastore/{}/files", repo.store());

@@ -684,7 +724,7 @@ async fn start_garbage_collection(param: Value) -> Result<Value, Error> {

     let output_format = get_output_format(&param);

-    let mut client = connect(repo.host(), repo.user())?;
+    let mut client = connect(repo.host(), repo.port(), repo.user())?;

     let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

@@ -996,7 +1036,7 @@ async fn create_backup(

     let backup_time = backup_time_opt.unwrap_or_else(|| epoch_i64());

-    let client = connect(repo.host(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.user())?;
     record_repository(&repo);

     println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time)?);
@@ -1299,7 +1339,7 @@ async fn restore(param: Value) -> Result<Value, Error> {

     let archive_name = tools::required_string_param(&param, "archive-name")?;

-    let client = connect(repo.host(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.user())?;

     record_repository(&repo);

@@ -1472,7 +1512,7 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
     let snapshot = tools::required_string_param(&param, "snapshot")?;
     let snapshot: BackupDir = snapshot.parse()?;

-    let mut client = connect(repo.host(), repo.user())?;
+    let mut client = connect(repo.host(), repo.port(), repo.user())?;

     let (keydata, crypt_mode) = keyfile_parameters(&param)?;

@@ -1543,7 +1583,7 @@ fn prune<'a>(
 async fn prune_async(mut param: Value) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;

-    let mut client = connect(repo.host(), repo.user())?;
+    let mut client = connect(repo.host(), repo.port(), repo.user())?;

     let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

@@ -1626,7 +1666,7 @@ async fn status(param: Value) -> Result<Value, Error> {

     let output_format = get_output_format(&param);

-    let client = connect(repo.host(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.user())?;

     let path = format!("api2/json/admin/datastore/{}/status", repo.store());

@@ -1671,7 +1711,7 @@ async fn try_get(repo: &BackupRepository, url: &str) -> Value {
         .fingerprint_cache(true)
         .ticket_cache(true);

-    let client = match HttpClient::new(repo.host(), repo.user(), options) {
+    let client = match HttpClient::new(repo.host(), repo.port(), repo.user(), options) {
         Ok(v) => v,
         _ => return Value::Null,
     };
@@ -1817,17 +1857,29 @@ async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<St
 fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
     complete_server_file_name(arg, param)
         .iter()
-        .map(|v| tools::format::strip_server_file_expenstion(&v))
+        .map(|v| tools::format::strip_server_file_extension(&v))
         .collect()
 }

 pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
     complete_server_file_name(arg, param)
         .iter()
-        .filter_map(|v| {
-            let name = tools::format::strip_server_file_expenstion(&v);
-            if name.ends_with(".pxar") {
-                Some(name)
+        .filter_map(|name| {
+            if name.ends_with(".pxar.didx") {
+                Some(tools::format::strip_server_file_extension(name))
+            } else {
+                None
+            }
+        })
+        .collect()
+}
+
+pub fn complete_img_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+    complete_server_file_name(arg, param)
+        .iter()
+        .filter_map(|name| {
+            if name.ends_with(".img.fidx") {
+                Some(tools::format::strip_server_file_extension(name))
             } else {
                 None
             }
@@ -1955,6 +2007,12 @@ fn main() {
     let version_cmd_def = CliCommand::new(&API_METHOD_API_VERSION)
         .completion_cb("repository", complete_repository);

+    let change_owner_cmd_def = CliCommand::new(&API_METHOD_CHANGE_BACKUP_OWNER)
+        .arg_param(&["group", "new-owner"])
+        .completion_cb("group", complete_backup_group)
+        .completion_cb("new-owner", complete_user_name)
+        .completion_cb("repository", complete_repository);
+
     let cmd_def = CliCommandMap::new()
         .insert("backup", backup_cmd_def)
         .insert("upload-log", upload_log_cmd_def)
@@ -1970,10 +2028,13 @@ fn main() {
         .insert("status", status_cmd_def)
         .insert("key", key::cli())
         .insert("mount", mount_cmd_def())
+        .insert("map", map_cmd_def())
+        .insert("unmap", unmap_cmd_def())
         .insert("catalog", catalog_mgmt_cli())
         .insert("task", task_mgmt_cli())
         .insert("version", version_cmd_def)
-        .insert("benchmark", benchmark_cmd_def);
+        .insert("benchmark", benchmark_cmd_def)
+        .insert("change-owner", change_owner_cmd_def);

     let rpcenv = CliEnvironment::new();
     run_cli_command(cmd_def, rpcenv, Some(|future| {
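Editor's note: the client-side hunks above thread an explicit port through connect() and HttpClient::new(); the hunks below show 8007 used as the fallback when no port is configured (remote.port.unwrap_or(8007)). A hedged sketch of how an optional repository port might collapse to that default before reaching connect(); `Repo` is a stand-in struct, not the real BackupRepository type.

// stand-in type for illustration only
struct Repo {
    host: String,
    port: Option<u16>,
}

impl Repo {
    // mirrors the unwrap_or(8007) fallback seen in the manager hunk below
    fn port(&self) -> u16 {
        self.port.unwrap_or(8007)
    }
}

fn main() {
    let repo = Repo { host: "backup.example.org".into(), port: None };
    println!("would connect to {}:{}", repo.host, repo.port());
}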
@@ -62,10 +62,10 @@ fn connect() -> Result<HttpClient, Error> {
         let ticket = Ticket::new("PBS", Userid::root_userid())?
             .sign(private_auth_key(), None)?;
         options = options.password(Some(ticket));
-        HttpClient::new("localhost", Userid::root_userid(), options)?
+        HttpClient::new("localhost", 8007, Userid::root_userid(), options)?
     } else {
         options = options.ticket_cache(true).interactive(true);
-        HttpClient::new("localhost", Userid::root_userid(), options)?
+        HttpClient::new("localhost", 8007, Userid::root_userid(), options)?
     };

     Ok(client)
@@ -410,6 +410,7 @@ pub fn complete_remote_datastore_name(_arg: &str, param: &HashMap<String, String

     let client = HttpClient::new(
         &remote.host,
+        remote.port.unwrap_or(8007),
         &remote.userid,
         options,
     )?;
@@ -69,7 +69,7 @@ async fn run() -> Result<(), Error> {
     let key_path = configdir!("/proxy.key");
     let cert_path = configdir!("/proxy.pem");

-    let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
+    let mut acceptor = SslAcceptor::mozilla_intermediate_v5(SslMethod::tls()).unwrap();
     acceptor.set_private_key_file(key_path, SslFiletype::PEM)
         .map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
     acceptor.set_certificate_chain_file(cert_path)
@@ -198,6 +198,7 @@ async fn schedule_tasks() -> Result<(), Error> {
     schedule_datastore_prune().await;
     schedule_datastore_verification().await;
     schedule_datastore_sync_jobs().await;
+    schedule_task_log_rotate().await;

     Ok(())
 }
@@ -305,7 +306,7 @@ async fn schedule_datastore_garbage_collection() {
             worker.log(format!("starting garbage collection on store {}", store));
             worker.log(format!("task triggered by schedule '{}'", event_str));

-            let result = datastore.garbage_collection(&worker);
+            let result = datastore.garbage_collection(&*worker, worker.upid());

             let status = worker.create_state(&result);

@@ -556,7 +557,8 @@ async fn schedule_datastore_verification() {
             worker.log(format!("starting verification on store {}", store2));
             worker.log(format!("task triggered by schedule '{}'", event_str));
             let result = try_block!({
-                let failed_dirs = verify_all_backups(datastore, worker.clone())?;
+                let failed_dirs =
+                    verify_all_backups(datastore, worker.clone(), worker.upid())?;
                 if failed_dirs.len() > 0 {
                     worker.log("Failed to verify following snapshots:");
                     for dir in failed_dirs {
@@ -655,6 +657,101 @@ async fn schedule_datastore_sync_jobs() {
     }
 }

+async fn schedule_task_log_rotate() {
+    use proxmox_backup::{
+        config::jobstate::{self, Job},
+        server::rotate_task_log_archive,
+    };
+    use proxmox_backup::server::WorkerTask;
+    use proxmox_backup::tools::systemd::time::{
+        parse_calendar_event, compute_next_event};
+
+    let worker_type = "logrotate";
+    let job_id = "task-archive";
+
+    let last = match jobstate::last_run_time(worker_type, job_id) {
+        Ok(time) => time,
+        Err(err) => {
+            eprintln!("could not get last run time of task log archive rotation: {}", err);
+            return;
+        }
+    };
+
+    // schedule daily at 00:00 like normal logrotate
+    let schedule = "00:00";
+
+    let event = match parse_calendar_event(schedule) {
+        Ok(event) => event,
+        Err(err) => {
+            // should not happen?
+            eprintln!("unable to parse schedule '{}' - {}", schedule, err);
+            return;
+        }
+    };
+
+    let next = match compute_next_event(&event, last, false) {
+        Ok(Some(next)) => next,
+        Ok(None) => return,
+        Err(err) => {
+            eprintln!("compute_next_event for '{}' failed - {}", schedule, err);
+            return;
+        }
+    };
+
+    let now = proxmox::tools::time::epoch_i64();
+
+    if next > now {
+        // if we never ran the rotation, schedule instantly
+        match jobstate::JobState::load(worker_type, job_id) {
+            Ok(state) => match state {
+                jobstate::JobState::Created { .. } => {},
+                _ => return,
+            },
+            _ => return,
+        }
+    }
+
+    let mut job = match Job::new(worker_type, job_id) {
+        Ok(job) => job,
+        Err(_) => return, // could not get lock
+    };
+
+    if let Err(err) = WorkerTask::new_thread(
+        worker_type,
+        Some(job_id.to_string()),
+        Userid::backup_userid().clone(),
+        false,
+        move |worker| {
+            job.start(&worker.upid().to_string())?;
+            worker.log(format!("starting task log rotation"));
+            // one entry has normally about ~100-150 bytes
+            let max_size = 500000; // at least 5000 entries
+            let max_files = 20; // at least 100000 entries
+            let result = try_block!({
+                let has_rotated = rotate_task_log_archive(max_size, true, Some(max_files))?;
+                if has_rotated {
+                    worker.log(format!("task log archive was rotated"));
+                } else {
+                    worker.log(format!("task log archive was not rotated"));
+                }
+
+                Ok(())
+            });
+
+            let status = worker.create_state(&result);
+
+            if let Err(err) = job.finish(status) {
+                eprintln!("could not finish job state for {}: {}", worker_type, err);
+            }
+
+            result
+        },
+    ) {
+        eprintln!("unable to start task log rotation: {}", err);
+    }
+}
+
 async fn run_stat_generator() {

     let mut count = 0;
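Editor's note: the new schedule_task_log_rotate() runs the archive rotation on the "00:00" daily calendar event, and additionally runs it immediately when the job state shows it has never run. A simplified sketch of that decision (assumption: the real code goes through parse_calendar_event/compute_next_event; here "next run" is just last run plus 24 hours).

fn should_rotate(last_run: i64, now: i64, never_ran: bool) -> bool {
    let next = last_run + 24 * 3600; // daily, standing in for the "00:00" schedule
    next <= now || never_ran // run when due, or instantly if the job never ran
}

fn main() {
    let now = 1_600_000_000;
    assert!(should_rotate(now - 86_401, now, false)); // due
    assert!(!should_rotate(now - 3_600, now, false)); // not due yet
    assert!(should_rotate(now - 3_600, now, true));   // never ran: run instantly
    println!("scheduling decision sketch ok");
}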
@@ -21,7 +21,6 @@ use proxmox_backup::backup::{
     load_and_decrypt_key,
     CryptConfig,
     KeyDerivationConfig,
-    DataBlob,
     DataChunkBuilder,
 };

@@ -87,7 +86,7 @@ struct BenchmarkResult {
 static BENCHMARK_RESULT_2020_TOP: BenchmarkResult = BenchmarkResult {
     tls: Speed {
         speed: None,
-        top: 1_000_000.0 * 690.0, // TLS to localhost, AMD Ryzen 7 2700X
+        top: 1_000_000.0 * 1235.0, // TLS to localhost, AMD Ryzen 7 2700X
     },
     sha256: Speed {
         speed: None,
@@ -226,7 +225,7 @@ async fn test_upload_speed(

     let backup_time = proxmox::tools::time::epoch_i64();

-    let client = connect(repo.host(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.user())?;
     record_repository(&repo);

     if verbose { eprintln!("Connecting to backup server"); }
@@ -79,7 +79,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
         }
     };

-    let client = connect(repo.host(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.user())?;

     let client = BackupReader::start(
         client,
@@ -153,7 +153,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
 /// Shell to interactively inspect and restore snapshots.
 async fn catalog_shell(param: Value) -> Result<(), Error> {
     let repo = extract_repository_from_value(&param)?;
-    let client = connect(repo.host(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.user())?;
     let path = tools::required_string_param(&param, "snapshot")?;
     let archive_name = tools::required_string_param(&param, "archive-name")?;
@@ -1,4 +1,6 @@
 use std::path::PathBuf;
+use std::io::Write;
+use std::process::{Stdio, Command};

 use anyhow::{bail, format_err, Error};
 use serde::{Deserialize, Serialize};
@@ -13,6 +15,17 @@ use proxmox_backup::backup::{
 };
 use proxmox_backup::tools;

+#[api()]
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all = "lowercase")]
+/// Paperkey output format
+pub enum PaperkeyFormat {
+    /// Format as Utf8 text. Includes QR codes as ascii-art.
+    Text,
+    /// Format as Html. Includes QR codes as png images.
+    Html,
+}
+
 pub const DEFAULT_ENCRYPTION_KEY_FILE_NAME: &str = "encryption-key.json";
 pub const MASTER_PUBKEY_FILE_NAME: &str = "master-public.pem";

@@ -261,6 +274,55 @@ fn create_master_key() -> Result<(), Error> {
     Ok(())
 }

+#[api(
+    input: {
+        properties: {
+            path: {
+                description: "Key file. Without this the default key's will be used.",
+                optional: true,
+            },
+            subject: {
+                description: "Include the specified subject as titel text.",
+                optional: true,
+            },
+            "output-format": {
+                type: PaperkeyFormat,
+                description: "Output format. Text or Html.",
+                optional: true,
+            },
+        },
+    },
+)]
+/// Generate a printable, human readable text file containing the encryption key.
+///
+/// This also includes a scanable QR code for fast key restore.
+fn paper_key(
+    path: Option<String>,
+    subject: Option<String>,
+    output_format: Option<PaperkeyFormat>,
+) -> Result<(), Error> {
+    let path = match path {
+        Some(path) => PathBuf::from(path),
+        None => {
+            let path = find_default_encryption_key()?
+                .ok_or_else(|| {
+                    format_err!("no encryption file provided and no default file found")
+                })?;
+            path
+        }
+    };
+
+    let data = file_get_contents(&path)?;
+    let data = std::str::from_utf8(&data)?;
+
+    let format = output_format.unwrap_or(PaperkeyFormat::Html);
+
+    match format {
+        PaperkeyFormat::Html => paperkey_html(data, subject),
+        PaperkeyFormat::Text => paperkey_text(data, subject),
+    }
+}
+
 pub fn cli() -> CliCommandMap {
     let key_create_cmd_def = CliCommand::new(&API_METHOD_CREATE)
         .arg_param(&["path"])
@@ -275,9 +337,214 @@ pub fn cli() -> CliCommandMap {
         .arg_param(&["path"])
         .completion_cb("path", tools::complete_file_name);

+    let paper_key_cmd_def = CliCommand::new(&API_METHOD_PAPER_KEY)
+        .arg_param(&["path"])
+        .completion_cb("path", tools::complete_file_name);
+
     CliCommandMap::new()
         .insert("create", key_create_cmd_def)
         .insert("create-master-key", key_create_master_key_cmd_def)
         .insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
         .insert("change-passphrase", key_change_passphrase_cmd_def)
+        .insert("paperkey", paper_key_cmd_def)
+}
+
+fn paperkey_html(data: &str, subject: Option<String>) -> Result<(), Error> {
+
+    let img_size_pt = 500;
+
+    println!("<!DOCTYPE html>");
+    println!("<html lang=\"en\">");
+    println!("<head>");
+    println!("<meta charset=\"utf-8\">");
+    println!("<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">");
+    println!("<title>Proxmox Backup Paperkey</title>");
+    println!("<style type=\"text/css\">");
+
+    println!("  p {{");
+    println!("    font-size: 12pt;");
+    println!("    font-family: monospace;");
+    println!("    white-space: pre-wrap;");
+    println!("    line-break: anywhere;");
+    println!("  }}");
+
+    println!("</style>");
+
+    println!("</head>");
+
+    println!("<body>");
+
+    if let Some(subject) = subject {
+        println!("<p>Subject: {}</p>", subject);
+    }
+
+    if data.starts_with("-----BEGIN ENCRYPTED PRIVATE KEY-----\n") {
+        let lines: Vec<String> = data.lines()
+            .map(|s| s.trim_end())
+            .filter(|s| !s.is_empty())
+            .map(String::from)
+            .collect();
+
+        if !lines[lines.len()-1].starts_with("-----END ENCRYPTED PRIVATE KEY-----") {
+            bail!("unexpected key format");
+        }
+
+        if lines.len() < 20 {
+            bail!("unexpected key format");
+        }
+
+        const BLOCK_SIZE: usize = 20;
+        let blocks = (lines.len() + BLOCK_SIZE -1)/BLOCK_SIZE;
+
+        for i in 0..blocks {
+            let start = i*BLOCK_SIZE;
+            let mut end = start + BLOCK_SIZE;
+            if end > lines.len() {
+                end = lines.len();
+            }
+            let data = &lines[start..end];
+
+            println!("<div style=\"page-break-inside: avoid;page-break-after: always\">");
+            println!("<p>");
+
+            for l in start..end {
+                println!("{:02}: {}", l, lines[l]);
+            }
+
+            println!("</p>");
+
+            let data = data.join("\n");
+            let qr_code = generate_qr_code("png", data.as_bytes())?;
+            let qr_code = base64::encode_config(&qr_code, base64::STANDARD_NO_PAD);
+
+            println!("<center>");
+            println!("<img");
+            println!("width=\"{}pt\" height=\"{}pt\"", img_size_pt, img_size_pt);
+            println!("src=\"data:image/png;base64,{}\"/>", qr_code);
+            println!("</center>");
+            println!("</div>");
+        }
+
+        println!("</body>");
+        println!("</html>");
+        return Ok(());
+    }
+
+    let key_config: KeyConfig = serde_json::from_str(&data)?;
+    let key_text = serde_json::to_string_pretty(&key_config)?;
+
+    println!("<div style=\"page-break-inside: avoid\">");
+
+    println!("<p>");
+
+    println!("-----BEGIN PROXMOX BACKUP KEY-----");
+
+    for line in key_text.lines() {
+        println!("{}", line);
+    }
+
+    println!("-----END PROXMOX BACKUP KEY-----");
+
+    println!("</p>");
+
+    let qr_code = generate_qr_code("png", key_text.as_bytes())?;
+    let qr_code = base64::encode_config(&qr_code, base64::STANDARD_NO_PAD);
+
+    println!("<center>");
+    println!("<img");
+    println!("width=\"{}pt\" height=\"{}pt\"", img_size_pt, img_size_pt);
+    println!("src=\"data:image/png;base64,{}\"/>", qr_code);
+    println!("</center>");
+
+    println!("</div>");
+
+    println!("</body>");
+    println!("</html>");
+
+    Ok(())
+}
+
+fn paperkey_text(data: &str, subject: Option<String>) -> Result<(), Error> {
+
+    if let Some(subject) = subject {
+        println!("Subject: {}\n", subject);
+    }
+
+    if data.starts_with("-----BEGIN ENCRYPTED PRIVATE KEY-----\n") {
+        let lines: Vec<String> = data.lines()
+            .map(|s| s.trim_end())
+            .filter(|s| !s.is_empty())
+            .map(String::from)
+            .collect();
+
+        if !lines[lines.len()-1].starts_with("-----END ENCRYPTED PRIVATE KEY-----") {
+            bail!("unexpected key format");
+        }
+
+        if lines.len() < 20 {
+            bail!("unexpected key format");
+        }
+
+        const BLOCK_SIZE: usize = 5;
+        let blocks = (lines.len() + BLOCK_SIZE -1)/BLOCK_SIZE;
+
+        for i in 0..blocks {
+            let start = i*BLOCK_SIZE;
+            let mut end = start + BLOCK_SIZE;
+            if end > lines.len() {
+                end = lines.len();
+            }
+            let data = &lines[start..end];
+
+            for l in start..end {
+                println!("{:-2}: {}", l, lines[l]);
+            }
+            let data = data.join("\n");
+            let qr_code = generate_qr_code("utf8i", data.as_bytes())?;
+            let qr_code = String::from_utf8(qr_code)
+                .map_err(|_| format_err!("Failed to read qr code (got non-utf8 data)"))?;
+            println!("{}", qr_code);
+            println!("{}", char::from(12u8)); // page break
+
+        }
+        return Ok(());
+    }
+
+    let key_config: KeyConfig = serde_json::from_str(&data)?;
+    let key_text = serde_json::to_string_pretty(&key_config)?;
+
+    println!("-----BEGIN PROXMOX BACKUP KEY-----");
+    println!("{}", key_text);
+    println!("-----END PROXMOX BACKUP KEY-----");
+
+    let qr_code = generate_qr_code("utf8i", key_text.as_bytes())?;
+    let qr_code = String::from_utf8(qr_code)
+        .map_err(|_| format_err!("Failed to read qr code (got non-utf8 data)"))?;
+
+    println!("{}", qr_code);
+
+    Ok(())
+}
+
+fn generate_qr_code(output_type: &str, data: &[u8]) -> Result<Vec<u8>, Error> {
+
+    let mut child = Command::new("qrencode")
+        .args(&["-t", output_type, "-m0", "-s1", "-lm", "--output", "-"])
+        .stdin(Stdio::piped())
+        .stdout(Stdio::piped())
+        .spawn()?;
+
+    {
+        let stdin = child.stdin.as_mut()
+            .ok_or_else(|| format_err!("Failed to open stdin"))?;
+        stdin.write_all(data)
+            .map_err(|_| format_err!("Failed to write to stdin"))?;
+    }
+
+    let output = child.wait_with_output()
+        .map_err(|_| format_err!("Failed to read stdout"))?;
+
+    let output = crate::tools::command_output(output, None)?;
+
+    Ok(output)
 }
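Editor's note: the paperkey functions above split the key into fixed-size line blocks so that each QR code stays small enough to scan (20 lines per code for HTML output, 5 for text output). A minimal sketch of just that chunking arithmetic; qr_blocks is a hypothetical helper for illustration, not part of the patch.

fn qr_blocks(lines: &[String], block_size: usize) -> Vec<String> {
    // a key of N lines becomes ceil(N / block_size) QR payloads
    let blocks = (lines.len() + block_size - 1) / block_size;
    (0..blocks)
        .map(|i| {
            let start = i * block_size;
            let end = (start + block_size).min(lines.len());
            lines[start..end].join("\n")
        })
        .collect()
}

fn main() {
    let lines: Vec<String> = (0..23).map(|i| format!("line {}", i)).collect();
    assert_eq!(qr_blocks(&lines, 20).len(), 2); // HTML: 20-line blocks
    assert_eq!(qr_blocks(&lines, 5).len(), 5);  // text: 5-line blocks
    println!("block split sketch ok");
}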
@@ -3,6 +3,8 @@ use std::sync::Arc;
 use std::os::unix::io::RawFd;
 use std::path::Path;
 use std::ffi::OsStr;
+use std::collections::HashMap;
+use std::hash::BuildHasher;

 use anyhow::{bail, format_err, Error};
 use serde_json::Value;
@@ -10,6 +12,7 @@ use tokio::signal::unix::{signal, SignalKind};
 use nix::unistd::{fork, ForkResult, pipe};
 use futures::select;
 use futures::future::FutureExt;
+use futures::stream::{StreamExt, TryStreamExt};

 use proxmox::{sortable, identity};
 use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment, schema::*, cli::*};
@@ -23,6 +26,7 @@ use proxmox_backup::backup::{
     BackupDir,
     BackupGroup,
     BufferedDynamicReader,
+    AsyncIndexReader,
 };

 use proxmox_backup::client::*;
@@ -31,6 +35,7 @@ use crate::{
     REPO_URL_SCHEMA,
     extract_repository_from_value,
     complete_pxar_archive_name,
+    complete_img_archive_name,
     complete_group_or_snapshot,
     complete_repository,
     record_repository,
@@ -50,7 +55,37 @@ const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
         ("target", false, &StringSchema::new("Target directory path.").schema()),
         ("repository", true, &REPO_URL_SCHEMA),
         ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
-        ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
+        ("verbose", true, &BooleanSchema::new("Verbose output and stay in foreground.").default(false).schema()),
     ]),
     )
 );
+
+#[sortable]
+const API_METHOD_MAP: ApiMethod = ApiMethod::new(
+    &ApiHandler::Sync(&mount),
+    &ObjectSchema::new(
+        "Map a drive image from a VM backup to a local loopback device. Use 'unmap' to undo.
+WARNING: Only do this with *trusted* backups!",
+        &sorted!([
+            ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
+            ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
+            ("repository", true, &REPO_URL_SCHEMA),
+            ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
+            ("verbose", true, &BooleanSchema::new("Verbose output and stay in foreground.").default(false).schema()),
+        ]),
+    )
+);
+
+#[sortable]
+const API_METHOD_UNMAP: ApiMethod = ApiMethod::new(
+    &ApiHandler::Sync(&unmap),
+    &ObjectSchema::new(
+        "Unmap a loop device mapped with 'map' and release all resources.",
+        &sorted!([
+            ("name", true, &StringSchema::new(
+                concat!("Archive name, path to loopdev (/dev/loopX) or loop device number. ",
+                        "Omit to list all current mappings and force cleaning up leftover instances.")
+            ).schema()),
+        ]),
+    )
+);
@@ -65,6 +100,34 @@ pub fn mount_cmd_def() -> CliCommand {
         .completion_cb("target", tools::complete_file_name)
 }

+pub fn map_cmd_def() -> CliCommand {
+
+    CliCommand::new(&API_METHOD_MAP)
+        .arg_param(&["snapshot", "archive-name"])
+        .completion_cb("repository", complete_repository)
+        .completion_cb("snapshot", complete_group_or_snapshot)
+        .completion_cb("archive-name", complete_img_archive_name)
+}
+
+pub fn unmap_cmd_def() -> CliCommand {
+
+    CliCommand::new(&API_METHOD_UNMAP)
+        .arg_param(&["name"])
+        .completion_cb("name", complete_mapping_names)
+}
+
+fn complete_mapping_names<S: BuildHasher>(_arg: &str, _param: &HashMap<String, String, S>)
+    -> Vec<String>
+{
+    match tools::fuse_loop::find_all_mappings() {
+        Ok(mappings) => mappings
+            .filter_map(|(name, _)| {
+                tools::systemd::unescape_unit(&name).ok()
+            }).collect(),
+        Err(_) => Vec::new()
+    }
+}
+
 fn mount(
     param: Value,
     _info: &ApiMethod,
@@ -100,8 +163,9 @@ fn mount(
 async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;
     let archive_name = tools::required_string_param(&param, "archive-name")?;
-    let target = tools::required_string_param(&param, "target")?;
-    let client = connect(repo.host(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.user())?;
+
+    let target = param["target"].as_str();

     record_repository(&repo);

@@ -124,9 +188,17 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
     };

     let server_archive_name = if archive_name.ends_with(".pxar") {
+        if let None = target {
+            bail!("use the 'mount' command to mount pxar archives");
+        }
         format!("{}.didx", archive_name)
+    } else if archive_name.ends_with(".img") {
+        if let Some(_) = target {
+            bail!("use the 'map' command to map drive images");
+        }
+        format!("{}.fidx", archive_name)
     } else {
-        bail!("Can only mount pxar archives.");
+        bail!("Can only mount/map pxar archives and drive images.");
     };

     let client = BackupReader::start(
@@ -143,25 +215,7 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {

     let file_info = manifest.lookup_file_info(&server_archive_name)?;

-    if server_archive_name.ends_with(".didx") {
-        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
-        let most_used = index.find_most_used_chunks(8);
-        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
-        let reader = BufferedDynamicReader::new(index, chunk_reader);
-        let archive_size = reader.archive_size();
-        let reader: proxmox_backup::pxar::fuse::Reader =
-            Arc::new(BufferedDynamicReadAt::new(reader));
-        let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
-        let options = OsStr::new("ro,default_permissions");
-
-        let session = proxmox_backup::pxar::fuse::Session::mount(
-            decoder,
-            &options,
-            false,
-            Path::new(target),
-        )
-        .map_err(|err| format_err!("pxar mount failed: {}", err))?;
-
+    let daemonize = || -> Result<(), Error> {
         if let Some(pipe) = pipe {
             nix::unistd::chdir(Path::new("/")).unwrap();
             // Finish creation of daemon by redirecting filedescriptors.
@@ -182,15 +236,132 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
             nix::unistd::close(pipe).unwrap();
         }

-        let mut interrupt = signal(SignalKind::interrupt())?;
+        Ok(())
+    };
+
+    let options = OsStr::new("ro,default_permissions");
+
+    // handle SIGINT and SIGTERM
+    let mut interrupt_int = signal(SignalKind::interrupt())?;
+    let mut interrupt_term = signal(SignalKind::terminate())?;
+    let mut interrupt = futures::future::select(interrupt_int.next(), interrupt_term.next());
+
+    if server_archive_name.ends_with(".didx") {
+        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
+        let most_used = index.find_most_used_chunks(8);
+        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
+        let reader = BufferedDynamicReader::new(index, chunk_reader);
+        let archive_size = reader.archive_size();
+        let reader: proxmox_backup::pxar::fuse::Reader =
+            Arc::new(BufferedDynamicReadAt::new(reader));
+        let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
+
+        let session = proxmox_backup::pxar::fuse::Session::mount(
+            decoder,
+            &options,
+            false,
+            Path::new(target.unwrap()),
+        )
+        .map_err(|err| format_err!("pxar mount failed: {}", err))?;
+
+        daemonize()?;
+
         select! {
             res = session.fuse() => res?,
-            _ = interrupt.recv().fuse() => {
+            _ = interrupt => {
                 // exit on interrupted
             }
         }
+    } else if server_archive_name.ends_with(".fidx") {
+        let index = client.download_fixed_index(&manifest, &server_archive_name).await?;
+        let size = index.index_bytes();
+        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), HashMap::new());
+        let reader = AsyncIndexReader::new(index, chunk_reader);
+
+        let name = &format!("{}:{}/{}", repo.to_string(), path, archive_name);
+        let name_escaped = tools::systemd::escape_unit(name, false);
+
+        let mut session = tools::fuse_loop::FuseLoopSession::map_loop(size, reader, &name_escaped, options).await?;
+        let loopdev = session.loopdev_path.clone();
+
+        let (st_send, st_recv) = futures::channel::mpsc::channel(1);
+        let (mut abort_send, abort_recv) = futures::channel::mpsc::channel(1);
+        let mut st_recv = st_recv.fuse();
+        let mut session_fut = session.main(st_send, abort_recv).boxed().fuse();
+
+        // poll until loop file is mapped (or errors)
+        select! {
+            res = session_fut => {
+                bail!("FUSE session unexpectedly ended before loop file mapping");
+            },
+            res = st_recv.try_next() => {
+                if let Err(err) = res {
+                    // init went wrong, abort now
+                    abort_send.try_send(()).map_err(|err|
+                        format_err!("error while sending abort signal - {}", err))?;
+                    // ignore and keep original error cause
+                    let _ = session_fut.await;
+                    return Err(err);
+                }
+            }
+        }
+
+        // daemonize only now to be able to print mapped loopdev or startup errors
+        println!("Image '{}' mapped on {}", name, loopdev);
+        daemonize()?;
+
+        // continue polling until complete or interrupted (which also happens on unmap)
+        select! {
+            res = session_fut => res?,
+            _ = interrupt => {
+                // exit on interrupted
+                abort_send.try_send(()).map_err(|err|
+                    format_err!("error while sending abort signal - {}", err))?;
+                session_fut.await?;
+            }
+        }
+
+        println!("Image unmapped");
     } else {
-        bail!("unknown archive file extension (expected .pxar)");
+        bail!("unknown archive file extension (expected .pxar or .img)");
+    }
+
+    Ok(Value::Null)
+}
+
+fn unmap(
+    param: Value,
+    _info: &ApiMethod,
+    _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+
+    let mut name = match param["name"].as_str() {
+        Some(name) => name.to_owned(),
+        None => {
+            tools::fuse_loop::cleanup_unused_run_files(None);
+            let mut any = false;
+            for (backing, loopdev) in tools::fuse_loop::find_all_mappings()? {
+                let name = tools::systemd::unescape_unit(&backing)?;
+                println!("{}:\t{}", loopdev.unwrap_or("(unmapped)".to_owned()), name);
+                any = true;
+            }
+            if !any {
+                println!("Nothing mapped.");
+            }
+            return Ok(Value::Null);
+        },
+    };
+
+    // allow loop device number alone
+    if let Ok(num) = name.parse::<u8>() {
+        name = format!("/dev/loop{}", num);
+    }
+
+    if name.starts_with("/dev/loop") {
+        tools::fuse_loop::unmap_loopdev(name)?;
+    } else {
+        let name = tools::systemd::escape_unit(&name, false);
+        tools::fuse_loop::unmap_name(name)?;
     }

     Ok(Value::Null)
@@ -48,7 +48,7 @@ async fn task_list(param: Value) -> Result<Value, Error> {
     let output_format = get_output_format(&param);

     let repo = extract_repository_from_value(&param)?;
-    let client = connect(repo.host(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.user())?;

     let limit = param["limit"].as_u64().unwrap_or(50) as usize;
     let running = !param["all"].as_bool().unwrap_or(false);
@@ -96,7 +96,7 @@ async fn task_log(param: Value) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;
     let upid = tools::required_string_param(&param, "upid")?;

-    let client = connect(repo.host(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.user())?;

     display_task_log(client, upid, true).await?;

@@ -122,7 +122,7 @@ async fn task_stop(param: Value) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;
     let upid_str = tools::required_string_param(&param, "upid")?;

-    let mut client = connect(repo.host(), repo.user())?;
+    let mut client = connect(repo.host(), repo.port(), repo.user())?;

     let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
     let _ = client.delete(&path, None).await?;
@@ -54,7 +54,7 @@ impl BackupReader {
             "store": datastore,
             "debug": debug,
         });
-        let req = HttpClient::request_builder(client.server(), "GET", "/api2/json/reader", Some(param)).unwrap();
+        let req = HttpClient::request_builder(client.server(), client.port(), "GET", "/api2/json/reader", Some(param)).unwrap();

         let (h2, abort) = client.start_h2_connection(req, String::from(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!())).await?;

@@ -19,14 +19,22 @@ pub struct BackupRepository {
     user: Option<Userid>,
     /// The host name or IP address
     host: Option<String>,
+    /// The port
+    port: Option<u16>,
     /// The name of the datastore
     store: String,
 }

 impl BackupRepository {

-    pub fn new(user: Option<Userid>, host: Option<String>, store: String) -> Self {
-        Self { user, host, store }
+    pub fn new(user: Option<Userid>, host: Option<String>, port: Option<u16>, store: String) -> Self {
+        let host = match host {
+            Some(host) if (IP_V6_REGEX.regex_obj)().is_match(&host) => {
+                Some(format!("[{}]", host))
+            },
+            other => other,
+        };
+        Self { user, host, port, store }
     }

     pub fn user(&self) -> &Userid {
@@ -43,6 +51,13 @@ impl BackupRepository {
         "localhost"
     }

+    pub fn port(&self) -> u16 {
+        if let Some(port) = self.port {
+            return port;
+        }
+        8007
+    }
+
     pub fn store(&self) -> &str {
         &self.store
     }
@@ -50,13 +65,12 @@ impl BackupRepository {

 impl fmt::Display for BackupRepository {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        if let Some(ref user) = self.user {
-            write!(f, "{}@{}:{}", user, self.host(), self.store)
-        } else if let Some(ref host) = self.host {
-            write!(f, "{}:{}", host, self.store)
-        } else {
-            write!(f, "{}", self.store)
-        }
+        match (&self.user, &self.host, self.port) {
+            (Some(user), _, _) => write!(f, "{}@{}:{}:{}", user, self.host(), self.port(), self.store),
+            (None, Some(host), None) => write!(f, "{}:{}", host, self.store),
+            (None, _, Some(port)) => write!(f, "{}:{}:{}", self.host(), port, self.store),
+            (None, None, None) => write!(f, "{}", self.store),
+        }
     }
 }

@@ -76,7 +90,8 @@ impl std::str::FromStr for BackupRepository {
         Ok(Self {
             user: cap.get(1).map(|m| Userid::try_from(m.as_str().to_owned())).transpose()?,
             host: cap.get(2).map(|m| m.as_str().to_owned()),
-            store: cap[3].to_owned(),
+            port: cap.get(3).map(|m| m.as_str().parse::<u16>()).transpose()?,
+            store: cap[4].to_owned(),
         })
     }
 }
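With the optional port, a repository spec now renders as user@host:port:store, host:store, or just store, and the port defaults to 8007. As a rough illustration only (a standalone sketch with made-up helper names, not the crate's actual BackupRepository type), the rendering logic behaves like this:

// Hypothetical standalone sketch mirroring the Display logic shown above.
fn repo_to_string(user: Option<&str>, host: Option<&str>, port: Option<u16>, store: &str) -> String {
    let host_or_default = host.unwrap_or("localhost");
    let port_or_default = port.unwrap_or(8007); // default Proxmox Backup Server port
    match (user, host, port) {
        (Some(user), _, _) => format!("{}@{}:{}:{}", user, host_or_default, port_or_default, store),
        (None, Some(host), None) => format!("{}:{}", host, store),
        (None, _, Some(port)) => format!("{}:{}:{}", host_or_default, port, store),
        (None, None, None) => store.to_string(),
    }
}

fn main() {
    assert_eq!(
        repo_to_string(Some("sync@pbs"), Some("remote.example"), Some(8008), "store1"),
        "sync@pbs@remote.example:8008:store1"
    );
    assert_eq!(repo_to_string(None, Some("remote.example"), None, "store1"), "remote.example:store1");
    assert_eq!(repo_to_string(None, None, None, "store1"), "store1");
}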
@@ -65,7 +65,7 @@ impl BackupWriter {
         });

         let req = HttpClient::request_builder(
-            client.server(), "GET", "/api2/json/backup", Some(param)).unwrap();
+            client.server(), client.port(), "GET", "/api2/json/backup", Some(param)).unwrap();

         let (h2, abort) = client.start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!())).await?;

@@ -262,7 +262,7 @@ impl BackupWriter {
         let archive = if self.verbose {
             archive_name.to_string()
         } else {
-            crate::tools::format::strip_server_file_expenstion(archive_name.clone())
+            crate::tools::format::strip_server_file_extension(archive_name.clone())
         };
         if archive_name != CATALOG_NAME {
             let speed: HumanByte = ((uploaded * 1_000_000) / (duration.as_micros() as usize)).into();
@@ -99,6 +99,7 @@ impl HttpClientOptions {
 pub struct HttpClient {
     client: Client<HttpsConnector>,
     server: String,
+    port: u16,
     fingerprint: Arc<Mutex<Option<String>>>,
     first_auth: BroadcastFuture<()>,
     auth: Arc<RwLock<AuthInfo>>,
@@ -250,6 +251,7 @@ fn load_ticket_info(prefix: &str, server: &str, userid: &Userid) -> Option<(Stri
 impl HttpClient {
     pub fn new(
         server: &str,
+        port: u16,
         userid: &Userid,
         mut options: HttpClientOptions,
     ) -> Result<Self, Error> {
@@ -338,7 +340,7 @@ impl HttpClient {
                     let authinfo = auth2.read().unwrap().clone();
                     (authinfo.userid, authinfo.ticket)
                 };
-                match Self::credentials(client2.clone(), server2.clone(), userid, ticket).await {
+                match Self::credentials(client2.clone(), server2.clone(), port, userid, ticket).await {
                     Ok(auth) => {
                         if use_ticket_cache && prefix2.is_some() {
                             let _ = store_ticket_info(prefix2.as_ref().unwrap(), &server2, &auth.userid.to_string(), &auth.ticket, &auth.token);
@@ -358,6 +360,7 @@ impl HttpClient {
         let login_future = Self::credentials(
             client.clone(),
             server.to_owned(),
+            port,
             userid.to_owned(),
             password.to_owned(),
         ).map_ok({
@@ -377,6 +380,7 @@ impl HttpClient {
         Ok(Self {
             client,
             server: String::from(server),
+            port,
             fingerprint: verified_fingerprint,
             auth,
             ticket_abort,
@@ -486,7 +490,7 @@ impl HttpClient {
         path: &str,
         data: Option<Value>,
     ) -> Result<Value, Error> {
-        let req = Self::request_builder(&self.server, "GET", path, data).unwrap();
+        let req = Self::request_builder(&self.server, self.port, "GET", path, data)?;
         self.request(req).await
     }

@@ -495,7 +499,7 @@ impl HttpClient {
         path: &str,
         data: Option<Value>,
     ) -> Result<Value, Error> {
-        let req = Self::request_builder(&self.server, "DELETE", path, data).unwrap();
+        let req = Self::request_builder(&self.server, self.port, "DELETE", path, data)?;
         self.request(req).await
     }

@@ -504,7 +508,7 @@ impl HttpClient {
         path: &str,
         data: Option<Value>,
     ) -> Result<Value, Error> {
-        let req = Self::request_builder(&self.server, "POST", path, data).unwrap();
+        let req = Self::request_builder(&self.server, self.port, "POST", path, data)?;
         self.request(req).await
     }

@@ -513,7 +517,7 @@ impl HttpClient {
         path: &str,
         output: &mut (dyn Write + Send),
     ) -> Result<(), Error> {
-        let mut req = Self::request_builder(&self.server, "GET", path, None).unwrap();
+        let mut req = Self::request_builder(&self.server, self.port, "GET", path, None)?;

         let client = self.client.clone();

@@ -549,7 +553,7 @@ impl HttpClient {
     ) -> Result<Value, Error> {

         let path = path.trim_matches('/');
-        let mut url = format!("https://{}:8007/{}", &self.server, path);
+        let mut url = format!("https://{}:{}/{}", &self.server, self.port, path);

         if let Some(data) = data {
             let query = tools::json_object_to_query(data).unwrap();
@@ -624,11 +628,12 @@ impl HttpClient {
     async fn credentials(
         client: Client<HttpsConnector>,
         server: String,
+        port: u16,
         username: Userid,
         password: String,
     ) -> Result<AuthInfo, Error> {
         let data = json!({ "username": username, "password": password });
-        let req = Self::request_builder(&server, "POST", "/api2/json/access/ticket", Some(data)).unwrap();
+        let req = Self::request_builder(&server, port, "POST", "/api2/json/access/ticket", Some(data))?;
         let cred = Self::api_request(client, req).await?;
         let auth = AuthInfo {
             userid: cred["data"]["username"].as_str().unwrap().parse()?,
@@ -672,9 +677,13 @@ impl HttpClient {
         &self.server
     }

-    pub fn request_builder(server: &str, method: &str, path: &str, data: Option<Value>) -> Result<Request<Body>, Error> {
+    pub fn port(&self) -> u16 {
+        self.port
+    }
+
+    pub fn request_builder(server: &str, port: u16, method: &str, path: &str, data: Option<Value>) -> Result<Request<Body>, Error> {
         let path = path.trim_matches('/');
-        let url: Uri = format!("https://{}:8007/{}", server, path).parse()?;
+        let url: Uri = format!("https://{}:{}/{}", server, port, path).parse()?;

         if let Some(data) = data {
             if method == "POST" {
@@ -687,7 +696,7 @@ impl HttpClient {
                 return Ok(request);
             } else {
                 let query = tools::json_object_to_query(data)?;
-                let url: Uri = format!("https://{}:8007/{}?{}", server, path, query).parse()?;
+                let url: Uri = format!("https://{}:{}/{}?{}", server, port, path, query).parse()?;
                 let request = Request::builder()
                     .method(method)
                     .uri(url)
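With the port threaded through request_builder instead of the hard-coded 8007, the request URL is simply https://{server}:{port}/{path}. A small standalone sketch of that URL shape (a hypothetical helper for illustration, not the real builder, which returns an http::Request):

// Illustrative only: shows the URL format implied by the diff above.
fn build_url(server: &str, port: u16, path: &str, query: Option<&str>) -> String {
    let path = path.trim_matches('/');
    match query {
        Some(q) => format!("https://{}:{}/{}?{}", server, port, path, q),
        None => format!("https://{}:{}/{}", server, port, path),
    }
}

fn main() {
    assert_eq!(
        build_url("pbs.example", 8007, "/api2/json/version", None),
        "https://pbs.example:8007/api2/json/version"
    );
}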
@@ -251,8 +251,8 @@ async fn pull_snapshot(
         Err(err) => {
             match err.downcast_ref::<HttpError>() {
                 Some(HttpError { code, message }) => {
-                    match code {
-                        &StatusCode::NOT_FOUND => {
+                    match *code {
+                        StatusCode::NOT_FOUND => {
                             worker.log(format!("skipping snapshot {} - vanished since start of sync", snapshot));
                             return Ok(());
                         },
@@ -395,6 +395,7 @@ pub async fn pull_group(
     tgt_store: Arc<DataStore>,
     group: &BackupGroup,
     delete: bool,
+    progress: Option<(usize, usize)>, // (groups_done, group_count)
 ) -> Result<(), Error> {

     let path = format!("api2/json/admin/datastore/{}/snapshots", src_repo.store());
@@ -416,14 +417,24 @@ pub async fn pull_group(

     let mut remote_snapshots = std::collections::HashSet::new();

+    let (per_start, per_group) = if let Some((groups_done, group_count)) = progress {
+        let per_start = (groups_done as f64)/(group_count as f64);
+        let per_group = 1.0/(group_count as f64);
+        (per_start, per_group)
+    } else {
+        (0.0, 1.0)
+    };
+
     // start with 16384 chunks (up to 65GB)
     let downloaded_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*64)));

-    for item in list {
+    let snapshot_count = list.len();
+
+    for (pos, item) in list.into_iter().enumerate() {
         let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;

         // in-progress backups can't be synced
-        if let None = item.size {
+        if item.size.is_none() {
             worker.log(format!("skipping snapshot {} - in-progress backup", snapshot));
             continue;
         }
@@ -440,7 +451,7 @@ pub async fn pull_group(
             .password(Some(auth_info.ticket.clone()))
             .fingerprint(fingerprint.clone());

-        let new_client = HttpClient::new(src_repo.host(), src_repo.user(), options)?;
+        let new_client = HttpClient::new(src_repo.host(), src_repo.port(), src_repo.user(), options)?;

         let reader = BackupReader::start(
             new_client,
@@ -452,7 +463,13 @@ pub async fn pull_group(
             true,
         ).await?;

-        pull_snapshot_from(worker, reader, tgt_store.clone(), &snapshot, downloaded_chunks.clone()).await?;
+        let result = pull_snapshot_from(worker, reader, tgt_store.clone(), &snapshot, downloaded_chunks.clone()).await;
+
+        let percentage = (pos as f64)/(snapshot_count as f64);
+        let percentage = per_start + percentage*per_group;
+        worker.log(format!("percentage done: {:.2}%", percentage*100.0));
+
+        result?; // stop on error
     }

     if delete {
@@ -502,7 +519,9 @@ pub async fn pull_store(
         new_groups.insert(BackupGroup::new(&item.backup_type, &item.backup_id));
     }

-    for item in list {
+    let group_count = list.len();
+
+    for (groups_done, item) in list.into_iter().enumerate() {
         let group = BackupGroup::new(&item.backup_type, &item.backup_id);

         let (owner, _lock_guard) = tgt_store.create_locked_backup_group(&group, &userid)?;
@@ -510,14 +529,22 @@ pub async fn pull_store(
         if userid != owner { // only the owner is allowed to create additional snapshots
             worker.log(format!("sync group {}/{} failed - owner check failed ({} != {})",
                 item.backup_type, item.backup_id, userid, owner));
-            errors = true;
-            continue; // do not stop here, instead continue
-        }
-
-        if let Err(err) = pull_group(worker, client, src_repo, tgt_store.clone(), &group, delete).await {
-            worker.log(format!("sync group {}/{} failed - {}", item.backup_type, item.backup_id, err));
-            errors = true;
-            continue; // do not stop here, instead continue
+            errors = true; // do not stop here, instead continue
+        } else {
+            if let Err(err) = pull_group(
+                worker,
+                client,
+                src_repo,
+                tgt_store.clone(),
+                &group,
+                delete,
+                Some((groups_done, group_count)),
+            ).await {
+                worker.log(format!("sync group {}/{} failed - {}", item.backup_type, item.backup_id, err));
+                errors = true; // do not stop here, instead continue
+            }
         }
     }

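The progress reporting introduced above gives each group an equal share of the total and advances within that share per snapshot. A minimal standalone sketch of that arithmetic (illustrative function, not the actual pull_group signature):

// Each of `group_count` groups owns 1/group_count of the progress bar;
// the current snapshot position scales inside that share.
fn sync_progress(groups_done: usize, group_count: usize, pos: usize, snapshot_count: usize) -> f64 {
    let per_start = (groups_done as f64) / (group_count as f64);
    let per_group = 1.0 / (group_count as f64);
    per_start + (pos as f64) / (snapshot_count as f64) * per_group
}

fn main() {
    // second snapshot of the third group, out of 4 groups with 10 snapshots each
    println!("percentage done: {:.2}%", sync_progress(2, 4, 1, 10) * 100.0); // 52.50%
}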
@@ -1,7 +1,7 @@
 use std::collections::HashSet;
 use std::io::Write;
 //use std::os::unix::io::FromRawFd;
-use std::path::{Path, PathBuf};
+use std::path::Path;
 use std::pin::Pin;
 use std::sync::{Arc, Mutex};
 use std::task::{Context, Poll};
@@ -38,9 +38,8 @@ impl Drop for PxarBackupStream {
 impl PxarBackupStream {
     pub fn new<W: Write + Send + 'static>(
         dir: Dir,
-        _path: PathBuf,
         device_set: Option<HashSet<u64>>,
-        _verbose: bool,
+        verbose: bool,
         skip_lost_and_found: bool,
         catalog: Arc<Mutex<CatalogWriter<W>>>,
         patterns: Vec<MatchEntry>,
@@ -70,7 +69,12 @@ impl PxarBackupStream {
                 crate::pxar::Flags::DEFAULT,
                 device_set,
                 skip_lost_and_found,
-                |_| Ok(()),
+                |path| {
+                    if verbose {
+                        println!("{:?}", path);
+                    }
+                    Ok(())
+                },
                 entries_max,
                 Some(&mut *catalog_guard),
             ) {
@@ -97,11 +101,9 @@ impl PxarBackupStream {
         entries_max: usize,
     ) -> Result<Self, Error> {
         let dir = nix::dir::Dir::open(dirname, OFlag::O_DIRECTORY, Mode::empty())?;
-        let path = std::path::PathBuf::from(dirname);

         Self::new(
             dir,
-            path,
             device_set,
             verbose,
             skip_lost_and_found,
@@ -12,42 +12,47 @@ use ::serde::{Deserialize, Serialize};
 use serde::de::{value, IntoDeserializer};

 use proxmox::tools::{fs::replace_file, fs::CreateOptions};
-use proxmox::constnamemap;
+use proxmox::constnamedbitmap;
 use proxmox::api::{api, schema::*};

 use crate::api2::types::Userid;

 // define Privilege bitfield

-constnamemap! {
+constnamedbitmap! {
     /// Contains a list of Privileges
     PRIVILEGES: u64 => {
-        PRIV_SYS_AUDIT("Sys.Audit") = 1 << 0;
-        PRIV_SYS_MODIFY("Sys.Modify") = 1 << 1;
-        PRIV_SYS_POWER_MANAGEMENT("Sys.PowerManagement") = 1 << 2;
+        PRIV_SYS_AUDIT("Sys.Audit");
+        PRIV_SYS_MODIFY("Sys.Modify");
+        PRIV_SYS_POWER_MANAGEMENT("Sys.PowerManagement");

-        PRIV_DATASTORE_AUDIT("Datastore.Audit") = 1 << 3;
-        PRIV_DATASTORE_MODIFY("Datastore.Modify") = 1 << 4;
-        PRIV_DATASTORE_READ("Datastore.Read") = 1 << 5;
+        PRIV_DATASTORE_AUDIT("Datastore.Audit");
+        PRIV_DATASTORE_ALLOCATE("Datastore.Allocate");
+        PRIV_DATASTORE_MODIFY("Datastore.Modify");
+        PRIV_DATASTORE_READ("Datastore.Read");

         /// Datastore.Backup also requires backup ownership
-        PRIV_DATASTORE_BACKUP("Datastore.Backup") = 1 << 6;
+        PRIV_DATASTORE_BACKUP("Datastore.Backup");
         /// Datastore.Prune also requires backup ownership
-        PRIV_DATASTORE_PRUNE("Datastore.Prune") = 1 << 7;
+        PRIV_DATASTORE_PRUNE("Datastore.Prune");

-        PRIV_PERMISSIONS_MODIFY("Permissions.Modify") = 1 << 8;
+        PRIV_PERMISSIONS_MODIFY("Permissions.Modify");

-        PRIV_REMOTE_AUDIT("Remote.Audit") = 1 << 9;
-        PRIV_REMOTE_MODIFY("Remote.Modify") = 1 << 10;
-        PRIV_REMOTE_READ("Remote.Read") = 1 << 11;
-        PRIV_REMOTE_PRUNE("Remote.Prune") = 1 << 12;
+        PRIV_REMOTE_AUDIT("Remote.Audit");
+        PRIV_REMOTE_MODIFY("Remote.Modify");
+        PRIV_REMOTE_READ("Remote.Read");
+        PRIV_REMOTE_PRUNE("Remote.Prune");

-        PRIV_SYS_CONSOLE("Sys.Console") = 1 << 13;
+        PRIV_SYS_CONSOLE("Sys.Console");
     }
 }

+/// Admin always has all privileges. It can do everything except a few actions
+/// which are limited to the 'root@pam` superuser
 pub const ROLE_ADMIN: u64 = std::u64::MAX;

+/// NoAccess can be used to remove privileges from specific paths
 pub const ROLE_NO_ACCESS: u64 = 0;

 pub const ROLE_AUDIT: u64 =
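The switch from constnamemap! to constnamedbitmap! drops the hand-written 1 << n values; the diff above implies each entry simply receives the next bit in declaration order, and roles remain plain bit-ors of these flags. A standalone sketch of that numbering (plain Rust for illustration, not the actual macro expansion):

fn main() {
    // First few privilege names from the list above; bit values are assigned in order.
    let privs: [&str; 4] = ["Sys.Audit", "Sys.Modify", "Sys.PowerManagement", "Datastore.Audit"];
    for (i, name) in privs.iter().enumerate() {
        println!("{:<20} = 1 << {} = {:#06x}", name, i, 1u64 << i);
    }
    // A role is a bit-or of privileges; ROLE_ADMIN has every bit set.
    let role_admin: u64 = u64::MAX;
    assert!(role_admin & (1 << 2) != 0);
}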
@@ -96,9 +96,7 @@ impl CachedUserInfo {
         }
         Ok(())
     }
-}
-
-impl CachedUserInfo {
+
     pub fn is_superuser(&self, userid: &Userid) -> bool {
         userid == "root@pam"
     }
@@ -97,7 +97,7 @@ where
 {
     let mut path = path.as_ref().to_path_buf();
     path.set_extension("lck");
-    let lock = open_file_locked(&path, Duration::new(10, 0))?;
+    let lock = open_file_locked(&path, Duration::new(10, 0), true)?;
     let backup_user = crate::backup::backup_user()?;
     nix::unistd::chown(&path, Some(backup_user.uid), Some(backup_user.gid))?;
     Ok(lock)
@@ -149,7 +149,7 @@ pub fn compute_file_diff(filename: &str, shadow: &str) -> Result<String, Error>
         .output()
         .map_err(|err| format_err!("failed to execute diff - {}", err))?;

-    let diff = crate::tools::command_output(output, Some(|c| c == 0 || c == 1))
+    let diff = crate::tools::command_output_as_string(output, Some(|c| c == 0 || c == 1))
         .map_err(|err| format_err!("diff failed: {}", err))?;

     Ok(diff)
@@ -39,6 +39,11 @@ pub const REMOTE_PASSWORD_SCHEMA: Schema = StringSchema::new("Password or auth t
         host: {
             schema: DNS_NAME_OR_IP_SCHEMA,
         },
+        port: {
+            optional: true,
+            description: "The (optional) port",
+            type: u16,
+        },
         userid: {
             type: Userid,
         },
@@ -58,6 +63,8 @@ pub struct Remote {
     #[serde(skip_serializing_if="Option::is_none")]
     pub comment: Option<String>,
     pub host: String,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub port: Option<u16>,
     pub userid: Userid,
     #[serde(skip_serializing_if="String::is_empty")]
     #[serde(with = "proxmox::tools::serde::string_as_base64")]
@@ -1,3 +1,5 @@
+pub mod task;
+
 #[macro_use]
 pub mod buildcfg;

@@ -235,7 +235,7 @@ impl<'a, 'b> Archiver<'a, 'b> {
                 continue;
             }

-            (self.callback)(Path::new(OsStr::from_bytes(file_name)))?;
+            (self.callback)(&file_entry.path)?;
             self.path = file_entry.path;
             self.add_entry(encoder, dir_fd, &file_entry.name, &file_entry.stat)
                 .map_err(|err| self.wrap_err(err))?;
@@ -60,7 +60,7 @@ impl RRA {

         let min_time = epoch - (RRD_DATA_ENTRIES as u64)*reso;
         let min_time = (min_time/reso + 1)*reso;
-        let mut t = last_update - (RRD_DATA_ENTRIES as u64)*reso;
+        let mut t = last_update.saturating_sub((RRD_DATA_ENTRIES as u64)*reso);
         let mut index = ((t/reso) % (RRD_DATA_ENTRIES as u64)) as usize;
         for _ in 0..RRD_DATA_ENTRIES {
             t += reso; index = (index + 1) % RRD_DATA_ENTRIES;
@@ -272,7 +272,7 @@ impl RRD {
             t += reso; index = (index + 1) % RRD_DATA_ENTRIES;
         }

-        (start, reso, list.into())
+        (start, reso, list)
     }

     pub fn from_raw(mut raw: &[u8]) -> Result<Self, std::io::Error> {
@@ -289,7 +289,7 @@ impl RRD {
         }

         if rrd.magic != PROXMOX_RRD_MAGIC_1_0 {
-            let msg = format!("wrong magic number");
+            let msg = "wrong magic number".to_string();
             return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
         }

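The saturating_sub change above guards against an unsigned underflow when last_update is smaller than one full RRA window. A standalone sketch with illustrative values (constants here are examples, not the crate's actual settings):

fn main() {
    const RRD_DATA_ENTRIES: u64 = 70; // illustrative window size
    let reso = 60u64;                 // illustrative resolution in seconds
    let last_update = 600u64;         // earlier than one full window

    // let t = last_update - RRD_DATA_ENTRIES * reso; // u64 underflow: panics in
    // debug builds, wraps around in release builds
    let t = last_update.saturating_sub(RRD_DATA_ENTRIES * reso);
    assert_eq!(t, 0); // clamped to zero instead
    println!("start time clamped to {}", t);
}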
@@ -18,9 +18,21 @@ use tokio::time::Instant;
 use url::form_urlencoded;

 use proxmox::http_err;
-use proxmox::api::{ApiHandler, ApiMethod, HttpError};
-use proxmox::api::{RpcEnvironment, RpcEnvironmentType, check_api_permission};
-use proxmox::api::schema::{ObjectSchema, parse_simple_value, verify_json_object, parse_parameter_strings};
+use proxmox::api::{
+    ApiHandler,
+    ApiMethod,
+    HttpError,
+    Permission,
+    RpcEnvironment,
+    RpcEnvironmentType,
+    check_api_permission,
+};
+use proxmox::api::schema::{
+    ObjectSchema,
+    parse_parameter_strings,
+    parse_simple_value,
+    verify_json_object,
+};

 use super::environment::RestEnvironment;
 use super::formatter::*;
@@ -128,9 +140,10 @@ impl tower_service::Service<Request<Body>> for ApiService {
         let path = req.uri().path().to_owned();
         let method = req.method().clone();

+        let config = Arc::clone(&self.api_config);
         let peer = self.peer;
-        handle_request(self.api_config.clone(), req)
-            .map(move |result| match result {
+        async move {
+            match handle_request(config, req).await {
                 Ok(res) => {
                     log_response(&peer, method, &path, &res);
                     Ok::<_, Self::Error>(res)
@@ -148,8 +161,9 @@ impl tower_service::Service<Request<Body>> for ApiService {
                     Ok(resp)
                 }
             }
-        })
-        .boxed()
+            }
+        }
+        .boxed()
     }
 }

@@ -315,16 +329,16 @@ pub async fn handle_api_request<Env: RpcEnvironment, S: 'static + BuildHasher +

 fn get_index(
     userid: Option<Userid>,
-    token: Option<String>,
+    csrf_token: Option<String>,
     language: Option<String>,
     api: &Arc<ApiConfig>,
     parts: Parts,
 ) -> Response<Body> {

     let nodename = proxmox::tools::nodename();
-    let userid = userid.as_ref().map(|u| u.as_str()).unwrap_or("");
+    let user = userid.as_ref().map(|u| u.as_str()).unwrap_or("");

-    let token = token.unwrap_or_else(|| String::from(""));
+    let csrf_token = csrf_token.unwrap_or_else(|| String::from(""));

     let mut debug = false;
     let mut template_file = "index";
@@ -348,19 +362,16 @@ fn get_index(

     let data = json!({
         "NodeName": nodename,
-        "UserName": userid,
-        "CSRFPreventionToken": token,
+        "UserName": user,
+        "CSRFPreventionToken": csrf_token,
         "language": lang,
         "debug": debug,
     });

-    let mut ct = "text/html";
-
-    let index = match api.render_template(template_file, &data) {
-        Ok(index) => index,
+    let (ct, index) = match api.render_template(template_file, &data) {
+        Ok(index) => ("text/html", index),
         Err(err) => {
-            ct = "text/plain";
-            format!("Error rendering template: {}", err)
+            ("text/plain", format!("Error rendering template: {}", err))
         }
     };

@@ -492,7 +503,6 @@ fn check_auth(

     if method != hyper::Method::GET {
         if let Some(token) = token {
-            println!("CSRF prevention token: {:?}", token);
             verify_csrf_prevention_token(csrf_secret(), &userid, &token, -300, ticket_lifetime)?;
         } else {
             bail!("missing CSRF prevention token");
@@ -502,7 +512,7 @@ fn check_auth(
     Ok(userid)
 }

-pub async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<Response<Body>, Error> {
+async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<Response<Body>, Error> {

     let (parts, body) = req.into_parts();

@@ -535,13 +545,16 @@ async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<Response<Body>, Error> {
     };

     let mut uri_param = HashMap::new();
+    let api_method = api.find_method(&components[2..], method.clone(), &mut uri_param);

-    if comp_len == 4 && components[2] == "access" && (
-        (components[3] == "ticket" && method == hyper::Method::POST) ||
-        (components[3] == "domains" && method == hyper::Method::GET)
-    ) {
-        // explicitly allow those calls without auth
-    } else {
+    let mut auth_required = true;
+    if let Some(api_method) = api_method {
+        if let Permission::World = *api_method.access.permission {
+            auth_required = false; // no auth for endpoints with World permission
+        }
+    }
+
+    if auth_required {
         let (ticket, token, _) = extract_auth_data(&parts.headers);
         match check_auth(&method, &ticket, &token, &user_info) {
             Ok(userid) => rpcenv.set_user(Some(userid.to_string())),
@@ -554,7 +567,7 @@ async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<Response<Body>, Error> {
         }
     }

-    match api.find_method(&components[2..], method, &mut uri_param) {
+    match api_method {
         None => {
             let err = http_err!(NOT_FOUND, "Path '{}' not found.", path);
             return Ok((formatter.format_error)(err));
@ -1,6 +1,7 @@
|
|||||||
use std::collections::HashMap;
|
use std::collections::{HashMap, VecDeque};
|
||||||
use std::fs::File;
|
use std::fs::File;
|
||||||
use std::io::{Read, BufRead, BufReader};
|
use std::path::Path;
|
||||||
|
use std::io::{Read, Write, BufRead, BufReader};
|
||||||
use std::panic::UnwindSafe;
|
use std::panic::UnwindSafe;
|
||||||
use std::sync::atomic::{AtomicBool, Ordering};
|
use std::sync::atomic::{AtomicBool, Ordering};
|
||||||
use std::sync::{Arc, Mutex};
|
use std::sync::{Arc, Mutex};
|
||||||
@ -19,6 +20,7 @@ use proxmox::tools::fs::{create_path, open_file_locked, replace_file, CreateOpti
|
|||||||
|
|
||||||
use super::UPID;
|
use super::UPID;
|
||||||
|
|
||||||
|
use crate::tools::logrotate::{LogRotate, LogRotateFiles};
|
||||||
use crate::tools::FileLogger;
|
use crate::tools::FileLogger;
|
||||||
use crate::api2::types::Userid;
|
use crate::api2::types::Userid;
|
||||||
|
|
||||||
@ -31,6 +33,10 @@ pub const PROXMOX_BACKUP_LOG_DIR: &str = PROXMOX_BACKUP_LOG_DIR_M!();
|
|||||||
pub const PROXMOX_BACKUP_TASK_DIR: &str = PROXMOX_BACKUP_TASK_DIR_M!();
|
pub const PROXMOX_BACKUP_TASK_DIR: &str = PROXMOX_BACKUP_TASK_DIR_M!();
|
||||||
pub const PROXMOX_BACKUP_TASK_LOCK_FN: &str = concat!(PROXMOX_BACKUP_TASK_DIR_M!(), "/.active.lock");
|
pub const PROXMOX_BACKUP_TASK_LOCK_FN: &str = concat!(PROXMOX_BACKUP_TASK_DIR_M!(), "/.active.lock");
|
||||||
pub const PROXMOX_BACKUP_ACTIVE_TASK_FN: &str = concat!(PROXMOX_BACKUP_TASK_DIR_M!(), "/active");
|
pub const PROXMOX_BACKUP_ACTIVE_TASK_FN: &str = concat!(PROXMOX_BACKUP_TASK_DIR_M!(), "/active");
|
||||||
|
pub const PROXMOX_BACKUP_INDEX_TASK_FN: &str = concat!(PROXMOX_BACKUP_TASK_DIR_M!(), "/index");
|
||||||
|
pub const PROXMOX_BACKUP_ARCHIVE_TASK_FN: &str = concat!(PROXMOX_BACKUP_TASK_DIR_M!(), "/archive");
|
||||||
|
|
||||||
|
const MAX_INDEX_TASKS: usize = 1000;
|
||||||
|
|
||||||
lazy_static! {
|
lazy_static! {
|
||||||
static ref WORKER_TASK_LIST: Mutex<HashMap<usize, Arc<WorkerTask>>> = Mutex::new(HashMap::new());
|
static ref WORKER_TASK_LIST: Mutex<HashMap<usize, Arc<WorkerTask>>> = Mutex::new(HashMap::new());
|
||||||
@ -325,86 +331,90 @@ pub struct TaskListInfo {
|
|||||||
pub state: Option<TaskState>, // endtime, status
|
pub state: Option<TaskState>, // endtime, status
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn lock_task_list_files(exclusive: bool) -> Result<std::fs::File, Error> {
|
||||||
|
let backup_user = crate::backup::backup_user()?;
|
||||||
|
|
||||||
|
let lock = open_file_locked(PROXMOX_BACKUP_TASK_LOCK_FN, std::time::Duration::new(10, 0), exclusive)?;
|
||||||
|
nix::unistd::chown(PROXMOX_BACKUP_TASK_LOCK_FN, Some(backup_user.uid), Some(backup_user.gid))?;
|
||||||
|
|
||||||
|
Ok(lock)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// checks if the Task Archive is bigger that 'size_threshold' bytes, and
|
||||||
|
/// rotates it if it is
|
||||||
|
pub fn rotate_task_log_archive(size_threshold: u64, compress: bool, max_files: Option<usize>) -> Result<bool, Error> {
|
||||||
|
let _lock = lock_task_list_files(true)?;
|
||||||
|
let path = Path::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN);
|
||||||
|
let metadata = match path.metadata() {
|
||||||
|
Ok(metadata) => metadata,
|
||||||
|
Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(false),
|
||||||
|
Err(err) => bail!("unable to open task archive - {}", err),
|
||||||
|
};
|
||||||
|
|
||||||
|
if metadata.len() > size_threshold {
|
||||||
|
let mut logrotate = LogRotate::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN, compress).ok_or_else(|| format_err!("could not get archive file names"))?;
|
||||||
|
let backup_user = crate::backup::backup_user()?;
|
||||||
|
logrotate.rotate(
|
||||||
|
CreateOptions::new()
|
||||||
|
.owner(backup_user.uid)
|
||||||
|
.group(backup_user.gid),
|
||||||
|
max_files,
|
||||||
|
)?;
|
||||||
|
Ok(true)
|
||||||
|
} else {
|
||||||
|
Ok(false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// atomically read/update the task list, update status of finished tasks
|
// atomically read/update the task list, update status of finished tasks
|
||||||
// new_upid is added to the list when specified.
|
// new_upid is added to the list when specified.
|
||||||
// Returns a sorted list of known tasks,
|
fn update_active_workers(new_upid: Option<&UPID>) -> Result<(), Error> {
|
||||||
fn update_active_workers(new_upid: Option<&UPID>) -> Result<Vec<TaskListInfo>, Error> {
|
|
||||||
|
|
||||||
let backup_user = crate::backup::backup_user()?;
|
let backup_user = crate::backup::backup_user()?;
|
||||||
|
|
||||||
let lock = open_file_locked(PROXMOX_BACKUP_TASK_LOCK_FN, std::time::Duration::new(10, 0))?;
|
let lock = lock_task_list_files(true)?;
|
||||||
nix::unistd::chown(PROXMOX_BACKUP_TASK_LOCK_FN, Some(backup_user.uid), Some(backup_user.gid))?;
|
|
||||||
|
|
||||||
let reader = match File::open(PROXMOX_BACKUP_ACTIVE_TASK_FN) {
|
-        Ok(f) => Some(BufReader::new(f)),
-        Err(err) => {
-            if err.kind() == std::io::ErrorKind::NotFound {
-                None
-            } else {
-                bail!("unable to open active worker {:?} - {}", PROXMOX_BACKUP_ACTIVE_TASK_FN, err);
-            }
-        }
-    };
-
-    let mut active_list = vec![];
-    let mut finish_list = vec![];
-
-    if let Some(lines) = reader.map(|r| r.lines()) {
-
-        for line in lines {
-            let line = line?;
-            match parse_worker_status_line(&line) {
-                Err(err) => bail!("unable to parse active worker status '{}' - {}", line, err),
-                Ok((upid_str, upid, state)) => match state {
-                    None if worker_is_active_local(&upid) => {
-                        active_list.push(TaskListInfo { upid, upid_str, state: None });
-                    },
-                    None => {
-                        println!("Detected stopped UPID {}", upid_str);
-                        let now = proxmox::tools::time::epoch_i64();
-                        let status = upid_read_status(&upid)
-                            .unwrap_or_else(|_| TaskState::Unknown { endtime: now });
-                        finish_list.push(TaskListInfo {
-                            upid, upid_str, state: Some(status)
-                        });
-                    },
-                    Some(status) => {
-                        finish_list.push(TaskListInfo {
-                            upid, upid_str, state: Some(status)
-                        })
-                    }
-                }
-            }
-        }
-    }
+    let mut finish_list: Vec<TaskListInfo> = read_task_file_from_path(PROXMOX_BACKUP_INDEX_TASK_FN)?;
+    let mut active_list: Vec<TaskListInfo> = read_task_file_from_path(PROXMOX_BACKUP_ACTIVE_TASK_FN)?
+        .into_iter()
+        .filter_map(|info| {
+            if info.state.is_some() {
+                // this can happen when the active file still includes finished tasks
+                finish_list.push(info);
+                return None;
+            }
+
+            if !worker_is_active_local(&info.upid) {
+                println!("Detected stopped UPID {}", &info.upid_str);
+                let now = proxmox::tools::time::epoch_i64();
+                let status = upid_read_status(&info.upid)
+                    .unwrap_or_else(|_| TaskState::Unknown { endtime: now });
+                finish_list.push(TaskListInfo {
+                    upid: info.upid,
+                    upid_str: info.upid_str,
+                    state: Some(status)
+                });
+                return None;
+            }
+
+            Some(info)
+        }).collect();

     if let Some(upid) = new_upid {
         active_list.push(TaskListInfo { upid: upid.clone(), upid_str: upid.to_string(), state: None });
     }

-    // assemble list without duplicates
-    // we include all active tasks,
-    // and fill up to 1000 entries with finished tasks
-
-    let max = 1000;
-
-    let mut task_hash = HashMap::new();
-
-    for info in active_list {
-        task_hash.insert(info.upid_str.clone(), info);
-    }
-
-    for info in finish_list {
-        if task_hash.len() > max { break; }
-        if !task_hash.contains_key(&info.upid_str) {
-            task_hash.insert(info.upid_str.clone(), info);
-        }
-    }
-
-    let mut task_list: Vec<TaskListInfo> = vec![];
-    for (_, info) in task_hash { task_list.push(info); }
-
-    task_list.sort_unstable_by(|b, a| { // lastest on top
+    let active_raw = render_task_list(&active_list);
+
+    replace_file(
+        PROXMOX_BACKUP_ACTIVE_TASK_FN,
+        active_raw.as_bytes(),
+        CreateOptions::new()
+            .owner(backup_user.uid)
+            .group(backup_user.gid),
+    )?;
+
+    finish_list.sort_unstable_by(|a, b| {
         match (&a.state, &b.state) {
             (Some(s1), Some(s2)) => s1.cmp(&s2),
             (Some(_), None) => std::cmp::Ordering::Less,
@@ -413,34 +423,198 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<Vec<TaskListInfo>, E
         }
     });

-    let mut raw = String::new();
-    for info in &task_list {
-        if let Some(status) = &info.state {
-            raw.push_str(&format!("{} {:08X} {}\n", info.upid_str, status.endtime(), status));
-        } else {
-            raw.push_str(&info.upid_str);
-            raw.push('\n');
-        }
-    }
+    let start = if finish_list.len() > MAX_INDEX_TASKS {
+        finish_list.len() - MAX_INDEX_TASKS
+    } else {
+        0
+    };
+
+    let end = (start+MAX_INDEX_TASKS).min(finish_list.len());
+
+    let index_raw = if end > start {
+        render_task_list(&finish_list[start..end])
+    } else {
+        "".to_string()
+    };

     replace_file(
-        PROXMOX_BACKUP_ACTIVE_TASK_FN,
-        raw.as_bytes(),
+        PROXMOX_BACKUP_INDEX_TASK_FN,
+        index_raw.as_bytes(),
         CreateOptions::new()
             .owner(backup_user.uid)
             .group(backup_user.gid),
     )?;

+    if !finish_list.is_empty() && start > 0 {
+        match std::fs::OpenOptions::new().append(true).create(true).open(PROXMOX_BACKUP_ARCHIVE_TASK_FN) {
+            Ok(mut writer) => {
+                for info in &finish_list[0..start] {
+                    writer.write_all(render_task_line(&info).as_bytes())?;
+                }
+            },
+            Err(err) => bail!("could not write task archive - {}", err),
+        }
+
+        nix::unistd::chown(PROXMOX_BACKUP_ARCHIVE_TASK_FN, Some(backup_user.uid), Some(backup_user.gid))?;
+    }
+
     drop(lock);

-    Ok(task_list)
+    Ok(())
 }

-/// Returns a sorted list of known tasks
-///
-/// The list is sorted by `(starttime, endtime)` in ascending order
-pub fn read_task_list() -> Result<Vec<TaskListInfo>, Error> {
-    update_active_workers(None)
+fn render_task_line(info: &TaskListInfo) -> String {
+    let mut raw = String::new();
+    if let Some(status) = &info.state {
+        raw.push_str(&format!("{} {:08X} {}\n", info.upid_str, status.endtime(), status));
+    } else {
+        raw.push_str(&info.upid_str);
+        raw.push('\n');
+    }
+
+    raw
+}
+
+fn render_task_list(list: &[TaskListInfo]) -> String {
+    let mut raw = String::new();
+    for info in list {
+        raw.push_str(&render_task_line(&info));
+    }
+    raw
+}
+
+// note this is not locked, caller has to make sure it is
+// this will skip (and log) lines that are not valid status lines
+fn read_task_file<R: Read>(reader: R) -> Result<Vec<TaskListInfo>, Error>
+{
+    let reader = BufReader::new(reader);
+    let mut list = Vec::new();
+    for line in reader.lines() {
+        let line = line?;
+        match parse_worker_status_line(&line) {
+            Ok((upid_str, upid, state)) => list.push(TaskListInfo {
+                upid_str,
+                upid,
+                state
+            }),
+            Err(err) => {
+                eprintln!("unable to parse worker status '{}' - {}", line, err);
+                continue;
+            }
+        };
+    }
+
+    Ok(list)
+}
+
+// note this is not locked, caller has to make sure it is
+fn read_task_file_from_path<P>(path: P) -> Result<Vec<TaskListInfo>, Error>
+where
+    P: AsRef<std::path::Path> + std::fmt::Debug,
+{
+    let file = match File::open(&path) {
+        Ok(f) => f,
+        Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(Vec::new()),
+        Err(err) => bail!("unable to open task list {:?} - {}", path, err),
+    };
+
+    read_task_file(file)
+}
+
+enum TaskFile {
+    Active,
+    Index,
+    Archive,
+    End,
+}
+
+pub struct TaskListInfoIterator {
+    list: VecDeque<TaskListInfo>,
+    file: TaskFile,
+    archive: Option<LogRotateFiles>,
+    lock: Option<File>,
+}
+
+impl TaskListInfoIterator {
+    pub fn new(active_only: bool) -> Result<Self, Error> {
+        let (read_lock, active_list) = {
+            let lock = lock_task_list_files(false)?;
+            let active_list = read_task_file_from_path(PROXMOX_BACKUP_ACTIVE_TASK_FN)?;
+
+            let needs_update = active_list
+                .iter()
+                .any(|info| info.state.is_some() || !worker_is_active_local(&info.upid));
+
+            if needs_update {
+                drop(lock);
+                update_active_workers(None)?;
+                let lock = lock_task_list_files(false)?;
+                let active_list = read_task_file_from_path(PROXMOX_BACKUP_ACTIVE_TASK_FN)?;
+                (lock, active_list)
+            } else {
+                (lock, active_list)
+            }
+        };
+
+        let archive = if active_only {
+            None
+        } else {
+            let logrotate = LogRotate::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN, true).ok_or_else(|| format_err!("could not get archive file names"))?;
+            Some(logrotate.files())
+        };
+
+        let file = if active_only { TaskFile::End } else { TaskFile::Active };
+        let lock = if active_only { None } else { Some(read_lock) };
+
+        Ok(Self {
+            list: active_list.into(),
+            file,
+            archive,
+            lock,
+        })
+    }
+}
+
+impl Iterator for TaskListInfoIterator {
+    type Item = Result<TaskListInfo, Error>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        loop {
+            if let Some(element) = self.list.pop_back() {
+                return Some(Ok(element));
+            } else {
+                match self.file {
+                    TaskFile::Active => {
+                        let index = match read_task_file_from_path(PROXMOX_BACKUP_INDEX_TASK_FN) {
+                            Ok(index) => index,
+                            Err(err) => return Some(Err(err)),
+                        };
+                        self.list.append(&mut index.into());
+                        self.file = TaskFile::Index;
+                    },
+                    TaskFile::Index | TaskFile::Archive => {
+                        if let Some(mut archive) = self.archive.take() {
+                            if let Some(file) = archive.next() {
+                                let list = match read_task_file(file) {
+                                    Ok(list) => list,
+                                    Err(err) => return Some(Err(err)),
+                                };
+                                self.list.append(&mut list.into());
+                                self.archive = Some(archive);
+                                self.file = TaskFile::Archive;
+                                continue;
+                            }
+                        }
+                        self.file = TaskFile::End;
+                        self.lock.take();
+                        return None;
+                    }
+                    TaskFile::End => return None,
+                }
+            }
+        }
+    }
 }

 /// Launch long running worker tasks.
@@ -677,3 +851,19 @@ impl WorkerTask
         &self.upid
     }
 }
+
+impl crate::task::TaskState for WorkerTask {
+    fn check_abort(&self) -> Result<(), Error> {
+        self.fail_on_abort()
+    }
+
+    fn log(&self, level: log::Level, message: &std::fmt::Arguments) {
+        match level {
+            log::Level::Error => self.warn(&message.to_string()),
+            log::Level::Warn => self.warn(&message.to_string()),
+            log::Level::Info => self.log(&message.to_string()),
+            log::Level::Debug => self.log(&format!("DEBUG: {}", message)),
+            log::Level::Trace => self.log(&format!("TRACE: {}", message)),
+        }
+    }
+}
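A minimal usage sketch (not part of the diff) of the TaskListInfoIterator added above: items are yielded as Result<TaskListInfo, Error>, starting with the active tasks and only falling back to the index and rotated archive files as iteration continues. The helper name list_recent_tasks and the limit handling are assumptions for illustration.

fn list_recent_tasks(limit: usize) -> Result<Vec<TaskListInfo>, Error> {
    let mut result = Vec::new();
    // passing `false` also walks the index and archive files, not only active tasks
    for item in TaskListInfoIterator::new(false)? {
        let info = item?; // each item is a Result<TaskListInfo, Error>
        result.push(info);
        if result.len() >= limit {
            break; // archive files are only opened if iteration gets this far
        }
    }
    Ok(result)
}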
56	src/task.rs	Normal file
@@ -0,0 +1,56 @@
use anyhow::Error;

/// `WorkerTask` methods commonly used from contexts otherwise not related to the API server.
pub trait TaskState {
    /// If the task should be aborted, this should fail with a reasonable error message.
    fn check_abort(&self) -> Result<(), Error>;

    /// Create a log message for this task.
    fn log(&self, level: log::Level, message: &std::fmt::Arguments);
}

/// Convenience implementation:
impl<T: TaskState + ?Sized> TaskState for std::sync::Arc<T> {
    fn check_abort(&self) -> Result<(), Error> {
        <T as TaskState>::check_abort(&*self)
    }

    fn log(&self, level: log::Level, message: &std::fmt::Arguments) {
        <T as TaskState>::log(&*self, level, message)
    }
}

#[macro_export]
macro_rules! task_error {
    ($task:expr, $($fmt:tt)+) => {{
        $crate::task::TaskState::log(&*$task, log::Level::Error, &format_args!($($fmt)+))
    }};
}

#[macro_export]
macro_rules! task_warn {
    ($task:expr, $($fmt:tt)+) => {{
        $crate::task::TaskState::log(&*$task, log::Level::Warn, &format_args!($($fmt)+))
    }};
}

#[macro_export]
macro_rules! task_log {
    ($task:expr, $($fmt:tt)+) => {{
        $crate::task::TaskState::log(&*$task, log::Level::Info, &format_args!($($fmt)+))
    }};
}

#[macro_export]
macro_rules! task_debug {
    ($task:expr, $($fmt:tt)+) => {{
        $crate::task::TaskState::log(&*$task, log::Level::Debug, &format_args!($($fmt)+))
    }};
}

#[macro_export]
macro_rules! task_trace {
    ($task:expr, $($fmt:tt)+) => {{
        $crate::task::TaskState::log(&*$task, log::Level::Trace, &format_args!($($fmt)+))
    }};
}
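A minimal sketch (not part of the new file) of how the TaskState trait and the task_log!/task_warn! macros are meant to be used from code that is generic over the task type; the verify_chunk helper and its arguments are made up for illustration.

fn verify_chunk(task: &dyn crate::task::TaskState, digest: &str) -> Result<(), anyhow::Error> {
    task.check_abort()?; // propagate an abort request as an error
    task_log!(task, "verifying chunk {}", digest);
    if digest.is_empty() {
        task_warn!(task, "empty digest given");
    }
    Ok(())
}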
20	src/tools.rs
@@ -32,6 +32,9 @@ pub mod ticket;
 pub mod statistics;
 pub mod systemd;
 pub mod nom;
+pub mod logrotate;
+pub mod loopdev;
+pub mod fuse_loop;

 mod parallel_handler;
 pub use parallel_handler::*;
@@ -404,7 +407,7 @@ pub fn normalize_uri_path(path: &str) -> Result<(String, Vec<&str>), Error> {
 pub fn command_output(
     output: std::process::Output,
     exit_code_check: Option<fn(i32) -> bool>,
-) -> Result<String, Error> {
+) -> Result<Vec<u8>, Error> {

     if !output.status.success() {
         match output.status.code() {
@@ -425,8 +428,19 @@ pub fn command_output(
         }
     }

-    let output = String::from_utf8(output.stdout)?;
+    Ok(output.stdout)
+}
+
+/// Helper to check result from std::process::Command output, returns String.
+///
+/// The exit_code_check() function should return true if the exit code
+/// is considered successful.
+pub fn command_output_as_string(
+    output: std::process::Output,
+    exit_code_check: Option<fn(i32) -> bool>,
+) -> Result<String, Error> {
+    let output = command_output(output, exit_code_check)?;
+    let output = String::from_utf8(output)?;
     Ok(output)
 }

@@ -438,7 +452,7 @@ pub fn run_command(
     let output = command.output()
         .map_err(|err| format_err!("failed to execute {:?} - {}", command, err))?;

-    let output = crate::tools::command_output(output, exit_code_check)
+    let output = crate::tools::command_output_as_string(output, exit_code_check)
         .map_err(|err| format_err!("command {:?} failed - {}", command, err))?;

     Ok(output)
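A minimal sketch (not part of the diff) of calling the reworked helpers: command_output() now returns the raw stdout bytes, while command_output_as_string() and run_command() keep the String behaviour. The zpool invocation and the exit-code rule are examples only.

fn zpool_list_output() -> Result<String, anyhow::Error> {
    let mut command = std::process::Command::new("zpool");
    command.args(&["list", "-H"]);

    // treat exit code 0 as success; a non-capturing closure coerces to fn(i32) -> bool
    crate::tools::run_command(command, Some(|code| code == 0))
}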
@@ -1,7 +1,7 @@
 use anyhow::{Error};
 use serde_json::Value;

-pub fn strip_server_file_expenstion(name: &str) -> String {
+pub fn strip_server_file_extension(name: &str) -> String {

     if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
         name[..name.len()-5].to_owned()
@@ -12,7 +12,7 @@ pub fn strip_server_file_expenstion(name: &str) -> String {

 pub fn render_backup_file_list(files: &[String]) -> String {
     let mut files: Vec<String> = files.iter()
-        .map(|v| strip_server_file_expenstion(&v))
+        .map(|v| strip_server_file_extension(&v))
         .collect();

     files.sort();
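The effect of the renamed helper on example names (a sketch, not from the diff); names without one of the known server extensions are expected to pass through unchanged via the branch not shown in this hunk.

assert_eq!(strip_server_file_extension("root.pxar.didx"), "root.pxar");
assert_eq!(strip_server_file_extension("index.json.blob"), "index.json");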
|
423
src/tools/fuse_loop.rs
Normal file
423
src/tools/fuse_loop.rs
Normal file
@ -0,0 +1,423 @@
|
|||||||
|
//! Map a raw data reader as a loop device via FUSE
|
||||||
|
|
||||||
|
use anyhow::{Error, format_err, bail};
|
||||||
|
use std::ffi::OsStr;
|
||||||
|
use std::path::{Path, PathBuf};
|
||||||
|
use std::fs::{File, remove_file, read_to_string, OpenOptions};
|
||||||
|
use std::io::SeekFrom;
|
||||||
|
use std::io::prelude::*;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use nix::unistd::Pid;
|
||||||
|
use nix::sys::signal::{self, Signal};
|
||||||
|
|
||||||
|
use tokio::io::{AsyncRead, AsyncSeek, AsyncReadExt, AsyncSeekExt};
|
||||||
|
use futures::stream::{StreamExt, TryStreamExt};
|
||||||
|
use futures::channel::mpsc::{Sender, Receiver};
|
||||||
|
|
||||||
|
use proxmox::const_regex;
|
||||||
|
use proxmox::tools::time;
|
||||||
|
use proxmox_fuse::{*, requests::FuseRequest};
|
||||||
|
use super::loopdev;
|
||||||
|
use super::fs;
|
||||||
|
|
||||||
|
const RUN_DIR: &'static str = "/run/pbs-loopdev";
|
||||||
|
|
||||||
|
const_regex! {
|
||||||
|
pub LOOPDEV_REGEX = r"^loop\d+$";
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Represents an ongoing FUSE-session that has been mapped onto a loop device.
|
||||||
|
/// Create with map_loop, then call 'main' and poll until startup_chan reports
|
||||||
|
/// success. Then, daemonize or otherwise finish setup, and continue polling
|
||||||
|
/// main's future until completion.
|
||||||
|
pub struct FuseLoopSession<R: AsyncRead + AsyncSeek + Unpin> {
|
||||||
|
session: Option<Fuse>,
|
||||||
|
stat: libc::stat,
|
||||||
|
reader: R,
|
||||||
|
fuse_path: String,
|
||||||
|
pid_path: String,
|
||||||
|
pub loopdev_path: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<R: AsyncRead + AsyncSeek + Unpin> FuseLoopSession<R> {
|
||||||
|
|
||||||
|
/// Prepare for mapping the given reader as a block device node at
|
||||||
|
/// /dev/loopN. Creates a temporary file for FUSE and a PID file for unmap.
|
||||||
|
pub async fn map_loop<P: AsRef<str>>(size: u64, mut reader: R, name: P, options: &OsStr)
|
||||||
|
-> Result<Self, Error>
|
||||||
|
{
|
||||||
|
// attempt a single read to check if the reader is configured correctly
|
||||||
|
let _ = reader.read_u8().await?;
|
||||||
|
|
||||||
|
std::fs::create_dir_all(RUN_DIR)?;
|
||||||
|
let mut path = PathBuf::from(RUN_DIR);
|
||||||
|
path.push(name.as_ref());
|
||||||
|
let mut pid_path = path.clone();
|
||||||
|
pid_path.set_extension("pid");
|
||||||
|
|
||||||
|
// cleanup previous instance with same name
|
||||||
|
// if loopdev is actually still mapped, this will do nothing and the
|
||||||
|
// create_new below will fail as intended
|
||||||
|
cleanup_unused_run_files(Some(name.as_ref().to_owned()));
|
||||||
|
|
||||||
|
match OpenOptions::new().write(true).create_new(true).open(&path) {
|
||||||
|
Ok(_) => { /* file created, continue on */ },
|
||||||
|
Err(e) => {
|
||||||
|
if e.kind() == std::io::ErrorKind::AlreadyExists {
|
||||||
|
bail!("the given archive is already mapped, cannot map twice");
|
||||||
|
} else {
|
||||||
|
bail!("error while creating backing file ({:?}) - {}", &path, e);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
let session = Fuse::builder("pbs-block-dev")?
|
||||||
|
.options_os(options)?
|
||||||
|
.enable_read()
|
||||||
|
.build()?
|
||||||
|
.mount(&path)?;
|
||||||
|
|
||||||
|
let loopdev_path = loopdev::get_or_create_free_dev().map_err(|err| {
|
||||||
|
format_err!("loop-control GET_FREE failed - {}", err)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
// write pidfile so unmap can later send us a signal to exit
|
||||||
|
Self::write_pidfile(&pid_path)?;
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
session: Some(session),
|
||||||
|
reader,
|
||||||
|
stat: minimal_stat(size as i64),
|
||||||
|
fuse_path: path.to_string_lossy().into_owned(),
|
||||||
|
pid_path: pid_path.to_string_lossy().into_owned(),
|
||||||
|
loopdev_path,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn write_pidfile(path: &Path) -> Result<(), Error> {
|
||||||
|
let pid = unsafe { libc::getpid() };
|
||||||
|
let mut file = File::create(path)?;
|
||||||
|
write!(file, "{}", pid)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Runs the FUSE request loop and assigns the loop device. Will send a
|
||||||
|
/// message on startup_chan once the loop device is assigned (or assignment
|
||||||
|
/// fails). Send a message on abort_chan to trigger cleanup and exit FUSE.
|
||||||
|
/// An error on loopdev assignment does *not* automatically close the FUSE
|
||||||
|
/// handle or do cleanup, trigger abort_chan manually in case startup fails.
|
||||||
|
pub async fn main(
|
||||||
|
&mut self,
|
||||||
|
mut startup_chan: Sender<Result<(), Error>>,
|
||||||
|
abort_chan: Receiver<()>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
|
if let None = self.session {
|
||||||
|
panic!("internal error: fuse_loop::main called before ::map_loop");
|
||||||
|
}
|
||||||
|
let mut session = self.session.take().unwrap().fuse();
|
||||||
|
let mut abort_chan = abort_chan.fuse();
|
||||||
|
|
||||||
|
let (loopdev_path, fuse_path) = (self.loopdev_path.clone(), self.fuse_path.clone());
|
||||||
|
tokio::task::spawn_blocking(move || {
|
||||||
|
if let Err(err) = loopdev::assign(loopdev_path, fuse_path) {
|
||||||
|
let _ = startup_chan.try_send(Err(format_err!("error while assigning loop device - {}", err)));
|
||||||
|
} else {
|
||||||
|
// device is assigned successfully, which means not only is the
|
||||||
|
// loopdev ready, but FUSE is also okay, since the assignment
|
||||||
|
// would have failed otherwise
|
||||||
|
let _ = startup_chan.try_send(Ok(()));
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let (loopdev_path, fuse_path, pid_path) =
|
||||||
|
(self.loopdev_path.clone(), self.fuse_path.clone(), self.pid_path.clone());
|
||||||
|
let cleanup = |session: futures::stream::Fuse<Fuse>| {
|
||||||
|
// only warn for errors on cleanup, if these fail nothing is lost
|
||||||
|
if let Err(err) = loopdev::unassign(&loopdev_path) {
|
||||||
|
eprintln!(
|
||||||
|
"cleanup: warning: could not unassign file {} from loop device {} - {}",
|
||||||
|
&fuse_path,
|
||||||
|
&loopdev_path,
|
||||||
|
err,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// force close FUSE handle before attempting to remove backing file
|
||||||
|
std::mem::drop(session);
|
||||||
|
|
||||||
|
if let Err(err) = remove_file(&fuse_path) {
|
||||||
|
eprintln!(
|
||||||
|
"cleanup: warning: could not remove temporary file {} - {}",
|
||||||
|
&fuse_path,
|
||||||
|
err,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
if let Err(err) = remove_file(&pid_path) {
|
||||||
|
eprintln!(
|
||||||
|
"cleanup: warning: could not remove PID file {} - {}",
|
||||||
|
&pid_path,
|
||||||
|
err,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
loop {
|
||||||
|
tokio::select!{
|
||||||
|
_ = abort_chan.next() => {
|
||||||
|
// aborted, do cleanup and exit
|
||||||
|
break;
|
||||||
|
},
|
||||||
|
req = session.try_next() => {
|
||||||
|
let res = match req? {
|
||||||
|
Some(Request::Lookup(req)) => {
|
||||||
|
let stat = self.stat;
|
||||||
|
let entry = EntryParam::simple(stat.st_ino, stat);
|
||||||
|
req.reply(&entry)
|
||||||
|
},
|
||||||
|
Some(Request::Getattr(req)) => {
|
||||||
|
req.reply(&self.stat, std::f64::MAX)
|
||||||
|
},
|
||||||
|
Some(Request::Read(req)) => {
|
||||||
|
match self.reader.seek(SeekFrom::Start(req.offset)).await {
|
||||||
|
Ok(_) => {
|
||||||
|
let mut buf = vec![0u8; req.size];
|
||||||
|
match self.reader.read_exact(&mut buf).await {
|
||||||
|
Ok(_) => {
|
||||||
|
req.reply(&buf)
|
||||||
|
},
|
||||||
|
Err(e) => {
|
||||||
|
req.io_fail(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Err(e) => {
|
||||||
|
req.io_fail(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Some(_) => {
|
||||||
|
// only FUSE requests necessary for loop-mapping are implemented
|
||||||
|
eprintln!("Unimplemented FUSE request type encountered");
|
||||||
|
Ok(())
|
||||||
|
},
|
||||||
|
None => {
|
||||||
|
// FUSE connection closed
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
if let Err(err) = res {
|
||||||
|
// error during FUSE reply, cleanup and exit
|
||||||
|
cleanup(session);
|
||||||
|
bail!(err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// non-error FUSE exit
|
||||||
|
cleanup(session);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Clean up leftover files as well as FUSE instances without a loop device
|
||||||
|
/// connected. Best effort, never returns an error.
|
||||||
|
/// If filter_name is Some("..."), only this name will be cleaned up.
|
||||||
|
pub fn cleanup_unused_run_files(filter_name: Option<String>) {
|
||||||
|
if let Ok(maps) = find_all_mappings() {
|
||||||
|
for (name, loopdev) in maps {
|
||||||
|
if loopdev.is_none() &&
|
||||||
|
(filter_name.is_none() || &name == filter_name.as_ref().unwrap())
|
||||||
|
{
|
||||||
|
let mut path = PathBuf::from(RUN_DIR);
|
||||||
|
path.push(&name);
|
||||||
|
|
||||||
|
// clean leftover FUSE instances (e.g. user called 'losetup -d' or similar)
|
||||||
|
// does nothing if files are already stagnant (e.g. instance crashed etc...)
|
||||||
|
if let Ok(_) = unmap_from_backing(&path, None) {
|
||||||
|
// we have reaped some leftover instance, tell the user
|
||||||
|
eprintln!(
|
||||||
|
"Cleaned up dangling mapping '{}': no loop device assigned",
|
||||||
|
&name
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// remove remnant files
|
||||||
|
// these we're not doing anything, so no need to inform the user
|
||||||
|
let _ = remove_file(&path);
|
||||||
|
path.set_extension("pid");
|
||||||
|
let _ = remove_file(&path);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_backing_file(loopdev: &str) -> Result<String, Error> {
|
||||||
|
let num = loopdev.split_at(9).1.parse::<u8>().map_err(|err|
|
||||||
|
format_err!("malformed loopdev path, does not end with valid number - {}", err))?;
|
||||||
|
|
||||||
|
let block_path = PathBuf::from(format!("/sys/devices/virtual/block/loop{}/loop/backing_file", num));
|
||||||
|
let backing_file = read_to_string(block_path).map_err(|err| {
|
||||||
|
if err.kind() == std::io::ErrorKind::NotFound {
|
||||||
|
format_err!("nothing mapped to {}", loopdev)
|
||||||
|
} else {
|
||||||
|
format_err!("error reading backing file - {}", err)
|
||||||
|
}
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let backing_file = backing_file.trim();
|
||||||
|
|
||||||
|
if !backing_file.starts_with(RUN_DIR) {
|
||||||
|
bail!(
|
||||||
|
"loopdev {} is in use, but not by proxmox-backup-client (mapped to '{}')",
|
||||||
|
loopdev,
|
||||||
|
backing_file,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(backing_file.to_owned())
|
||||||
|
}
|
||||||
|
|
||||||
|
// call in broken state: we found the mapping, but the client is already dead,
|
||||||
|
// only thing to do is clean up what we can
|
||||||
|
fn emerg_cleanup (loopdev: Option<&str>, mut backing_file: PathBuf) {
|
||||||
|
eprintln!(
|
||||||
|
"warning: found mapping with dead process ({:?}), attempting cleanup",
|
||||||
|
&backing_file
|
||||||
|
);
|
||||||
|
|
||||||
|
if let Some(loopdev) = loopdev {
|
||||||
|
let _ = loopdev::unassign(loopdev);
|
||||||
|
}
|
||||||
|
|
||||||
|
// killing the backing process does not cancel the FUSE mount automatically
|
||||||
|
let mut command = std::process::Command::new("fusermount");
|
||||||
|
command.arg("-u");
|
||||||
|
command.arg(&backing_file);
|
||||||
|
let _ = crate::tools::run_command(command, None);
|
||||||
|
|
||||||
|
let _ = remove_file(&backing_file);
|
||||||
|
backing_file.set_extension("pid");
|
||||||
|
let _ = remove_file(&backing_file);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn unmap_from_backing(backing_file: &Path, loopdev: Option<&str>) -> Result<(), Error> {
|
||||||
|
let mut pid_path = PathBuf::from(backing_file);
|
||||||
|
pid_path.set_extension("pid");
|
||||||
|
|
||||||
|
let pid_str = read_to_string(&pid_path).map_err(|err| {
|
||||||
|
if err.kind() == std::io::ErrorKind::NotFound {
|
||||||
|
emerg_cleanup(loopdev, backing_file.to_owned());
|
||||||
|
}
|
||||||
|
format_err!("error reading pidfile {:?}: {}", &pid_path, err)
|
||||||
|
})?;
|
||||||
|
let pid = pid_str.parse::<i32>().map_err(|err|
|
||||||
|
format_err!("malformed PID ({}) in pidfile - {}", pid_str, err))?;
|
||||||
|
|
||||||
|
let pid = Pid::from_raw(pid);
|
||||||
|
|
||||||
|
// send SIGINT to trigger cleanup and exit in target process
|
||||||
|
match signal::kill(pid, Signal::SIGINT) {
|
||||||
|
Ok(()) => {},
|
||||||
|
Err(nix::Error::Sys(nix::errno::Errno::ESRCH)) => {
|
||||||
|
emerg_cleanup(loopdev, backing_file.to_owned());
|
||||||
|
return Ok(());
|
||||||
|
},
|
||||||
|
Err(e) => return Err(e.into()),
|
||||||
|
}
|
||||||
|
|
||||||
|
// block until unmap is complete or timeout
|
||||||
|
let start = time::epoch_i64();
|
||||||
|
loop {
|
||||||
|
match signal::kill(pid, None) {
|
||||||
|
Ok(_) => {
|
||||||
|
// 10 second timeout, then assume failure
|
||||||
|
if (time::epoch_i64() - start) > 10 {
|
||||||
|
return Err(format_err!("timed out waiting for PID '{}' to exit", &pid));
|
||||||
|
}
|
||||||
|
std::thread::sleep(std::time::Duration::from_millis(100));
|
||||||
|
},
|
||||||
|
Err(nix::Error::Sys(nix::errno::Errno::ESRCH)) => {
|
||||||
|
break;
|
||||||
|
},
|
||||||
|
Err(e) => return Err(e.into()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns an Iterator over a set of currently active mappings, i.e.
|
||||||
|
/// FuseLoopSession instances. Returns ("backing-file-name", Some("/dev/loopX"))
|
||||||
|
/// where .1 is None when a user has manually called 'losetup -d' or similar but
|
||||||
|
/// the FUSE instance is still running.
|
||||||
|
pub fn find_all_mappings() -> Result<impl Iterator<Item = (String, Option<String>)>, Error> {
|
||||||
|
// get map of all /dev/loop mappings belonging to us
|
||||||
|
let mut loopmap = HashMap::new();
|
||||||
|
for ent in fs::scan_subdir(libc::AT_FDCWD, Path::new("/dev/"), &LOOPDEV_REGEX)? {
|
||||||
|
match ent {
|
||||||
|
Ok(ent) => {
|
||||||
|
let loopdev = format!("/dev/{}", ent.file_name().to_string_lossy());
|
||||||
|
match get_backing_file(&loopdev) {
|
||||||
|
Ok(file) => {
|
||||||
|
// insert filename only, strip RUN_DIR/
|
||||||
|
loopmap.insert(file[RUN_DIR.len()+1..].to_owned(), loopdev);
|
||||||
|
},
|
||||||
|
Err(_) => {},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Err(_) => {},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(fs::read_subdir(libc::AT_FDCWD, Path::new(RUN_DIR))?
|
||||||
|
.filter_map(move |ent| {
|
||||||
|
match ent {
|
||||||
|
Ok(ent) => {
|
||||||
|
let file = ent.file_name().to_string_lossy();
|
||||||
|
if file == "." || file == ".." || file.ends_with(".pid") {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
let loopdev = loopmap.get(file.as_ref()).map(String::to_owned);
|
||||||
|
Some((file.into_owned(), loopdev))
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Err(_) => None,
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Try and unmap a running proxmox-backup-client instance from the given
|
||||||
|
/// /dev/loopN device
|
||||||
|
pub fn unmap_loopdev<S: AsRef<str>>(loopdev: S) -> Result<(), Error> {
|
||||||
|
let loopdev = loopdev.as_ref();
|
||||||
|
if loopdev.len() < 10 || !loopdev.starts_with("/dev/loop") {
|
||||||
|
bail!("malformed loopdev path, must be in format '/dev/loopX'");
|
||||||
|
}
|
||||||
|
|
||||||
|
let backing_file = get_backing_file(loopdev)?;
|
||||||
|
unmap_from_backing(Path::new(&backing_file), Some(loopdev))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Try and unmap a running proxmox-backup-client instance from the given name
|
||||||
|
pub fn unmap_name<S: AsRef<str>>(name: S) -> Result<(), Error> {
|
||||||
|
for (mapping, loopdev) in find_all_mappings()? {
|
||||||
|
if mapping.ends_with(name.as_ref()) {
|
||||||
|
let mut path = PathBuf::from(RUN_DIR);
|
||||||
|
path.push(&mapping);
|
||||||
|
return unmap_from_backing(&path, loopdev.as_deref());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(format_err!("no mapping for name '{}' found", name.as_ref()))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn minimal_stat(size: i64) -> libc::stat {
|
||||||
|
let mut stat: libc::stat = unsafe { std::mem::zeroed() };
|
||||||
|
stat.st_mode = libc::S_IFREG;
|
||||||
|
stat.st_ino = 1;
|
||||||
|
stat.st_nlink = 1;
|
||||||
|
stat.st_size = size;
|
||||||
|
stat
|
||||||
|
}
|
184
src/tools/logrotate.rs
Normal file
184
src/tools/logrotate.rs
Normal file
@ -0,0 +1,184 @@
|
|||||||
|
use std::path::{Path, PathBuf};
|
||||||
|
use std::fs::{File, rename};
|
||||||
|
use std::os::unix::io::FromRawFd;
|
||||||
|
use std::io::Read;
|
||||||
|
|
||||||
|
use anyhow::{bail, Error};
|
||||||
|
use nix::unistd;
|
||||||
|
|
||||||
|
use proxmox::tools::fs::{CreateOptions, make_tmp_file, replace_file};
|
||||||
|
|
||||||
|
/// Used for rotating log files and iterating over them
|
||||||
|
pub struct LogRotate {
|
||||||
|
base_path: PathBuf,
|
||||||
|
compress: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl LogRotate {
|
||||||
|
/// Creates a new instance if the path given is a valid file name
|
||||||
|
/// (iow. does not end with ..)
|
||||||
|
/// 'compress' decides if compresses files will be created on
|
||||||
|
/// rotation, and if it will search '.zst' files when iterating
|
||||||
|
pub fn new<P: AsRef<Path>>(path: P, compress: bool) -> Option<Self> {
|
||||||
|
if path.as_ref().file_name().is_some() {
|
||||||
|
Some(Self {
|
||||||
|
base_path: path.as_ref().to_path_buf(),
|
||||||
|
compress,
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns an iterator over the logrotated file names that exist
|
||||||
|
pub fn file_names(&self) -> LogRotateFileNames {
|
||||||
|
LogRotateFileNames {
|
||||||
|
base_path: self.base_path.clone(),
|
||||||
|
count: 0,
|
||||||
|
compress: self.compress
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns an iterator over the logrotated file handles
|
||||||
|
pub fn files(&self) -> LogRotateFiles {
|
||||||
|
LogRotateFiles {
|
||||||
|
file_names: self.file_names(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Rotates the files up to 'max_files'
|
||||||
|
/// if the 'compress' option was given it will compress the newest file
|
||||||
|
///
|
||||||
|
/// e.g. rotates
|
||||||
|
/// foo.2.zst => foo.3.zst
|
||||||
|
/// foo.1.zst => foo.2.zst
|
||||||
|
/// foo => foo.1.zst
|
||||||
|
/// => foo
|
||||||
|
pub fn rotate(&mut self, options: CreateOptions, max_files: Option<usize>) -> Result<(), Error> {
|
||||||
|
let mut filenames: Vec<PathBuf> = self.file_names().collect();
|
||||||
|
if filenames.is_empty() {
|
||||||
|
return Ok(()); // no file means nothing to rotate
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut next_filename = self.base_path.clone().canonicalize()?.into_os_string();
|
||||||
|
|
||||||
|
if self.compress {
|
||||||
|
next_filename.push(format!(".{}.zst", filenames.len()));
|
||||||
|
} else {
|
||||||
|
next_filename.push(format!(".{}", filenames.len()));
|
||||||
|
}
|
||||||
|
|
||||||
|
filenames.push(PathBuf::from(next_filename));
|
||||||
|
let count = filenames.len();
|
||||||
|
|
||||||
|
// rotate all but the first, that we maybe have to compress
|
||||||
|
for i in (1..count-1).rev() {
|
||||||
|
rename(&filenames[i], &filenames[i+1])?;
|
||||||
|
}
|
||||||
|
|
||||||
|
if self.compress {
|
||||||
|
let mut source = File::open(&filenames[0])?;
|
||||||
|
let (fd, tmp_path) = make_tmp_file(&filenames[1], options.clone())?;
|
||||||
|
let target = unsafe { File::from_raw_fd(fd) };
|
||||||
|
let mut encoder = match zstd::stream::write::Encoder::new(target, 0) {
|
||||||
|
Ok(encoder) => encoder,
|
||||||
|
Err(err) => {
|
||||||
|
let _ = unistd::unlink(&tmp_path);
|
||||||
|
bail!("creating zstd encoder failed - {}", err);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Err(err) = std::io::copy(&mut source, &mut encoder) {
|
||||||
|
let _ = unistd::unlink(&tmp_path);
|
||||||
|
bail!("zstd encoding failed for file {:?} - {}", &filenames[1], err);
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Err(err) = encoder.finish() {
|
||||||
|
let _ = unistd::unlink(&tmp_path);
|
||||||
|
bail!("zstd finish failed for file {:?} - {}", &filenames[1], err);
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Err(err) = rename(&tmp_path, &filenames[1]) {
|
||||||
|
let _ = unistd::unlink(&tmp_path);
|
||||||
|
bail!("rename failed for file {:?} - {}", &filenames[1], err);
|
||||||
|
}
|
||||||
|
|
||||||
|
unistd::unlink(&filenames[0])?;
|
||||||
|
} else {
|
||||||
|
rename(&filenames[0], &filenames[1])?;
|
||||||
|
}
|
||||||
|
|
||||||
|
// create empty original file
|
||||||
|
replace_file(&filenames[0], b"", options)?;
|
||||||
|
|
||||||
|
if let Some(max_files) = max_files {
|
||||||
|
// delete all files > max_files
|
||||||
|
for file in filenames.iter().skip(max_files) {
|
||||||
|
if let Err(err) = unistd::unlink(file) {
|
||||||
|
eprintln!("could not remove {:?}: {}", &file, err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Iterator over logrotated file names
|
||||||
|
pub struct LogRotateFileNames {
|
||||||
|
base_path: PathBuf,
|
||||||
|
count: usize,
|
||||||
|
compress: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Iterator for LogRotateFileNames {
|
||||||
|
type Item = PathBuf;
|
||||||
|
|
||||||
|
fn next(&mut self) -> Option<Self::Item> {
|
||||||
|
if self.count > 0 {
|
||||||
|
let mut path: std::ffi::OsString = self.base_path.clone().into();
|
||||||
|
|
||||||
|
path.push(format!(".{}", self.count));
|
||||||
|
self.count += 1;
|
||||||
|
|
||||||
|
if Path::new(&path).is_file() {
|
||||||
|
Some(path.into())
|
||||||
|
} else if self.compress {
|
||||||
|
path.push(".zst");
|
||||||
|
if Path::new(&path).is_file() {
|
||||||
|
Some(path.into())
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
} else if self.base_path.is_file() {
|
||||||
|
self.count += 1;
|
||||||
|
Some(self.base_path.to_path_buf())
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Iterator over logrotated files by returning a boxed reader
|
||||||
|
pub struct LogRotateFiles {
|
||||||
|
file_names: LogRotateFileNames,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Iterator for LogRotateFiles {
|
||||||
|
type Item = Box<dyn Read + Send>;
|
||||||
|
|
||||||
|
fn next(&mut self) -> Option<Self::Item> {
|
||||||
|
let filename = self.file_names.next()?;
|
||||||
|
let file = File::open(&filename).ok()?;
|
||||||
|
|
||||||
|
if filename.extension().unwrap_or(std::ffi::OsStr::new("")) == "zst" {
|
||||||
|
let encoder = zstd::stream::read::Decoder::new(file).ok()?;
|
||||||
|
return Some(Box::new(encoder));
|
||||||
|
}
|
||||||
|
|
||||||
|
Some(Box::new(file))
|
||||||
|
}
|
||||||
|
}
|
95
src/tools/loopdev.rs
Normal file
95
src/tools/loopdev.rs
Normal file
@ -0,0 +1,95 @@
|
|||||||
|
//! Helpers to work with /dev/loop* devices
|
||||||
|
|
||||||
|
use anyhow::Error;
|
||||||
|
use std::fs::{File, OpenOptions};
|
||||||
|
use std::path::Path;
|
||||||
|
use std::os::unix::io::{RawFd, AsRawFd};
|
||||||
|
|
||||||
|
const LOOP_CONTROL: &str = "/dev/loop-control";
|
||||||
|
const LOOP_NAME: &str = "/dev/loop";
|
||||||
|
|
||||||
|
/// Implements a subset of loop device ioctls necessary to assign and release
|
||||||
|
/// a single file from a free loopdev.
|
||||||
|
mod loop_ioctl {
|
||||||
|
use nix::{ioctl_none, ioctl_write_int_bad, ioctl_write_ptr_bad};
|
||||||
|
|
||||||
|
const LOOP_IOCTL: u16 = 0x4C; // 'L'
|
||||||
|
const LOOP_SET_FD: u16 = 0x00;
|
||||||
|
const LOOP_CLR_FD: u16 = 0x01;
|
||||||
|
const LOOP_SET_STATUS64: u16 = 0x04;
|
||||||
|
|
||||||
|
const LOOP_CTRL_GET_FREE: u16 = 0x82;
|
||||||
|
|
||||||
|
ioctl_write_int_bad!(ioctl_set_fd, (LOOP_IOCTL << 8) | LOOP_SET_FD);
|
||||||
|
ioctl_none!(ioctl_clr_fd, LOOP_IOCTL, LOOP_CLR_FD);
|
||||||
|
ioctl_none!(ioctl_ctrl_get_free, LOOP_IOCTL, LOOP_CTRL_GET_FREE);
|
||||||
|
ioctl_write_ptr_bad!(ioctl_set_status64, (LOOP_IOCTL << 8) | LOOP_SET_STATUS64, LoopInfo64);
|
||||||
|
|
||||||
|
pub const LO_FLAGS_READ_ONLY: u32 = 1;
|
||||||
|
pub const LO_FLAGS_PARTSCAN: u32 = 8;
|
||||||
|
|
||||||
|
const LO_NAME_SIZE: usize = 64;
|
||||||
|
const LO_KEY_SIZE: usize = 32;
|
||||||
|
|
||||||
|
#[repr(C)]
|
||||||
|
pub struct LoopInfo64 {
|
||||||
|
pub lo_device: u64,
|
||||||
|
pub lo_inode: u64,
|
||||||
|
pub lo_rdevice: u64,
|
||||||
|
pub lo_offset: u64,
|
||||||
|
pub lo_sizelimit: u64,
|
||||||
|
pub lo_number: u32,
|
||||||
|
pub lo_encrypt_type: u32,
|
||||||
|
pub lo_encrypt_key_size: u32,
|
||||||
|
pub lo_flags: u32,
|
||||||
|
pub lo_file_name: [u8; LO_NAME_SIZE],
|
||||||
|
pub lo_crypt_name: [u8; LO_NAME_SIZE],
|
||||||
|
pub lo_encrypt_key: [u8; LO_KEY_SIZE],
|
||||||
|
pub lo_init: [u64; 2],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ioctl helpers create public fns, do not export them outside the module
|
||||||
|
// users should use the wrapper functions below
|
||||||
|
use loop_ioctl::*;
|
||||||
|
|
||||||
|
/// Use the GET_FREE ioctl to get or add a free loop device, of which the
|
||||||
|
/// /dev/loopN path will be returned. This is inherently racy because of the
|
||||||
|
/// delay between this and calling assign, but since assigning is atomic it
|
||||||
|
/// does not matter much and will simply cause assign to fail.
|
||||||
|
pub fn get_or_create_free_dev() -> Result<String, Error> {
|
||||||
|
let ctrl_file = File::open(LOOP_CONTROL)?;
|
||||||
|
let free_num = unsafe { ioctl_ctrl_get_free(ctrl_file.as_raw_fd())? };
|
||||||
|
let loop_file_path = format!("{}{}", LOOP_NAME, free_num);
|
||||||
|
Ok(loop_file_path)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn assign_dev(fd: RawFd, backing_fd: RawFd) -> Result<(), Error> {
|
||||||
|
unsafe { ioctl_set_fd(fd, backing_fd)?; }
|
||||||
|
|
||||||
|
// set required read-only flag and partscan for convenience
|
||||||
|
let mut info: LoopInfo64 = unsafe { std::mem::zeroed() };
|
||||||
|
info.lo_flags = LO_FLAGS_READ_ONLY | LO_FLAGS_PARTSCAN;
|
||||||
|
unsafe { ioctl_set_status64(fd, &info)?; }
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Open the next available /dev/loopN file and assign the given path to
|
||||||
|
/// it as it's backing file in read-only mode.
|
||||||
|
pub fn assign<P: AsRef<Path>>(loop_dev: P, backing: P) -> Result<(), Error> {
|
||||||
|
let loop_file = File::open(loop_dev)?;
|
||||||
|
let backing_file = OpenOptions::new()
|
||||||
|
.read(true)
|
||||||
|
.open(backing)?;
|
||||||
|
assign_dev(loop_file.as_raw_fd(), backing_file.as_raw_fd())?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Unassign any file descriptors currently attached to the given
|
||||||
|
/// /dev/loopN device.
|
||||||
|
pub fn unassign<P: AsRef<Path>>(path: P) -> Result<(), Error> {
|
||||||
|
let loop_file = File::open(path)?;
|
||||||
|
unsafe { ioctl_clr_fd(loop_file.as_raw_fd())?; }
|
||||||
|
Ok(())
|
||||||
|
}
|
@@ -1,14 +1,35 @@
-use std::thread::{JoinHandle};
 use std::sync::{Arc, Mutex};
-use crossbeam_channel::{bounded, Sender};
-use anyhow::{format_err, Error};
+use std::thread::JoinHandle;

-/// A handle to send data toö the worker thread (implements clone)
+use anyhow::{bail, format_err, Error};
+use crossbeam_channel::{bounded, Sender};
+
+/// A handle to send data to the worker thread (implements clone)
 pub struct SendHandle<I> {
     input: Sender<I>,
     abort: Arc<Mutex<Option<String>>>,
 }

+/// Returns the first error happened, if any
+pub fn check_abort(abort: &Mutex<Option<String>>) -> Result<(), Error> {
+    let guard = abort.lock().unwrap();
+    if let Some(err_msg) = &*guard {
+        return Err(format_err!("{}", err_msg));
+    }
+    Ok(())
+}
+
+impl<I: Send> SendHandle<I> {
+    /// Send data to the worker threads
+    pub fn send(&self, input: I) -> Result<(), Error> {
+        check_abort(&self.abort)?;
+        match self.input.send(input) {
+            Ok(()) => Ok(()),
+            Err(_) => bail!("send failed - channel closed"),
+        }
+    }
+}
+
 /// A thread pool which run the supplied closure
 ///
 /// The send command sends data to the worker threads. If one handler
@@ -20,44 +41,23 @@ pub struct SendHandle<I> {
 pub struct ParallelHandler<I> {
     handles: Vec<JoinHandle<()>>,
     name: String,
-    input: SendHandle<I>,
+    input: Option<SendHandle<I>>,
 }

-impl <I: Send + Sync +'static> SendHandle<I> {
-
-    /// Returns the first error happened, if any
-    pub fn check_abort(&self) -> Result<(), Error> {
-        let guard = self.abort.lock().unwrap();
-        if let Some(err_msg) = &*guard {
-            return Err(format_err!("{}", err_msg));
-        }
-        Ok(())
-    }
-
-    /// Send data to the worker threads
-    pub fn send(&self, input: I) -> Result<(), Error> {
-        self.check_abort()?;
-        self.input.send(input)?;
-        Ok(())
-    }
-}
-
-impl <I> Clone for SendHandle<I> {
+impl<I> Clone for SendHandle<I> {
     fn clone(&self) -> Self {
-        Self { input: self.input.clone(), abort: self.abort.clone() }
+        Self {
+            input: self.input.clone(),
+            abort: Arc::clone(&self.abort),
+        }
     }
 }

-impl <I: Send + Sync + 'static> ParallelHandler<I> {
+impl<I: Send + 'static> ParallelHandler<I> {

     /// Create a new thread pool, each thread processing incoming data
     /// with 'handler_fn'.
-    pub fn new<F>(
-        name: &str,
-        threads: usize,
-        handler_fn: F,
-    ) -> Self
-        where F: Fn(I) -> Result<(), Error> + Send + Sync + Clone + 'static,
+    pub fn new<F>(name: &str, threads: usize, handler_fn: F) -> Self
+        where F: Fn(I) -> Result<(), Error> + Send + Clone + 'static,
     {
         let mut handles = Vec::new();
         let (input_tx, input_rx) = bounded::<I>(threads);
@@ -66,24 +66,23 @@ impl <I: Send + Sync + 'static> ParallelHandler<I> {

         for i in 0..threads {
             let input_rx = input_rx.clone();
-            let abort = abort.clone();
+            let abort = Arc::clone(&abort);
             let handler_fn = handler_fn.clone();

             handles.push(
                 std::thread::Builder::new()
                     .name(format!("{} ({})", name, i))
-                    .spawn(move || {
-                        loop {
-                            let data = match input_rx.recv() {
-                                Ok(data) => data,
-                                Err(_) => return,
-                            };
-                            match (handler_fn)(data) {
-                                Ok(()) => {},
-                                Err(err) => {
-                                    let mut guard = abort.lock().unwrap();
-                                    if guard.is_none() {
-                                        *guard = Some(err.to_string());
-                                    }
-                                }
-                            }
-                        }
+                    .spawn(move || loop {
+                        let data = match input_rx.recv() {
+                            Ok(data) => data,
+                            Err(_) => return,
+                        };
+                        match (handler_fn)(data) {
+                            Ok(()) => (),
+                            Err(err) => {
+                                let mut guard = abort.lock().unwrap();
+                                if guard.is_none() {
+                                    *guard = Some(err.to_string());
+                                }
+                            }
+                        }
                     })
@@ -94,40 +93,74 @@ impl <I: Send + Sync + 'static> ParallelHandler<I> {
         Self {
             handles,
             name: name.to_string(),
-            input: SendHandle {
+            input: Some(SendHandle {
                 input: input_tx,
                 abort,
-            },
+            }),
         }
     }

     /// Returns a cloneable channel to send data to the worker threads
     pub fn channel(&self) -> SendHandle<I> {
-        self.input.clone()
+        self.input.as_ref().unwrap().clone()
     }

     /// Send data to the worker threads
     pub fn send(&self, input: I) -> Result<(), Error> {
-        self.input.send(input)?;
+        self.input.as_ref().unwrap().send(input)?;
         Ok(())
     }

     /// Wait for worker threads to complete and check for errors
-    pub fn complete(self) -> Result<(), Error> {
-        self.input.check_abort()?;
-        drop(self.input);
-        let mut msg = Vec::new();
-        for (i, handle) in self.handles.into_iter().enumerate() {
-            if let Err(panic) = handle.join() {
-                match panic.downcast::<&str>() {
-                    Ok(panic_msg) => msg.push(format!("thread {} ({}) paniced: {}", self.name, i, panic_msg)),
-                    Err(_) => msg.push(format!("thread {} ({}) paniced", self.name, i)),
-                }
-            }
-        }
-        if msg.is_empty() {
+    pub fn complete(mut self) -> Result<(), Error> {
+        let input = self.input.take().unwrap();
+        let abort = Arc::clone(&input.abort);
+        check_abort(&abort)?;
+        drop(input);
+
+        let msg_list = self.join_threads();
+
+        // an error might be encountered while waiting for the join
+        check_abort(&abort)?;
+
+        if msg_list.is_empty() {
             return Ok(());
         }
-        Err(format_err!("{}", msg.join("\n")))
+        Err(format_err!("{}", msg_list.join("\n")))
+    }
+
+    fn join_threads(&mut self) -> Vec<String> {
+
+        let mut msg_list = Vec::new();
+
+        let mut i = 0;
+        loop {
+            let handle = match self.handles.pop() {
+                Some(handle) => handle,
+                None => break,
+            };
+            if let Err(panic) = handle.join() {
+                match panic.downcast::<&str>() {
+                    Ok(panic_msg) => msg_list.push(
+                        format!("thread {} ({}) paniced: {}", self.name, i, panic_msg)
+                    ),
+                    Err(_) => msg_list.push(
+                        format!("thread {} ({}) paniced", self.name, i)
+                    ),
+                }
+            }
+            i += 1;
+        }
+        msg_list
+    }
+}
+
+// Note: We make sure that all threads will be joined
+impl<I> Drop for ParallelHandler<I> {
+    fn drop(&mut self) {
+        drop(self.input.take());
+        while let Some(handle) = self.handles.pop() {
+            let _ = handle.join();
+        }
     }
 }
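A minimal usage sketch (not part of the diff) of the reworked ParallelHandler: workers run the closure on every received item, send() fails fast once a worker has recorded an error, and complete() joins the threads and reports the first error. The chunk type and the emptiness check are stand-ins for real work.

fn verify_all(chunks: Vec<Vec<u8>>) -> Result<(), anyhow::Error> {
    // 4 worker threads, each running the closure on received items
    let pool = ParallelHandler::new("verify chunks", 4, |chunk: Vec<u8>| {
        if chunk.is_empty() {
            anyhow::bail!("got an empty chunk");
        }
        Ok(())
    });

    for chunk in chunks {
        pool.send(chunk)?; // fails early if a worker already hit an error
    }

    // drops the channel, joins all threads, returns the first recorded error
    pool.complete()
}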
@@ -1,4 +1,3 @@
-/*global Proxmox*/
 Ext.define('PBS.Application', {
     extend: 'Ext.app.Application',

@@ -6,7 +5,7 @@ Ext.define('PBS.Application', {
     appProperty: 'app',

     stores: [
-	'NavigationStore'
+	'NavigationStore',
     ],

     layout: 'fit',
@@ -29,7 +28,7 @@ Ext.define('PBS.Application', {
	PBS.view = view;
	me.view = view;

-	if (me.currentView != undefined) {
+	if (me.currentView !== undefined) {
	    me.currentView.destroy();
	}

@@ -58,7 +57,7 @@ Ext.define('PBS.Application', {
	} else {
	    me.changeView('mainview', true);
	}
-    }
+    },
 });

 Ext.application('PBS.Application');
|
138
www/Dashboard.js
138
www/Dashboard.js
@ -13,7 +13,7 @@ Ext.define('PBS.Dashboard', {
|
|||||||
width: 300,
|
width: 300,
|
||||||
title: gettext('Dashboard Options'),
|
title: gettext('Dashboard Options'),
|
||||||
layout: {
|
layout: {
|
||||||
type: 'auto'
|
type: 'auto',
|
||||||
},
|
},
|
||||||
items: [{
|
items: [{
|
||||||
xtype: 'form',
|
xtype: 'form',
|
||||||
@ -21,39 +21,41 @@ Ext.define('PBS.Dashboard', {
|
|||||||
defaultButton: 'savebutton',
|
defaultButton: 'savebutton',
|
||||||
items: [{
|
items: [{
|
||||||
xtype: 'proxmoxintegerfield',
|
xtype: 'proxmoxintegerfield',
|
||||||
itemId: 'hours',
|
itemId: 'days',
|
||||||
labelWidth: 100,
|
labelWidth: 100,
|
||||||
anchor: '100%',
|
anchor: '100%',
|
||||||
allowBlank: false,
|
allowBlank: false,
|
||||||
minValue: 1,
|
minValue: 1,
|
||||||
maxValue: 24,
|
maxValue: 60,
|
||||||
value: viewModel.get('hours'),
|
value: viewModel.get('days'),
|
||||||
fieldLabel: gettext('Hours to show')
|
fieldLabel: gettext('Days to show'),
|
||||||
}],
|
}],
|
||||||
buttons: [{
|
buttons: [{
|
||||||
text: gettext('Save'),
|
text: gettext('Save'),
|
||||||
reference: 'loginButton',
|
reference: 'savebutton',
|
||||||
formBind: true,
|
formBind: true,
|
||||||
handler: function() {
|
handler: function() {
|
||||||
var win = this.up('window');
|
var win = this.up('window');
|
||||||
var hours = win.down('#hours').getValue();
|
var days = win.down('#days').getValue();
|
||||||
me.setHours(hours, true);
|
me.setDays(days, true);
|
||||||
win.close();
|
win.close();
|
||||||
}
|
},
|
||||||
}]
|
}],
|
||||||
}]
|
}],
|
||||||
}).show();
|
}).show();
|
||||||
},
|
},
|
||||||
|
|
||||||
setHours: function(hours, setState) {
|
setDays: function(days, setState) {
|
||||||
var me = this;
|
var me = this;
|
||||||
var viewModel = me.getViewModel();
|
var viewModel = me.getViewModel();
|
||||||
viewModel.set('hours', hours);
|
viewModel.set('days', days);
|
||||||
viewModel.notify();
|
viewModel.notify();
|
||||||
|
|
||||||
|
viewModel.getStore('tasks').reload();
|
||||||
|
|
||||||
if (setState) {
|
if (setState) {
|
||||||
var sp = Ext.state.Manager.getProvider();
|
var sp = Ext.state.Manager.getProvider();
|
||||||
sp.set('dashboard-hours', hours);
|
sp.set('dashboard-days', days);
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
||||||
@ -119,7 +121,7 @@ Ext.define('PBS.Dashboard', {
|
|||||||
el.select();
|
el.select();
|
||||||
document.execCommand("copy");
|
document.execCommand("copy");
|
||||||
},
|
},
|
||||||
text: gettext('Copy')
|
 text: gettext('Copy'),
 },
 {
 text: gettext('Ok'),
@@ -134,54 +136,57 @@ Ext.define('PBS.Dashboard', {
 updateTasks: function(store, records, success) {
 if (!success) return;
 let me = this;
+let viewModel = me.getViewModel();

 records.sort((a, b) => a.data.duration - b.data.duration);
 let top10 = records.slice(-10);
 me.lookup('longesttasks').updateTasks(top10);

 let data = {
-backup: { error: 0, warning: 0, ok: 0, },
+backup: { error: 0, warning: 0, ok: 0 },
-prune: { error: 0, warning: 0, ok: 0, },
+prune: { error: 0, warning: 0, ok: 0 },
-garbage_collection: { error: 0, warning: 0, ok: 0, },
+garbage_collection: { error: 0, warning: 0, ok: 0 },
-sync: { error: 0, warning: 0, ok: 0, },
+sync: { error: 0, warning: 0, ok: 0 },
+verify: { error: 0, warning: 0, ok: 0 },
 };

 records.forEach(record => {
-let type = record.data.worker_type;
+let task = record.data;
+let type = task.worker_type;
 if (type === 'syncjob') {
 type = 'sync';
 }

-if (data[type] && record.data.status) {
+if (type.startsWith('verify')) {
-let parsed = Proxmox.Utils.parse_task_status(record.data.status);
+type = 'verify';
+}
+
+if (data[type] && task.status) {
+let parsed = Proxmox.Utils.parse_task_status(task.status);
 data[type][parsed]++;
 }
 });

-me.lookup('tasksummary').updateTasks(data);
+me.lookup('tasksummary').updateTasks(data, viewModel.get('sinceEpoch'));
 },

 init: function(view) {
 var me = this;
 var sp = Ext.state.Manager.getProvider();
-var hours = sp.get('dashboard-hours') || 12;
+var days = sp.get('dashboard-days') || 30;
-me.setHours(hours, false);
+me.setDays(days, false);
-}
+},
 },

 viewModel: {
 data: {
-timespan: 300, // in seconds
-hours: 12, // in hours
-error_shown: false,
 fingerprint: "",
-'bytes_in': 0,
+days: 30,
-'bytes_out': 0,
-'avg_ptime': 0.0
 },

 formulas: {
 disableFPButton: (get) => get('fingerprint') === "",
+sinceEpoch: (get) => (Date.now()/1000 - get('days') * 24*3600).toFixed(0),
 },

 stores: {
@@ -194,11 +199,11 @@ Ext.define('PBS.Dashboard', {
 autoDestroy: true,
 proxy: {
 type: 'proxmox',
-url: '/api2/json/nodes/localhost/status'
+url: '/api2/json/nodes/localhost/status',
 },
 listeners: {
-load: 'updateUsageStats'
+load: 'updateUsageStats',
-}
+},
 },
 subscription: {
 storeid: 'dash-subscription',
@@ -209,11 +214,11 @@ Ext.define('PBS.Dashboard', {
 autoDestroy: true,
 proxy: {
 type: 'proxmox',
-url: '/api2/json/nodes/localhost/subscription'
+url: '/api2/json/nodes/localhost/subscription',
 },
 listeners: {
-load: 'updateSubscription'
+load: 'updateSubscription',
-}
+},
 },
 tasks: {
 storeid: 'dash-tasks',
@@ -225,19 +230,22 @@ Ext.define('PBS.Dashboard', {
 model: 'proxmox-tasks',
 proxy: {
 type: 'proxmox',
-url: '/api2/json/status/tasks'
+url: '/api2/json/status/tasks',
+extraParams: {
+since: '{sinceEpoch}',
+},
 },
 listeners: {
-load: 'updateTasks'
+load: 'updateTasks',
-}
+},
 },
-}
+},
 },

-title: gettext('Dashboard') + ' - WIP',
+title: gettext('Dashboard'),

 layout: {
-type: 'column'
+type: 'column',
 },

 bodyPadding: '20 0 0 20',
@@ -245,9 +253,17 @@ Ext.define('PBS.Dashboard', {
 defaults: {
 columnWidth: 0.49,
 xtype: 'panel',
-margin: '0 20 20 0'
+margin: '0 20 20 0',
 },

+tools: [
+{
+type: 'gear',
+tooltip: gettext('Edit dashboard settings'),
+handler: 'openDashboardOptions',
+},
+],
+
 scrollable: true,

 items: [
@@ -268,27 +284,27 @@ Ext.define('PBS.Dashboard', {
 ],
 layout: {
 type: 'hbox',
-align: 'center'
+align: 'center',
 },
 defaults: {
 xtype: 'proxmoxGauge',
 spriteFontSize: '20px',
-flex: 1
+flex: 1,
 },
 items: [
 {
 title: gettext('CPU'),
-reference: 'cpu'
+reference: 'cpu',
 },
 {
 title: gettext('Memory'),
-reference: 'mem'
+reference: 'mem',
 },
 {
 title: gettext('Root Disk'),
-reference: 'root'
+reference: 'root',
-}
+},
-]
+],
 },
 {
 xtype: 'pbsDatastoresStatistics',
@@ -296,6 +312,10 @@ Ext.define('PBS.Dashboard', {
 },
 {
 xtype: 'pbsLongestTasks',
+bind: {
+title: gettext('Longest Tasks') + ' (' +
+Ext.String.format(gettext('{0} days'), '{days}') + ')',
+},
 reference: 'longesttasks',
 height: 250,
 },
@@ -304,6 +324,10 @@ Ext.define('PBS.Dashboard', {
 height: 250,
 },
 {
+bind: {
+title: gettext('Task Summary') + ' (' +
+Ext.String.format(gettext('{0} days'), '{days}') + ')',
+},
 xtype: 'pbsTaskSummary',
 reference: 'tasksummary',
 },
@@ -314,7 +338,7 @@ Ext.define('PBS.Dashboard', {
 reference: 'subscription',
 xtype: 'pbsSubscriptionInfo',
 },
-]
+],
 });

 Ext.define('PBS.dashboard.SubscriptionInfo', {
@@ -322,7 +346,7 @@ Ext.define('PBS.dashboard.SubscriptionInfo', {
 xtype: 'pbsSubscriptionInfo',

 style: {
-cursor: 'pointer'
+cursor: 'pointer',
 },

 layout: {
@@ -382,7 +406,7 @@ Ext.define('PBS.dashboard.SubscriptionInfo', {
 fn: function() {
 var mainview = this.component.up('mainview');
 mainview.getController().redirectTo('pbsSubscription');
-}
+},
-}
+},
-}
+},
 });
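The dashboard changes above swap the fixed "last N hours" window for a persisted day count: a `days` value in the view model feeds a `sinceEpoch` formula, which is bound into the tasks store's `extraParams` and also handed to the task summary. A minimal plain-JavaScript sketch of that derivation and of the task bucketing, with the ExtJS wiring omitted and the sample values illustrative:

```js
// Sketch of the dashboard's "since" window and task bucketing, outside ExtJS.
// The real code keeps `days` in a view model and binds sinceEpoch into the
// tasks store; here both are plain values for illustration.
const days = 30;
const sinceEpoch = Math.trunc(Date.now() / 1000 - days * 24 * 3600);

// Classify finished tasks the same way updateTasks() does.
function summarize(tasks) {
    const data = {
        backup: { error: 0, warning: 0, ok: 0 },
        prune: { error: 0, warning: 0, ok: 0 },
        garbage_collection: { error: 0, warning: 0, ok: 0 },
        sync: { error: 0, warning: 0, ok: 0 },
        verify: { error: 0, warning: 0, ok: 0 },
    };
    for (const task of tasks) {
        let type = task.worker_type;
        if (type === 'syncjob') type = 'sync';
        if (type.startsWith('verify')) type = 'verify';
        if (data[type] && task.status) {
            // crude stand-in for Proxmox.Utils.parse_task_status()
            const parsed = task.status === 'OK' ? 'ok'
                : /^WARNINGS/.test(task.status) ? 'warning' : 'error';
            data[type][parsed]++;
        }
    }
    return data;
}

console.log(sinceEpoch, summarize([{ worker_type: 'backup', status: 'OK' }]));
```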
@@ -6,9 +6,9 @@ Ext.define('pbs-prune-list', {
 {
 name: 'backup-time',
 type: 'date',
-dateFormat: 'timestamp'
+dateFormat: 'timestamp',
 },
-]
+],
 });

 Ext.define('PBS.DataStorePruneInputPanel', {
@@ -52,21 +52,21 @@ Ext.define('PBS.DataStorePruneInputPanel', {
 method: "POST",
 params: params,
 callback: function() {
-return; // for easy breakpoint setting
+// for easy breakpoint setting
 },
-failure: function (response, opts) {
+failure: function(response, opts) {
 Ext.Msg.alert(gettext('Error'), response.htmlStatus);
 },
 success: function(response, options) {
 var data = response.result.data;
 view.prune_store.setData(data);
-}
+},
 });
 },

 control: {
-field: { change: 'reload' }
+field: { change: 'reload' },
-}
+},
 },

 column1: [
@@ -111,16 +111,16 @@ Ext.define('PBS.DataStorePruneInputPanel', {
 allowBlank: true,
 fieldLabel: gettext('keep-yearly'),
 minValue: 1,
-}
+},
 ],


-initComponent : function() {
+initComponent: function() {
 var me = this;

 me.prune_store = Ext.create('Ext.data.Store', {
 model: 'pbs-prune-list',
-sorters: { property: 'backup-time', direction: 'DESC' }
+sorters: { property: 'backup-time', direction: 'DESC' },
 });

 me.column2 = [
@@ -145,14 +145,14 @@ Ext.define('PBS.DataStorePruneInputPanel', {
 },
 {
 text: "keep",
-dataIndex: 'keep'
+dataIndex: 'keep',
-}
+},
-]
+],
-}
+},
 ];

 me.callParent();
-}
+},
 });

 Ext.define('PBS.DataStorePrune', {
@@ -163,7 +163,7 @@ Ext.define('PBS.DataStorePrune', {

 isCreate: true,

-initComponent : function() {
+initComponent: function() {
 var me = this;

 if (!me.datastore) {
@@ -183,10 +183,10 @@ Ext.define('PBS.DataStorePrune', {
 xtype: 'pbsDataStorePruneInputPanel',
 url: '/api2/extjs/admin/datastore/' + me.datastore + "/prune",
 backup_type: me.backup_type,
-backup_id: me.backup_id
+backup_id: me.backup_id,
-}]
+}],
 });

 me.callParent();
-}
+},
 });
@@ -19,10 +19,10 @@ Ext.define('pve-rrd-datastore', {
 return 0;
 }
 return (data.io_ticks*1000.0)/ios;
-}
+},
 },
-{ type: 'date', dateFormat: 'timestamp', name: 'time' }
+{ type: 'date', dateFormat: 'timestamp', name: 'time' },
-]
+],
 });

 Ext.define('PBS.DataStoreStatistic', {
@@ -40,11 +40,11 @@ Ext.define('PBS.DataStoreStatistic', {
 throw "no datastore specified";
 }

-me.tbar = [ '->', { xtype: 'proxmoxRRDTypeSelector' } ];
+me.tbar = ['->', { xtype: 'proxmoxRRDTypeSelector' }];

 var rrdstore = Ext.create('Proxmox.data.RRDStore', {
 rrdurl: "/api2/json/admin/datastore/" + me.datastore + "/rrd",
-model: 'pve-rrd-datastore'
+model: 'pve-rrd-datastore',
 });

 me.items = {
@@ -55,38 +55,38 @@ Ext.define('PBS.DataStoreStatistic', {
 defaults: {
 minHeight: 320,
 padding: 5,
-columnWidth: 1
+columnWidth: 1,
 },
 items: [
 {
 xtype: 'proxmoxRRDChart',
 title: gettext('Storage usage (bytes)'),
-fields: ['total','used'],
+fields: ['total', 'used'],
 fieldTitles: [gettext('Total'), gettext('Storage usage')],
-store: rrdstore
+store: rrdstore,
 },
 {
 xtype: 'proxmoxRRDChart',
 title: gettext('Transfer Rate (bytes/second)'),
-fields: ['read_bytes','write_bytes'],
+fields: ['read_bytes', 'write_bytes'],
 fieldTitles: [gettext('Read'), gettext('Write')],
-store: rrdstore
+store: rrdstore,
 },
 {
 xtype: 'proxmoxRRDChart',
 title: gettext('Input/Output Operations per Second (IOPS)'),
-fields: ['read_ios','write_ios'],
+fields: ['read_ios', 'write_ios'],
 fieldTitles: [gettext('Read'), gettext('Write')],
-store: rrdstore
+store: rrdstore,
 },
 {
 xtype: 'proxmoxRRDChart',
 title: gettext('IO Delay (ms)'),
 fields: ['io_delay'],
 fieldTitles: [gettext('IO Delay')],
-store: rrdstore
+store: rrdstore,
 },
-]
+],
 };

 me.listeners = {
@@ -99,6 +99,6 @@ Ext.define('PBS.DataStoreStatistic', {
 };

 me.callParent();
-}
+},

 });
@@ -7,7 +7,6 @@ Ext.define('PBS.LoginView', {

 submitForm: function() {
 var me = this;
-var view = me.getView();
 var loginForm = me.lookupReference('loginForm');
 var unField = me.lookupReference('usernameField');
 var saveunField = me.lookupReference('saveunField');
@@ -19,7 +18,7 @@ Ext.define('PBS.LoginView', {
 let params = loginForm.getValues();

 params.username = params.username + '@' + params.realm;
-delete(params.realm);
+delete params.realm;

 if (loginForm.isVisible()) {
 loginForm.mask(gettext('Please wait...'), 'x-mask-loading');
@@ -48,9 +47,9 @@ Ext.define('PBS.LoginView', {
 loginForm.unmask();
 Ext.MessageBox.alert(
 gettext('Error'),
-gettext('Login failed. Please try again')
+gettext('Login failed. Please try again'),
 );
-}
+},
 });
 },

@@ -63,7 +62,7 @@ Ext.define('PBS.LoginView', {
 pf.focus(false);
 }
 }
-}
+},
 },
 'field[name=lang]': {
 change: function(f, value) {
@@ -71,10 +70,10 @@ Ext.define('PBS.LoginView', {
 Ext.util.Cookies.set('PBSLangCookie', value, dt);
 this.getView().mask(gettext('Please wait...'), 'x-mask-loading');
 window.location.reload();
-}
+},
 },
 'button[reference=loginButton]': {
-click: 'submitForm'
+click: 'submitForm',
 },
 'window[reference=loginwindow]': {
 show: function() {
@@ -85,21 +84,21 @@ Ext.define('PBS.LoginView', {
 var checked = sp.get(checkboxField.getStateId());
 checkboxField.setValue(checked);

-if(checked === true) {
+if (checked === true) {
 var username = sp.get(unField.getStateId());
 unField.setValue(username);
 var pwField = this.lookupReference('passwordField');
 pwField.focus();
 }
-}
+},
-}
+},
-}
+},
 },

 plugins: 'viewport',

 layout: {
-type: 'border'
+type: 'border',
 },

 items: [
@@ -108,7 +107,7 @@ Ext.define('PBS.LoginView', {
 xtype: 'container',
 layout: {
 type: 'hbox',
-align: 'middle'
+align: 'middle',
 },
 margin: '2 5 2 5',
 height: 38,
@@ -119,12 +118,12 @@ Ext.define('PBS.LoginView', {
 },
 {
 xtype: 'versioninfo',
-makeApiCall: false
+makeApiCall: false,
-}
+},
-]
+],
 },
 {
-region: 'center'
+region: 'center',
 },
 {
 xtype: 'window',
@@ -138,7 +137,7 @@ Ext.define('PBS.LoginView', {
 defaultFocus: 'usernameField',

 layout: {
-type: 'auto'
+type: 'auto',
 },

 title: gettext('Proxmox Backup Server Login'),
@@ -147,7 +146,7 @@ Ext.define('PBS.LoginView', {
 {
 xtype: 'form',
 layout: {
-type: 'form'
+type: 'form',
 },
 defaultButton: 'loginButton',
 url: '/api2/extjs/access/ticket',
@@ -155,7 +154,7 @@ Ext.define('PBS.LoginView', {

 fieldDefaults: {
 labelAlign: 'right',
-allowBlank: false
+allowBlank: false,
 },

 items: [
@@ -165,7 +164,7 @@ Ext.define('PBS.LoginView', {
 name: 'username',
 itemId: 'usernameField',
 reference: 'usernameField',
-stateId: 'login-username'
+stateId: 'login-username',
 },
 {
 xtype: 'textfield',
@@ -177,7 +176,7 @@ Ext.define('PBS.LoginView', {
 },
 {
 xtype: 'pmxRealmComboBox',
-name: 'realm'
+name: 'realm',
 },
 {
 xtype: 'proxmoxLanguageSelector',
@@ -185,8 +184,8 @@ Ext.define('PBS.LoginView', {
 value: Ext.util.Cookies.get('PBSLangCookie') || Proxmox.defaultLang || 'en',
 name: 'lang',
 reference: 'langField',
-submitValue: false
+submitValue: false,
-}
+},
 ],
 buttons: [
 {
@@ -197,16 +196,16 @@ Ext.define('PBS.LoginView', {
 stateId: 'login-saveusername',
 labelWidth: 250,
 labelAlign: 'right',
-submitValue: false
+submitValue: false,
 },
 {
 text: gettext('Login'),
 reference: 'loginButton',
-formBind: true
+formBind: true,
-}
+},
-]
+],
-}
+},
-]
+],
-}
+},
-]
+],
 });
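In the login view above, the realm combo box is only used to build the full userid: the controller appends `@<realm>` to the username and then drops the `realm` key (now with the plain `delete params.realm;` statement instead of the call-style `delete(...)`) before posting the form to `/api2/extjs/access/ticket`. A small sketch of that parameter shaping, with the form values illustrative:

```js
// Sketch of how the login parameters are shaped before submission.
function buildTicketParams(formValues) {
    const params = { ...formValues };            // e.g. { username, password, realm }
    params.username = `${params.username}@${params.realm}`;
    delete params.realm;                         // realm is not sent as its own field
    return params;                               // POSTed to /api2/extjs/access/ticket
}

console.log(buildTicketParams({ username: 'admin', password: 'secret', realm: 'pbs' }));
```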
@@ -10,11 +10,11 @@ Ext.define('PBS.MainView', {
 ':path:subpath': {
 action: 'changePath',
 before: 'beforeChangePath',
-conditions : {
+conditions: {
-':path' : '(?:([%a-zA-Z0-9\\-\\_\\s,\.]+))',
+':path': '(?:([%a-zA-Z0-9\\-\\_\\s,.]+))',
-':subpath' : '(?:(?::)([%a-zA-Z0-9\\-\\_\\s,]+))?'
+':subpath': '(?:(?::)([%a-zA-Z0-9\\-\\_\\s,]+))?',
-}
+},
-}
+},
 },

 beforeChangePath: function(path, subpath, action) {
@@ -79,7 +79,7 @@ Ext.define('PBS.MainView', {
 obj = contentpanel.add({
 xtype: path,
 nodename: 'localhost',
-border: false
+border: false,
 });
 }

@@ -113,7 +113,6 @@ Ext.define('PBS.MainView', {
 if (lastpanel) {
 contentpanel.remove(lastpanel, { destroy: true });
 }
-
 },

 logout: function() {
@@ -126,8 +125,8 @@ Ext.define('PBS.MainView', {

 control: {
 '[reference=logoutButton]': {
-click: 'logout'
+click: 'logout',
-}
+},
 },

 init: function(view) {
@@ -139,7 +138,7 @@ Ext.define('PBS.MainView', {
 // show login on requestexception
 // fixme: what about other errors
 Ext.Ajax.on('requestexception', function(conn, response, options) {
-if (response.status == 401) { // auth failure
+if (response.status === 401 || response.status === '401') { // auth failure
 me.logout();
 }
 });
@@ -155,7 +154,7 @@ Ext.define('PBS.MainView', {
 Ext.Ajax.request({
 params: {
 username: Proxmox.UserName,
-password: ticket
+password: ticket,
 },
 url: '/api2/json/access/ticket',
 method: 'POST',
@@ -165,17 +164,17 @@ Ext.define('PBS.MainView', {
 success: function(response, opts) {
 var obj = Ext.decode(response.responseText);
 PBS.Utils.updateLoginData(obj.data);
-}
+},
 });
 },
-interval: 15*60*1000
+interval: 15*60*1000,
 });


 // select treeitem and load page from url fragment, if set
 let token = Ext.util.History.getToken() || 'pbsDashboard';
 this.redirectTo(token, true);
-}
+},
 },

 plugins: 'viewport',
@@ -188,7 +187,7 @@ Ext.define('PBS.MainView', {
 xtype: 'container',
 layout: {
 type: 'hbox',
-align: 'middle'
+align: 'middle',
 },
 margin: '2 0 2 5',
 height: 38,
@@ -229,7 +228,7 @@ Ext.define('PBS.MainView', {
 style: {
 // proxmox dark grey p light grey as border
 backgroundColor: '#464d4d',
-borderColor: '#ABBABA'
+borderColor: '#ABBABA',
 },
 margin: '0 5 0 0',
 iconCls: 'fa fa-user',
@@ -241,7 +240,7 @@ Ext.define('PBS.MainView', {
 },
 ],
 },
-]
+],
 },
 {
 xtype: 'panel',
@@ -250,7 +249,7 @@ Ext.define('PBS.MainView', {
 region: 'west',
 layout: {
 type: 'vbox',
-align: 'stretch'
+align: 'stretch',
 },
 items: [{
 xtype: 'navigationtree',
@@ -260,20 +259,20 @@ Ext.define('PBS.MainView', {
 // because of a bug where a viewcontroller does not detect
 // the selectionchange event of a treelist
 listeners: {
-selectionchange: 'navigate'
+selectionchange: 'navigate',
-}
+},
 }, {
 xtype: 'box',
 cls: 'x-treelist-nav',
-flex: 1
+flex: 1,
-}]
+}],
 },
 {
 xtype: 'panel',
 layout: { type: 'card' },
 region: 'center',
 border: false,
-reference: 'contentpanel'
+reference: 'contentpanel',
-}
+},
-]
+],
 });
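Two behaviours in the MainView controller above are worth calling out: any request that comes back as 401 (now also matched as the string '401') forces a logout, and the login ticket is renewed every 15 minutes by POSTing the current user and ticket back to `/api2/json/access/ticket`. A plain-JavaScript sketch of that refresh cadence; the real code uses an Ext task and `PBS.Utils.updateLoginData()`, so the `fetch` wiring and helper names here are illustrative only:

```js
// Sketch: periodically renew the auth ticket, roughly what MainView sets up.
const TICKET_REFRESH_MS = 15 * 60 * 1000;        // interval taken from the diff

function scheduleTicketRefresh(getUser, getTicket, onNewTicket) {
    return setInterval(async () => {
        const body = new URLSearchParams({
            username: getUser(),
            password: getTicket(),               // the old ticket doubles as the password
        });
        const resp = await fetch('/api2/json/access/ticket', { method: 'POST', body });
        if (resp.status === 401) {
            // same idea as the requestexception handler: let the caller force a re-login
            return;
        }
        const obj = await resp.json();
        onNewTicket(obj.data);                   // real code calls PBS.Utils.updateLoginData()
    }, TICKET_REFRESH_MS);
}
```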
www/Makefile (15 changed lines)
@@ -59,18 +59,23 @@ OnlineHelpInfo.js:
 $(MAKE) -C ../docs onlinehelpinfo
 mv ../docs/output/scanrefs/OnlineHelpInfo.js .

-js/proxmox-backup-gui.js: js OnlineHelpInfo.js ${JSSRC}
+js/proxmox-backup-gui.js: .lint-incremental js OnlineHelpInfo.js ${JSSRC}
 cat OnlineHelpInfo.js ${JSSRC} >$@.tmp
 mv $@.tmp $@

-.PHONY: lint
+.PHONY: check
-lint: ${JSSRC}
+check:
 eslint ${JSSRC}
+touch ".lint-incremental"
+
+.lint-incremental: ${JSSRC}
+eslint $?
+touch "$@"

 .PHONY: clean
 clean:
 find . -name '*~' -exec rm {} ';'
-rm -rf js
+rm -rf js .lint-incremental

 install: js/proxmox-backup-gui.js css/ext6-pbs.css index.hbs
 install -dm755 $(DESTDIR)$(JSDIR)
@@ -10,7 +10,7 @@ Ext.define('PBS.store.NavigationStore', {
 text: gettext('Dashboard'),
 iconCls: 'fa fa-tachometer',
 path: 'pbsDashboard',
-leaf: true
+leaf: true,
 },
 {
 text: gettext('Configuration'),
@@ -22,13 +22,13 @@ Ext.define('PBS.store.NavigationStore', {
 text: gettext('User Management'),
 iconCls: 'fa fa-user',
 path: 'pbsUserView',
-leaf: true
+leaf: true,
 },
 {
 text: gettext('Permissions'),
 iconCls: 'fa fa-unlock',
 path: 'pbsACLView',
-leaf: true
+leaf: true,
 },
 {
 text: gettext('Remotes'),
@@ -46,9 +46,9 @@ Ext.define('PBS.store.NavigationStore', {
 text: gettext('Subscription'),
 iconCls: 'fa fa-support',
 path: 'pbsSubscription',
-leaf: true
+leaf: true,
-}
+},
-]
+],
 },
 {
 text: gettext('Administration'),
@@ -75,19 +75,19 @@ Ext.define('PBS.store.NavigationStore', {
 path: 'pbsZFSList',
 leaf: true,
 },
-]
+],
-}
+},
-]
+],
 },
 {
 text: gettext('Datastore'),
 iconCls: 'fa fa-archive',
 path: 'pbsDataStoreConfig',
 expanded: true,
-leaf: false
+leaf: false,
 },
-]
+],
-}
+},
 });

 Ext.define('PBS.view.main.NavigationTree', {
@@ -98,13 +98,12 @@ Ext.define('PBS.view.main.NavigationTree', {
 xclass: 'Ext.app.ViewController',

 init: function(view) {
-
 view.rstore = Ext.create('Proxmox.data.UpdateStore', {
 autoStart: true,
 interval: 15 * 1000,
 storeId: 'pbs-datastore-list',
 storeid: 'pbs-datastore-list',
-model: 'pbs-datastore-list'
+model: 'pbs-datastore-list',
 });

 view.rstore.on('load', this.onLoad, this);
@@ -119,7 +118,7 @@ Ext.define('PBS.view.main.NavigationTree', {

 // FIXME: newly added always get appended to the end..
 records.sort((a, b) => {
 if (a.id > b.id) return 1;
 if (a.id < b.id) return -1;
 return 0;
 });
@@ -128,29 +127,28 @@ Ext.define('PBS.view.main.NavigationTree', {
 var length = records.length;
 var lookup_hash = {};
 for (var i = 0; i < length; i++) {
-var name = records[i].id;
+let name = records[i].id;
 lookup_hash[name] = true;
 if (!list.findChild('text', name, false)) {
 list.appendChild({
 text: name,
 path: `DataStore-${name}`,
 iconCls: 'fa fa-database',
-leaf: true
+leaf: true,
 });
 }
 }

 var erase_list = [];
 list.eachChild(function(node) {
-var name = node.data.text;
+let name = node.data.text;
 if (!lookup_hash[name]) {
 erase_list.push(node);
 }
 });

 Ext.Array.forEach(erase_list, function(node) { node.erase(); });
+},
-}
 },

 select: function(path) {
@@ -163,5 +161,5 @@ Ext.define('PBS.view.main.NavigationTree', {
 expanderOnly: true,
 expanderFirst: false,
 store: 'NavigationStore',
-ui: 'nav'
+ui: 'nav',
 });
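The `onLoad` handler above keeps the "Datastore" subtree in sync with the datastore list store: it sorts the records, builds a lookup hash of the ids, appends a child for every datastore that is not yet in the tree, and erases children whose datastore no longer exists. The same reconcile pattern in plain JavaScript, with arrays standing in for the Ext tree nodes:

```js
// Sketch of the add/remove reconciliation done in onLoad(), using plain arrays.
function reconcile(children, records) {
    records.sort((a, b) => (a.id > b.id) - (a.id < b.id));

    const lookup = {};
    for (const rec of records) {
        lookup[rec.id] = true;
        if (!children.some(child => child.text === rec.id)) {
            children.push({ text: rec.id, path: `DataStore-${rec.id}`, leaf: true });
        }
    }
    // drop children whose datastore disappeared
    return children.filter(child => lookup[child.text]);
}

console.log(reconcile([{ text: 'old', leaf: true }], [{ id: 'store1' }, { id: 'store2' }]));
```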
@@ -3,28 +3,32 @@ const proxmoxOnlineHelpInfo = {
 "link": "/docs/index.html",
 "title": "Proxmox Backup Server Documentation Index"
 },
-"datastore-intro": {
+"backup-pruning": {
-"link": "/docs/administration-guide.html#datastore-intro",
+"link": "/docs/backup-client.html#backup-pruning",
-"title": ":term:`DataStore`"
+"title": "Pruning and Removing Backups"
-},
-"user-mgmt": {
-"link": "/docs/administration-guide.html#user-mgmt",
-"title": "User Management"
-},
-"user-acl": {
-"link": "/docs/administration-guide.html#user-acl",
-"title": "Access Control"
-},
-"backup-remote": {
-"link": "/docs/administration-guide.html#backup-remote",
-"title": ":term:`Remote`"
-},
-"syncjobs": {
-"link": "/docs/administration-guide.html#syncjobs",
-"title": "Sync Jobs"
 },
 "chapter-zfs": {
 "link": "/docs/sysadmin.html#chapter-zfs",
 "title": "ZFS on Linux"
+},
+"backup-remote": {
+"link": "/docs/managing-remotes.html#backup-remote",
+"title": ":term:`Remote`"
+},
+"syncjobs": {
+"link": "/docs/managing-remotes.html#syncjobs",
+"title": "Sync Jobs"
+},
+"datastore-intro": {
+"link": "/docs/storage.html#datastore-intro",
+"title": ":term:`DataStore`"
+},
+"user-mgmt": {
+"link": "/docs/user-management.html#user-mgmt",
+"title": "User Management"
+},
+"user-acl": {
+"link": "/docs/user-management.html#user-acl",
+"title": "Access Control"
 }
 };
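The `proxmoxOnlineHelpInfo` change above only moves entries from the old monolithic administration guide to their new per-topic documentation pages; each key still resolves to a link/title pair that the UI can use for its help buttons. A tiny lookup sketch, with the data copied from the diff and the helper name hypothetical:

```js
// Hypothetical helper: resolve an onlineHelp key to its documentation URL.
const proxmoxOnlineHelpInfo = {
    "backup-pruning": { link: "/docs/backup-client.html#backup-pruning", title: "Pruning and Removing Backups" },
    "user-mgmt": { link: "/docs/user-management.html#user-mgmt", title: "User Management" },
};

function helpLink(key, baseUrl = '') {
    const entry = proxmoxOnlineHelpInfo[key];
    return entry ? { href: baseUrl + entry.link, title: entry.title } : undefined;
}

console.log(helpLink('backup-pruning'));
```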
@@ -1,4 +1,3 @@
-/*global Proxmox*/
 Ext.define('PBS.ServerAdministration', {
 extend: 'Ext.tab.Panel',
 alias: 'widget.pbsServerAdministration',
@@ -14,13 +13,13 @@ Ext.define('PBS.ServerAdministration', {
 init: function(view) {
 var upgradeBtn = view.lookupReference('upgradeBtn');
 upgradeBtn.setDisabled(!(Proxmox.UserName && Proxmox.UserName === 'root@pam'));
-}
+},
 },

 items: [
 {
 xtype: 'pbsServerStatus',
-itemId: 'status'
+itemId: 'status',
 },
 {
 xtype: 'proxmoxNodeServiceView',
@@ -32,7 +31,7 @@ Ext.define('PBS.ServerAdministration', {
 'proxmox-backup': true,
 'proxmox-backup-proxy': true,
 },
-nodename: 'localhost'
+nodename: 'localhost',
 },
 {
 xtype: 'proxmoxNodeAPT',
@@ -44,10 +43,10 @@ Ext.define('PBS.ServerAdministration', {
 text: gettext('Upgrade'),
 handler: function() {
 Proxmox.Utils.openXtermJsViewer('upgrade', 0, 'localhost');
-}
+},
 },
 itemId: 'updates',
-nodename: 'localhost'
+nodename: 'localhost',
 },
 {
 xtype: 'proxmoxJournalView',
@@ -60,9 +59,9 @@ Ext.define('PBS.ServerAdministration', {
 itemId: 'tasks',
 title: gettext('Tasks'),
 height: 'auto',
-nodename: 'localhost'
+nodename: 'localhost',
-}
+},
-]
+],
 });
@@ -6,14 +6,14 @@ Ext.define('pve-rrd-node', {
 // percentage
 convert: function(value) {
 return value*100;
-}
+},
 },
 {
 name: 'iowait',
 // percentage
 convert: function(value) {
 return value*100;
-}
+},
 },
 'netin',
 'netout',
@@ -33,15 +33,15 @@ Ext.define('pve-rrd-node', {
 let ios = 0;
 if (data.read_ios !== undefined) { ios += data.read_ios; }
 if (data.write_ios !== undefined) { ios += data.write_ios; }
-if (ios == 0 || data.io_ticks === undefined) {
+if (ios === 0 || data.io_ticks === undefined) {
 return undefined;
 }
 return (data.io_ticks*1000.0)/ios;
-}
+},
 },
 'loadavg',
-{ type: 'date', dateFormat: 'timestamp', name: 'time' }
+{ type: 'date', dateFormat: 'timestamp', name: 'time' },
-]
+],
 });
 Ext.define('PBS.ServerStatus', {
 extend: 'Ext.panel.Panel',
@@ -62,7 +62,7 @@ Ext.define('PBS.ServerStatus', {
 waitMsgTarget: me,
 failure: function(response, opts) {
 Ext.Msg.alert(gettext('Error'), response.htmlStatus);
-}
+},
 });
 };

@@ -73,7 +73,7 @@ Ext.define('PBS.ServerStatus', {
 handler: function() {
 node_command('reboot');
 },
-iconCls: 'fa fa-undo'
+iconCls: 'fa fa-undo',
 });

 var shutdownBtn = Ext.create('Proxmox.button.Button', {
@@ -83,7 +83,7 @@ Ext.define('PBS.ServerStatus', {
 handler: function() {
 node_command('shutdown');
 },
-iconCls: 'fa fa-power-off'
+iconCls: 'fa fa-power-off',
 });

 var consoleBtn = Ext.create('Proxmox.button.Button', {
@@ -91,14 +91,14 @@ Ext.define('PBS.ServerStatus', {
 iconCls: 'fa fa-terminal',
 handler: function() {
 Proxmox.Utils.openXtermJsViewer('shell', 0, Proxmox.NodeName);
-}
+},
 });

-me.tbar = [ consoleBtn, restartBtn, shutdownBtn, '->', { xtype: 'proxmoxRRDTypeSelector' } ];
+me.tbar = [consoleBtn, restartBtn, shutdownBtn, '->', { xtype: 'proxmoxRRDTypeSelector' }];

 var rrdstore = Ext.create('Proxmox.data.RRDStore', {
 rrdurl: "/api2/json/nodes/localhost/rrd",
-model: 'pve-rrd-node'
+model: 'pve-rrd-node',
 });

 me.items = {
@@ -109,72 +109,72 @@ Ext.define('PBS.ServerStatus', {
 defaults: {
 minHeight: 320,
 padding: 5,
-columnWidth: 1
+columnWidth: 1,
 },
 items: [
 {
 xtype: 'proxmoxRRDChart',
 title: gettext('CPU usage'),
-fields: ['cpu','iowait'],
+fields: ['cpu', 'iowait'],
 fieldTitles: [gettext('CPU usage'), gettext('IO wait')],
-store: rrdstore
+store: rrdstore,
 },
 {
 xtype: 'proxmoxRRDChart',
 title: gettext('Server load'),
 fields: ['loadavg'],
 fieldTitles: [gettext('Load average')],
-store: rrdstore
+store: rrdstore,
 },
 {
 xtype: 'proxmoxRRDChart',
 title: gettext('Memory usage'),
-fields: ['memtotal','memused'],
+fields: ['memtotal', 'memused'],
 fieldTitles: [gettext('Total'), gettext('RAM usage')],
-store: rrdstore
+store: rrdstore,
 },
 {
 xtype: 'proxmoxRRDChart',
 title: gettext('Swap usage'),
-fields: ['swaptotal','swapused'],
+fields: ['swaptotal', 'swapused'],
 fieldTitles: [gettext('Total'), gettext('Swap usage')],
-store: rrdstore
+store: rrdstore,
 },
 {
 xtype: 'proxmoxRRDChart',
 title: gettext('Network traffic'),
-fields: ['netin','netout'],
+fields: ['netin', 'netout'],
-store: rrdstore
+store: rrdstore,
 },
 {
 xtype: 'proxmoxRRDChart',
 title: gettext('Root Disk usage'),
-fields: ['total','used'],
+fields: ['total', 'used'],
 fieldTitles: [gettext('Total'), gettext('Disk usage')],
-store: rrdstore
+store: rrdstore,
 },
 {
 xtype: 'proxmoxRRDChart',
 title: gettext('Root Disk Transfer Rate (bytes/second)'),
-fields: ['read_bytes','write_bytes'],
+fields: ['read_bytes', 'write_bytes'],
 fieldTitles: [gettext('Read'), gettext('Write')],
-store: rrdstore
+store: rrdstore,
 },
 {
 xtype: 'proxmoxRRDChart',
 title: gettext('Root Disk Input/Output Operations per Second (IOPS)'),
-fields: ['read_ios','write_ios'],
+fields: ['read_ios', 'write_ios'],
 fieldTitles: [gettext('Read'), gettext('Write')],
-store: rrdstore
+store: rrdstore,
 },
 {
 xtype: 'proxmoxRRDChart',
 title: gettext('Root Disk IO Delay (ms)'),
 fields: ['io_delay'],
 fieldTitles: [gettext('IO Delay')],
-store: rrdstore
+store: rrdstore,
 },
-]
+],
 };

 me.listeners = {
@@ -187,6 +187,6 @@ Ext.define('PBS.ServerStatus', {
 };

 me.callParent();
-}
+},

 });
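The `pve-rrd-node` model above converts the raw cpu and iowait fractions to percentages and derives `io_delay` from the RRD counters: the accumulated `io_ticks` value scaled by 1000 (so the chart reads in milliseconds, per its title) divided by the number of read plus write operations, with `undefined` when there were no operations in the sample. The same calculation as a standalone function:

```js
// Sketch of the io_delay derivation used by the pve-rrd-node model.
function ioDelay(data) {
    let ios = 0;
    if (data.read_ios !== undefined) { ios += data.read_ios; }
    if (data.write_ios !== undefined) { ios += data.write_ios; }
    if (ios === 0 || data.io_ticks === undefined) {
        return undefined;                    // no I/O in this sample
    }
    return (data.io_ticks * 1000.0) / ios;   // average delay per operation
}

console.log(ioDelay({ read_ios: 10, write_ios: 5, io_ticks: 0.03 }));
```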
Some files were not shown because too many files have changed in this diff.