Compare commits
45 Commits
7397f4a390
8317873c06
deef63699e
c6e07769e9
423df9b1f4
c879e5af11
63d9aca96f
c3b1da9e41
46388e6aef
484d439a7c
ab6615134c
b1149ebb36
1bfdae7933
4f09d31085
58d73ddb1d
6b809ff59b
afe08d2755
a7bc5d4eaf
97cd0a2a6d
49a92084a9
9bdeecaee4
843880f008
a6ed5e1273
74f94d0678
946c3e8a81
7b212c1f79
3b2046d263
1ffe030123
5255e641fa
c86b6f40d7
5a718dce17
1b32750644
5aa103c3c3
fd3f690104
24b638bd9f
9624c5eecb
503dd339a8
36ea5df444
dce9dd6f70
88e28e15e4
399e48a1ed
7ae571e7cb
4264c5023b
82b7adf90b
71c4a3138f
Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.8.11"
+version = "0.8.14"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
@@ -39,11 +39,11 @@ pam-sys = "0.5"
 percent-encoding = "2.1"
 pin-utils = "0.1.0"
 pathpatterns = "0.1.2"
-proxmox = { version = "0.3.3", features = [ "sortable-macro", "api-macro", "websocket" ] }
+proxmox = { version = "0.3.4", features = [ "sortable-macro", "api-macro", "websocket" ] }
 #proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
 proxmox-fuse = "0.1.0"
-pxar = { version = "0.3.0", features = [ "tokio-io", "futures-io" ] }
+pxar = { version = "0.6.0", features = [ "tokio-io", "futures-io" ] }
 #pxar = { path = "../pxar", features = [ "tokio-io", "futures-io" ] }
 regex = "1.2"
 rustyline = "6"
debian/changelog (34 lines changed, vendored)

@@ -1,3 +1,37 @@
+rust-proxmox-backup (0.8.14-1) unstable; urgency=medium
+
+  * verify speed up: use separate IO thread, use datastore-wide cache (instead
+    of per group)
+
+  * ui: datastore content: improve encrypted column
+
+  * ui: datastore content: show more granular verify state, especially for
+    backup group rows
+
+  * verify: log progress in percent
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 02 Sep 2020 09:36:47 +0200
+
+rust-proxmox-backup (0.8.13-1) unstable; urgency=medium
+
+  * improve and add to documentation
+
+  * save last verify result in snapshot manifest and show it in the GUI
+
+  * gc: use human readable units for summary in task log
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 27 Aug 2020 16:12:07 +0200
+
+rust-proxmox-backup (0.8.12-1) unstable; urgency=medium
+
+  * verify: speedup - only verify chunks once
+
+  * verify: sort backup groups
+
+  * bump pxar dep to 0.4.0
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 25 Aug 2020 08:55:52 +0200
+
 rust-proxmox-backup (0.8.11-1) unstable; urgency=medium

   * improve sync jobs, allow to stop them and better logging
debian/control (14 lines changed, vendored)

@@ -34,14 +34,14 @@ Build-Depends: debhelper (>= 11),
  librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
  librust-percent-encoding-2+default-dev (>= 2.1-~~),
  librust-pin-utils-0.1+default-dev,
- librust-proxmox-0.3+api-macro-dev (>= 0.3.3-~~),
- librust-proxmox-0.3+default-dev (>= 0.3.3-~~),
- librust-proxmox-0.3+sortable-macro-dev (>= 0.3.3-~~),
- librust-proxmox-0.3+websocket-dev (>= 0.3.3-~~),
+ librust-proxmox-0.3+api-macro-dev (>= 0.3.4-~~),
+ librust-proxmox-0.3+default-dev (>= 0.3.4-~~),
+ librust-proxmox-0.3+sortable-macro-dev (>= 0.3.4-~~),
+ librust-proxmox-0.3+websocket-dev (>= 0.3.4-~~),
 librust-proxmox-fuse-0.1+default-dev,
- librust-pxar-0.3+default-dev,
- librust-pxar-0.3+futures-io-dev,
- librust-pxar-0.3+tokio-io-dev,
+ librust-pxar-0.6+default-dev,
+ librust-pxar-0.6+futures-io-dev,
+ librust-pxar-0.6+tokio-io-dev,
 librust-regex-1+default-dev (>= 1.2-~~),
 librust-rustyline-6+default-dev,
 librust-serde-1+default-dev,
debian/postinst (7 lines changed, vendored)

@@ -15,11 +15,10 @@ case "$1" in
     fi
     deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true

-    if test -n "$2"; then
-      if dpkg --compare-versions "$2" 'le' '0.8.10-1'; then
+    # FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
+    if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
         echo "Fixing up termproxy user id in task log..."
-        flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::root: /:termproxy::root@pam: /' /var/log/proxmox-backup/tasks/active
+        flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active
-      fi
     fi
     ;;

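Aside: the sed change above swaps a hard-coded `root` for a capture group, so any realm-less user in old task-log entries gets `@pam` appended, not just root. A rough, hedged Rust equivalent of that substitution, using the `regex` crate already listed in Cargo.toml (the input line below is invented for illustration, and sed and Rust regex semantics differ in corner cases):

use regex::Regex;

fn main() {
    // capture the user name between "termproxy::" and ": ", then re-insert it with "@pam"
    let re = Regex::new(r":termproxy::([^@]+): ").unwrap();
    let line = "UPID:host:0000:termproxy::root: running";
    let fixed = re.replace_all(line, ":termproxy::$1@pam: ");
    println!("{}", fixed); // UPID:host:0000:termproxy::root@pam: running
}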
@@ -146,6 +146,74 @@ when setting up the backup server.
 filesystem configuration from being supported for a datastore. For example,
 ``ext3`` as a whole or ``ext4`` with the ``dir_nlink`` feature manually disabled.

+
+Disk Management
+~~~~~~~~~~~~~~~
+
+Proxmox Backup Server comes with a set of disk utilities, which are
+accessed using the ``disk`` subcommand. This subcommand allows you to initialize
+disks, create various filesystems, and get information about the disks.
+
+To view the disks connected to the system, use the ``list`` subcommand of
+``disk``:
+
+.. code-block:: console
+
+  # proxmox-backup-manager disk list
+  ┌──────┬────────┬─────┬───────────┬─────────────┬───────────────┬─────────┬────────┐
+  │ name │ used   │ gpt │ disk-type │        size │ model         │ wearout │ status │
+  ╞══════╪════════╪═════╪═══════════╪═════════════╪═══════════════╪═════════╪════════╡
+  │ sda  │ lvm    │   1 │ hdd       │ 34359738368 │ QEMU_HARDDISK │ -       │ passed │
+  ├──────┼────────┼─────┼───────────┼─────────────┼───────────────┼─────────┼────────┤
+  │ sdb  │ unused │   1 │ hdd       │ 68719476736 │ QEMU_HARDDISK │ -       │ passed │
+  ├──────┼────────┼─────┼───────────┼─────────────┼───────────────┼─────────┼────────┤
+  │ sdc  │ unused │   1 │ hdd       │ 68719476736 │ QEMU_HARDDISK │ -       │ passed │
+  └──────┴────────┴─────┴───────────┴─────────────┴───────────────┴─────────┴────────┘
+
+To initialize a disk with a new GPT, use the ``initialize`` subcommand:
+
+.. code-block:: console
+
+  # proxmox-backup-manager disk initialize sdX
+
+You can create an ``ext4`` or ``xfs`` filesystem on a disk, using ``fs
+create``. The following command creates an ``ext4`` filesystem and passes the
+``--add-datastore`` parameter, in order to automatically create a datastore on
+the disk (in this case ``sdd``). This will create a datastore at the location
+``/mnt/datastore/store1``:
+
+.. code-block:: console
+
+  # proxmox-backup-manager disk fs create store1 --disk sdd --filesystem ext4 --add-datastore true
+  create datastore 'store1' on disk sdd
+  Percentage done: 1
+  ...
+  Percentage done: 99
+  TASK OK
+
+You can also create a ``zpool`` with various raid levels. The command below
+creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and mounts it
+on the root directory (default):
+
+.. code-block:: console
+
+  # proxmox-backup-manager disk zpool create zpool1 --devices sdb,sdc --raidlevel mirror
+  create Mirror zpool 'zpool1' on devices 'sdb,sdc'
+  # "zpool" "create" "-o" "ashift=12" "zpool1" "mirror" "sdb" "sdc"
+
+  TASK OK
+
+.. note::
+  You can also pass the ``--add-datastore`` parameter here, to automatically
+  create a datastore from the disk.
+
+You can use ``disk fs list`` and ``disk zpool list`` to keep track of your
+filesystems and zpools respectively.
+
+If a disk supports S.M.A.R.T. capability, and you have this enabled, you can
+display S.M.A.R.T. attributes using the command:
+
+.. code-block:: console
+
+  # proxmox-backup-manager disk smart-attributes sdX
+
 Datastore Configuration
 ~~~~~~~~~~~~~~~~~~~~~~~
@@ -404,6 +472,72 @@ A single user can be assigned multiple permission sets for different data stores
 remote (see `Remote` below) and ``{storename}`` is the name of the data store on
 the remote.

+
+Network Management
+~~~~~~~~~~~~~~~~~~
+
+Proxmox Backup Server provides an interface for network configuration, through the
+``network`` subcommand. This allows you to carry out some basic network
+management tasks such as adding, configuring and removing network interfaces.
+
+To get a list of available interfaces, use the following command:
+
+.. code-block:: console
+
+  # proxmox-backup-manager network list
+  ┌───────┬────────┬───────────┬────────┬─────────┬───────────────────┬──────────────┬──────────────┐
+  │ name  │ type   │ autostart │ method │ method6 │ address           │ gateway      │ ports/slaves │
+  ╞═══════╪════════╪═══════════╪════════╪═════════╪═══════════════════╪══════════════╪══════════════╡
+  │ bond0 │ bond   │         1 │ manual │         │                   │              │ ens18 ens19  │
+  ├───────┼────────┼───────────┼────────┼─────────┼───────────────────┼──────────────┼──────────────┤
+  │ ens18 │ eth    │         1 │ manual │         │                   │              │              │
+  ├───────┼────────┼───────────┼────────┼─────────┼───────────────────┼──────────────┼──────────────┤
+  │ ens19 │ eth    │         1 │ manual │         │                   │              │              │
+  ├───────┼────────┼───────────┼────────┼─────────┼───────────────────┼──────────────┼──────────────┤
+  │ vmbr0 │ bridge │         1 │ static │         │ x.x.x.x/x         │ x.x.x.x      │ bond0        │
+  └───────┴────────┴───────────┴────────┴─────────┴───────────────────┴──────────────┴──────────────┘
+
+To add a new network interface, use the ``create`` subcommand with the relevant
+parameters. The following command shows a template for creating a new bridge:
+
+.. code-block:: console
+
+  # proxmox-backup-manager network create vmbr1 --autostart true --cidr x.x.x.x/x --gateway x.x.x.x --bridge_ports iface_name --type bridge
+
+You can make changes to the configuration of a network interface with the
+``update`` subcommand:
+
+.. code-block:: console
+
+  # proxmox-backup-manager network update vmbr1 --cidr y.y.y.y/y
+
+You can also remove a network interface:
+
+.. code-block:: console
+
+  # proxmox-backup-manager network remove vmbr1
+
+To view the changes made to the network configuration file, before committing
+them, use the command:
+
+.. code-block:: console
+
+  # proxmox-backup-manager network changes
+
+If you would like to cancel all changes at this point, you can do this using:
+
+.. code-block:: console
+
+  # proxmox-backup-manager network revert
+
+If you are happy with the changes and would like to write them into the
+configuration file, the command is:
+
+.. code-block:: console
+
+  # proxmox-backup-manager network reload
+
+You can also configure DNS settings using the ``dns`` subcommand of
+``proxmox-backup-manager``.
+
 :term:`Remote`
 ~~~~~~~~~~~~~~

@@ -461,6 +595,14 @@ provide it with a :term:`schedule` to run regularly. The
   └────────────┴───────┴────────┴──────────────┴───────────┴─────────┘
   # proxmox-backup-manager sync-job remove pbs2-local

+
+Garbage Collection
+~~~~~~~~~~~~~~~~~~
+
+You can monitor and run :ref:`garbage collection <garbage-collection>` on the
+Proxmox Backup Server using the ``garbage-collection`` subcommand of
+``proxmox-backup-manager``. You can use the ``start`` subcommand to manually start garbage
+collection on an entire data store and the ``status`` subcommand to see
+attributes relating to the :ref:`garbage collection <garbage-collection>`.
+
+
 Backup Client usage
 -------------------
@@ -1143,6 +1285,10 @@ benchmark using the ``benchmark`` subcommand of ``proxmox-backup-client``:
   │ AES256 GCM encryption speed       │ 3974.03 MB/s (104%) │
   └───────────────────────────────────┴─────────────────────┘

+.. note:: The percentages given in the output table correspond to a
+  comparison against a Ryzen 7 2700X. The TLS test connects to the
+  local host, so there is no network involved.
+
 You can also pass the ``--output-format`` parameter to output stats in ``json``,
 rather than the default table format.

@@ -13,7 +13,8 @@
 .. _Proxmox: https://www.proxmox.com
 .. _Proxmox Community Forum: https://forum.proxmox.com
 .. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
-.. _Proxmox Backup: https://pbs.proxmox.com/wiki/index.php/Main_Page // FIXME
+// FIXME
+.. _Proxmox Backup: https://pbs.proxmox.com/wiki/index.php/Main_Page
 .. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
 .. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
 .. _Rust: https://www.rust-lang.org/
@@ -1,6 +1,7 @@
 use std::collections::{HashSet, HashMap};
 use std::ffi::OsStr;
 use std::os::unix::ffi::OsStrExt;
+use std::sync::{Arc, Mutex};

 use anyhow::{bail, format_err, Error};
 use futures::*;
@@ -361,7 +362,7 @@ pub fn list_snapshots (

         let mut size = None;

-        let (comment, files) = match get_all_snapshot_files(&datastore, &info) {
+        let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
             Ok((manifest, files)) => {
                 size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
                 // extract the first line from notes
@@ -370,11 +371,21 @@ pub fn list_snapshots (
                     .and_then(|notes| notes.lines().next())
                     .map(String::from);

-                (comment, files)
+                let verify = manifest.unprotected["verify_state"].clone();
+                let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
+                    Ok(verify) => verify,
+                    Err(err) => {
+                        eprintln!("error parsing verification state : '{}'", err);
+                        None
+                    }
+                };
+
+                (comment, verify, files)
             },
             Err(err) => {
                 eprintln!("error during snapshot file listing: '{}'", err);
                 (
+                    None,
                     None,
                     info
                         .files
@@ -394,6 +405,7 @@ pub fn list_snapshots (
             backup_id: group.backup_id().to_string(),
             backup_time: info.backup_dir.backup_time().timestamp(),
             comment,
+            verification,
             files,
             size,
             owner: Some(owner),
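Aside: the lenient parsing above means an absent or malformed `verify_state` entry degrades to `None` instead of failing the whole snapshot listing. A minimal standalone sketch of the same pattern, assuming only `serde` (with the derive feature) and `serde_json`; the field types are simplified stand-ins for the real API types:

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug)]
struct VerifyState {
    upid: String, // the real code uses a typed UPID; a String stands in here
    state: String,
}

fn main() {
    let manifest = serde_json::json!({
        "unprotected": { "verify_state": { "upid": "UPID:...", "state": "ok" } }
    });

    // clone the subtree, then try to parse it; a parse error just logs and yields None
    let raw = manifest["unprotected"]["verify_state"].clone();
    let parsed: Option<VerifyState> = match serde_json::from_value(raw) {
        Ok(state) => state,
        Err(err) => {
            eprintln!("error parsing verification state: {}", err);
            None
        }
    };
    println!("{:?}", parsed);
}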
@@ -489,7 +501,7 @@ pub fn verify(
         (None, None, None) => {
             worker_id = store.clone();
         }
-        _ => bail!("parameters do not spefify a backup group or snapshot"),
+        _ => bail!("parameters do not specify a backup group or snapshot"),
     }

     let userid: Userid = rpcenv.get_user().unwrap().parse()?;
@@ -501,25 +513,34 @@ pub fn verify(
         userid,
         to_stdout,
         move |worker| {
+            let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
+            let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
+
             let failed_dirs = if let Some(backup_dir) = backup_dir {
-                let mut verified_chunks = HashSet::with_capacity(1024*16);
-                let mut corrupt_chunks = HashSet::with_capacity(64);
                 let mut res = Vec::new();
-                if !verify_backup_dir(&datastore, &backup_dir, &mut verified_chunks, &mut corrupt_chunks, &worker)? {
+                if !verify_backup_dir(datastore, &backup_dir, verified_chunks, corrupt_chunks, worker.clone())? {
                     res.push(backup_dir.to_string());
                 }
                 res
             } else if let Some(backup_group) = backup_group {
-                verify_backup_group(&datastore, &backup_group, &worker)?
+                let (_count, failed_dirs) = verify_backup_group(
+                    datastore,
+                    &backup_group,
+                    verified_chunks,
+                    corrupt_chunks,
+                    None,
+                    worker.clone(),
+                )?;
+                failed_dirs
             } else {
-                verify_all_backups(&datastore, &worker)?
+                verify_all_backups(datastore, worker.clone())?
             };
             if failed_dirs.len() > 0 {
                 worker.log("Failed to verify following snapshots:");
                 for dir in failed_dirs {
                     worker.log(format!("\t{}", dir));
                 }
-                bail!("verfication failed - please check the log for details");
+                bail!("verification failed - please check the log for details");
             }
             Ok(())
         },
@@ -1218,7 +1239,7 @@ fn catalog(
 pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
     &ApiHandler::AsyncHttp(&pxar_file_download),
     &ObjectSchema::new(
-        "Download single file from pxar file of a bacup snapshot. Only works if it's not encrypted.",
+        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
         &sorted!([
             ("store", false, &DATASTORE_SCHEMA),
             ("backup-type", false, &BACKUP_TYPE_SCHEMA),
@@ -6,6 +6,7 @@ use proxmox::const_regex;
 use proxmox::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};

 use crate::backup::CryptMode;
+use crate::server::UPID;

 #[macro_use]
 mod macros;
@@ -379,6 +380,25 @@ pub struct GroupListItem {
     pub owner: Option<Userid>,
 }

+#[api(
+    properties: {
+        upid: {
+            schema: UPID_SCHEMA
+        },
+        state: {
+            type: String
+        },
+    },
+)]
+#[derive(Serialize, Deserialize)]
+/// Task properties.
+pub struct SnapshotVerifyState {
+    /// UPID of the verify task
+    pub upid: UPID,
+    /// State of the verification. "failed" or "ok"
+    pub state: String,
+}
+
 #[api(
     properties: {
         "backup-type": {
@@ -390,6 +410,14 @@ pub struct GroupListItem {
         "backup-time": {
             schema: BACKUP_TIME_SCHEMA,
         },
+        comment: {
+            schema: SINGLE_LINE_COMMENT_SCHEMA,
+            optional: true,
+        },
+        verification: {
+            type: SnapshotVerifyState,
+            optional: true,
+        },
         files: {
             items: {
                 schema: BACKUP_ARCHIVE_NAME_SCHEMA
@@ -411,6 +439,9 @@ pub struct SnapshotListItem {
     /// The first line from manifest "notes"
     #[serde(skip_serializing_if="Option::is_none")]
     pub comment: Option<String>,
+    /// The result of the last run verify task
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub verification: Option<SnapshotVerifyState>,
     /// List of contained archive files.
     pub files: Vec<BackupContent>,
     /// Overall snapshot size (sum of all archive sizes).
@@ -9,7 +9,7 @@
 //! with `String`, meaning you can only make references to it.
 //! * [`Realm`]: an owned realm (`String` equivalent).
 //! * [`RealmRef`]: a borrowed realm (`str` equivalent).
-//! * [`Userid`]: an owned user id (`"user@realm"`). Note that this does not have a separte
+//! * [`Userid`]: an owned user id (`"user@realm"`). Note that this does not have a separate
 //!   borrowed type.
 //!
 //! Note that `Username`s are not unique, therefore they do not implement `Eq` and cannot be
@@ -120,6 +120,8 @@ macro_rules! PROXMOX_BACKUP_READER_PROTOCOL_ID_V1 {

 /// Unix system user used by proxmox-backup-proxy
 pub const BACKUP_USER_NAME: &str = "backup";
+/// Unix system group used by proxmox-backup-proxy
+pub const BACKUP_GROUP_NAME: &str = "backup";

 /// Return User info for the 'backup' user (``getpwnam_r(3)``)
 pub fn backup_user() -> Result<nix::unistd::User, Error> {
@@ -129,6 +131,14 @@ pub fn backup_user() -> Result<nix::unistd::User, Error> {
     }
 }

+/// Return Group info for the 'backup' group (``getgrnam(3)``)
+pub fn backup_group() -> Result<nix::unistd::Group, Error> {
+    match nix::unistd::Group::from_name(BACKUP_GROUP_NAME)? {
+        Some(group) => Ok(group),
+        None => bail!("Unable to lookup backup user."),
+    }
+}
+
 mod file_formats;
 pub use file_formats::*;

@@ -45,6 +45,31 @@ pub struct BackupGroup {
     backup_id: String,
 }

+impl std::cmp::Ord for BackupGroup {
+
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        let type_order = self.backup_type.cmp(&other.backup_type);
+        if type_order != std::cmp::Ordering::Equal {
+            return type_order;
+        }
+        // try to compare IDs numerically
+        let id_self = self.backup_id.parse::<u64>();
+        let id_other = other.backup_id.parse::<u64>();
+        match (id_self, id_other) {
+            (Ok(id_self), Ok(id_other)) => id_self.cmp(&id_other),
+            (Ok(_), Err(_)) => std::cmp::Ordering::Less,
+            (Err(_), Ok(_)) => std::cmp::Ordering::Greater,
+            _ => self.backup_id.cmp(&other.backup_id),
+        }
+    }
+}
+
+impl std::cmp::PartialOrd for BackupGroup {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
 impl BackupGroup {

     pub fn new<T: Into<String>, U: Into<String>>(backup_type: T, backup_id: U) -> Self {
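Aside: the numeric-first comparison above keeps IDs like VM numbers in natural order, where a plain lexical sort would put "100" before "9". A self-contained sketch of the same idea, simplified to plain strings instead of the real BackupGroup type:

use std::cmp::Ordering;

// numeric IDs compare as numbers, numeric sorts before non-numeric,
// and everything else falls back to plain string order
fn id_cmp(a: &str, b: &str) -> Ordering {
    match (a.parse::<u64>(), b.parse::<u64>()) {
        (Ok(a), Ok(b)) => a.cmp(&b),
        (Ok(_), Err(_)) => Ordering::Less,
        (Err(_), Ok(_)) => Ordering::Greater,
        _ => a.cmp(b),
    }
}

fn main() {
    let mut ids = vec!["100", "9", "backup-a", "23"];
    ids.sort_by(|a, b| id_cmp(a, b));
    // a lexical sort would have given ["100", "23", "9", "backup-a"]
    assert_eq!(ids, vec!["9", "23", "100", "backup-a"]);
    println!("{:?}", ids);
}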
@@ -104,7 +104,7 @@ impl ChunkStore {
             }
             let percentage = (i*100)/(64*1024);
             if percentage != last_percentage {
-                eprintln!("Percentage done: {}", percentage);
+                eprintln!("{}%", percentage);
                 last_percentage = percentage;
             }
         }
@@ -295,7 +295,7 @@ impl ChunkStore {
         for (entry, percentage) in self.get_chunk_iterator()? {
             if last_percentage != percentage {
                 last_percentage = percentage;
-                worker.log(format!("percentage done: {}, chunk count: {}", percentage, chunk_count));
+                worker.log(format!("percentage done: phase2 {}% (processed {} chunks)", percentage, chunk_count));
             }

             worker.fail_on_abort()?;

@@ -304,7 +304,7 @@ impl DataBlob {

         let digest = match config {
             Some(config) => config.compute_digest(data),
-            None => openssl::sha::sha256(&data),
+            None => openssl::sha::sha256(data),
         };
         if &digest != expected_digest {
             bail!("detected chunk with wrong digest.");
@@ -21,6 +21,7 @@ use super::{DataBlob, ArchiveType, archive_type};
 use crate::config::datastore;
 use crate::server::WorkerTask;
 use crate::tools;
+use crate::tools::format::HumanByte;
 use crate::tools::fs::{lock_dir_noblock, DirLockGuard};
 use crate::api2::types::{GarbageCollectionStatus, Userid};

@@ -299,7 +300,7 @@ impl DataStore {
     /// And set the owner to 'userid'. If the group already exists, it returns the
     /// current owner (instead of setting the owner).
     ///
-    /// This also aquires an exclusive lock on the directory and returns the lock guard.
+    /// This also acquires an exclusive lock on the directory and returns the lock guard.
     pub fn create_locked_backup_group(
         &self,
         backup_group: &BackupGroup,
@@ -429,6 +430,12 @@ impl DataStore {

         let image_list = self.list_images()?;

+        let image_count = image_list.len();
+
+        let mut done = 0;
+
+        let mut last_percentage: usize = 0;
+
         for path in image_list {

             worker.fail_on_abort()?;
@@ -443,6 +450,14 @@ impl DataStore {
                     self.index_mark_used_chunks(index, &path, status, worker)?;
                 }
             }
+            done += 1;
+
+            let percentage = done*100/image_count;
+            if percentage > last_percentage {
+                worker.log(format!("percentage done: phase1 {}% ({} of {} index files)",
+                    percentage, done, image_count));
+                last_percentage = percentage;
+            }
         }

         Ok(())
@@ -462,9 +477,8 @@ impl DataStore {

             let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;

-            let now = unsafe { libc::time(std::ptr::null_mut()) };
-            let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(now);
+            let phase1_start_time = unsafe { libc::time(std::ptr::null_mut()) };
+            let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(phase1_start_time);

             let mut gc_status = GarbageCollectionStatus::default();
             gc_status.upid = Some(worker.to_string());
@@ -474,26 +488,26 @@ impl DataStore {
             self.mark_used_chunks(&mut gc_status, &worker)?;

             worker.log("Start GC phase2 (sweep unused chunks)");
-            self.chunk_store.sweep_unused_chunks(oldest_writer, now, &mut gc_status, &worker)?;
+            self.chunk_store.sweep_unused_chunks(oldest_writer, phase1_start_time, &mut gc_status, &worker)?;

-            worker.log(&format!("Removed bytes: {}", gc_status.removed_bytes));
+            worker.log(&format!("Removed garbage: {}", HumanByte::from(gc_status.removed_bytes)));
             worker.log(&format!("Removed chunks: {}", gc_status.removed_chunks));
             if gc_status.pending_bytes > 0 {
-                worker.log(&format!("Pending removals: {} bytes ({} chunks)", gc_status.pending_bytes, gc_status.pending_chunks));
+                worker.log(&format!("Pending removals: {} (in {} chunks)", HumanByte::from(gc_status.pending_bytes), gc_status.pending_chunks));
             }

-            worker.log(&format!("Original data bytes: {}", gc_status.index_data_bytes));
+            worker.log(&format!("Original data usage: {}", HumanByte::from(gc_status.index_data_bytes)));

             if gc_status.index_data_bytes > 0 {
-                let comp_per = (gc_status.disk_bytes*100)/gc_status.index_data_bytes;
-                worker.log(&format!("Disk bytes: {} ({} %)", gc_status.disk_bytes, comp_per));
+                let comp_per = (gc_status.disk_bytes as f64 * 100.)/gc_status.index_data_bytes as f64;
+                worker.log(&format!("On-Disk usage: {} ({:.2}%)", HumanByte::from(gc_status.disk_bytes), comp_per));
             }

-            worker.log(&format!("Disk chunks: {}", gc_status.disk_chunks));
+            worker.log(&format!("On-Disk chunks: {}", gc_status.disk_chunks));

             if gc_status.disk_chunks > 0 {
                 let avg_chunk = gc_status.disk_bytes/(gc_status.disk_chunks as u64);
-                worker.log(&format!("Average chunk size: {}", avg_chunk));
+                worker.log(&format!("Average chunk size: {}", HumanByte::from(avg_chunk)));
             }

             *self.last_gc_status.lock().unwrap() = gc_status;

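Aside on the `comp_per` change above: with integer arithmetic, any ratio below 1% logs as `0 %`, while the f64 version keeps two decimals. A tiny standalone illustration (the byte counts are invented):

fn main() {
    let disk_bytes: u64 = 123_456;
    let index_data_bytes: u64 = 98_765_432;

    // old: integer division truncates toward zero
    let int_per = (disk_bytes * 100) / index_data_bytes;
    // new: compute in f64 and print with two decimals
    let f64_per = (disk_bytes as f64 * 100.0) / index_data_bytes as f64;

    println!("{} % vs {:.2}%", int_per, f64_per); // prints "0 % vs 0.12%"
}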
@@ -145,7 +145,7 @@ impl BackupManifest {
         Ok(())
     }

-    // Generate cannonical json
+    // Generate canonical json
     fn to_canonical_json(value: &Value) -> Result<Vec<u8>, Error> {
         let mut data = Vec::new();
         Self::write_canonical_json(value, &mut data)?;
@@ -1,16 +1,20 @@
 use std::collections::HashSet;
+use std::sync::{Arc, Mutex};
+use std::sync::atomic::{Ordering, AtomicUsize};
+use std::time::Instant;

-use anyhow::{bail, Error};
+use anyhow::{bail, format_err, Error};

 use crate::server::WorkerTask;
+use crate::api2::types::*;

 use super::{
-    DataStore, BackupGroup, BackupDir, BackupInfo, IndexFile,
+    DataStore, DataBlob, BackupGroup, BackupDir, BackupInfo, IndexFile,
     CryptMode,
     FileInfo, ArchiveType, archive_type,
 };

-fn verify_blob(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
+fn verify_blob(datastore: Arc<DataStore>, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {

     let blob = datastore.load_blob(backup_dir, &info.filename)?;

@@ -35,38 +39,97 @@ fn verify_blob(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo) -
     }
 }

-fn verify_index_chunks(
-    datastore: &DataStore,
-    index: Box<dyn IndexFile>,
-    verified_chunks: &mut HashSet<[u8;32]>,
-    corrupt_chunks: &mut HashSet<[u8; 32]>,
-    crypt_mode: CryptMode,
-    worker: &WorkerTask,
-) -> Result<(), Error> {
-
-    let mut errors = 0;
-
+// We use a separate thread to read/load chunks, so that we can do
+// load and verify in parallel to increase performance.
+fn chunk_reader_thread(
+    datastore: Arc<DataStore>,
+    index: Box<dyn IndexFile + Send>,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    errors: Arc<AtomicUsize>,
+    worker: Arc<WorkerTask>,
+) -> std::sync::mpsc::Receiver<(DataBlob, [u8;32], u64)> {
+
+    let (sender, receiver) = std::sync::mpsc::sync_channel(3); // buffer up to 3 chunks
+
+    std::thread::spawn(move|| {
         for pos in 0..index.index_count() {

-            worker.fail_on_abort()?;
-
             let info = index.chunk_info(pos).unwrap();
             let size = info.range.end - info.range.start;

-            let chunk = match datastore.load_chunk(&info.digest) {
-                Err(err) => {
-                    corrupt_chunks.insert(info.digest);
-                    worker.log(format!("can't verify chunk, load failed - {}", err));
-                    errors += 1;
-                    continue;
-                },
-                Ok(chunk) => chunk,
+            if verified_chunks.lock().unwrap().contains(&info.digest) {
+                continue; // already verified
+            }
+
+            if corrupt_chunks.lock().unwrap().contains(&info.digest) {
+                let digest_str = proxmox::tools::digest_to_hex(&info.digest);
+                worker.log(format!("chunk {} was marked as corrupt", digest_str));
+                errors.fetch_add(1, Ordering::SeqCst);
+                continue;
+            }
+
+            match datastore.load_chunk(&info.digest) {
+                Err(err) => {
+                    corrupt_chunks.lock().unwrap().insert(info.digest);
+                    worker.log(format!("can't verify chunk, load failed - {}", err));
+                    errors.fetch_add(1, Ordering::SeqCst);
+                    continue;
+                }
+                Ok(chunk) => {
+                    if sender.send((chunk, info.digest, size)).is_err() {
+                        break; // receiver gone - simply stop
+                    }
+                }
+            }
+        }
+    });
+
+    receiver
+}
+
+fn verify_index_chunks(
+    datastore: Arc<DataStore>,
+    index: Box<dyn IndexFile + Send>,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
+    crypt_mode: CryptMode,
+    worker: Arc<WorkerTask>,
+) -> Result<(), Error> {
+
+    let errors = Arc::new(AtomicUsize::new(0));
+
+    let start_time = Instant::now();
+
+    let chunk_channel = chunk_reader_thread(
+        datastore,
+        index,
+        verified_chunks.clone(),
+        corrupt_chunks.clone(),
+        errors.clone(),
+        worker.clone(),
+    );
+
+    let mut read_bytes = 0;
+    let mut decoded_bytes = 0;
+
+    loop {
+
+        worker.fail_on_abort()?;
+        crate::tools::fail_on_shutdown()?;
+
+        let (chunk, digest, size) = match chunk_channel.recv() {
+            Ok(tuple) => tuple,
+            Err(std::sync::mpsc::RecvError) => break,
         };

+        read_bytes += chunk.raw_size();
+        decoded_bytes += size;
+
         let chunk_crypt_mode = match chunk.crypt_mode() {
             Err(err) => {
-                corrupt_chunks.insert(info.digest);
+                corrupt_chunks.lock().unwrap().insert(digest);
                 worker.log(format!("can't verify chunk, unknown CryptMode - {}", err));
-                errors += 1;
+                errors.fetch_add(1, Ordering::SeqCst);
                 continue;
             },
             Ok(mode) => mode,
@@ -78,27 +141,32 @@ fn verify_index_chunks(
                 chunk_crypt_mode,
                 crypt_mode
             ));
-            errors += 1;
+            errors.fetch_add(1, Ordering::SeqCst);
         }

-        if !verified_chunks.contains(&info.digest) {
-            if !corrupt_chunks.contains(&info.digest) {
-                if let Err(err) = chunk.verify_unencrypted(size as usize, &info.digest) {
-                    corrupt_chunks.insert(info.digest);
-                    worker.log(format!("{}", err));
-                    errors += 1;
-                } else {
-                    verified_chunks.insert(info.digest);
-                }
-            } else {
-                let digest_str = proxmox::tools::digest_to_hex(&info.digest);
-                worker.log(format!("chunk {} was marked as corrupt", digest_str));
-                errors += 1;
-            }
+        if let Err(err) = chunk.verify_unencrypted(size as usize, &digest) {
+            corrupt_chunks.lock().unwrap().insert(digest);
+            worker.log(format!("{}", err));
+            errors.fetch_add(1, Ordering::SeqCst);
+        } else {
+            verified_chunks.lock().unwrap().insert(digest);
         }
     }

-    if errors > 0 {
+    let elapsed = start_time.elapsed().as_secs_f64();
+
+    let read_bytes_mib = (read_bytes as f64)/(1024.0*1024.0);
+    let decoded_bytes_mib = (decoded_bytes as f64)/(1024.0*1024.0);
+
+    let read_speed = read_bytes_mib/elapsed;
+    let decode_speed = decoded_bytes_mib/elapsed;
+
+    let error_count = errors.load(Ordering::SeqCst);
+
+    worker.log(format!("  verified {:.2}/{:.2} Mib in {:.2} seconds, speed {:.2}/{:.2} Mib/s ({} errors)",
+        read_bytes_mib, decoded_bytes_mib, elapsed, read_speed, decode_speed, error_count));
+
+    if errors.load(Ordering::SeqCst) > 0 {
         bail!("chunks could not be verified");
     }

@@ -106,12 +174,12 @@ fn verify_index_chunks(
 }

 fn verify_fixed_index(
-    datastore: &DataStore,
+    datastore: Arc<DataStore>,
     backup_dir: &BackupDir,
     info: &FileInfo,
-    verified_chunks: &mut HashSet<[u8;32]>,
-    corrupt_chunks: &mut HashSet<[u8;32]>,
-    worker: &WorkerTask,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    worker: Arc<WorkerTask>,
 ) -> Result<(), Error> {

     let mut path = backup_dir.relative_path();
@@ -132,12 +200,12 @@ fn verify_fixed_index(
 }

 fn verify_dynamic_index(
-    datastore: &DataStore,
+    datastore: Arc<DataStore>,
     backup_dir: &BackupDir,
     info: &FileInfo,
-    verified_chunks: &mut HashSet<[u8;32]>,
-    corrupt_chunks: &mut HashSet<[u8;32]>,
-    worker: &WorkerTask,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    worker: Arc<WorkerTask>,
 ) -> Result<(), Error> {

     let mut path = backup_dir.relative_path();
@@ -167,14 +235,14 @@ fn verify_dynamic_index(
 /// - Ok(false) if there were verification errors
 /// - Err(_) if task was aborted
 pub fn verify_backup_dir(
-    datastore: &DataStore,
+    datastore: Arc<DataStore>,
     backup_dir: &BackupDir,
-    verified_chunks: &mut HashSet<[u8;32]>,
-    corrupt_chunks: &mut HashSet<[u8;32]>,
-    worker: &WorkerTask
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    worker: Arc<WorkerTask>
 ) -> Result<bool, Error> {

-    let manifest = match datastore.load_manifest(&backup_dir) {
+    let mut manifest = match datastore.load_manifest(&backup_dir) {
         Ok((manifest, _)) => manifest,
         Err(err) => {
             worker.log(format!("verify {}:{} - manifest load error: {}", datastore.name(), backup_dir, err));
@@ -186,40 +254,53 @@ pub fn verify_backup_dir(

     let mut error_count = 0;

+    let mut verify_result = "ok";
     for info in manifest.files() {
         let result = proxmox::try_block!({
             worker.log(format!("  check {}", info.filename));
             match archive_type(&info.filename)? {
                 ArchiveType::FixedIndex =>
                     verify_fixed_index(
-                        &datastore,
+                        datastore.clone(),
                         &backup_dir,
                         info,
-                        verified_chunks,
-                        corrupt_chunks,
-                        worker
+                        verified_chunks.clone(),
+                        corrupt_chunks.clone(),
+                        worker.clone(),
                     ),
                 ArchiveType::DynamicIndex =>
                     verify_dynamic_index(
-                        &datastore,
+                        datastore.clone(),
                         &backup_dir,
                         info,
-                        verified_chunks,
-                        corrupt_chunks,
-                        worker
+                        verified_chunks.clone(),
+                        corrupt_chunks.clone(),
+                        worker.clone(),
                     ),
-                ArchiveType::Blob => verify_blob(&datastore, &backup_dir, info),
+                ArchiveType::Blob => verify_blob(datastore.clone(), &backup_dir, info),
             }
         });

         worker.fail_on_abort()?;
+        crate::tools::fail_on_shutdown()?;

         if let Err(err) = result {
             worker.log(format!("verify {}:{}/{} failed: {}", datastore.name(), backup_dir, info.filename, err));
             error_count += 1;
+            verify_result = "failed";
         }
     }

+    let verify_state = SnapshotVerifyState {
+        state: verify_result.to_string(),
+        upid: worker.upid().clone(),
+    };
+    manifest.unprotected["verify_state"] = serde_json::to_value(verify_state)?;
+    datastore.store_manifest(&backup_dir, serde_json::to_value(manifest)?)
+        .map_err(|err| format_err!("unable to store manifest blob - {}", err))?;
+
     Ok(error_count == 0)
 }

@@ -228,32 +309,45 @@ pub fn verify_backup_dir(
 /// Errors are logged to the worker log.
 ///
 /// Returns
-/// - Ok(failed_dirs) where failed_dirs had verification errors
+/// - Ok((count, failed_dirs)) where failed_dirs had verification errors
 /// - Err(_) if task was aborted
-pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &WorkerTask) -> Result<Vec<String>, Error> {
+pub fn verify_backup_group(
+    datastore: Arc<DataStore>,
+    group: &BackupGroup,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    progress: Option<(usize, usize)>, // (done, snapshot_count)
+    worker: Arc<WorkerTask>,
+) -> Result<(usize, Vec<String>), Error> {

     let mut errors = Vec::new();
     let mut list = match group.list_backups(&datastore.base_path()) {
         Ok(list) => list,
         Err(err) => {
             worker.log(format!("verify group {}:{} - unable to list backups: {}", datastore.name(), group, err));
-            return Ok(errors);
+            return Ok((0, errors));
         }
     };

     worker.log(format!("verify group {}:{}", datastore.name(), group));

-    let mut verified_chunks = HashSet::with_capacity(1024*16); // start with 16384 chunks (up to 65GB)
-    let mut corrupt_chunks = HashSet::with_capacity(64); // start with 64 chunks since we assume there are few corrupt ones
+    let (done, snapshot_count) = progress.unwrap_or((0, list.len()));

+    let mut count = 0;
     BackupInfo::sort_list(&mut list, false); // newest first
     for info in list {
-        if !verify_backup_dir(datastore, &info.backup_dir, &mut verified_chunks, &mut corrupt_chunks, worker)?{
+        count += 1;
+        if !verify_backup_dir(datastore.clone(), &info.backup_dir, verified_chunks.clone(), corrupt_chunks.clone(), worker.clone())?{
             errors.push(info.backup_dir.to_string());
         }
+        if snapshot_count != 0 {
+            let pos = done + count;
+            let percentage = ((pos as f64) * 100.0)/(snapshot_count as f64);
+            worker.log(format!("percentage done: {:.2}% ({} of {} snapshots)", percentage, pos, snapshot_count));
+        }
     }

-    Ok(errors)
+    Ok((count, errors))
 }

 /// Verify all backups inside a datastore
@@ -263,11 +357,11 @@ pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &
 /// Returns
 /// - Ok(failed_dirs) where failed_dirs had verification errors
 /// - Err(_) if task was aborted
-pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> Result<Vec<String>, Error> {
+pub fn verify_all_backups(datastore: Arc<DataStore>, worker: Arc<WorkerTask>) -> Result<Vec<String>, Error> {

     let mut errors = Vec::new();

-    let list = match BackupGroup::list_groups(&datastore.base_path()) {
+    let mut list = match BackupGroup::list_groups(&datastore.base_path()) {
         Ok(list) => list,
         Err(err) => {
             worker.log(format!("verify datastore {} - unable to list backups: {}", datastore.name(), err));
@@ -275,11 +369,34 @@ pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> Result<
         }
     };

-    worker.log(format!("verify datastore {}", datastore.name()));
+    list.sort_unstable();
+
+    let mut snapshot_count = 0;
+    for group in list.iter() {
+        snapshot_count += group.list_backups(&datastore.base_path())?.len();
+    }
+
+    // start with 16384 chunks (up to 65GB)
+    let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
+
+    // start with 64 chunks since we assume there are few corrupt ones
+    let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
+
+    worker.log(format!("verify datastore {} ({} snapshots)", datastore.name(), snapshot_count));
+
+    let mut done = 0;
     for group in list {
-        let mut group_errors = verify_backup_group(datastore, &group, worker)?;
+        let (count, mut group_errors) = verify_backup_group(
+            datastore.clone(),
+            &group,
+            verified_chunks.clone(),
+            corrupt_chunks.clone(),
+            Some((done, snapshot_count)),
+            worker.clone(),
+        )?;
         errors.append(&mut group_errors);
+
+        done += count;
     }

     Ok(errors)
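Aside: the core of the verify speed-up above is a bounded channel between a loader thread and the verifying thread, so disk reads and digest checks overlap. A minimal, self-contained sketch of that producer/consumer pattern using only the standard library (the chunk payloads here are invented stand-ins for DataBlob):

use std::sync::mpsc;
use std::thread;

fn main() {
    // a sync_channel blocks the sender once 3 items are queued,
    // bounding memory use just like the patch's "buffer up to 3 chunks"
    let (sender, receiver) = mpsc::sync_channel::<Vec<u8>>(3);

    let producer = thread::spawn(move || {
        for i in 0..10u8 {
            // stand-in for datastore.load_chunk(); send() fails once the receiver is dropped
            if sender.send(vec![i; 4]).is_err() {
                break;
            }
        }
    });

    // consumer: recv() yields items until the sender is dropped, then returns Err
    while let Ok(chunk) = receiver.recv() {
        println!("verifying {} bytes", chunk.len());
    }
    producer.join().unwrap();
}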
@@ -20,13 +20,19 @@ use proxmox_backup::tools::disks::{ DiskManage, zfs_pool_stats };

 use proxmox_backup::api2::pull::do_sync_job;

-fn main() {
+fn main() -> Result<(), Error> {
     proxmox_backup::tools::setup_safe_path_env();

-    if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
-        eprintln!("Error: {}", err);
-        std::process::exit(-1);
+    let backup_uid = proxmox_backup::backup::backup_user()?.uid;
+    let backup_gid = proxmox_backup::backup::backup_group()?.gid;
+    let running_uid = nix::unistd::Uid::effective();
+    let running_gid = nix::unistd::Gid::effective();
+
+    if running_uid != backup_uid || running_gid != backup_gid {
+        bail!("proxy not running as backup user or group (got uid {} gid {})", running_uid, running_gid);
     }
+
+    proxmox_backup::tools::runtime::main(run())
 }

 async fn run() -> Result<(), Error> {
@@ -43,11 +49,6 @@ async fn run() -> Result<(), Error> {
     let mut config = ApiConfig::new(
         buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;

-    // add default dirs which includes jquery and bootstrap
-    // my $base = '/usr/share/libpve-http-server-perl';
-    // add_dirs($self->{dirs}, '/css/' => "$base/css/");
-    // add_dirs($self->{dirs}, '/js/' => "$base/js/");
-    // add_dirs($self->{dirs}, '/fonts/' => "$base/fonts/");
     config.add_alias("novnc", "/usr/share/novnc-pve");
     config.add_alias("extjs", "/usr/share/javascript/extjs");
     config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");

@@ -239,7 +239,7 @@ pub fn zpool_commands() -> CommandLineInterface {
         .insert("create",
                 CliCommand::new(&API_METHOD_CREATE_ZPOOL)
                 .arg_param(&["name"])
-                .completion_cb("devices", complete_disk_name) // fixme: comlete the list
+                .completion_cb("devices", complete_disk_name) // fixme: complete the list
         );
 
     cmd_def.into()
@@ -629,7 +629,7 @@ impl BackupWriter {
         })
     }
 
-    /// Upload speed test - prints result ot stderr
+    /// Upload speed test - prints result to stderr
     pub async fn upload_speedtest(&self, verbose: bool) -> Result<f64, Error> {
 
         let mut data = vec![];
@@ -133,7 +133,7 @@ impl DiskManage {
         })
     }
 
-    /// Information about file system type and unsed device for a path
+    /// Information about file system type and used device for a path
     ///
     /// Returns tuple (fs_type, device, mount_source)
     pub fn find_mounted_device(
@@ -111,7 +111,7 @@ fn parse_zpool_list_item(i: &str) -> IResult<&str, ZFSPoolInfo> {
     Ok((i, stat))
 }
 
-/// Parse zpool list outout
+/// Parse zpool list output
 ///
 /// Note: This does not reveal any details on how the pool uses the devices, because
 /// the zpool list output format is not really defined...
@@ -53,7 +53,7 @@ fn parse_zpool_status_vdev(i: &str) -> IResult<&str, ZFSPoolVDevState> {
 
     let (i, vdev_name) = notspace1(i)?;
 
-    if let Ok((n, _)) = preceded(multispace0, line_ending)(i) { // sepecial device
+    if let Ok((n, _)) = preceded(multispace0, line_ending)(i) { // special device
         let vdev = ZFSPoolVDevState {
             name: vdev_name.to_string(),
             lvl: indent_level,
@@ -80,6 +80,11 @@ impl From<usize> for HumanByte {
         HumanByte { b: v }
     }
 }
+impl From<u64> for HumanByte {
+    fn from(v: u64) -> Self {
+        HumanByte { b: v as usize }
+    }
+}
 
 #[test]
 fn correct_byte_convert() {
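The new `From<u64>` impl complements the existing `From<usize>` one, so `u64` counters (for example the garbage-collection byte totals that this release now prints in human-readable units) can be passed to `HumanByte::from()` without a cast at each call site. A cut-down sketch of how the two conversions are used; the `Display` thresholds below are illustrative, not PBS's exact formatting:

    struct HumanByte { b: usize }

    impl From<usize> for HumanByte {
        fn from(v: usize) -> Self { HumanByte { b: v } }
    }
    impl From<u64> for HumanByte {
        fn from(v: u64) -> Self { HumanByte { b: v as usize } }
    }

    impl std::fmt::Display for HumanByte {
        fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
            let b = self.b as f64;
            if b < 1024.0 {
                write!(f, "{} B", self.b)
            } else if b < 1024.0 * 1024.0 {
                write!(f, "{:.2} KiB", b / 1024.0)
            } else {
                write!(f, "{:.2} MiB", b / (1024.0 * 1024.0))
            }
        }
    }

    fn main() {
        let removed_bytes: u64 = 5 * 1024 * 1024; // e.g. a GC counter from the API
        println!("removed {}", HumanByte::from(removed_bytes)); // "removed 5.00 MiB"
    }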
@@ -6,16 +6,16 @@ Ext.define('pbs-data-store-snapshots', {
     {
         name: 'backup-time',
         type: 'date',
-        dateFormat: 'timestamp'
+        dateFormat: 'timestamp',
     },
     'files',
     'owner',
-    { name: 'size', type: 'int', allowNull: true, },
+    'verification',
+    { name: 'size', type: 'int', allowNull: true },
     {
         name: 'crypt-mode',
         type: 'boolean',
         calculate: function(data) {
-            let encrypted = 0;
             let crypt = {
                 none: 0,
                 mixed: 0,
@@ -23,25 +23,24 @@ Ext.define('pbs-data-store-snapshots', {
                 encrypt: 0,
                 count: 0,
             };
-            let signed = 0;
             data.files.forEach(file => {
                 if (file.filename === 'index.json.blob') return; // is never encrypted
                 let mode = PBS.Utils.cryptmap.indexOf(file['crypt-mode']);
                 if (mode !== -1) {
                     crypt[file['crypt-mode']]++;
-                }
                     crypt.count++;
+                }
             });
 
             return PBS.Utils.calculateCryptMode(crypt);
-        }
+        },
     },
     {
         name: 'matchesFilter',
         type: 'boolean',
         defaultValue: true,
     },
-    ]
+    ],
 });
 
 Ext.define('PBS.DataStoreContent', {
@@ -69,7 +68,7 @@ Ext.define('PBS.DataStoreContent', {
         view.getStore().setSorters([
             'backup-group',
             'text',
-            'backup-time'
+            'backup-time',
         ]);
         Proxmox.Utils.monStoreErrors(view, this.store);
         this.reload(); // initial load
@@ -87,7 +86,7 @@ Ext.define('PBS.DataStoreContent', {
         this.store.setProxy({
             type: 'proxmox',
             timeout: 300*1000, // 5 minutes, we should make that api call faster
-            url: url
+            url: url,
         });
 
         this.store.load();
@@ -123,7 +122,7 @@ Ext.define('PBS.DataStoreContent', {
                 expanded: false,
                 backup_type: item.data["backup-type"],
                 backup_id: item.data["backup-id"],
-                children: []
+                children: [],
             };
         }
 
@@ -162,7 +161,7 @@ Ext.define('PBS.DataStoreContent', {
                 }
                 return false;
             },
-            after: () => {},
+            after: Ext.emptyFn,
         });
 
         for (const item of records) {
@@ -180,7 +179,7 @@ Ext.define('PBS.DataStoreContent', {
 
             data.children = [];
             for (const file of data.files) {
-                file.text = file.filename,
+                file.text = file.filename;
                 file['crypt-mode'] = PBS.Utils.cryptmap.indexOf(file['crypt-mode']);
                 file.leaf = true;
                 file.matchesFilter = true;
@@ -191,6 +190,7 @@ Ext.define('PBS.DataStoreContent', {
                 children.push(data);
             }
 
+            let nowSeconds = Date.now() / 1000;
             let children = [];
             for (const [name, group] of Object.entries(groups)) {
                 let last_backup = 0;
@@ -200,7 +200,13 @@ Ext.define('PBS.DataStoreContent', {
                     'sign-only': 0,
                     encrypt: 0,
                 };
-                for (const item of group.children) {
+                let verify = {
+                    outdated: 0,
+                    none: 0,
+                    failed: 0,
+                    ok: 0,
+                };
+                for (let item of group.children) {
                     crypt[PBS.Utils.cryptmap[item['crypt-mode']]]++;
                     if (item["backup-time"] > last_backup && item.size !== null) {
                         last_backup = item["backup-time"];
@@ -208,9 +214,24 @@ Ext.define('PBS.DataStoreContent', {
                         group.files = item.files;
                         group.size = item.size;
                         group.owner = item.owner;
+                        verify.lastFailed = item.verification && item.verification.state !== 'ok';
                     }
+                    if (!item.verification) {
+                        verify.none++;
+                    } else {
+                        if (item.verification.state === 'ok') {
+                            verify.ok++;
+                        } else {
+                            verify.failed++;
                        }
+                        let task = Proxmox.Utils.parse_task_upid(item.verification.upid);
+                        item.verification.lastTime = task.starttime;
+                        if (nowSeconds - task.starttime > 30 * 24 * 60 * 60) {
+                            verify.outdated++;
+                        }
+                    }
+                }
+                group.verification = verify;
                 group.count = group.children.length;
                 group.matchesFilter = true;
                 crypt.count = group.count;
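The loop above folds each snapshot's last verification result into four counters per backup group: `none` for never-verified snapshots, `ok` and `failed` from the stored state, and `outdated` for any verification whose task started more than 30 days ago (the start time is recovered by parsing the task UPID). The same bucketing rule as a self-contained sketch, written in Rust for consistency with the backend examples; the types are illustrative and the real logic stays in the ExtJS controller shown above:

    const THIRTY_DAYS: u64 = 30 * 24 * 60 * 60;

    #[derive(Default, Debug)]
    struct VerifyCounts { ok: u32, failed: u32, none: u32, outdated: u32 }

    /// Each entry is the last verification of one snapshot:
    /// `None` = never verified, `Some((state, task_starttime))` otherwise.
    fn bucket(snapshots: &[Option<(&str, u64)>], now: u64) -> VerifyCounts {
        let mut v = VerifyCounts::default();
        for snap in snapshots {
            match snap {
                None => v.none += 1,
                Some((state, starttime)) => {
                    if *state == "ok" { v.ok += 1 } else { v.failed += 1 }
                    if now - starttime > THIRTY_DAYS {
                        v.outdated += 1; // verified, but too long ago to fully trust
                    }
                }
            }
        }
        v
    }

    fn main() {
        let now = 1_600_000_000; // any Unix timestamp
        let snaps = [
            Some(("ok", now - 1_000)),                 // fresh and OK
            Some(("failed", now - 40 * 24 * 60 * 60)), // failed and outdated
            None,                                      // never verified
        ];
        println!("{:?}", bucket(&snaps, now)); // ok: 1, failed: 1, none: 1, outdated: 1
    }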
@@ -221,7 +242,7 @@ Ext.define('PBS.DataStoreContent', {
 
             view.setRootNode({
                 expanded: true,
-                children: children
+                children: children,
             });
 
             if (selected !== undefined) {
@@ -241,13 +262,13 @@ Ext.define('PBS.DataStoreContent', {
             Proxmox.Utils.setErrorMask(view, false);
             if (view.getStore().getFilters().length > 0) {
                 let searchBox = me.lookup("searchbox");
-                let searchvalue = searchBox.getValue();;
+                let searchvalue = searchBox.getValue();
                 me.search(searchBox, searchvalue);
             }
         },
 
         onPrune: function(view, rI, cI, item, e, rec) {
-            var view = this.getView();
+            view = this.getView();
 
             if (!(rec && rec.data)) return;
             let data = rec.data;
@@ -265,7 +286,8 @@ Ext.define('PBS.DataStoreContent', {
         },
 
         onVerify: function(view, rI, cI, item, e, rec) {
-            var view = this.getView();
+            let me = this;
+            view = me.getView();
 
             if (!view.datastore) return;
 
@@ -297,6 +319,7 @@ Ext.define('PBS.DataStoreContent', {
                 success: function(response, options) {
                     Ext.create('Proxmox.window.TaskViewer', {
                         upid: response.result.data,
+                        taskDone: () => me.reload(),
                     }).show();
                 },
             });
@@ -304,7 +327,7 @@ Ext.define('PBS.DataStoreContent', {
 
         onForget: function(view, rI, cI, item, e, rec) {
             let me = this;
-            var view = this.getView();
+            view = this.getView();
 
             if (!(rec && rec.data)) return;
             let data = rec.data;
@@ -359,7 +382,8 @@ Ext.define('PBS.DataStoreContent', {
             let atag = document.createElement('a');
             params['file-name'] = file;
             atag.download = filename;
-            let url = new URL(`/api2/json/admin/datastore/${view.datastore}/download-decoded`, window.location.origin);
+            let url = new URL(`/api2/json/admin/datastore/${view.datastore}/download-decoded`,
+                window.location.origin);
             for (const [key, value] of Object.entries(params)) {
                 url.searchParams.append(key, value);
             }
@@ -422,7 +446,7 @@ Ext.define('PBS.DataStoreContent', {
             store.beginUpdate();
             store.getRoot().cascadeBy({
                 before: function(item) {
-                    if(me.filter(item, value)) {
+                    if (me.filter(item, value)) {
                         item.set('matchesFilter', true);
                         if (item.parentNode && item.parentNode.id !== 'root') {
                             item.parentNode.childmatches = true;
@@ -454,12 +478,22 @@ Ext.define('PBS.DataStoreContent', {
         },
     },
 
+    viewConfig: {
+        getRowClass: function(record, index) {
+            let verify = record.get('verification');
+            if (verify && verify.lastFailed) {
+                return 'proxmox-invalid-row';
+            }
+            return null;
+        },
+    },
+
     columns: [
         {
             xtype: 'treecolumn',
             header: gettext("Backup Group"),
             dataIndex: 'text',
-            flex: 1
+            flex: 1,
         },
         {
             header: gettext('Actions'),
@@ -506,9 +540,9 @@ Ext.define('PBS.DataStoreContent', {
                         data.filename &&
                         data.filename.endsWith('pxar.didx') &&
                         data['crypt-mode'] < 3);
-                }
                 },
-            ]
+                },
+            ],
         },
         {
             xtype: 'datecolumn',
@@ -516,7 +550,7 @@ Ext.define('PBS.DataStoreContent', {
             sortable: true,
             dataIndex: 'backup-time',
             format: 'Y-m-d H:i:s',
-            width: 150
+            width: 150,
         },
         {
             header: gettext("Size"),
@@ -538,6 +572,8 @@ Ext.define('PBS.DataStoreContent', {
             format: '0',
             header: gettext("Count"),
             sortable: true,
+            width: 75,
+            align: 'right',
             dataIndex: 'count',
         },
         {
@@ -560,8 +596,80 @@ Ext.define('PBS.DataStoreContent', {
                 if (iconCls) {
                     iconTxt = `<i class="fa fa-fw fa-${iconCls}"></i> `;
                 }
-                return (iconTxt + PBS.Utils.cryptText[v]) || Proxmox.Utils.unknownText
+                return (iconTxt + PBS.Utils.cryptText[v]) || Proxmox.Utils.unknownText;
+            },
+        },
+        {
+            header: gettext('Verify State'),
+            sortable: true,
+            dataIndex: 'verification',
+            width: 120,
+            renderer: (v, meta, record) => {
+                let i = (cls, txt) => `<i class="fa fa-fw fa-${cls}"></i> ${txt}`;
+                if (v === undefined || v === null) {
+                    return record.data.leaf ? '' : i('question-circle-o warning', gettext('None'));
                 }
+                let tip, iconCls, txt;
+                if (record.parentNode.id === 'root') {
+                    if (v.failed === 0) {
+                        if (v.none === 0) {
+                            if (v.outdated > 0) {
+                                tip = 'All OK, but some snapshots were not verified in last 30 days';
+                                iconCls = 'check warning';
+                                txt = gettext('All OK (old)');
+                            } else {
+                                tip = 'All snapshots verified at least once in last 30 days';
+                                iconCls = 'check good';
+                                txt = gettext('All OK');
+                            }
+                        } else if (v.ok === 0) {
+                            tip = `${v.none} not verified yet`;
+                            iconCls = 'question-circle-o warning';
+                            txt = gettext('None');
+                        } else {
+                            tip = `${v.ok} OK, ${v.none} not verified yet`;
+                            iconCls = 'check faded';
+                            txt = `${v.ok} OK`;
+                        }
+                    } else {
+                        tip = `${v.ok} OK, ${v.failed} failed, ${v.none} not verified yet`;
+                        iconCls = 'times critical';
+                        txt = v.ok === 0 && v.none === 0
+                            ? gettext('All failed')
+                            : `${v.failed} failed`;
+                    }
+                } else if (!v.state) {
+                    return record.data.leaf ? '' : gettext('None');
+                } else {
+                    let verify_time = Proxmox.Utils.render_timestamp(v.lastTime);
+                    tip = `Last verify task started on ${verify_time}`;
+                    txt = v.state;
+                    iconCls = 'times critical';
+                    if (v.state === 'ok') {
+                        iconCls = 'check good';
+                        let now = Date.now() / 1000;
+                        if (now - v.lastTime > 30 * 24 * 60 * 60) {
+                            tip = `Last verify task over 30 days ago: ${verify_time}`;
+                            iconCls = 'check warning';
+                        }
+                    }
+                }
+                return `<span data-qtip="${tip}">
+                        <i class="fa fa-fw fa-${iconCls}"></i> ${txt}
+                        </span>`;
+            },
+            listeners: {
+                dblclick: function(view, el, row, col, ev, rec) {
+                    let data = rec.data || {};
+                    let verify = data.verification;
+                    if (verify && verify.upid && rec.parentNode.id !== 'root') {
+                        let win = Ext.create('Proxmox.window.TaskViewer', {
+                            upid: verify.upid,
+                        });
+                        win.show();
+                    }
+                },
+            },
         },
     ],
 
@@ -579,6 +687,7 @@ Ext.define('PBS.DataStoreContent', {
         {
             xtype: 'textfield',
             reference: 'searchbox',
+            emptyText: gettext('group, date or owner'),
            triggers: {
                clear: {
                    cls: 'pmx-clear-trigger',
@@ -588,7 +697,7 @@ Ext.define('PBS.DataStoreContent', {
                         this.triggers.clear.setVisible(false);
                         this.setValue('');
                     },
-                }
+                },
            },
            listeners: {
                change: {
@@ -596,6 +705,6 @@ Ext.define('PBS.DataStoreContent', {
                 buffer: 500,
             },
         },
-    }
+    },
     ],
 });