Compare commits
130 Commits
4430f199c4  eef18365e8  319fe45261  f26080fab1  0cbdeed96b
8b4f4d9ee4  b9cc905761  c9725bb829  40492a562f  db67e4fe06
b4b14dc16e  c4a45ec744  5428f5ca29  328df3b507  a4915dfc2b
d642802d8c  a20fcab060  b9e7bcc272  acc3d9df5a  1298618a83
a12388d177  1f092c7802  cd82870015  8d6b6a045f  1dceaed1e9
2565fdd075  7ece65a01e  028d0a1352  68931742cb  3ea148598a
cd92fd7336  d58e6313e1  16f9f244cf  b683fd589c  a2285525be
f23497b088  b57b3c9bfc  d3444c0891  d28e688666  72c0e102ff
7b22fb257f  2e201e7da6  ee89416319  2357744bd0  52fe9e8ece
eed1bae554  6eb41487ce  9e61c01ce4  91c9b42da3  52d2ae48f0
1872050564  efeb92efee  4ebda996e5  5eb9dd0c8a  12bcbf0734
dc2876f6bb  bdc208af48  2ef1b6290f  df0bdf6be7  8b47a23002
29615fe838  133042b5d8  73df9c515b  8d1beca7e8  9b2bad7af0
78efafc2d0  2d3d91b1db  030c5c6d8a  53a561a222  e832860a3c
804f61432d  943479f5f6  fdce52aa99  4e32d1c590  afef7f3bba
b428af9781  c8774067ee  23440482d4  6f757b8458  95ade8fdb5
9e870b5f39  7827e3b93e  e6ca9c3235  0698f78df5  bcc2880461
115d927c15  df729017b4  455f2ad228  e4f5f59eea  16cdb9563b
02479720c0  97168f920e  9809772b23  4940012d0d  0c2f9621d5
e7372972b5  e5adbc3419  41255b4d95  0c4c6a7b1c  c7e18ba08a
bb14d46796  e6475b09e0  d39d095fa4  86f3c2363c  8e7e2223d8
081c37cccf  c0df91f8bd  400c568f8e  4703ba81ce  29633e2fe9
b64e9a97f3  254b1f2213  1a374fcfd6  e07620028d  b947b1e7ee
1e80fb8e92  8d841f81ee  d9f365d79f  32a4695c46  2081327428
4c0ae82e23  883aa6d5a4  bfa54f2e85  238a872d1f  7d6c4c39e9
f153930066  836c4a278d  6cd8496008  61c6eafc08  8db1468952
@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.9.1"
+version = "0.9.2"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
@@ -29,7 +29,7 @@ hyper = "0.13.6"
 lazy_static = "1.4"
 libc = "0.2"
 log = "0.4"
-nix = "0.16"
+nix = "0.19"
 num-traits = "0.2"
 once_cell = "1.3.1"
 openssl = "0.10"
@@ -38,7 +38,7 @@ pam-sys = "0.5"
 percent-encoding = "2.1"
 pin-utils = "0.1.0"
 pathpatterns = "0.1.2"
-proxmox = { version = "0.4.3", features = [ "sortable-macro", "api-macro", "websocket" ] }
+proxmox = { version = "0.5.0", features = [ "sortable-macro", "api-macro", "websocket" ] }
 #proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
 proxmox-fuse = "0.1.0"
debian/changelog (vendored; 84 changed lines)

@@ -1,3 +1,83 @@
+rust-proxmox-backup (0.9.2-1) unstable; urgency=medium
+
+  * rework server web-interface, move more datastore related panels as tabs
+    inside the datastore view
+
+  * prune: never fail, just warn about failed removals
+
+  * prune/forget: skip snapshots with open readers (restore, verification)
+
+  * datastore: always ensure to remove individual snapshots before their group
+
+  * pxar: fix relative '!' rules in .pxarexclude
+
+  * pxar: anchor pxarexcludes starting with a slash
+
+  * GC: mark phase: ignore vanished index files
+
+  * server/rest: forward real client IP on proxied request and log it in
+    failed authentication requests
+
+  * server: rest: implement max URI path and query length request limits
+
+  * server/rest: implement request access log and log the query part of
+    URL and the user agent
+
+  * api: access: log to separate file, use syslog for errors only to reduce
+    syslog spam
+
+  * client: set HTTP connect timeout to 10 seconds
+
+  * client: send TCP keep-alive after 2 minutes instead of the Linux default
+    of two hours.
+
+  * CLI completion: fix ACL path completion
+
+  * fix #2988: allow one to enable automatic verification after finishing a
+    snapshot, can be controlled as a per-datastore option
+
+  * various log-rotation improvements
+
+  * proxmox-backup-client: use HumanByte to render snapshot size
+
+  * paperkey: use svg as image format to provide better scalability
+
+  * backup: avoid "Transport endpoint is not connected" error
+
+  * fix #3038: check user before renewing ticket
+
+  * ui/tools: add zip module and allow to download an archive directory as a zip
+
+  * ui and api: add verification job config, allowing to schedule more
+    flexible jobs, filtering out already and/or recently verified snapshots
+    NOTE: the previous simple "verify all" schedule was dropped from the
+    datastore content, and does *not* get migrated to the new job config.
+
+  * tasks: use systemd escape to decode/encode the task worker ID, avoiding
+    some display problems with problematic characters
+
+  * fix #2934: list also new to-be-installed packages in updates
+
+  * apt: add /changelog API call similar to PVE
+
+  * api: add world accessible ping dummy endpoint, to cheaply check for a
+    running PBS instance.
+
+  * ui: add datastore summary panel and move Statistics into it
+
+  * ui: navigation: add 'Add Datastore' button below datastore list
+
+  * ui: datastore panel: save and restore selected tab statefully
+
+  * send notification mails to email of root@pam account for GC and verify
+    jobs
+
+  * ui: datastore: use simple V. for verify action button
+
+  * ui: datastore: show snapshot manifest comment and allow to edit them
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 28 Oct 2020 21:27:02 +0100
+
 rust-proxmox-backup (0.9.1-1) unstable; urgency=medium
 
   * TLS speedups (use SslAcceptor::mozilla_intermediate_v5)
@@ -16,7 +96,7 @@ rust-proxmox-backup (0.9.1-1) unstable; urgency=medium
 
   * add "Build" section to README.rst
 
-  * reader: actually allow users to downlod their own backups
+  * reader: actually allow users to download their own backups
 
   * reader: track index chunks and limit access
 
@@ -38,7 +118,7 @@ rust-proxmox-backup (0.9.1-1) unstable; urgency=medium
 
   * ui: Dashboard/TaskSummary: add Verifies to the Summary
 
-  * ui: implment task history limit and make it configurable
+  * ui: implement task history limit and make it configurable
 
   * docs: installation: add system requirements section
 
debian/control (vendored; 12 changed lines)

@@ -24,7 +24,7 @@ Build-Depends: debhelper (>= 11),
 librust-lazy-static-1+default-dev (>= 1.4-~~),
 librust-libc-0.2+default-dev,
 librust-log-0.4+default-dev,
-librust-nix-0.16+default-dev,
+librust-nix-0.19+default-dev,
 librust-nom-5+default-dev (>= 5.1-~~),
 librust-num-traits-0.2+default-dev,
 librust-once-cell-1+default-dev (>= 1.3.1-~~),
@@ -34,10 +34,10 @@ Build-Depends: debhelper (>= 11),
 librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
 librust-percent-encoding-2+default-dev (>= 2.1-~~),
 librust-pin-utils-0.1+default-dev,
-librust-proxmox-0.4+api-macro-dev (>= 0.4.3-~~),
-librust-proxmox-0.4+default-dev (>= 0.4.3-~~),
-librust-proxmox-0.4+sortable-macro-dev (>= 0.4.3-~~),
-librust-proxmox-0.4+websocket-dev (>= 0.4.3-~~),
+librust-proxmox-0.5+api-macro-dev,
+librust-proxmox-0.5+default-dev,
+librust-proxmox-0.5+sortable-macro-dev,
+librust-proxmox-0.5+websocket-dev,
 librust-proxmox-fuse-0.1+default-dev,
 librust-pxar-0.6+default-dev (>= 0.6.1-~~),
 librust-pxar-0.6+futures-io-dev (>= 0.6.1-~~),
@@ -107,7 +107,7 @@ Depends: fonts-font-awesome,
 pbs-i18n,
 proxmox-backup-docs,
 proxmox-mini-journalreader,
-proxmox-widget-toolkit (>= 2.3-1),
+proxmox-widget-toolkit (>= 2.3-6),
 pve-xtermjs (>= 4.7.0-1),
 smartmontools,
 ${misc:Depends},
debian/control.in (vendored; 2 changed lines)

@@ -7,7 +7,7 @@ Depends: fonts-font-awesome,
 pbs-i18n,
 proxmox-backup-docs,
 proxmox-mini-journalreader,
-proxmox-widget-toolkit (>= 2.3-1),
+proxmox-widget-toolkit (>= 2.3-6),
 pve-xtermjs (>= 4.7.0-1),
 smartmontools,
 ${misc:Depends},
debian/postinst (vendored; 2 changed lines)

@@ -15,6 +15,8 @@ case "$1" in
     fi
     deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
 
+    flock -w 30 /etc/proxmox-backup/.datastore.lck sed -i '/^\s\+verify-schedule /d' /etc/proxmox-backup/datastore.cfg
+
     # FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
     if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
         echo "Fixing up termproxy user id in task log..."
@@ -246,6 +246,8 @@ Restoring this backup will result in:
   . .. file2
 
+
+.. _encryption:
 
 Encryption
 ----------
 
@@ -1,8 +1,8 @@
 Introduction
 ============
 
-What is Proxmox Backup Server
------------------------------
+What is Proxmox Backup Server?
+------------------------------
 
 Proxmox Backup Server is an enterprise-class, client-server backup software
 package that backs up :term:`virtual machine`\ s, :term:`container`\ s, and
@@ -10,12 +10,14 @@ physical hosts. It is specially optimized for the `Proxmox Virtual Environment`_
 platform and allows you to back up your data securely, even between remote
 sites, providing easy management with a web-based user interface.
 
-Proxmox Backup Server supports deduplication, compression, and authenticated
+It supports deduplication, compression, and authenticated
 encryption (AE_). Using :term:`Rust` as the implementation language guarantees high
 performance, low resource usage, and a safe, high-quality codebase.
 
-It features strong client-side encryption. Thus, it's possible to
-backup data to targets that are not fully trusted.
+Proxmox Backup uses state of the art cryptography for client communication and
+backup content :ref:`encryption <encryption>`. Encryption is done on the
+client side, making it safer to back up data to targets that are not fully
+trusted.
 
 
 Architecture
@@ -179,29 +181,28 @@ along with this program. If not, see AGPL3_.
 History
 -------
 
-Backup is, and always was, as central aspect of IT administration.
-The need to recover from data loss is fundamental and increases with
+Backup is, and always has been, a central aspect of IT administration.
+The need to recover from data loss is fundamental and only increases with
 virtualization.
 
-Not surprisingly, we shipped a backup tool with Proxmox VE from the
-beginning. The tool is called ``vzdump`` and is able to make
+For this reason, we've been shipping a backup tool with Proxmox VE, from the
+beginning. This tool is called ``vzdump`` and is able to make
 consistent snapshots of running LXC containers and KVM virtual
 machines.
 
-But ``vzdump`` only allowed for full backups. While this is perfect
+However, ``vzdump`` only allows for full backups. While this is fine
 for small backups, it becomes a burden for users with large VMs. Both
-backup time and space usage was too large for this case, specially
-when Users want to keep many backups of the same VMs. We need
-deduplication and incremental backups to solve those problems.
+backup duration and storage usage are too high for this case, especially
+for users who want to keep many backups of the same VMs. To solve these
+problems, we needed to offer deduplication and incremental backups.
 
-Back in October 2018 development started. We had been looking into
+Back in October 2018, development started. We investigated
 several technologies and frameworks and finally decided to use
-:term:`Rust` as implementation language to provide high speed and
-memory efficiency. The 2018-edition of Rust seemed to be promising and
-useful for our requirements.
+:term:`Rust` as the implementation language, in order to provide high speed and
+memory efficiency. The 2018-edition of Rust seemed promising for our
+requirements.
 
-In July 2020 we released the first beta version of Proxmox Backup
-Server, followed by a first stable version in November 2020. With the
-support of incremental, fully deduplicated backups, Proxmox Backup
-significantly reduces the network load and saves valuable storage
-space.
+In July 2020, we released the first beta version of Proxmox Backup
+Server, followed by the first stable version in November 2020. With support for
+incremental, fully deduplicated backups, Proxmox Backup significantly reduces
+network load and saves valuable storage space.
@@ -7,6 +7,7 @@ pub mod reader;
 pub mod status;
 pub mod types;
 pub mod version;
+pub mod ping;
 pub mod pull;
 mod helpers;
 
@@ -22,6 +23,7 @@ pub const SUBDIRS: SubdirMap = &[
     ("backup", &backup::ROUTER),
     ("config", &config::ROUTER),
     ("nodes", &NODES_ROUTER),
+    ("ping", &ping::ROUTER),
     ("pull", &pull::ROUTER),
     ("reader", &reader::ROUTER),
     ("status", &status::ROUTER),
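The `ping` module registered above is new in this release, but its body is not part of this compare view. As a rough sketch, a world-accessible dummy handler mounted under this router could look like the following; the handler name, the response body, and the `Permission::World` wiring are assumptions based on the changelog entry, not taken from the diff:

```rust
use anyhow::Error;
use serde_json::{json, Value};

use proxmox::api::{api, Permission, Router};

#[api(
    access: {
        // assumption: world accessible, so monitoring can cheaply probe a
        // running PBS instance without credentials
        permission: &Permission::World,
    },
)]
/// Dummy handler: replies with a constant body as a liveness check.
pub fn ping() -> Result<Value, Error> {
    Ok(json!({ "pong": true }))
}

// the #[api] macro generates API_METHOD_PING for use in the router
pub const ROUTER: Router = Router::new().get(&API_METHOD_PING);
```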
@@ -10,6 +10,7 @@ use proxmox::{http_err, list_subdirs_api_method};
 use crate::tools::ticket::{self, Empty, Ticket};
 use crate::auth_helpers::*;
 use crate::api2::types::*;
+use crate::tools::{FileLogOptions, FileLogger};
 
 use crate::config::cached_user_info::CachedUserInfo;
 use crate::config::acl::{PRIVILEGES, PRIV_PERMISSIONS_MODIFY};
@@ -138,14 +139,22 @@ fn create_ticket(
     path: Option<String>,
     privs: Option<String>,
     port: Option<u16>,
+    rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
+    let logger_options = FileLogOptions {
+        append: true,
+        prefix_time: true,
+        ..Default::default()
+    };
+    let mut auth_log = FileLogger::new("/var/log/proxmox-backup/api/auth.log", logger_options)?;
+
     match authenticate_user(&username, &password, path, privs, port) {
         Ok(true) => {
             let ticket = Ticket::new("PBS", &username)?.sign(private_auth_key(), None)?;
 
             let token = assemble_csrf_prevention_token(csrf_secret(), &username);
 
-            log::info!("successful auth for user '{}'", username);
+            auth_log.log(format!("successful auth for user '{}'", username));
 
             Ok(json!({
                 "username": username,
@@ -157,8 +166,20 @@ fn create_ticket(
                 "username": username,
             })),
         Err(err) => {
-            let client_ip = "unknown"; // $rpcenv->get_client_ip() || '';
-            log::error!("authentication failure; rhost={} user={} msg={}", client_ip, username, err.to_string());
+            let client_ip = match rpcenv.get_client_ip().map(|addr| addr.ip()) {
+                Some(ip) => format!("{}", ip),
+                None => "unknown".into(),
+            };
+
+            let msg = format!(
+                "authentication failure; rhost={} user={} msg={}",
+                client_ip,
+                username,
+                err.to_string()
+            );
+            auth_log.log(&msg);
+            log::error!("{}", msg);
 
             Err(http_err!(UNAUTHORIZED, "permission check failed."))
         }
     }
 }
@@ -3,10 +3,12 @@ use proxmox::list_subdirs_api_method;
 
 pub mod datastore;
 pub mod sync;
+pub mod verify;
 
 const SUBDIRS: SubdirMap = &[
     ("datastore", &datastore::ROUTER),
-    ("sync", &sync::ROUTER)
+    ("sync", &sync::ROUTER),
+    ("verify", &verify::ROUTER)
 ];
 
 pub const ROUTER: Router = Router::new()
@@ -2,6 +2,8 @@ use std::collections::{HashSet, HashMap};
 use std::ffi::OsStr;
 use std::os::unix::ffi::OsStrExt;
 use std::sync::{Arc, Mutex};
+use std::path::{Path, PathBuf};
+use std::pin::Pin;
 
 use anyhow::{bail, format_err, Error};
 use futures::*;
@@ -16,10 +18,9 @@ use proxmox::api::{
 use proxmox::api::router::SubdirMap;
 use proxmox::api::schema::*;
 use proxmox::tools::fs::{replace_file, CreateOptions};
-use proxmox::try_block;
 use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
 
-use pxar::accessor::aio::Accessor;
+use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
 use pxar::EntryKind;
 
 use crate::api2::types::*;
@@ -29,7 +30,12 @@ use crate::config::datastore;
 use crate::config::cached_user_info::CachedUserInfo;
 
 use crate::server::WorkerTask;
-use crate::tools::{self, AsyncReaderStream, WrappedReaderStream};
+use crate::tools::{
+    self,
+    zip::{ZipEncoder, ZipEntry},
+    AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
+};
 
 use crate::config::acl::{
     PRIV_DATASTORE_AUDIT,
     PRIV_DATASTORE_MODIFY,
@@ -165,8 +171,8 @@ fn list_groups(
 
         let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
         let owner = datastore.get_owner(group)?;
-        if !list_all {
-            if owner != userid { continue; }
+        if !list_all && owner != userid {
+            continue;
         }
 
         let result_item = GroupListItem {
@@ -356,8 +362,8 @@ pub fn list_snapshots (
         let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
         let owner = datastore.get_owner(group)?;
 
-        if !list_all {
-            if owner != userid { continue; }
+        if !list_all && owner != userid {
+            continue;
         }
 
         let mut size = None;
@@ -417,6 +423,37 @@ pub fn list_snapshots (
     Ok(snapshots)
 }
 
+// returns a map from type to (group_count, snapshot_count)
+fn get_snaphots_count(store: &DataStore) -> Result<HashMap<String, (usize, usize)>, Error> {
+    let base_path = store.base_path();
+    let backup_list = BackupInfo::list_backups(&base_path)?;
+    let mut groups = HashSet::new();
+    let mut result: HashMap<String, (usize, usize)> = HashMap::new();
+    for info in backup_list {
+        let group = info.backup_dir.group();
+
+        let id = group.backup_id();
+        let backup_type = group.backup_type();
+
+        let mut new_id = false;
+
+        if groups.insert(format!("{}-{}", &backup_type, &id)) {
+            new_id = true;
+        }
+
+        if let Some(mut counts) = result.get_mut(backup_type) {
+            counts.1 += 1;
+            if new_id {
+                counts.0 += 1;
+            }
+        } else {
+            result.insert(backup_type.to_string(), (1, 1));
+        }
+    }
+
+    Ok(result)
+}
+
 #[api(
     input: {
         properties: {
@@ -426,7 +463,21 @@ pub fn list_snapshots (
         },
     },
     returns: {
-        type: StorageStatus,
+        description: "The overall Datastore status and information.",
+        type: Object,
+        properties: {
+            storage: {
+                type: StorageStatus,
+            },
+            counts: {
+                description: "Group and Snapshot counts per Type",
+                type: Object,
+                properties: { },
+            },
+            "gc-status": {
+                type: GarbageCollectionStatus,
+            },
+        },
     },
     access: {
         permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
@@ -437,9 +488,19 @@ pub fn status(
     store: String,
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<StorageStatus, Error> {
+) -> Result<Value, Error> {
     let datastore = DataStore::lookup_datastore(&store)?;
-    crate::tools::disks::disk_usage(&datastore.base_path())
+    let storage_status = crate::tools::disks::disk_usage(&datastore.base_path())?;
+    let counts = get_snaphots_count(&datastore)?;
+    let gc_status = datastore.last_gc_status();
+
+    let res = json!({
+        "storage": storage_status,
+        "counts": counts,
+        "gc-status": gc_status,
+    });
+
+    Ok(res)
 }
 
 #[api(
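For orientation, the reworked `status` call now returns one combined object instead of a bare `StorageStatus`. A sketch of the response shape with made-up values (assuming `StorageStatus` serializes to `total`/`used`/`avail` fields; the `gc-status` contents come from `GarbageCollectionStatus` and are omitted here):

```rust
fn example_status_body() -> serde_json::Value {
    serde_json::json!({
        "storage": { "total": 4294967296u64, "used": 1073741824u64, "avail": 3221225472u64 },
        // per backup type: [group_count, snapshot_count], since the Rust
        // (usize, usize) tuple serializes to a JSON array
        "counts": { "vm": [3, 12], "ct": [1, 4], "host": [1, 7] },
        "gc-status": {}, // last garbage collection status of the datastore
    })
}
```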
@@ -486,17 +547,20 @@ pub fn verify(
 
     let mut backup_dir = None;
     let mut backup_group = None;
+    let mut worker_type = "verify";
 
     match (backup_type, backup_id, backup_time) {
         (Some(backup_type), Some(backup_id), Some(backup_time)) => {
-            worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
+            worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
             let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
             backup_dir = Some(dir);
+            worker_type = "verify_snapshot";
         }
         (Some(backup_type), Some(backup_id), None) => {
-            worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
+            worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
             let group = BackupGroup::new(backup_type, backup_id);
             backup_group = Some(group);
+            worker_type = "verify_group";
         }
         (None, None, None) => {
             worker_id = store.clone();
@@ -508,13 +572,14 @@ pub fn verify(
     let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
 
     let upid_str = WorkerTask::new_thread(
-        "verify",
+        worker_type,
         Some(worker_id.clone()),
         userid,
         to_stdout,
         move |worker| {
             let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
             let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
+            let filter = |_backup_info: &BackupInfo| { true };
 
             let failed_dirs = if let Some(backup_dir) = backup_dir {
                 let mut res = Vec::new();
@@ -538,10 +603,11 @@ pub fn verify(
                     None,
                     worker.clone(),
                     worker.upid(),
+                    &filter,
                 )?;
                 failed_dirs
             } else {
-                verify_all_backups(datastore, worker.clone(), worker.upid())?
+                verify_all_backups(datastore, worker.clone(), worker.upid(), &filter)?
             };
             if failed_dirs.len() > 0 {
                 worker.log("Failed to verify following snapshots:");
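The always-true `filter` above is a placeholder that keeps this manual verify endpoint checking everything, while the same hook lets the new verification jobs skip snapshots. A hedged sketch of a non-trivial filter, assuming `BackupInfo` is in scope as in the surrounding file (illustrative only; the real jobs decide based on the verification state recorded in each snapshot's manifest, which is not shown in this hunk):

```rust
use std::time::{SystemTime, UNIX_EPOCH};

/// Hypothetical filter: only re-verify snapshots older than `days`.
fn outdated_filter(days: i64) -> impl Fn(&BackupInfo) -> bool {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock before UNIX epoch")
        .as_secs() as i64;
    let cutoff = now - days * 24 * 3600;
    // keep (i.e. verify) only snapshots taken before the cutoff
    move |info: &BackupInfo| info.backup_dir.backup_time() < cutoff
}
```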
@@ -659,7 +725,7 @@ fn prune(
         keep_yearly: param["keep-yearly"].as_u64(),
     };
 
-    let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
+    let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
 
     let mut prune_result = Vec::new();
 
@@ -692,53 +758,52 @@ fn prune(
     // We use a WorkerTask just to have a task log, but run synchronously
     let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;
 
-    let result = try_block! {
-        if keep_all {
-            worker.log("No prune selection - keeping all files.");
-        } else {
-            worker.log(format!("retention options: {}", prune_options.cli_options_string()));
-            worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
-                store, backup_type, backup_id));
-        }
+    if keep_all {
+        worker.log("No prune selection - keeping all files.");
+    } else {
+        worker.log(format!("retention options: {}", prune_options.cli_options_string()));
+        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
+            store, backup_type, backup_id));
+    }
 
-        for (info, mut keep) in prune_info {
-            if keep_all { keep = true; }
+    for (info, mut keep) in prune_info {
+        if keep_all { keep = true; }
 
-            let backup_time = info.backup_dir.backup_time();
-            let timestamp = info.backup_dir.backup_time_string();
-            let group = info.backup_dir.group();
+        let backup_time = info.backup_dir.backup_time();
+        let timestamp = info.backup_dir.backup_time_string();
+        let group = info.backup_dir.group();
 
-            let msg = format!(
-                "{}/{}/{} {}",
-                group.backup_type(),
-                group.backup_id(),
-                timestamp,
-                if keep { "keep" } else { "remove" },
-            );
+        let msg = format!(
+            "{}/{}/{} {}",
+            group.backup_type(),
+            group.backup_id(),
+            timestamp,
+            if keep { "keep" } else { "remove" },
+        );
 
-            worker.log(msg);
+        worker.log(msg);
 
-            prune_result.push(json!({
-                "backup-type": group.backup_type(),
-                "backup-id": group.backup_id(),
-                "backup-time": backup_time,
-                "keep": keep,
-            }));
+        prune_result.push(json!({
+            "backup-type": group.backup_type(),
+            "backup-id": group.backup_id(),
+            "backup-time": backup_time,
+            "keep": keep,
+        }));
 
-            if !(dry_run || keep) {
-                datastore.remove_backup_dir(&info.backup_dir, true)?;
-            }
+        if !(dry_run || keep) {
+            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
+                worker.warn(
+                    format!(
+                        "failed to remove dir {:?}: {}",
+                        info.backup_dir.relative_path(), err
+                    )
+                );
+            }
         }
+    }
 
-        Ok(())
-    };
-
-    worker.log_result(&result);
-
-    if let Err(err) = result {
-        bail!("prune failed - {}", err);
-    };
+    worker.log_result(&Ok(()));
 
     Ok(json!(prune_result))
 }
@@ -1243,6 +1308,66 @@ fn catalog(
     Ok(res.into())
 }
 
+fn recurse_files<'a, T, W>(
+    zip: &'a mut ZipEncoder<W>,
+    decoder: &'a mut Accessor<T>,
+    prefix: &'a Path,
+    file: FileEntry<T>,
+) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
+where
+    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
+    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
+{
+    Box::pin(async move {
+        let metadata = file.entry().metadata();
+        let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();
+
+        match file.kind() {
+            EntryKind::File { .. } => {
+                let entry = ZipEntry::new(
+                    path,
+                    metadata.stat.mtime.secs,
+                    metadata.stat.mode as u16,
+                    true,
+                );
+                zip.add_entry(entry, Some(file.contents().await?))
+                    .await
+                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
+            }
+            EntryKind::Hardlink(_) => {
+                let realfile = decoder.follow_hardlink(&file).await?;
+                let entry = ZipEntry::new(
+                    path,
+                    metadata.stat.mtime.secs,
+                    metadata.stat.mode as u16,
+                    true,
+                );
+                zip.add_entry(entry, Some(realfile.contents().await?))
+                    .await
+                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
+            }
+            EntryKind::Directory => {
+                let dir = file.enter_directory().await?;
+                let mut readdir = dir.read_dir();
+                let entry = ZipEntry::new(
+                    path,
+                    metadata.stat.mtime.secs,
+                    metadata.stat.mode as u16,
+                    false,
+                );
+                zip.add_entry::<FileContents<T>>(entry, None).await?;
+                while let Some(entry) = readdir.next().await {
+                    let entry = entry?.decode_entry().await?;
+                    recurse_files(zip, decoder, prefix, entry).await?;
+                }
+            }
+            _ => {} // ignore all else
+        };
+
+        Ok(())
+    })
+}
+
 #[sortable]
 pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
     &ApiHandler::AsyncHttp(&pxar_file_download),
@@ -1325,23 +1450,55 @@ fn pxar_file_download(
         .lookup(OsStr::from_bytes(file_path)).await?
         .ok_or(format_err!("error opening '{:?}'", file_path))?;
 
-    let file = match file.kind() {
-        EntryKind::File { .. } => file,
-        EntryKind::Hardlink(_) => {
-            decoder.follow_hardlink(&file).await?
-        },
-        // TODO symlink
+    let body = match file.kind() {
+        EntryKind::File { .. } => Body::wrap_stream(
+            AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
+                eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
+                err
+            }),
+        ),
+        EntryKind::Hardlink(_) => Body::wrap_stream(
+            AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
+                .map_err(move |err| {
+                    eprintln!(
+                        "error during streaming of hardlink '{:?}' - {}",
+                        filepath, err
+                    );
+                    err
+                }),
+        ),
+        EntryKind::Directory => {
+            let (sender, receiver) = tokio::sync::mpsc::channel(100);
+            let mut prefix = PathBuf::new();
+            let mut components = file.entry().path().components();
+            components.next_back(); // discard the last component
+            for comp in components {
+                prefix.push(comp);
+            }
+
+            let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
+
+            crate::server::spawn_internal_task(async move {
+                let mut zipencoder = ZipEncoder::new(channelwriter);
+                let mut decoder = decoder;
+                recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
+                    .await
+                    .map_err(|err| eprintln!("error during creating of zip: {}", err))?;
+
+                zipencoder
                    .finish()
+                    .await
+                    .map_err(|err| eprintln!("error during finishing of zip: {}", err))
+            });
+
+            Body::wrap_stream(receiver.map_err(move |err| {
+                eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
+                err
+            }))
+        }
         other => bail!("cannot download file of type {:?}", other),
     };
 
-    let body = Body::wrap_stream(
-        AsyncReaderStream::new(file.contents().await?)
-            .map_err(move |err| {
-                eprintln!("error during streaming of '{:?}' - {}", filepath, err);
-                err
-            })
-    );
-
     // fixme: set other headers ?
     Ok(Response::builder()
         .status(StatusCode::OK)
@@ -1430,9 +1587,9 @@ fn get_notes(
     let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
     if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
 
-    let manifest = datastore.load_manifest_json(&backup_dir)?;
+    let (manifest, _) = datastore.load_manifest(&backup_dir)?;
 
-    let notes = manifest["unprotected"]["notes"]
+    let notes = manifest.unprotected["notes"]
         .as_str()
         .unwrap_or("");
 
@@ -1483,17 +1640,15 @@ fn set_notes(
     let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
     if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
 
-    let mut manifest = datastore.load_manifest_json(&backup_dir)?;
-
-    manifest["unprotected"]["notes"] = notes.into();
-
-    datastore.store_manifest(&backup_dir, manifest)?;
+    datastore.update_manifest(&backup_dir, |manifest| {
+        manifest.unprotected["notes"] = notes.into();
+    }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
 
     Ok(())
 }
 
 #[api(
     input: {
         properties: {
             store: {
                 schema: DATASTORE_SCHEMA,
@@ -1508,10 +1663,10 @@ fn set_notes(
                 type: Userid,
             },
         },
     },
     access: {
         permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
     },
 )]
 /// Change owner of a backup group
 fn set_backup_owner(
@@ -9,13 +9,18 @@ use crate::api2::types::*;
 use crate::api2::pull::do_sync_job;
 use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
 use crate::server::UPID;
-use crate::config::jobstate::{Job, JobState};
+use crate::server::jobstate::{Job, JobState};
 use crate::tools::systemd::time::{
     parse_calendar_event, compute_next_event};
 
 #[api(
     input: {
-        properties: {},
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+                optional: true,
+            },
+        },
     },
     returns: {
         description: "List configured jobs and their status.",
@@ -25,13 +30,23 @@ use crate::tools::systemd::time::{
 )]
 /// List all sync jobs
 pub fn list_sync_jobs(
+    store: Option<String>,
     _param: Value,
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<SyncJobStatus>, Error> {
 
     let (config, digest) = sync::config()?;
 
-    let mut list: Vec<SyncJobStatus> = config.convert_to_typed_array("sync")?;
+    let mut list: Vec<SyncJobStatus> = config
+        .convert_to_typed_array("sync")?
+        .into_iter()
+        .filter(|job: &SyncJobStatus| {
+            if let Some(store) = &store {
+                &job.store == store
+            } else {
+                true
+            }
+        }).collect();
 
     for job in &mut list {
         let last_state = JobState::load("syncjob", &job.id)
src/api2/admin/verify.rs (new file, 122 lines)

@@ -0,0 +1,122 @@
+use anyhow::{format_err, Error};
+
+use proxmox::api::router::SubdirMap;
+use proxmox::{list_subdirs_api_method, sortable};
+use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
+
+use crate::api2::types::*;
+use crate::server::do_verification_job;
+use crate::server::jobstate::{Job, JobState};
+use crate::config::verify;
+use crate::config::verify::{VerificationJobConfig, VerificationJobStatus};
+use serde_json::Value;
+use crate::tools::systemd::time::{parse_calendar_event, compute_next_event};
+use crate::server::UPID;
+
+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        description: "List configured jobs and their status.",
+        type: Array,
+        items: { type: verify::VerificationJobStatus },
+    },
+)]
+/// List all verification jobs
+pub fn list_verification_jobs(
+    store: Option<String>,
+    _param: Value,
+    mut rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<VerificationJobStatus>, Error> {
+
+    let (config, digest) = verify::config()?;
+
+    let mut list: Vec<VerificationJobStatus> = config
+        .convert_to_typed_array("verification")?
+        .into_iter()
+        .filter(|job: &VerificationJobStatus| {
+            if let Some(store) = &store {
+                &job.store == store
+            } else {
+                true
+            }
+        }).collect();
+
+    for job in &mut list {
+        let last_state = JobState::load("verificationjob", &job.id)
+            .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
+
+        let (upid, endtime, state, starttime) = match last_state {
+            JobState::Created { time } => (None, None, None, time),
+            JobState::Started { upid } => {
+                let parsed_upid: UPID = upid.parse()?;
+                (Some(upid), None, None, parsed_upid.starttime)
+            },
+            JobState::Finished { upid, state } => {
+                let parsed_upid: UPID = upid.parse()?;
+                (Some(upid), Some(state.endtime()), Some(state.to_string()), parsed_upid.starttime)
+            },
+        };
+
+        job.last_run_upid = upid;
+        job.last_run_state = state;
+        job.last_run_endtime = endtime;
+
+        let last = job.last_run_endtime.unwrap_or_else(|| starttime);
+
+        job.next_run = (|| -> Option<i64> {
+            let schedule = job.schedule.as_ref()?;
+            let event = parse_calendar_event(&schedule).ok()?;
+            // ignore errors
+            compute_next_event(&event, last, false).unwrap_or_else(|_| None)
+        })();
+    }
+
+    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+
+    Ok(list)
+}
+
+#[api(
+    input: {
+        properties: {
+            id: {
+                schema: JOB_ID_SCHEMA,
+            }
+        }
+    }
+)]
+/// Runs a verification job manually.
+fn run_verification_job(
+    id: String,
+    _info: &ApiMethod,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<String, Error> {
+    let (config, _digest) = verify::config()?;
+    let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;
+
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+
+    let job = Job::new("verificationjob", &id)?;
+
+    let upid_str = do_verification_job(job, verification_job, &userid, None)?;
+
+    Ok(upid_str)
+}
+
+#[sortable]
+const VERIFICATION_INFO_SUBDIRS: SubdirMap = &[("run", &Router::new().post(&API_METHOD_RUN_VERIFICATION_JOB))];
+
+const VERIFICATION_INFO_ROUTER: Router = Router::new()
+    .get(&list_subdirs_api_method!(VERIFICATION_INFO_SUBDIRS))
+    .subdirs(VERIFICATION_INFO_SUBDIRS);
+
+pub const ROUTER: Router = Router::new()
+    .get(&API_METHOD_LIST_VERIFICATION_JOBS)
+    .match_all("id", &VERIFICATION_INFO_ROUTER);
@@ -16,7 +16,7 @@ use crate::backup::*;
 use crate::api2::types::*;
 use crate::config::acl::PRIV_DATASTORE_BACKUP;
 use crate::config::cached_user_info::CachedUserInfo;
-use crate::tools::fs::lock_dir_noblock;
+use crate::tools::fs::lock_dir_noblock_shared;
 
 mod environment;
 use environment::*;
@@ -86,7 +86,7 @@ async move {
         bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
     }
 
-    let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
+    let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
 
     let env_type = rpcenv.env_type();
 
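The worker ID now embeds ':' and '/' separators; according to the changelog, task worker IDs are round-tripped through systemd-style escaping so such characters survive in on-disk task identifiers. A rough standalone sketch of that escaping, as an assumption about the semantics (the real helper used by the task code is not part of this diff and may differ in name and details):

```rust
/// Sketch of systemd-style escaping (assumption: mirrors `systemd-escape`).
fn systemd_escape(id: &str) -> String {
    let mut out = String::new();
    for (i, b) in id.bytes().enumerate() {
        match b {
            b'/' => out.push('-'), // path separators become dashes
            b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' | b'_' => out.push(b as char),
            b'.' if i > 0 => out.push('.'), // only a leading dot needs escaping
            _ => out.push_str(&format!("\\x{:02x}", b)), // e.g. ':' becomes \x3a
        }
    }
    out
}

// systemd_escape("store1:vm/100") yields "store1\x3avm-100"
```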
@@ -144,12 +144,12 @@ async move {
 
         // lock last snapshot to prevent forgetting/pruning it during backup
         let full_path = datastore.snapshot_path(&last.backup_dir);
-        Some(lock_dir_noblock(&full_path, "snapshot", "base snapshot is already locked by another operation")?)
+        Some(lock_dir_noblock_shared(&full_path, "snapshot", "base snapshot is already locked by another operation")?)
     } else {
         None
     };
 
-    let (path, is_new, _snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
+    let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
     if !is_new { bail!("backup directory already exists."); }
 
@@ -182,8 +182,22 @@ async move {
             http.http2_initial_connection_window_size(window_size);
             http.http2_max_frame_size(4*1024*1024);
 
+            let env3 = env2.clone();
             http.serve_connection(conn, service)
-                .map_err(Error::from)
+                .map(move |result| {
+                    match result {
+                        Err(err) => {
+                            // Avoid Transport endpoint is not connected (os error 107)
+                            // fixme: find a better way to test for that error
+                            if err.to_string().starts_with("connection error") && env3.finished() {
+                                Ok(())
+                            } else {
+                                Err(Error::from(err))
+                            }
+                        }
+                        Ok(()) => Ok(()),
+                    }
+                })
         });
         let mut abort_future = abort_future
             .map(|_| Err(format_err!("task aborted")));
@@ -191,7 +205,7 @@ async move {
     async move {
         // keep flock until task ends
         let _group_guard = _group_guard;
-        let _snap_guard = _snap_guard;
+        let snap_guard = snap_guard;
         let _last_guard = _last_guard;
 
         let res = select!{
@@ -203,20 +217,32 @@ async move {
             tools::runtime::block_in_place(|| env.remove_backup())?;
             return Ok(());
         }
 
+        let verify = |env: BackupEnvironment| {
+            if let Err(err) = env.verify_after_complete(snap_guard) {
+                env.log(format!(
+                    "backup finished, but starting the requested verify task failed: {}",
+                    err
+                ));
+            }
+        };
+
         match (res, env.ensure_finished()) {
             (Ok(_), Ok(())) => {
                 env.log("backup finished successfully");
+                verify(env);
                 Ok(())
             },
             (Err(err), Ok(())) => {
                 // ignore errors after finish
                 env.log(format!("backup had errors but finished: {}", err));
+                verify(env);
                 Ok(())
             },
             (Ok(_), Err(err)) => {
                 env.log(format!("backup ended and finish failed: {}", err));
                 env.log("removing unfinished backup");
-                env.remove_backup()?;
+                tools::runtime::block_in_place(|| env.remove_backup())?;
                 Err(err)
             },
             (Err(err), Err(_)) => {
|
@ -1,6 +1,7 @@
|
|||||||
use anyhow::{bail, format_err, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
use std::sync::{Arc, Mutex};
|
use std::sync::{Arc, Mutex};
|
||||||
use std::collections::HashMap;
|
use std::collections::{HashMap, HashSet};
|
||||||
|
use nix::dir::Dir;
|
||||||
|
|
||||||
use ::serde::{Serialize};
|
use ::serde::{Serialize};
|
||||||
use serde_json::{json, Value};
|
use serde_json::{json, Value};
|
||||||
@ -472,16 +473,11 @@ impl BackupEnvironment {
|
|||||||
bail!("backup does not contain valid files (file count == 0)");
|
bail!("backup does not contain valid files (file count == 0)");
|
||||||
}
|
}
|
||||||
|
|
||||||
// check manifest
|
// check for valid manifest and store stats
|
||||||
let mut manifest = self.datastore.load_manifest_json(&self.backup_dir)
|
|
||||||
.map_err(|err| format_err!("unable to load manifest blob - {}", err))?;
|
|
||||||
|
|
||||||
let stats = serde_json::to_value(state.backup_stat)?;
|
let stats = serde_json::to_value(state.backup_stat)?;
|
||||||
|
self.datastore.update_manifest(&self.backup_dir, |manifest| {
|
||||||
manifest["unprotected"]["chunk_upload_stats"] = stats;
|
manifest.unprotected["chunk_upload_stats"] = stats;
|
||||||
|
}).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
|
||||||
self.datastore.store_manifest(&self.backup_dir, manifest)
|
|
||||||
.map_err(|err| format_err!("unable to store manifest blob - {}", err))?;
|
|
||||||
|
|
||||||
if let Some(base) = &self.last_backup {
|
if let Some(base) = &self.last_backup {
|
||||||
let path = self.datastore.snapshot_path(&base.backup_dir);
|
let path = self.datastore.snapshot_path(&base.backup_dir);
|
||||||
@@ -499,6 +495,54 @@ impl BackupEnvironment {
         Ok(())
     }
 
+    /// If verify-new is set on the datastore, this will run a new verify task
+    /// for the backup. If not, this will return and also drop the passed lock
+    /// immediately.
+    pub fn verify_after_complete(&self, snap_lock: Dir) -> Result<(), Error> {
+        self.ensure_finished()?;
+
+        if !self.datastore.verify_new() {
+            // no verify requested, do nothing
+            return Ok(());
+        }
+
+        let worker_id = format!("{}:{}/{}/{:08X}",
+            self.datastore.name(),
+            self.backup_dir.group().backup_type(),
+            self.backup_dir.group().backup_id(),
+            self.backup_dir.backup_time());
+
+        let datastore = self.datastore.clone();
+        let backup_dir = self.backup_dir.clone();
+
+        WorkerTask::new_thread(
+            "verify",
+            Some(worker_id),
+            self.user.clone(),
+            false,
+            move |worker| {
+                worker.log("Automatically verifying newly added snapshot");
+
+                let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
+                let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
+
+                if !verify_backup_dir_with_lock(
+                    datastore,
+                    &backup_dir,
+                    verified_chunks,
+                    corrupt_chunks,
+                    worker.clone(),
+                    worker.upid().clone(),
+                    snap_lock,
+                )? {
+                    bail!("verification failed - please check the log for details");
+                }
+
+                Ok(())
+            },
+        ).map(|_| ())
+    }
+
     pub fn log<S: AsRef<str>>(&self, msg: S) {
         self.worker.log(msg);
     }
@@ -523,6 +567,12 @@ impl BackupEnvironment {
         Ok(())
     }
 
+    /// Return true if the finished flag is set
+    pub fn finished(&self) -> bool {
+        let state = self.state.lock().unwrap();
+        state.finished
+    }
+
     /// Remove complete backup
     pub fn remove_backup(&self) -> Result<(), Error> {
         let mut state = self.state.lock().unwrap();
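Note (hypothetical call-site sketch, not from the patch): the snapshot lock stays held through the finish path and is then handed over; `verify_after_complete()` either moves it into the verify worker or simply drops it when verify-new is not set on the datastore:

fn after_finish(env: &BackupEnvironment, snap_lock: nix::dir::Dir) -> Result<(), anyhow::Error> {
    // consumes the lock either way; see the doc comment in the hunk above
    env.verify_after_complete(snap_lock)
}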
@@ -4,11 +4,13 @@ use proxmox::list_subdirs_api_method;
 pub mod datastore;
 pub mod remote;
 pub mod sync;
+pub mod verify;
 
 const SUBDIRS: SubdirMap = &[
     ("datastore", &datastore::ROUTER),
     ("remote", &remote::ROUTER),
     ("sync", &sync::ROUTER),
+    ("verify", &verify::ROUTER)
 ];
 
 pub const ROUTER: Router = Router::new()
@@ -12,6 +12,7 @@ use crate::backup::*;
 use crate::config::cached_user_info::CachedUserInfo;
 use crate::config::datastore::{self, DataStoreConfig, DIR_NAME_SCHEMA};
 use crate::config::acl::{PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY};
+use crate::server::jobstate;
 
 #[api(
     input: {
@@ -75,10 +76,6 @@ pub fn list_datastores(
             optional: true,
             schema: PRUNE_SCHEDULE_SCHEMA,
         },
-        "verify-schedule": {
-            optional: true,
-            schema: VERIFY_SCHEDULE_SCHEMA,
-        },
         "keep-last": {
             optional: true,
             schema: PRUNE_SCHEMA_KEEP_LAST,
@@ -131,9 +128,8 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {
 
     datastore::save_config(&config)?;
 
-    crate::config::jobstate::create_state_file("prune", &datastore.name)?;
-    crate::config::jobstate::create_state_file("garbage_collection", &datastore.name)?;
-    crate::config::jobstate::create_state_file("verify", &datastore.name)?;
+    jobstate::create_state_file("prune", &datastore.name)?;
+    jobstate::create_state_file("garbage_collection", &datastore.name)?;
 
     Ok(())
 }
@@ -179,8 +175,6 @@ pub enum DeletableProperty {
     gc_schedule,
     /// Delete the prune job schedule.
     prune_schedule,
-    /// Delete the verify schedule property
-    verify_schedule,
     /// Delete the keep-last property
     keep_last,
     /// Delete the keep-hourly property
@@ -214,10 +208,6 @@ pub enum DeletableProperty {
             optional: true,
             schema: PRUNE_SCHEDULE_SCHEMA,
         },
-        "verify-schedule": {
-            optional: true,
-            schema: VERIFY_SCHEDULE_SCHEMA,
-        },
         "keep-last": {
             optional: true,
             schema: PRUNE_SCHEMA_KEEP_LAST,
@@ -266,7 +256,6 @@ pub fn update_datastore(
     comment: Option<String>,
     gc_schedule: Option<String>,
     prune_schedule: Option<String>,
-    verify_schedule: Option<String>,
     keep_last: Option<u64>,
     keep_hourly: Option<u64>,
     keep_daily: Option<u64>,
@@ -295,7 +284,6 @@ pub fn update_datastore(
                 DeletableProperty::comment => { data.comment = None; },
                 DeletableProperty::gc_schedule => { data.gc_schedule = None; },
                 DeletableProperty::prune_schedule => { data.prune_schedule = None; },
-                DeletableProperty::verify_schedule => { data.verify_schedule = None; },
                 DeletableProperty::keep_last => { data.keep_last = None; },
                 DeletableProperty::keep_hourly => { data.keep_hourly = None; },
                 DeletableProperty::keep_daily => { data.keep_daily = None; },
@@ -327,12 +315,6 @@ pub fn update_datastore(
         data.prune_schedule = prune_schedule;
     }
 
-    let mut verify_schedule_changed = false;
-    if verify_schedule.is_some() {
-        verify_schedule_changed = data.verify_schedule != verify_schedule;
-        data.verify_schedule = verify_schedule;
-    }
-
     if keep_last.is_some() { data.keep_last = keep_last; }
     if keep_hourly.is_some() { data.keep_hourly = keep_hourly; }
     if keep_daily.is_some() { data.keep_daily = keep_daily; }
@@ -347,15 +329,11 @@ pub fn update_datastore(
     // we want to reset the statefiles, to avoid an immediate action in some cases
     // (e.g. going from monthly to weekly in the second week of the month)
     if gc_schedule_changed {
-        crate::config::jobstate::create_state_file("garbage_collection", &name)?;
+        jobstate::create_state_file("garbage_collection", &name)?;
     }
 
     if prune_schedule_changed {
-        crate::config::jobstate::create_state_file("prune", &name)?;
-    }
-
-    if verify_schedule_changed {
-        crate::config::jobstate::create_state_file("verify", &name)?;
+        jobstate::create_state_file("prune", &name)?;
     }
 
     Ok(())
@@ -398,9 +376,8 @@ pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> {
     datastore::save_config(&config)?;
 
     // ignore errors
-    let _ = crate::config::jobstate::remove_state_file("prune", &name);
-    let _ = crate::config::jobstate::remove_state_file("garbage_collection", &name);
-    let _ = crate::config::jobstate::remove_state_file("verify", &name);
+    let _ = jobstate::remove_state_file("prune", &name);
+    let _ = jobstate::remove_state_file("garbage_collection", &name);
 
     Ok(())
 }
@@ -1,7 +1,6 @@
 use anyhow::{bail, Error};
 use serde_json::Value;
 use ::serde::{Deserialize, Serialize};
-use base64;
 
 use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
 use proxmox::tools::fs::open_file_locked;
@@ -83,7 +83,7 @@ pub fn create_sync_job(param: Value) -> Result<(), Error> {
 
     sync::save_config(&config)?;
 
-    crate::config::jobstate::create_state_file("syncjob", &sync_job.id)?;
+    crate::server::jobstate::create_state_file("syncjob", &sync_job.id)?;
 
     Ok(())
 }
@@ -266,7 +266,7 @@ pub fn delete_sync_job(id: String, digest: Option<String>) -> Result<(), Error> {
 
     sync::save_config(&config)?;
 
-    crate::config::jobstate::remove_state_file("syncjob", &id)?;
+    crate::server::jobstate::remove_state_file("syncjob", &id)?;
 
     Ok(())
 }
src/api2/config/verify.rs (new file, 274 lines)
@@ -0,0 +1,274 @@
+use anyhow::{bail, Error};
+use serde_json::Value;
+use ::serde::{Deserialize, Serialize};
+
+use proxmox::api::{api, Router, RpcEnvironment};
+use proxmox::tools::fs::open_file_locked;
+
+use crate::api2::types::*;
+use crate::config::verify::{self, VerificationJobConfig};
+
+#[api(
+    input: {
+        properties: {},
+    },
+    returns: {
+        description: "List configured jobs.",
+        type: Array,
+        items: { type: verify::VerificationJobConfig },
+    },
+)]
+/// List all verification jobs
+pub fn list_verification_jobs(
+    _param: Value,
+    mut rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<VerificationJobConfig>, Error> {
+
+    let (config, digest) = verify::config()?;
+
+    let list = config.convert_to_typed_array("verification")?;
+
+    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+
+    Ok(list)
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            id: {
+                schema: JOB_ID_SCHEMA,
+            },
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "ignore-verified": {
+                optional: true,
+                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
+            },
+            "outdated-after": {
+                optional: true,
+                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
+            },
+            comment: {
+                optional: true,
+                schema: SINGLE_LINE_COMMENT_SCHEMA,
+            },
+            schedule: {
+                optional: true,
+                schema: VERIFICATION_SCHEDULE_SCHEMA,
+            },
+        }
+    }
+)]
+/// Create a new verification job.
+pub fn create_verification_job(param: Value) -> Result<(), Error> {
+
+    let _lock = open_file_locked(verify::VERIFICATION_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
+
+    let verification_job: verify::VerificationJobConfig = serde_json::from_value(param.clone())?;
+
+    let (mut config, _digest) = verify::config()?;
+
+    if let Some(_) = config.sections.get(&verification_job.id) {
+        bail!("job '{}' already exists.", verification_job.id);
+    }
+
+    config.set_data(&verification_job.id, "verification", &verification_job)?;
+
+    verify::save_config(&config)?;
+
+    crate::server::jobstate::create_state_file("verificationjob", &verification_job.id)?;
+
+    Ok(())
+}
+
+#[api(
+    input: {
+        properties: {
+            id: {
+                schema: JOB_ID_SCHEMA,
+            },
+        },
+    },
+    returns: {
+        description: "The verification job configuration.",
+        type: verify::VerificationJobConfig,
+    },
+)]
+/// Read a verification job configuration.
+pub fn read_verification_job(
+    id: String,
+    mut rpcenv: &mut dyn RpcEnvironment,
+) -> Result<VerificationJobConfig, Error> {
+    let (config, digest) = verify::config()?;
+
+    let verification_job = config.lookup("verification", &id)?;
+    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+
+    Ok(verification_job)
+}
+
+#[api()]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all="kebab-case")]
+/// Deletable property name
+pub enum DeletableProperty {
+    /// Delete the ignore verified property.
+    IgnoreVerified,
+    /// Delete the comment property.
+    Comment,
+    /// Delete the job schedule.
+    Schedule,
+    /// Delete outdated after property.
+    OutdatedAfter
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            id: {
+                schema: JOB_ID_SCHEMA,
+            },
+            store: {
+                optional: true,
+                schema: DATASTORE_SCHEMA,
+            },
+            "ignore-verified": {
+                optional: true,
+                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
+            },
+            "outdated-after": {
+                optional: true,
+                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
+            },
+            comment: {
+                optional: true,
+                schema: SINGLE_LINE_COMMENT_SCHEMA,
+            },
+            schedule: {
+                optional: true,
+                schema: VERIFICATION_SCHEDULE_SCHEMA,
+            },
+            delete: {
+                description: "List of properties to delete.",
+                type: Array,
+                optional: true,
+                items: {
+                    type: DeletableProperty,
+                }
+            },
+            digest: {
+                optional: true,
+                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
+            },
+        },
+    },
+)]
+/// Update verification job config.
+pub fn update_verification_job(
+    id: String,
+    store: Option<String>,
+    ignore_verified: Option<bool>,
+    outdated_after: Option<i64>,
+    comment: Option<String>,
+    schedule: Option<String>,
+    delete: Option<Vec<DeletableProperty>>,
+    digest: Option<String>,
+) -> Result<(), Error> {
+
+    let _lock = open_file_locked(verify::VERIFICATION_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
+
+    // pass/compare digest
+    let (mut config, expected_digest) = verify::config()?;
+
+    if let Some(ref digest) = digest {
+        let digest = proxmox::tools::hex_to_digest(digest)?;
+        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
+    }
+
+    let mut data: verify::VerificationJobConfig = config.lookup("verification", &id)?;
+
+    if let Some(delete) = delete {
+        for delete_prop in delete {
+            match delete_prop {
+                DeletableProperty::IgnoreVerified => { data.ignore_verified = None; },
+                DeletableProperty::OutdatedAfter => { data.outdated_after = None; },
+                DeletableProperty::Comment => { data.comment = None; },
+                DeletableProperty::Schedule => { data.schedule = None; },
+            }
+        }
+    }
+
+    if let Some(comment) = comment {
+        let comment = comment.trim().to_string();
+        if comment.is_empty() {
+            data.comment = None;
+        } else {
+            data.comment = Some(comment);
+        }
+    }
+
+    if let Some(store) = store { data.store = store; }
+
+    if ignore_verified.is_some() { data.ignore_verified = ignore_verified; }
+    if outdated_after.is_some() { data.outdated_after = outdated_after; }
+    if schedule.is_some() { data.schedule = schedule; }
+
+    config.set_data(&id, "verification", &data)?;
+
+    verify::save_config(&config)?;
+
+    Ok(())
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            id: {
+                schema: JOB_ID_SCHEMA,
+            },
+            digest: {
+                optional: true,
+                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
+            },
+        },
+    },
+)]
+/// Remove a verification job configuration
+pub fn delete_verification_job(id: String, digest: Option<String>) -> Result<(), Error> {
+
+    let _lock = open_file_locked(verify::VERIFICATION_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
+
+    let (mut config, expected_digest) = verify::config()?;
+
+    if let Some(ref digest) = digest {
+        let digest = proxmox::tools::hex_to_digest(digest)?;
+        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
+    }
+
+    match config.sections.get(&id) {
+        Some(_) => { config.sections.remove(&id); },
+        None => bail!("job '{}' does not exist.", id),
+    }
+
+    verify::save_config(&config)?;
+
+    crate::server::jobstate::remove_state_file("verificationjob", &id)?;
+
+    Ok(())
+}
+
+const ITEM_ROUTER: Router = Router::new()
+    .get(&API_METHOD_READ_VERIFICATION_JOB)
+    .put(&API_METHOD_UPDATE_VERIFICATION_JOB)
+    .delete(&API_METHOD_DELETE_VERIFICATION_JOB);
+
+pub const ROUTER: Router = Router::new()
+    .get(&API_METHOD_LIST_VERIFICATION_JOBS)
+    .post(&API_METHOD_CREATE_VERIFICATION_JOB)
+    .match_all("id", &ITEM_ROUTER);
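Note (illustration, not from the patch): given the `("verify", &verify::ROUTER)` entry added to the config SUBDIRS above, the endpoint path is presumably /api2/json/config/verify. A sketch of the JSON body a client might POST there, matching the kebab-case properties declared in the #[api] macro; the id, store, and schedule values are made up for the example:

use serde_json::json;

fn example_create_params() -> serde_json::Value {
    json!({
        "id": "verify-store1-daily",   // JOB_ID_SCHEMA (hypothetical name)
        "store": "store1",             // DATASTORE_SCHEMA (hypothetical store)
        "ignore-verified": true,       // skip snapshots with a fresh verification
        "outdated-after": 30,          // re-verify after 30 days
        "schedule": "daily",           // VERIFICATION_SCHEDULE_SCHEMA (calendar event)
    })
}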
@@ -1,5 +1,7 @@
+use std::collections::HashSet;
+
 use apt_pkg_native::Cache;
-use anyhow::{Error, bail};
+use anyhow::{Error, bail, format_err};
 use serde_json::{json, Value};
 
 use proxmox::{list_subdirs_api_method, const_regex};
|
|||||||
use proxmox::api::router::{Router, SubdirMap};
|
use proxmox::api::router::{Router, SubdirMap};
|
||||||
|
|
||||||
use crate::server::WorkerTask;
|
use crate::server::WorkerTask;
|
||||||
|
use crate::tools::http;
|
||||||
|
|
||||||
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
|
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
|
||||||
use crate::api2::types::{APTUpdateInfo, NODE_SCHEMA, Userid, UPID_SCHEMA};
|
use crate::api2::types::{APTUpdateInfo, NODE_SCHEMA, Userid, UPID_SCHEMA};
|
||||||
@@ -16,14 +19,13 @@ const_regex! {
     FILENAME_EXTRACT_REGEX = r"^.*/.*?_(.*)_Packages$";
 }
 
-// FIXME: Replace with call to 'apt changelog <pkg> --print-uris'. Currently
-// not possible as our packages do not have a URI set in their Release file
+// FIXME: once the 'changelog' API call switches over to 'apt-get changelog' only,
+// consider removing this function entirely, as it's value is never used anywhere
+// then (widget-toolkit doesn't use the value either)
 fn get_changelog_url(
     package: &str,
     filename: &str,
-    source_pkg: &str,
     version: &str,
-    source_version: &str,
     origin: &str,
     component: &str,
 ) -> Result<String, Error> {
@@ -32,25 +34,24 @@ fn get_changelog_url(
     }
 
     if origin == "Debian" {
-        let source_version = (VERSION_EPOCH_REGEX.regex_obj)().replace_all(source_version, "");
-
-        let prefix = if source_pkg.starts_with("lib") {
-            source_pkg.get(0..4)
-        } else {
-            source_pkg.get(0..1)
-        };
-        let prefix = match prefix {
-            Some(p) => p,
-            None => bail!("cannot get starting characters of package name '{}'", package)
-        };
-
-        // note: security updates seem to not always upload a changelog for
-        // their package version, so this only works *most* of the time
-        return Ok(format!("https://metadata.ftp-master.debian.org/changelogs/main/{}/{}/{}_{}_changelog",
-                          prefix, source_pkg, source_pkg, source_version));
-
+        let mut command = std::process::Command::new("apt-get");
+        command.arg("changelog");
+        command.arg("--print-uris");
+        command.arg(package);
+        let output = crate::tools::run_command(command, None)?; // format: 'http://foo/bar' package.changelog
+        let output = match output.splitn(2, ' ').next() {
+            Some(output) => {
+                if output.len() < 2 {
+                    bail!("invalid output (URI part too short) from 'apt-get changelog --print-uris': {}", output)
+                }
+                output[1..output.len()-1].to_owned()
+            },
+            None => bail!("invalid output from 'apt-get changelog --print-uris': {}", output)
+        };
+        return Ok(output);
     } else if origin == "Proxmox" {
+        // FIXME: Use above call to 'apt changelog <pkg> --print-uris' as well.
+        // Currently not possible as our packages do not have a URI set in their Release file.
         let version = (VERSION_EPOCH_REGEX.regex_obj)().replace_all(version, "");
 
         let base = match (FILENAME_EXTRACT_REGEX.regex_obj)().captures(filename) {
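Note (standalone sketch of the quoting convention the new code relies on, not part of the patch): the first whitespace-separated token of `apt-get changelog --print-uris` output is the URI wrapped in single quotes, e.g. 'http://foo/bar' package.changelog, so stripping the first and last character of that token yields the bare URI:

fn extract_uri(output: &str) -> Option<String> {
    let token = output.splitn(2, ' ').next()?;   // "'http://foo/bar'"
    if token.len() < 2 {
        return None;                              // too short to hold quotes
    }
    Some(token[1..token.len() - 1].to_owned())    // drop the surrounding quotes
}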
@@ -71,115 +72,229 @@ fn get_changelog_url(
     bail!("unknown origin ({}) or component ({})", origin, component)
 }
 
-fn list_installed_apt_packages<F: Fn(&str, &str, &str) -> bool>(filter: F)
--> Vec<APTUpdateInfo> {
-    let mut ret = Vec::new();
-
-    // note: this is not an 'apt update', it just re-reads the cache from disk
-    let mut cache = Cache::get_singleton();
-    cache.reload();
-
-    let mut cache_iter = cache.iter();
-
-    loop {
-        let view = match cache_iter.next() {
-            Some(view) => view,
-            None => break
-        };
-
-        let current_version = match view.current_version() {
-            Some(vers) => vers,
-            None => continue
-        };
-        let candidate_version = match view.candidate_version() {
-            Some(vers) => vers,
-            // if there's no candidate (i.e. no update) get info of currently
-            // installed version instead
-            None => current_version.clone()
-        };
-
-        let package = view.name();
-        if filter(&package, &current_version, &candidate_version) {
-            let mut origin_res = "unknown".to_owned();
-            let mut section_res = "unknown".to_owned();
-            let mut priority_res = "unknown".to_owned();
-            let mut change_log_url = "".to_owned();
-            let mut short_desc = package.clone();
-            let mut long_desc = "".to_owned();
-
-            // get additional information via nested APT 'iterators'
-            let mut view_iter = view.versions();
-            while let Some(ver) = view_iter.next() {
-                if ver.version() == candidate_version {
-                    if let Some(section) = ver.section() {
-                        section_res = section;
-                    }
-
-                    if let Some(prio) = ver.priority_type() {
-                        priority_res = prio;
-                    }
-
-                    // assume every package has only one origin file (not
-                    // origin, but origin *file*, for some reason those seem to
-                    // be different concepts in APT)
-                    let mut origin_iter = ver.origin_iter();
-                    let origin = origin_iter.next();
-                    if let Some(origin) = origin {
-
-                        if let Some(sd) = origin.short_desc() {
-                            short_desc = sd;
-                        }
-
-                        if let Some(ld) = origin.long_desc() {
-                            long_desc = ld;
-                        }
-
-                        // the package files appear in priority order, meaning
-                        // the one for the candidate version is first
-                        let mut pkg_iter = origin.file();
-                        let pkg_file = pkg_iter.next();
-                        if let Some(pkg_file) = pkg_file {
-                            if let Some(origin_name) = pkg_file.origin() {
-                                origin_res = origin_name;
-                            }
-
-                            let filename = pkg_file.file_name();
-                            let source_pkg = ver.source_package();
-                            let source_ver = ver.source_version();
-                            let component = pkg_file.component();
-
-                            // build changelog URL from gathered information
-                            // ignore errors, use empty changelog instead
-                            let url = get_changelog_url(&package, &filename, &source_pkg,
-                                &candidate_version, &source_ver, &origin_res, &component);
-                            if let Ok(url) = url {
-                                change_log_url = url;
-                            }
-                        }
-                    }
-
-                    break;
-                }
-            }
-
-            let info = APTUpdateInfo {
-                package,
-                title: short_desc,
-                arch: view.arch(),
-                description: long_desc,
-                change_log_url,
-                origin: origin_res,
-                version: candidate_version,
-                old_version: current_version,
-                priority: priority_res,
-                section: section_res,
-            };
-            ret.push(info);
-        }
-    }
-
-    return ret;
-}
+struct FilterData<'a> {
+    // this is version info returned by APT
+    installed_version: Option<&'a str>,
+    candidate_version: &'a str,
+
+    // this is the version info the filter is supposed to check
+    active_version: &'a str,
+}
+
+enum PackagePreSelect {
+    OnlyInstalled,
+    OnlyNew,
+    All,
+}
+
+fn list_installed_apt_packages<F: Fn(FilterData) -> bool>(
+    filter: F,
+    only_versions_for: Option<&str>,
+) -> Vec<APTUpdateInfo> {
+
+    let mut ret = Vec::new();
+    let mut depends = HashSet::new();
+
+    // note: this is not an 'apt update', it just re-reads the cache from disk
+    let mut cache = Cache::get_singleton();
+    cache.reload();
+
+    let mut cache_iter = match only_versions_for {
+        Some(name) => cache.find_by_name(name),
+        None => cache.iter()
+    };
+
+    loop {
+        match cache_iter.next() {
+            Some(view) => {
+                let di = if only_versions_for.is_some() {
+                    query_detailed_info(
+                        PackagePreSelect::All,
+                        &filter,
+                        view,
+                        None
+                    )
+                } else {
+                    query_detailed_info(
+                        PackagePreSelect::OnlyInstalled,
+                        &filter,
+                        view,
+                        Some(&mut depends)
+                    )
+                };
+                if let Some(info) = di {
+                    ret.push(info);
+                }
+
+                if only_versions_for.is_some() {
+                    break;
+                }
+            },
+            None => {
+                drop(cache_iter);
+                // also loop through missing dependencies, as they would be installed
+                for pkg in depends.iter() {
+                    let mut iter = cache.find_by_name(&pkg);
+                    let view = match iter.next() {
+                        Some(view) => view,
+                        None => continue // package not found, ignore
+                    };
+
+                    let di = query_detailed_info(
+                        PackagePreSelect::OnlyNew,
+                        &filter,
+                        view,
+                        None
+                    );
+                    if let Some(info) = di {
+                        ret.push(info);
+                    }
+                }
+                break;
+            }
+        }
+    }
+
+    return ret;
+}
+
+fn query_detailed_info<'a, F, V>(
+    pre_select: PackagePreSelect,
+    filter: F,
+    view: V,
+    depends: Option<&mut HashSet<String>>,
+) -> Option<APTUpdateInfo>
+where
+    F: Fn(FilterData) -> bool,
+    V: std::ops::Deref<Target = apt_pkg_native::sane::PkgView<'a>>
+{
+    let current_version = view.current_version();
+    let candidate_version = view.candidate_version();
+
+    let (current_version, candidate_version) = match pre_select {
+        PackagePreSelect::OnlyInstalled => match (current_version, candidate_version) {
+            (Some(cur), Some(can)) => (Some(cur), can), // package installed and there is an update
+            (Some(cur), None) => (Some(cur.clone()), cur), // package installed and up-to-date
+            (None, Some(_)) => return None, // package could be installed
+            (None, None) => return None, // broken
+        },
+        PackagePreSelect::OnlyNew => match (current_version, candidate_version) {
+            (Some(_), Some(_)) => return None,
+            (Some(_), None) => return None,
+            (None, Some(can)) => (None, can),
+            (None, None) => return None,
+        },
+        PackagePreSelect::All => match (current_version, candidate_version) {
+            (Some(cur), Some(can)) => (Some(cur), can),
+            (Some(cur), None) => (Some(cur.clone()), cur),
+            (None, Some(can)) => (None, can),
+            (None, None) => return None,
+        },
+    };
+
+    // get additional information via nested APT 'iterators'
+    let mut view_iter = view.versions();
+    while let Some(ver) = view_iter.next() {
+        let package = view.name();
+        let version = ver.version();
+        let mut origin_res = "unknown".to_owned();
+        let mut section_res = "unknown".to_owned();
+        let mut priority_res = "unknown".to_owned();
+        let mut change_log_url = "".to_owned();
+        let mut short_desc = package.clone();
+        let mut long_desc = "".to_owned();
+
+        let fd = FilterData {
+            installed_version: current_version.as_deref(),
+            candidate_version: &candidate_version,
+            active_version: &version,
+        };
+
+        if filter(fd) {
+            if let Some(section) = ver.section() {
+                section_res = section;
+            }
+
+            if let Some(prio) = ver.priority_type() {
+                priority_res = prio;
+            }
+
+            // assume every package has only one origin file (not
+            // origin, but origin *file*, for some reason those seem to
+            // be different concepts in APT)
+            let mut origin_iter = ver.origin_iter();
+            let origin = origin_iter.next();
+            if let Some(origin) = origin {
+
+                if let Some(sd) = origin.short_desc() {
+                    short_desc = sd;
+                }
+
+                if let Some(ld) = origin.long_desc() {
+                    long_desc = ld;
+                }
+
+                // the package files appear in priority order, meaning
+                // the one for the candidate version is first - this is fine
+                // however, as the source package should be the same for all
+                // versions anyway
+                let mut pkg_iter = origin.file();
+                let pkg_file = pkg_iter.next();
+                if let Some(pkg_file) = pkg_file {
+                    if let Some(origin_name) = pkg_file.origin() {
+                        origin_res = origin_name;
+                    }
+
+                    let filename = pkg_file.file_name();
+                    let component = pkg_file.component();
+
+                    // build changelog URL from gathered information
+                    // ignore errors, use empty changelog instead
+                    let url = get_changelog_url(&package, &filename,
+                        &version, &origin_res, &component);
+                    if let Ok(url) = url {
+                        change_log_url = url;
+                    }
+                }
+            }
+
+            if let Some(depends) = depends {
+                let mut dep_iter = ver.dep_iter();
+                loop {
+                    let dep = match dep_iter.next() {
+                        Some(dep) if dep.dep_type() != "Depends" => continue,
+                        Some(dep) => dep,
+                        None => break
+                    };
+
+                    let dep_pkg = dep.target_pkg();
+                    let name = dep_pkg.name();
+
+                    depends.insert(name);
+                }
+            }
+
+            return Some(APTUpdateInfo {
+                package,
+                title: short_desc,
+                arch: view.arch(),
+                description: long_desc,
+                change_log_url,
+                origin: origin_res,
+                version: candidate_version.clone(),
+                old_version: match current_version {
+                    Some(vers) => vers,
+                    None => "".to_owned()
+                },
+                priority: priority_res,
+                section: section_res,
+            });
+        }
+    }
+
+    return None;
+}
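Note (standalone sketch, not part of the patch): with the new FilterData, "upgradeable" becomes a pure predicate over the three version strings APT reports for a package version row, which the next hunk uses verbatim:

fn is_upgradeable(installed: Option<&str>, candidate: &str, active: &str) -> bool {
    // the row being inspected is the candidate, and it differs from
    // what is currently installed
    candidate == active && installed != Some(candidate)
}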
@@ -201,8 +316,11 @@ fn list_installed_apt_packages<F: Fn(&str, &str, &str) -> bool>(filter: F)
 )]
 /// List available APT updates
 fn apt_update_available(_param: Value) -> Result<Value, Error> {
-    let ret = list_installed_apt_packages(|_pkg, cur_ver, can_ver| cur_ver != can_ver);
-    Ok(json!(ret))
+    let all_upgradeable = list_installed_apt_packages(|data| {
+        data.candidate_version == data.active_version &&
+        data.installed_version != Some(data.candidate_version)
+    }, None);
+    Ok(json!(all_upgradeable))
 }
 
 #[api(
@@ -256,7 +374,67 @@ pub fn apt_update_database(
     Ok(upid_str)
 }
 
+#[api(
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            name: {
+                description: "Package name to get changelog of.",
+                type: String,
+            },
+            version: {
+                description: "Package version to get changelog of. Omit to use candidate version.",
+                type: String,
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        schema: UPID_SCHEMA,
+    },
+    access: {
+        permission: &Permission::Privilege(&[], PRIV_SYS_MODIFY, false),
+    },
+)]
+/// Retrieve the changelog of the specified package.
+fn apt_get_changelog(
+    param: Value,
+) -> Result<Value, Error> {
+
+    let name = crate::tools::required_string_param(&param, "name")?.to_owned();
+    let version = param["version"].as_str();
+
+    let pkg_info = list_installed_apt_packages(|data| {
+        match version {
+            Some(version) => version == data.active_version,
+            None => data.active_version == data.candidate_version
+        }
+    }, Some(&name));
+
+    if pkg_info.len() == 0 {
+        bail!("Package '{}' not found", name);
+    }
+
+    let changelog_url = &pkg_info[0].change_log_url;
+    // FIXME: use 'apt-get changelog' for proxmox packages as well, once repo supports it
+    if changelog_url.starts_with("http://download.proxmox.com/") {
+        let changelog = crate::tools::runtime::block_on(http::get_string(changelog_url))
+            .map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
+        return Ok(json!(changelog));
+    } else {
+        let mut command = std::process::Command::new("apt-get");
+        command.arg("changelog");
+        command.arg("-qq"); // don't display download progress
+        command.arg(name);
+        let output = crate::tools::run_command(command, None)?;
+        return Ok(json!(output));
+    }
+}
+
 const SUBDIRS: SubdirMap = &[
+    ("changelog", &Router::new().get(&API_METHOD_APT_GET_CHANGELOG)),
     ("update", &Router::new()
         .get(&API_METHOD_APT_UPDATE_AVAILABLE)
         .post(&API_METHOD_APT_UPDATE_DATABASE)
@@ -1,6 +1,6 @@
 use anyhow::{bail, Error};
 use serde_json::{json, Value};
-use ::serde::{Deserialize, Serialize};
+use serde::{Deserialize, Serialize};
 
 use proxmox::api::{
     api, Permission, RpcEnvironment, RpcEnvironmentType,
@@ -31,10 +31,8 @@ pub fn create_value_from_rrd(
             } else {
                 result.push(json!({ "time": t }));
             }
-        } else {
-            if let Some(value) = list[index] {
-                result[index][name] = value.into();
-            }
+        } else if let Some(value) = list[index] {
+            result[index][name] = value.into();
         }
         t += reso;
     }
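Note (standalone illustration, not from the patch): this refactor is purely syntactic; `else { if let ... }` collapses to `else if let ...` with identical behavior, e.g.:

fn pick(flag: bool, v: Option<i32>) -> i32 {
    if flag {
        0
    } else if let Some(value) = v {   // same control flow as the nested form
        value
    } else {
        -1
    }
}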
@@ -1,13 +1,70 @@
-use anyhow::{Error};
-use serde_json::{json, Value};
+use anyhow::{Error, format_err, bail};
+use serde_json::Value;
 
 use proxmox::api::{api, Router, RpcEnvironment, Permission};
 
 use crate::tools;
-use crate::config::acl::PRIV_SYS_AUDIT;
+use crate::tools::subscription::{self, SubscriptionStatus, SubscriptionInfo};
+use crate::config::acl::{PRIV_SYS_AUDIT,PRIV_SYS_MODIFY};
 use crate::config::cached_user_info::CachedUserInfo;
 use crate::api2::types::{NODE_SCHEMA, Userid};
 
+#[api(
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            force: {
+                description: "Always connect to server, even if information in cache is up to date.",
+                type: bool,
+                optional: true,
+                default: false,
+            },
+        },
+    },
+    protected: true,
+    access: {
+        permission: &Permission::Privilege(&["system"], PRIV_SYS_MODIFY, false),
+    },
+)]
+/// Check and update subscription status.
+fn check_subscription(
+    force: bool,
+) -> Result<(), Error> {
+    // FIXME: drop once proxmox-api-macro is bumped to >> 5.0.0-1
+    let _remove_me = API_METHOD_CHECK_SUBSCRIPTION_PARAM_DEFAULT_FORCE;
+
+    let info = match subscription::read_subscription() {
+        Err(err) => bail!("could not read subscription status: {}", err),
+        Ok(Some(info)) => info,
+        Ok(None) => return Ok(()),
+    };
+
+    let server_id = tools::get_hardware_address()?;
+    let key = if let Some(key) = info.key {
+        // always update apt auth if we have a key to ensure user can access enterprise repo
+        subscription::update_apt_auth(Some(key.to_owned()), Some(server_id.to_owned()))?;
+        key
+    } else {
+        String::new()
+    };
+
+    if !force && info.status == SubscriptionStatus::ACTIVE {
+        let age = proxmox::tools::time::epoch_i64() - info.checktime.unwrap_or(i64::MAX);
+        if age < subscription::MAX_LOCAL_KEY_AGE {
+            return Ok(());
+        }
+    }
+
+    let info = subscription::check_subscription(key, server_id)?;
+
+    subscription::write_subscription(info)
+        .map_err(|e| format_err!("Error writing updated subscription status - {}", e))?;
+
+    Ok(())
+}
+
 #[api(
     input: {
         properties: {
|
|||||||
},
|
},
|
||||||
returns: {
|
returns: {
|
||||||
description: "Subscription status.",
|
description: "Subscription status.",
|
||||||
properties: {
|
type: SubscriptionInfo,
|
||||||
status: {
|
|
||||||
type: String,
|
|
||||||
description: "'NotFound', 'active' or 'inactive'."
|
|
||||||
},
|
|
||||||
message: {
|
|
||||||
type: String,
|
|
||||||
description: "Human readable problem description.",
|
|
||||||
},
|
|
||||||
serverid: {
|
|
||||||
type: String,
|
|
||||||
description: "The unique server ID, if permitted to access.",
|
|
||||||
},
|
|
||||||
url: {
|
|
||||||
type: String,
|
|
||||||
description: "URL to Web Shop.",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
access: {
|
access: {
|
||||||
permission: &Permission::Anybody,
|
permission: &Permission::Anybody,
|
||||||
@ -45,24 +85,95 @@ use crate::api2::types::{NODE_SCHEMA, Userid};
|
|||||||
fn get_subscription(
|
fn get_subscription(
|
||||||
_param: Value,
|
_param: Value,
|
||||||
rpcenv: &mut dyn RpcEnvironment,
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<Value, Error> {
|
) -> Result<SubscriptionInfo, Error> {
|
||||||
|
let url = "https://www.proxmox.com/en/proxmox-backup-server/pricing";
|
||||||
|
|
||||||
|
let info = match subscription::read_subscription() {
|
||||||
|
Err(err) => bail!("could not read subscription status: {}", err),
|
||||||
|
Ok(Some(info)) => info,
|
||||||
|
Ok(None) => SubscriptionInfo {
|
||||||
|
status: SubscriptionStatus::NOTFOUND,
|
||||||
|
message: Some("There is no subscription key".into()),
|
||||||
|
serverid: Some(tools::get_hardware_address()?),
|
||||||
|
url: Some(url.into()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
|
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
|
||||||
let user_info = CachedUserInfo::new()?;
|
let user_info = CachedUserInfo::new()?;
|
||||||
let user_privs = user_info.lookup_privs(&userid, &[]);
|
let user_privs = user_info.lookup_privs(&userid, &[]);
|
||||||
let server_id = if (user_privs & PRIV_SYS_AUDIT) != 0 {
|
|
||||||
tools::get_hardware_address()?
|
if (user_privs & PRIV_SYS_AUDIT) == 0 {
|
||||||
} else {
|
// not enough privileges for full state
|
||||||
"hidden".to_string()
|
return Ok(SubscriptionInfo {
|
||||||
|
status: info.status,
|
||||||
|
message: info.message,
|
||||||
|
url: info.url,
|
||||||
|
..Default::default()
|
||||||
|
});
|
||||||
};
|
};
|
||||||
|
|
||||||
let url = "https://www.proxmox.com/en/proxmox-backup-server/pricing";
|
Ok(info)
|
||||||
Ok(json!({
|
}
|
||||||
"status": "NotFound",
|
|
||||||
"message": "There is no subscription key",
|
#[api(
|
||||||
"serverid": server_id,
|
input: {
|
||||||
"url": url,
|
properties: {
|
||||||
}))
|
node: {
|
||||||
|
schema: NODE_SCHEMA,
|
||||||
|
},
|
||||||
|
key: {
|
||||||
|
description: "Proxmox Backup Server subscription key",
|
||||||
|
type: String,
|
||||||
|
max_length: 32,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
protected: true,
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["system"], PRIV_SYS_MODIFY, false),
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Set a subscription key and check it.
|
||||||
|
fn set_subscription(
|
||||||
|
key: String,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let server_id = tools::get_hardware_address()?;
|
||||||
|
|
||||||
|
let info = subscription::check_subscription(key, server_id.to_owned())?;
|
||||||
|
|
||||||
|
subscription::write_subscription(info)
|
||||||
|
.map_err(|e| format_err!("Error writing subscription status - {}", e))?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
node: {
|
||||||
|
schema: NODE_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
protected: true,
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["system"], PRIV_SYS_MODIFY, false),
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Delete subscription info.
|
||||||
|
fn delete_subscription() -> Result<(), Error> {
|
||||||
|
|
||||||
|
subscription::delete_subscription()
|
||||||
|
.map_err(|err| format_err!("Deleting subscription failed: {}", err))?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub const ROUTER: Router = Router::new()
|
pub const ROUTER: Router = Router::new()
|
||||||
|
.post(&API_METHOD_CHECK_SUBSCRIPTION)
|
||||||
|
.put(&API_METHOD_SET_SUBSCRIPTION)
|
||||||
|
.delete(&API_METHOD_DELETE_SUBSCRIPTION)
|
||||||
.get(&API_METHOD_GET_SUBSCRIPTION);
|
.get(&API_METHOD_GET_SUBSCRIPTION);
|
||||||
|
@ -27,7 +27,7 @@ use crate::config::cached_user_info::CachedUserInfo;
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
returns: {
|
returns: {
|
||||||
description: "Task status nformation.",
|
description: "Task status information.",
|
||||||
properties: {
|
properties: {
|
||||||
node: {
|
node: {
|
||||||
schema: NODE_SCHEMA,
|
schema: NODE_SCHEMA,
|
||||||
@ -72,7 +72,7 @@ use crate::config::cached_user_info::CachedUserInfo;
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
access: {
|
access: {
|
||||||
description: "Users can access there own tasks, or need Sys.Audit on /system/tasks.",
|
description: "Users can access their own tasks, or need Sys.Audit on /system/tasks.",
|
||||||
permission: &Permission::Anybody,
|
permission: &Permission::Anybody,
|
||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
@ -342,7 +342,7 @@ pub fn list_tasks(
|
|||||||
if info.upid.worker_type == "backup" || info.upid.worker_type == "restore" ||
|
if info.upid.worker_type == "backup" || info.upid.worker_type == "restore" ||
|
||||||
info.upid.worker_type == "prune"
|
info.upid.worker_type == "prune"
|
||||||
{
|
{
|
||||||
let prefix = format!("{}_", store);
|
let prefix = format!("{}:", store);
|
||||||
if !worker_id.starts_with(&prefix) { return None; }
|
if !worker_id.starts_with(&prefix) { return None; }
|
||||||
} else if info.upid.worker_type == "garbage_collection" {
|
} else if info.upid.worker_type == "garbage_collection" {
|
||||||
if worker_id != store { return None; }
|
if worker_id != store { return None; }
|
||||||
|
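Note (standalone sketch, not from the patch): the worker_id format changes in this series from '{store}_{type}_{id}_{time}' to '{store}:{type}/{id}/{time:08X}' (see the reader and verify hunks), so the task-list store filter now matches on "store:" instead of "store_":

fn belongs_to_store(worker_id: &str, store: &str) -> bool {
    // ':' cannot appear in a datastore name, so the prefix is unambiguous
    worker_id.starts_with(&format!("{}:", store))
}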
src/api2/ping.rs (new file, 29 lines)
@@ -0,0 +1,29 @@
+use anyhow::{Error};
+use serde_json::{json, Value};
+
+use proxmox::api::{api, Router, Permission};
+
+#[api(
+    returns: {
+        description: "Dummy ping",
+        type: Object,
+        properties: {
+            pong: {
+                description: "Always true",
+                type: bool,
+            }
+        }
+    },
+    access: {
+        description: "Anyone can access this, because it's used for a cheap check if the API daemon is online.",
+        permission: &Permission::World,
+    }
+)]
+/// Dummy method which replies with `{ "pong": True }`
+fn ping() -> Result<Value, Error> {
+    Ok(json!({
+        "pong": true,
+    }))
+}
+pub const ROUTER: Router = Router::new()
+    .get(&API_METHOD_PING);
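Note (illustration only; the endpoint path is assumed from the api2 module layout, presumably /api2/json/ping): the handler above is constant, so a client liveness probe can simply compare against this value:

use serde_json::json;

fn expected_ping_response() -> serde_json::Value {
    json!({ "pong": true })
}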
@@ -7,14 +7,13 @@ use futures::{select, future::FutureExt};
 use proxmox::api::api;
 use proxmox::api::{ApiMethod, Router, RpcEnvironment, Permission};
 
-use crate::server::{WorkerTask};
+use crate::server::{WorkerTask, jobstate::Job};
 use crate::backup::DataStore;
 use crate::client::{HttpClient, HttpClientOptions, BackupRepository, pull::pull_store};
 use crate::api2::types::*;
 use crate::config::{
     remote,
     sync::SyncJobConfig,
-    jobstate::Job,
     acl::{PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ},
     cached_user_info::CachedUserInfo,
 };
@@ -17,6 +17,7 @@ use crate::tools;
 use crate::config::acl::{PRIV_DATASTORE_READ, PRIV_DATASTORE_BACKUP};
 use crate::config::cached_user_info::CachedUserInfo;
 use crate::api2::helpers;
+use crate::tools::fs::lock_dir_noblock_shared;
 
 mod environment;
 use environment::*;
@@ -98,11 +99,16 @@ fn upgrade_to_backup_reader_protocol(
         }
     }
 
+    let _guard = lock_dir_noblock_shared(
+        &datastore.snapshot_path(&backup_dir),
+        "snapshot",
+        "locked by another operation")?;
+
     let path = datastore.base_path();
 
     //let files = BackupInfo::list_files(&path, &backup_dir)?;
 
-    let worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_dir.backup_time());
+    let worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_dir.backup_time());
 
     WorkerTask::spawn("reader", Some(worker_id), userid.clone(), true, move |worker| {
         let mut env = ReaderEnvironment::new(
@@ -146,11 +152,14 @@ fn upgrade_to_backup_reader_protocol(
 
         use futures::future::Either;
         futures::future::select(req_fut, abort_future)
-            .map(|res| match res {
-                Either::Left((Ok(res), _)) => Ok(res),
-                Either::Left((Err(err), _)) => Err(err),
-                Either::Right((Ok(res), _)) => Ok(res),
-                Either::Right((Err(err), _)) => Err(err),
+            .map(move |res| {
+                let _guard = _guard;
+                match res {
+                    Either::Left((Ok(res), _)) => Ok(res),
+                    Either::Left((Err(err), _)) => Err(err),
+                    Either::Right((Ok(res), _)) => Ok(res),
+                    Either::Right((Err(err), _)) => Err(err),
+                }
             })
            .map_ok(move |_| env.log("reader finished successfully"))
    })?;
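Note (standalone sketch of the lifetime trick in the hunk above, not from the patch): moving the lock guard into the closure ties its drop to the completion of the future, instead of dropping it when the enclosing function returns:

fn keep_alive_until_done<G>(guard: G) -> impl FnOnce(u32) -> u32 {
    move |res| {
        let _guard = guard; // dropped only when the closure itself runs
        res
    }
}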
@@ -302,7 +302,7 @@ pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
     .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
     .schema();
 
-pub const VERIFY_SCHEDULE_SCHEMA: Schema = StringSchema::new(
+pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new(
     "Run verify job at specified schedule.")
     .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
     .schema();
@@ -324,6 +324,16 @@ pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
     .default(true)
     .schema();
 
+pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
+    "Do not verify backups that are already verified if their verification is not outdated.")
+    .default(true)
+    .schema();
+
+pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new(
+    "Days after that a verification becomes outdated")
+    .minimum(1)
+    .schema();
+
 pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (single line).")
     .format(&SINGLE_LINE_COMMENT_FORMAT)
     .schema();
@@ -577,6 +587,8 @@ pub struct GarbageCollectionStatus {
     pub pending_chunks: usize,
     /// Number of chunks marked as .bad by verify that have been removed by GC.
     pub removed_bad: usize,
+    /// Number of chunks still marked as .bad after garbage collection.
+    pub still_bad: usize,
 }
 
 impl Default for GarbageCollectionStatus {
@@ -592,6 +604,7 @@ impl Default for GarbageCollectionStatus {
             pending_bytes: 0,
             pending_chunks: 0,
             removed_bad: 0,
+            still_bad: 0,
         }
     }
 }
199
src/backup.rs
199
src/backup.rs
@ -1,107 +1,146 @@
//! This module implements the proxmox backup data storage
//! This module implements the data storage and access layer.
//!
//! Proxmox backup splits large files into chunks, and stores them
//! deduplicated using a content addressable storage format.
//! # Data formats
//!
//! A chunk is simply defined as binary blob, which is stored inside a
//! `ChunkStore`, addressed by the SHA256 digest of the binary blob.
//! PBS splits large files into chunks, and stores them deduplicated using
//! a content addressable storage format.
//!
//! Index files are used to reconstruct the original file. They
//! basically contain a list of SHA256 checksums. The `DynamicIndex*`
//! format is able to deal with dynamic chunk sizes, whereas the
//! `FixedIndex*` format is an optimization to store a list of equal
//! sized chunks.
//! Backup snapshots are stored as folders containing a manifest file and
//! potentially one or more index or blob files.
//!
//! # ChunkStore Locking
//! The manifest contains hashes of all other files and can be signed by
//! the client.
//!
//! We need to be able to restart the proxmox-backup service daemons,
//! so that we can update the software without rebooting the host. But
//! such restarts must not abort running backup jobs, so we need to
//! keep the old service running until those jobs are finished. This
//! implies that we need some kind of locking for the
//! ChunkStore. Please note that it is perfectly valid to have
//! multiple parallel ChunkStore writers, even when they write the
//! same chunk (because the chunk would have the same name and the
//! same data). The only real problem is garbage collection, because
//! we need to avoid deleting chunks which are still referenced.
//! Blob files contain data directly. They are used for config files and
//! the like.
//!
//! * Read Index Files:
//! Index files are used to reconstruct an original file. They contain a
//! list of SHA256 checksums. The `DynamicIndex*` format is able to deal
//! with dynamic chunk sizes (CT and host backups), whereas the
//! `FixedIndex*` format is an optimization to store a list of equal sized
//! chunks (VMs, whole block devices).
//!
//!   Acquire shared lock for .idx files.
//!
//!
//! * Delete Index Files:
//!
//!   Acquire exclusive lock for .idx files. This makes sure that we do
//!   not delete index files while they are still in use.
//!
//!
//! * Create Index Files:
//!
//!   Acquire shared lock for ChunkStore (process wide).
//!
//!   Note: When creating .idx files, we create temporary a (.tmp) file,
//!   then do an atomic rename ...
//!
//!
//! * Garbage Collect:
//!
//!   Acquire exclusive lock for ChunkStore (process wide). If we have
//!   already a shared lock for the ChunkStore, try to upgrade that
//!   lock.
//!
//!
//! * Server Restart
//!
//!   Try to abort the running garbage collection to release exclusive
//!   ChunkStore locks ASAP. Start the new service with the existing listening
//!   socket.
//! A chunk is defined as a binary blob, which is stored inside a
//! [ChunkStore](struct.ChunkStore.html) instead of the backup directory
//! directly, and can be addressed by its SHA256 digest.
//!
//!
//! # Garbage Collection (GC)
//!
//! Deleting backups is as easy as deleting the corresponding .idx
//! files. Unfortunately, this does not free up any storage, because
//! those files just contain references to chunks.
//! Deleting backups is as easy as deleting the corresponding .idx files.
//! However, this does not free up any storage, because those files just
//! contain references to chunks.
//!
//! To free up some storage, we run a garbage collection process at
//! regular intervals. The collector uses a mark and sweep
//! approach. In the first phase, it scans all .idx files to mark used
//! chunks. The second phase then removes all unmarked chunks from the
//! store.
//! regular intervals. The collector uses a mark and sweep approach. In
//! the first phase, it scans all .idx files to mark used chunks. The
//! second phase then removes all unmarked chunks from the store.
//!
//! The above locking mechanism makes sure that we are the only
//! process running GC. But we still want to be able to create backups
//! during GC, so there may be multiple backup threads/tasks
//! running. Either started before GC started, or started while GC is
//! running.
//! The locking mechanisms mentioned below make sure that we are the only
//! process running GC. We still want to be able to create backups during
//! GC, so there may be multiple backup threads/tasks running, either
//! started before GC, or while GC is running.
//!
//! ## `atime` based GC
//!
//! The idea here is to mark chunks by updating the `atime` (access
//! timestamp) on the chunk file. This is quite simple and does not
//! need additional RAM.
//! timestamp) on the chunk file. This is quite simple and does not need
//! additional RAM.
//!
//! One minor problem is that recent Linux versions use the `relatime`
//! mount flag by default for performance reasons (yes, we want
//! that). When enabled, `atime` data is written to the disk only if
//! the file has been modified since the `atime` data was last updated
//! (`mtime`), or if the file was last accessed more than a certain
//! amount of time ago (by default 24h). So we may only delete chunks
//! with `atime` older than 24 hours.
//! mount flag by default for performance reasons (and we want that). When
//! enabled, `atime` data is written to the disk only if the file has been
//! modified since the `atime` data was last updated (`mtime`), or if the
//! file was last accessed more than a certain amount of time ago (by
//! default 24h). So we may only delete chunks with `atime` older than 24
//! hours.
//!
//! Another problem arises from running backups. The mark phase does
//! not find any chunks from those backups, because there is no .idx
//! file for them (created after the backup). Chunks created or
//! touched by those backups may have an `atime` as old as the start
//! time of those backups. Please note that the backup start time may
//! predate the GC start time. So we may only delete chunks older than
//! the start time of those running backup jobs.
//!
//! Another problem arises from running backups. The mark phase does not
//! find any chunks from those backups, because there is no .idx file for
//! them (created after the backup). Chunks created or touched by those
//! backups may have an `atime` as old as the start time of those backups.
//! Please note that the backup start time may predate the GC start time.
//! So we may only delete chunks older than the start time of those
//! running backup jobs, which might be more than 24h back (this is the
//! reason why ProcessLocker exclusive locks only have to be exclusive
//! between processes, since within one we can determine the age of the
//! oldest shared lock).
//!
//! ## Store `marks` in RAM using a HASH
//!
//! Not sure if this is better. TODO
//! Might be better. Under investigation.
//!
//! # Locking
//!
//! Since PBS allows multiple potentially interfering operations at the
//! same time (e.g. garbage collect, prune, multiple backup creations
//! (only in seperate groups), forget, ...), these need to lock against
//! each other in certain scenarios. There is no overarching global lock
//! though, instead always the finest grained lock possible is used,
//! because running these operations concurrently is treated as a feature
//! on its own.
//!
//! ## Inter-process Locking
//!
//! We need to be able to restart the proxmox-backup service daemons, so
//! that we can update the software without rebooting the host. But such
//! restarts must not abort running backup jobs, so we need to keep the
//! old service running until those jobs are finished. This implies that
//! we need some kind of locking for modifying chunks and indices in the
//! ChunkStore.
//!
//! Please note that it is perfectly valid to have multiple
//! parallel ChunkStore writers, even when they write the same chunk
//! (because the chunk would have the same name and the same data, and
//! writes are completed atomically via a rename). The only problem is
//! garbage collection, because we need to avoid deleting chunks which are
//! still referenced.
//!
//! To do this we use the
//! [ProcessLocker](../tools/struct.ProcessLocker.html).
//!
//! ### ChunkStore-wide
//!
//! * Create Index Files:
//!
//!   Acquire shared lock for ChunkStore.
//!
//!   Note: When creating .idx files, we create a temporary .tmp file,
//!   then do an atomic rename.
//!
//! * Garbage Collect:
//!
//!   Acquire exclusive lock for ChunkStore. If we have
//!   already a shared lock for the ChunkStore, try to upgrade that
//!   lock.
//!
//! Exclusive locks only work _between processes_. It is valid to have an
//! exclusive and one or more shared locks held within one process. Writing
//! chunks within one process is synchronized using the gc_mutex.
//!
//! On server restart, we stop any running GC in the old process to avoid
//! having the exclusive lock held for too long.
//!
//! ## Locking table
//!
//! Below table shows all operations that play a role in locking, and which
//! mechanisms are used to make their concurrent usage safe.
//!
//! | starting ><br>v during | read index file | create index file | GC mark | GC sweep | update manifest | forget | prune | create backup | verify | reader api |
//! |-|-|-|-|-|-|-|-|-|-|-|
//! | **read index file** | / | / | / | / | / | mmap stays valid, oldest_shared_lock prevents GC | see forget column | / | / | / |
//! | **create index file** | / | / | / | / | / | / | / | /, happens at the end, after all chunks are touched | /, only happens without a manifest | / |
//! | **GC mark** | / | Datastore process-lock shared | gc_mutex, exclusive ProcessLocker | gc_mutex | /, GC only cares about index files, not manifests | tells GC about removed chunks | see forget column | /, index files don’t exist yet | / | / |
//! | **GC sweep** | / | Datastore process-lock shared | gc_mutex, exclusive ProcessLocker | gc_mutex | / | /, chunks already marked | see forget column | chunks get touched; chunk_store.mutex; oldest PL lock | / | / |
//! | **update manifest** | / | / | / | / | update_manifest lock | update_manifest lock, remove dir under lock | see forget column | /, “write manifest” happens at the end | /, can call “write manifest”, see that column | / |
//! | **forget** | / | / | removed_during_gc mutex is held during unlink | marking done, doesn’t matter if forgotten now | update_manifest lock, forget waits for lock | /, unlink is atomic | causes forget to fail, but that’s OK | running backup has snapshot flock | /, potentially detects missing folder | shared snap flock |
//! | **prune** | / | / | see forget row | see forget row | see forget row | causes warn in prune, but no error | see forget column | running and last non-running can’t be pruned | see forget row | shared snap flock |
//! | **create backup** | / | only time this happens, thus has snapshot flock | / | chunks get touched; chunk_store.mutex; oldest PL lock | no lock, but cannot exist beforehand | snapshot flock, can’t be forgotten | running and last non-running can’t be pruned | snapshot group flock, only one running per group | /, won’t be verified since manifest missing | / |
//! | **verify** | / | / | / | / | see “update manifest” row | /, potentially detects missing folder | see forget column | / | /, but useless (“update manifest” protects itself) | / |
//! | **reader api** | / | / | / | /, open snap can’t be forgotten, so ref must exist | / | prevented by shared snap flock | prevented by shared snap flock | / | / | /, lock is shared |
//! * / = no interaction
//! * shared/exclusive from POV of 'starting' process

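The rewritten module docs keep the core invariant: a chunk is identified, stored, and looked up by the SHA-256 digest of its contents. A self-contained sketch of that content addressing, using the `openssl` crate already in this project's dependency tree; the two-level `.chunks/` fan-out shown is illustrative, not a specification of the actual on-disk layout:

```rust
use openssl::sha::sha256;

// A chunk's identity (and its path inside the chunk store) is derived
// purely from its data, so identical chunks deduplicate automatically.
fn chunk_store_path(chunk_data: &[u8]) -> String {
    let digest: [u8; 32] = sha256(chunk_data);
    let hex: String = digest.iter().map(|b| format!("{:02x}", b)).collect();
    // fan out by digest prefix to keep directories small (illustrative)
    format!(".chunks/{}/{}", &hex[..4], hex)
}

fn main() {
    println!("{}", chunk_store_path(b"example chunk"));
}
```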
use anyhow::{bail, Error};

@ -15,6 +15,17 @@ use super::IndexFile;
use super::read_chunk::AsyncReadChunk;
use super::index::ChunkReadInfo;

// FIXME: This enum may not be required?
// - Put the `WaitForData` case directly into a `read_future: Option<>`
// - make the read loop as follows:
//   * if read_buffer is not empty:
//       use it
//   * else if read_future is there:
//       poll it
//       if read: move data to read_buffer
//   * else
//       create read future
#[allow(clippy::enum_variant_names)]
enum AsyncIndexReaderState<S> {
    NoData,
    WaitForData(Pin<Box<dyn Future<Output = Result<(S, Vec<u8>), Error>> + Send + 'static>>),
@ -118,9 +129,8 @@ where
                }
                AsyncIndexReaderState::WaitForData(ref mut future) => {
                    match ready!(future.as_mut().poll(cx)) {
                        Ok((store, mut chunk_data)) => {
                            this.read_buffer.clear();
                            this.read_buffer.append(&mut chunk_data);
                        Ok((store, chunk_data)) => {
                            this.read_buffer = chunk_data;
                            this.state = AsyncIndexReaderState::HaveData;
                            this.store = Some(store);
                        }
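The buffer handling change above swaps a clear-and-append (which memcpys the chunk bytes into the old buffer) for a plain move of the freshly decoded `Vec`. A standalone sketch of the difference:

```rust
fn main() {
    let mut chunk_data: Vec<u8> = vec![1, 2, 3];

    // before: copy every byte into the existing buffer
    let mut read_buffer: Vec<u8> = Vec::new();
    read_buffer.clear();
    read_buffer.append(&mut chunk_data); // moves elements via memcpy

    // after: take ownership of the decoded chunk, no per-byte work
    let read_buffer = read_buffer; // (in the real code: `= chunk_data`)
    assert_eq!(read_buffer, vec![1, 2, 3]);
}
```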
@ -354,9 +354,11 @@ impl ChunkStore {
                },
                Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => {
                    // chunk hasn't been rewritten yet, keep .bad file
                    status.still_bad += 1;
                },
                Err(err) => {
                    // some other error, warn user and keep .bad file around too
                    status.still_bad += 1;
                    crate::task_warn!(
                        worker,
                        "error during stat on '{:?}' - {}",
@ -378,14 +380,12 @@ impl ChunkStore {
                }
                status.removed_chunks += 1;
                status.removed_bytes += stat.st_size as u64;
            } else if stat.st_atime < oldest_writer {
                status.pending_chunks += 1;
                status.pending_bytes += stat.st_size as u64;
            } else {
                if stat.st_atime < oldest_writer {
                    status.pending_chunks += 1;
                    status.pending_bytes += stat.st_size as u64;
                } else {
                    status.disk_chunks += 1;
                    status.disk_bytes += stat.st_size as u64;
                }
                status.disk_chunks += 1;
                status.disk_bytes += stat.st_size as u64;
            }
        }
        drop(lock);
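The restructured sweep flattens a nested `if` into an `else if` chain: chunks older than the mark cutoff are removed, chunks newer than the cutoff but older than the oldest still-running writer are counted as pending, and everything else stays on disk. A self-contained sketch of that classification (names and values are illustrative, not the project's API):

```rust
#[derive(Debug, PartialEq)]
enum ChunkFate { Remove, Pending, Keep }

// min_atime: mark cutoff (GC start minus the 24h relatime grace period);
// oldest_writer: start time of the oldest still-running backup writer.
fn classify(atime: i64, min_atime: i64, oldest_writer: i64) -> ChunkFate {
    if atime < min_atime {
        ChunkFate::Remove // not touched by the mark phase, old enough
    } else if atime < oldest_writer {
        ChunkFate::Pending // may belong to a backup still in progress
    } else {
        ChunkFate::Keep
    }
}

fn main() {
    assert_eq!(classify(100, 200, 300), ChunkFate::Remove);
    assert_eq!(classify(250, 200, 300), ChunkFate::Pending);
    assert_eq!(classify(400, 200, 300), ChunkFate::Keep);
}
```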
@ -3,21 +3,22 @@ use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::convert::TryFrom;
use std::time::Duration;
use std::fs::File;

use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
use serde_json::Value;

use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::tools::fs::{replace_file, file_read_optional_string, CreateOptions, open_file_locked};

use super::backup_info::{BackupGroup, BackupDir};
use super::chunk_store::ChunkStore;
use super::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use super::fixed_index::{FixedIndexReader, FixedIndexWriter};
use super::manifest::{MANIFEST_BLOB_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
use super::manifest::{MANIFEST_BLOB_NAME, MANIFEST_LOCK_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
use super::index::*;
use super::{DataBlob, ArchiveType, archive_type};
use crate::config::datastore;
use crate::config::datastore::{self, DataStoreConfig};
use crate::task::TaskState;
use crate::tools;
use crate::tools::format::HumanByte;
@ -37,6 +38,7 @@ pub struct DataStore {
    chunk_store: Arc<ChunkStore>,
    gc_mutex: Mutex<bool>,
    last_gc_status: Mutex<GarbageCollectionStatus>,
    verify_new: bool,
}

impl DataStore {
@ -45,17 +47,20 @@ impl DataStore {

        let (config, _digest) = datastore::config()?;
        let config: datastore::DataStoreConfig = config.lookup("datastore", name)?;
        let path = PathBuf::from(&config.path);

        let mut map = DATASTORE_MAP.lock().unwrap();

        if let Some(datastore) = map.get(name) {
            // Compare Config - if changed, create new Datastore object!
            if datastore.chunk_store.base == PathBuf::from(&config.path) {
            if datastore.chunk_store.base == path &&
                datastore.verify_new == config.verify_new.unwrap_or(false)
            {
                return Ok(datastore.clone());
            }
        }

        let datastore = DataStore::open(name)?;
        let datastore = DataStore::open_with_path(name, &path, config)?;

        let datastore = Arc::new(datastore);
        map.insert(name.to_string(), datastore.clone());
@ -63,26 +68,29 @@ impl DataStore {
        Ok(datastore)
    }

    pub fn open(store_name: &str) -> Result<Self, Error> {
    fn open_with_path(store_name: &str, path: &Path, config: DataStoreConfig) -> Result<Self, Error> {

        let (config, _digest) = datastore::config()?;
        let (_, store_config) = config.sections.get(store_name)
            .ok_or(format_err!("no such datastore '{}'", store_name))?;

        let path = store_config["path"].as_str().unwrap();

        Self::open_with_path(store_name, Path::new(path))
    }

    pub fn open_with_path(store_name: &str, path: &Path) -> Result<Self, Error> {

        let chunk_store = ChunkStore::open(store_name, path)?;

        let gc_status = GarbageCollectionStatus::default();
        let mut gc_status_path = chunk_store.base_path();
        gc_status_path.push(".gc-status");

        let gc_status = if let Some(state) = file_read_optional_string(gc_status_path)? {
            match serde_json::from_str(&state) {
                Ok(state) => state,
                Err(err) => {
                    eprintln!("error reading gc-status: {}", err);
                    GarbageCollectionStatus::default()
                }
            }
        } else {
            GarbageCollectionStatus::default()
        };

        Ok(Self {
            chunk_store: Arc::new(chunk_store),
            gc_mutex: Mutex::new(false),
            last_gc_status: Mutex::new(gc_status),
            verify_new: config.verify_new.unwrap_or(false),
        })
    }

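`open_with_path` now restores the last GC status from a `.gc-status` file instead of always starting from defaults. The same read-or-default pattern in a minimal, self-contained form (struct, fields, and file name are illustrative; assumes serde with the derive feature):

```rust
use serde::Deserialize;

#[derive(Default, Deserialize)]
struct GcStatus {
    removed_chunks: usize,
    still_bad: usize,
}

// Read the state file if it exists; fall back to defaults on any
// missing-file or parse error, exactly as the diff above does.
fn load_status(path: &std::path::Path) -> GcStatus {
    std::fs::read_to_string(path)
        .ok()
        .and_then(|s| serde_json::from_str(&s).ok())
        .unwrap_or_default()
}

fn main() {
    let status = load_status(std::path::Path::new("/tmp/.gc-status"));
    println!("still_bad = {}", status.still_bad);
}
```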
@ -208,10 +216,17 @@ impl DataStore {
        let _guard = tools::fs::lock_dir_noblock(&full_path, "backup group", "possible running backup")?;

        log::info!("removing backup group {:?}", full_path);

        // remove all individual backup dirs first to ensure nothing is using them
        for snap in backup_group.list_backups(&self.base_path())? {
            self.remove_backup_dir(&snap.backup_dir, false)?;
        }

        // no snapshots left, we can now safely remove the empty folder
        std::fs::remove_dir_all(&full_path)
            .map_err(|err| {
                format_err!(
                    "removing backup group {:?} failed - {}",
                    "removing backup group directory {:?} failed - {}",
                    full_path,
                    err,
                )
@ -225,9 +240,10 @@ impl DataStore {

        let full_path = self.snapshot_path(backup_dir);

        let _guard;
        let (_guard, _manifest_guard);
        if !force {
            _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or used as base")?;
            _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
            _manifest_guard = self.lock_manifest(backup_dir);
        }

        log::info!("removing backup snapshot {:?}", full_path);
@ -293,7 +309,7 @@ impl DataStore {
        let mut file = open_options.open(&path)
            .map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?;

        write!(file, "{}\n", userid)
        writeln!(file, "{}", userid)
            .map_err(|err| format_err!("unable to write owner file {:?} - {}", path, err))?;

        Ok(())
@ -454,13 +470,25 @@ impl DataStore {
            worker.check_abort()?;
            tools::fail_on_shutdown()?;

            if let Ok(archive_type) = archive_type(&path) {
                if archive_type == ArchiveType::FixedIndex {
                    let index = self.open_fixed_reader(&path)?;
                    self.index_mark_used_chunks(index, &path, status, worker)?;
                } else if archive_type == ArchiveType::DynamicIndex {
                    let index = self.open_dynamic_reader(&path)?;
                    self.index_mark_used_chunks(index, &path, status, worker)?;
            let full_path = self.chunk_store.relative_path(&path);
            match std::fs::File::open(&full_path) {
                Ok(file) => {
                    if let Ok(archive_type) = archive_type(&path) {
                        if archive_type == ArchiveType::FixedIndex {
                            let index = FixedIndexReader::new(file)?;
                            self.index_mark_used_chunks(index, &path, status, worker)?;
                        } else if archive_type == ArchiveType::DynamicIndex {
                            let index = DynamicIndexReader::new(file)?;
                            self.index_mark_used_chunks(index, &path, status, worker)?;
                        }
                    }
                }
                Err(err) => {
                    if err.kind() == std::io::ErrorKind::NotFound {
                        // simply ignore vanished files
                    } else {
                        return Err(err.into());
                    }
                }
            }
            done += 1;
@ -557,6 +585,23 @@ impl DataStore {
            crate::task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
        }

        if let Ok(serialized) = serde_json::to_string(&gc_status) {
            let mut path = self.base_path();
            path.push(".gc-status");

            let backup_user = crate::backup::backup_user()?;
            let mode = nix::sys::stat::Mode::from_bits_truncate(0o0644);
            // set the correct owner/group/permissions while saving file
            // owner(rw) = backup, group(r)= backup
            let options = CreateOptions::new()
                .perm(mode)
                .owner(backup_user.uid)
                .group(backup_user.gid);

            // ignore errors
            let _ = replace_file(path, serialized.as_bytes(), options);
        }

        *self.last_gc_status.lock().unwrap() = gc_status;

    } else {
@ -611,8 +656,27 @@ impl DataStore {
                digest_str,
                err,
            ))
    }

    fn lock_manifest(
        &self,
        backup_dir: &BackupDir,
    ) -> Result<File, Error> {
        let mut path = self.base_path();
        path.push(backup_dir.relative_path());
        path.push(&MANIFEST_LOCK_NAME);

        // update_manifest should never take a long time, so if someone else has
        // the lock we can simply block a bit and should get it soon
        open_file_locked(&path, Duration::from_secs(5), true)
            .map_err(|err| {
                format_err!(
                    "unable to acquire manifest lock {:?} - {}", &path, err
                )
            })
    }

    /// Load the manifest without a lock. Must not be written back.
    pub fn load_manifest(
        &self,
        backup_dir: &BackupDir,
@ -623,22 +687,20 @@ impl DataStore {
        Ok((manifest, raw_size))
    }

    pub fn load_manifest_json(
    /// Update the manifest of the specified snapshot. Never write a manifest directly,
    /// only use this method - anything else may break locking guarantees.
    pub fn update_manifest(
        &self,
        backup_dir: &BackupDir,
    ) -> Result<Value, Error> {
        let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
        // no expected digest available
        let manifest_data = blob.decode(None, None)?;
        let manifest: Value = serde_json::from_slice(&manifest_data[..])?;
        Ok(manifest)
    }

    pub fn store_manifest(
        &self,
        backup_dir: &BackupDir,
        manifest: Value,
        update_fn: impl FnOnce(&mut BackupManifest),
    ) -> Result<(), Error> {

        let _guard = self.lock_manifest(backup_dir)?;
        let (mut manifest, _) = self.load_manifest(&backup_dir)?;

        update_fn(&mut manifest);

        let manifest = serde_json::to_value(manifest)?;
        let manifest = serde_json::to_string_pretty(&manifest)?;
        let blob = DataBlob::encode(manifest.as_bytes(), None, true)?;
        let raw_data = blob.raw_data();
@ -647,8 +709,13 @@ impl DataStore {
        path.push(backup_dir.relative_path());
        path.push(MANIFEST_BLOB_NAME);

        // atomic replace invalidates flock - no other writes past this point!
        replace_file(&path, raw_data, CreateOptions::new())?;

        Ok(())
    }

    pub fn verify_new(&self) -> bool {
        self.verify_new
    }
}
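The new `update_manifest` turns the old load/modify/store sequence into a single lock-guarded read-modify-write that takes the mutation as a closure. A self-contained sketch of the API shape (stand-in types; no real locking or serialization):

```rust
use std::collections::HashMap;

// Stand-in for the manifest's unprotected JSON section.
struct Manifest {
    unprotected: HashMap<String, String>,
}

// Shape of the new API: take the lock, load, let the caller mutate,
// then store — so no caller can forget a step.
fn update_manifest(manifest: &mut Manifest, update_fn: impl FnOnce(&mut Manifest)) {
    // (real code: acquire the .lck flock and load the manifest blob here)
    update_fn(manifest);
    // (...then serialize and atomically replace the manifest blob)
}

fn main() {
    let mut m = Manifest { unprotected: HashMap::new() };
    update_manifest(&mut m, |m| {
        m.unprotected.insert("verify_state".into(), "ok".into());
    });
    assert_eq!(m.unprotected["verify_state"], "ok");
}
```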
@ -8,6 +8,7 @@ use ::serde::{Deserialize, Serialize};
use crate::backup::{BackupDir, CryptMode, CryptConfig};

pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
pub const MANIFEST_LOCK_NAME: &str = ".index.json.lck";
pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";

mod hex_csum {
@ -2,6 +2,7 @@ use std::collections::HashSet;
use std::sync::{Arc, Mutex};
use std::sync::atomic::{Ordering, AtomicUsize};
use std::time::Instant;
use nix::dir::Dir;

use anyhow::{bail, format_err, Error};

@ -23,6 +24,7 @@ use crate::{
    task::TaskState,
    task_log,
    tools::ParallelHandler,
    tools::fs::lock_dir_noblock_shared,
};

fn verify_blob(datastore: Arc<DataStore>, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
@ -283,8 +285,44 @@ pub fn verify_backup_dir(
    worker: Arc<dyn TaskState + Send + Sync>,
    upid: UPID,
) -> Result<bool, Error> {
    let snap_lock = lock_dir_noblock_shared(
        &datastore.snapshot_path(&backup_dir),
        "snapshot",
        "locked by another operation");
    match snap_lock {
        Ok(snap_lock) => verify_backup_dir_with_lock(
            datastore,
            backup_dir,
            verified_chunks,
            corrupt_chunks,
            worker,
            upid,
            snap_lock
        ),
        Err(err) => {
            task_log!(
                worker,
                "SKIPPED: verify {}:{} - could not acquire snapshot lock: {}",
                datastore.name(),
                backup_dir,
                err,
            );
            Ok(true)
        }
    }
}

    let mut manifest = match datastore.load_manifest(&backup_dir) {
/// See verify_backup_dir
pub fn verify_backup_dir_with_lock(
    datastore: Arc<DataStore>,
    backup_dir: &BackupDir,
    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
    worker: Arc<dyn TaskState + Send + Sync>,
    upid: UPID,
    _snap_lock: Dir,
) -> Result<bool, Error> {
    let manifest = match datastore.load_manifest(&backup_dir) {
        Ok((manifest, _)) => manifest,
        Err(err) => {
            task_log!(
@ -351,9 +389,10 @@ pub fn verify_backup_dir(
        state: verify_result,
        upid,
    };
    manifest.unprotected["verify_state"] = serde_json::to_value(verify_state)?;
    datastore.store_manifest(&backup_dir, serde_json::to_value(manifest)?)
        .map_err(|err| format_err!("unable to store manifest blob - {}", err))?;
    let verify_state = serde_json::to_value(verify_state)?;
    datastore.update_manifest(&backup_dir, |manifest| {
        manifest.unprotected["verify_state"] = verify_state;
    }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(error_count == 0)
}
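Splitting `verify_backup_dir` (which acquires the shared snapshot lock) from `verify_backup_dir_with_lock` (which demands the guard as a parameter) encodes lock ownership in the type system: holding the `Dir` guard for the duration of the call keeps the snapshot pinned. A generic sketch of the same pattern with simplified stand-in types:

```rust
struct SnapshotLock; // stand-in for the real directory lock guard

fn acquire_lock() -> Result<SnapshotLock, String> {
    Ok(SnapshotLock)
}

// Callers that already hold the lock call this directly; taking the
// guard by value keeps the lock alive for the whole verification.
fn verify_with_lock(_snap_lock: SnapshotLock) -> bool {
    true
}

fn verify() -> bool {
    match acquire_lock() {
        Ok(lock) => verify_with_lock(lock),
        Err(err) => {
            eprintln!("SKIPPED: could not acquire snapshot lock: {}", err);
            true // a skipped snapshot counts as non-failure
        }
    }
}

fn main() {
    assert!(verify());
}
```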
@ -373,6 +412,7 @@ pub fn verify_backup_group(
    progress: Option<(usize, usize)>, // (done, snapshot_count)
    worker: Arc<dyn TaskState + Send + Sync>,
    upid: &UPID,
    filter: &dyn Fn(&BackupInfo) -> bool,
) -> Result<(usize, Vec<String>), Error> {

    let mut errors = Vec::new();
@ -398,6 +438,17 @@ pub fn verify_backup_group(
    BackupInfo::sort_list(&mut list, false); // newest first
    for info in list {
        count += 1;

        if filter(&info) == false {
            task_log!(
                worker,
                "SKIPPED: verify {}:{} (recently verified)",
                datastore.name(),
                info.backup_dir,
            );
            continue;
        }

        if !verify_backup_dir(
            datastore.clone(),
            &info.backup_dir,
@ -435,6 +486,7 @@ pub fn verify_all_backups(
    datastore: Arc<DataStore>,
    worker: Arc<dyn TaskState + Send + Sync>,
    upid: &UPID,
    filter: &dyn Fn(&BackupInfo) -> bool,
) -> Result<Vec<String>, Error> {
    let mut errors = Vec::new();

@ -479,6 +531,7 @@ pub fn verify_all_backups(
            Some((done, snapshot_count)),
            worker.clone(),
            upid,
            filter,
        )?;
        errors.append(&mut group_errors);

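The new `filter: &dyn Fn(&BackupInfo) -> bool` parameter lets callers decide which snapshots to verify, e.g. skipping recently verified ones, without `verify.rs` knowing the policy. A self-contained sketch of such a callback (the field shown is hypothetical; the real `BackupInfo` differs):

```rust
// Hypothetical snapshot metadata for illustration only.
struct BackupInfo {
    last_verify_epoch: Option<i64>,
}

fn main() {
    let now = 1_600_000_000i64;
    let outdated_after_days = 30i64;

    // Re-verify anything never verified, or verified too long ago.
    let filter = |info: &BackupInfo| -> bool {
        match info.last_verify_epoch {
            Some(t) => now - t > outdated_after_days * 24 * 3600,
            None => true,
        }
    };

    assert!(filter(&BackupInfo { last_verify_epoch: None }));
    assert!(!filter(&BackupInfo { last_verify_epoch: Some(now - 3600) }));
}
```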
@ -37,7 +37,7 @@ async fn run() -> Result<(), Error> {
    config::update_self_signed_cert(false)?;

    proxmox_backup::rrd::create_rrdb_dir()?;
    proxmox_backup::config::jobstate::create_jobstate_dir()?;
    proxmox_backup::server::jobstate::create_jobstate_dir()?;

    if let Err(err) = generate_auth_key() {
        bail!("unable to generate auth key - {}", err);
@ -49,9 +49,11 @@ async fn run() -> Result<(), Error> {
    }
    let _ = csrf_secret(); // load with lazy_static

    let config = server::ApiConfig::new(
    let mut config = server::ApiConfig::new(
        buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PRIVILEGED)?;

    config.enable_file_log(buildcfg::API_ACCESS_LOG_FN)?;

    let rest_server = RestServer::new(config);

    // http server future:
@ -510,7 +510,7 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("size"))
        .column(ColumnConfig::new("size").renderer(tools::format::render_bytes_human_readable))
        .column(ColumnConfig::new("files").renderer(render_files))
        ;

@ -1,5 +1,6 @@
use std::sync::{Arc};
use std::path::{Path, PathBuf};
use std::os::unix::io::AsRawFd;

use anyhow::{bail, format_err, Error};
use futures::*;
@ -9,16 +10,45 @@ use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;

use proxmox_backup::{
    backup::DataStore,
    server::{
        UPID,
        WorkerTask,
        ApiConfig,
        rest::*,
        jobstate::{
            self,
            Job,
        },
        rotate_task_log_archive,
    },
    tools::systemd::time::{
        parse_calendar_event,
        compute_next_event,
    },
};

use proxmox_backup::api2::types::Userid;
use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
use proxmox_backup::tools::daemon;
use proxmox_backup::server::{ApiConfig, rest::*};
use proxmox_backup::auth_helpers::*;
use proxmox_backup::tools::disks::{ DiskManage, zfs_pool_stats };
use proxmox_backup::tools::{
    daemon,
    disks::{
        DiskManage,
        zfs_pool_stats,
    },
    socket::{
        set_tcp_keepalive,
        PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
    },
};

use proxmox_backup::api2::pull::do_sync_job;
use proxmox_backup::server::do_verification_job;

fn main() -> Result<(), Error> {
    proxmox_backup::tools::setup_safe_path_env();
@ -63,6 +93,8 @@ async fn run() -> Result<(), Error> {
    config.register_template("index", &indexpath)?;
    config.register_template("console", "/usr/share/pve-xtermjs/index.html.hbs")?;

    config.enable_file_log(buildcfg::API_ACCESS_LOG_FN)?;

    let rest_server = RestServer::new(config);

    //openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
@ -87,6 +119,9 @@ async fn run() -> Result<(), Error> {
            let acceptor = Arc::clone(&acceptor);
            async move {
                sock.set_nodelay(true).unwrap();

                let _ = set_tcp_keepalive(sock.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);

                Ok(tokio_openssl::accept(&acceptor, sock)
                    .await
                    .ok() // handshake errors aren't be fatal, so return None to filter
@ -196,8 +231,8 @@ async fn schedule_tasks() -> Result<(), Error> {

    schedule_datastore_garbage_collection().await;
    schedule_datastore_prune().await;
    schedule_datastore_verification().await;
    schedule_datastore_sync_jobs().await;
    schedule_datastore_verify_jobs().await;
    schedule_task_log_rotate().await;

    Ok(())
@ -205,14 +240,14 @@ async fn schedule_tasks() -> Result<(), Error> {

async fn schedule_datastore_garbage_collection() {

    use proxmox_backup::backup::DataStore;
    use proxmox_backup::server::{UPID, WorkerTask};
    use proxmox_backup::config::{
        jobstate::{self, Job},
        datastore::{self, DataStoreConfig}
        datastore::{
            self,
            DataStoreConfig,
        },
    };
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};
    let email = server::lookup_user_email(Userid::root_userid());

    let config = match datastore::config() {
        Err(err) => {
@ -294,6 +329,7 @@ async fn schedule_datastore_garbage_collection() {
    };

    let store2 = store.clone();
    let email2 = email.clone();

    if let Err(err) = WorkerTask::new_thread(
        worker_type,
@ -314,6 +350,13 @@ async fn schedule_datastore_garbage_collection() {
                eprintln!("could not finish job state for {}: {}", worker_type, err);
            }

            if let Some(email2) = email2 {
                let gc_status = datastore.last_gc_status();
                if let Err(err) = crate::server::send_gc_status(&email2, datastore.name(), &gc_status, &result) {
                    eprintln!("send gc notification failed: {}", err);
                }
            }

            result
        }
    ) {
@ -324,15 +367,17 @@ async fn schedule_datastore_garbage_collection() {

async fn schedule_datastore_prune() {

    use proxmox_backup::backup::{
        PruneOptions, DataStore, BackupGroup, compute_prune_info};
    use proxmox_backup::server::{WorkerTask};
    use proxmox_backup::config::{
        jobstate::{self, Job},
        datastore::{self, DataStoreConfig}
    use proxmox_backup::{
        backup::{
            PruneOptions,
            BackupGroup,
            compute_prune_info,
        },
        config::datastore::{
            self,
            DataStoreConfig,
        },
    };
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
@ -469,126 +514,11 @@ async fn schedule_datastore_prune() {
        }
    }

async fn schedule_datastore_verification() {

    use proxmox_backup::backup::{DataStore, verify_all_backups};
    use proxmox_backup::server::{WorkerTask};
    use proxmox_backup::config::{
        jobstate::{self, Job},
        datastore::{self, DataStoreConfig}
    };
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore failed - {}", err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match store_config.verify_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        let worker_type = "verify";

        let last = match jobstate::last_run_time(worker_type, &store) {
            Ok(time) => time,
            Err(err) => {
                eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(Some(next)) => next,
            Ok(None) => continue,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = proxmox::tools::time::epoch_i64();

        if next > now { continue; }

        let mut job = match Job::new(worker_type, &store) {
            Ok(job) => job,
            Err(_) => continue, // could not get lock
        };

        let worker_id = store.clone();
        let store2 = store.clone();
        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(worker_id),
            Userid::backup_userid().clone(),
            false,
            move |worker| {
                job.start(&worker.upid().to_string())?;
                worker.log(format!("starting verification on store {}", store2));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                let result = try_block!({
                    let failed_dirs =
                        verify_all_backups(datastore, worker.clone(), worker.upid())?;
                    if failed_dirs.len() > 0 {
                        worker.log("Failed to verify following snapshots:");
                        for dir in failed_dirs {
                            worker.log(format!("\t{}", dir));
                        }
                        Err(format_err!("verification failed - please check the log for details"))
                    } else {
                        Ok(())
                    }
                });

                let status = worker.create_state(&result);

                if let Err(err) = job.finish(status) {
                    eprintln!("could not finish job state for {}: {}", worker_type, err);
                }

                result
            },
        ) {
            eprintln!("unable to start verification on store {} - {}", store, err);
        }
    }
}

async fn schedule_datastore_sync_jobs() {

    use proxmox_backup::{
        config::{ sync::{self, SyncJobConfig}, jobstate::{self, Job} },
        tools::systemd::time::{ parse_calendar_event, compute_next_event },
    use proxmox_backup::config::sync::{
        self,
        SyncJobConfig,
    };

    let config = match sync::config() {
@ -657,17 +587,72 @@ async fn schedule_datastore_sync_jobs() {
    }
}

async fn schedule_task_log_rotate() {
async fn schedule_datastore_verify_jobs() {

    use proxmox_backup::{
        config::jobstate::{self, Job},
        server::rotate_task_log_archive,
    use proxmox_backup::config::verify::{
        self,
        VerificationJobConfig,
    };
    use proxmox_backup::server::WorkerTask;
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match verify::config() {
        Err(err) => {
            eprintln!("unable to read verification job config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };
    for (job_id, (_, job_config)) in config.sections {
        let job_config: VerificationJobConfig = match serde_json::from_value(job_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("verification job config from_value failed - {}", err);
                continue;
            }
        };
        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),
            None => continue,
        };
        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };
        let worker_type = "verificationjob";
        let last = match jobstate::last_run_time(worker_type, &job_id) {
            Ok(time) => time,
            Err(err) => {
                eprintln!("could not get last run time of {} {}: {}", worker_type, job_id, err);
                continue;
            }
        };
        let next = match compute_next_event(&event, last, false) {
            Ok(Some(next)) => next,
            Ok(None) => continue,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };
        let now = proxmox::tools::time::epoch_i64();
        if next > now { continue; }
        let job = match Job::new(worker_type, &job_id) {
            Ok(job) => job,
            Err(_) => continue, // could not get lock
        };
        let userid = Userid::backup_userid().clone();
        if let Err(err) = do_verification_job(job, job_config, &userid, Some(event_str)) {
            eprintln!("unable to start datastore verification job {} - {}", &job_id, err);
        }
    }
}

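Every scheduler loop in this binary follows the same skeleton: parse the configured calendar event, look up the job's last run time, compute the next trigger, and skip if it lies in the future. A self-contained sketch of that due-check, using a fixed-interval stand-in for the project's real `parse_calendar_event`/`compute_next_event` helpers:

```rust
// Stand-in for a parsed systemd-style calendar event: a fixed interval.
struct IntervalEvent {
    every_secs: i64,
}

fn compute_next(event: &IntervalEvent, last_run: i64) -> i64 {
    last_run + event.every_secs
}

fn main() {
    let event = IntervalEvent { every_secs: 24 * 3600 }; // "daily"
    let last_run = 1_600_000_000;
    let now = 1_600_090_000; // 25h later: the daily trigger has passed
    if compute_next(&event, last_run) <= now {
        println!("job is due, starting worker");
    } else {
        println!("not due yet, skipping");
    }
}
```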
async fn schedule_task_log_rotate() {

    let worker_type = "logrotate";
    let job_id = "task-archive";
    let job_id = "task_archive";

    let last = match jobstate::last_run_time(worker_type, job_id) {
        Ok(time) => time,
@ -724,10 +709,11 @@ async fn schedule_task_log_rotate() {
        move |worker| {
            job.start(&worker.upid().to_string())?;
            worker.log(format!("starting task log rotation"));
            // one entry has normally about ~100-150 bytes
            let max_size = 500000; // at least 5000 entries
            let max_files = 20; // at least 100000 entries
            let result = try_block!({
                // rotate task log archive
                let max_size = 500000; // a normal entry has about 100b, so ~ 5000 entries/file
                let max_files = 20; // times twenty files gives at least 100000 task entries
                let has_rotated = rotate_task_log_archive(max_size, true, Some(max_files))?;
                if has_rotated {
                    worker.log(format!("task log archive was rotated"));
@ -414,13 +414,13 @@ fn paperkey_html(data: &str, subject: Option<String>) -> Result<(), Error> {
        println!("</p>");

        let data = data.join("\n");
        let qr_code = generate_qr_code("png", data.as_bytes())?;
        let qr_code = generate_qr_code("svg", data.as_bytes())?;
        let qr_code = base64::encode_config(&qr_code, base64::STANDARD_NO_PAD);

        println!("<center>");
        println!("<img");
        println!("width=\"{}pt\" height=\"{}pt\"", img_size_pt, img_size_pt);
        println!("src=\"data:image/png;base64,{}\"/>", qr_code);
        println!("src=\"data:image/svg+xml;base64,{}\"/>", qr_code);
        println!("</center>");
        println!("</div>");
    }
@ -447,13 +447,13 @@ fn paperkey_html(data: &str, subject: Option<String>) -> Result<(), Error> {

    println!("</p>");

    let qr_code = generate_qr_code("png", key_text.as_bytes())?;
    let qr_code = generate_qr_code("svg", key_text.as_bytes())?;
    let qr_code = base64::encode_config(&qr_code, base64::STANDARD_NO_PAD);

    println!("<center>");
    println!("<img");
    println!("width=\"{}pt\" height=\"{}pt\"", img_size_pt, img_size_pt);
    println!("src=\"data:image/png;base64,{}\"/>", qr_code);
    println!("src=\"data:image/svg+xml;base64,{}\"/>", qr_code);
    println!("</center>");

    println!("</div>");
@ -144,7 +144,7 @@ fn mount(
    // Process should be deamonized.
    // Make sure to fork before the async runtime is instantiated to avoid troubles.
    let pipe = pipe()?;
    match fork() {
    match unsafe { fork() } {
        Ok(ForkResult::Parent { .. }) => {
            nix::unistd::close(pipe.1).unwrap();
            // Blocks the parent process until we are ready to go in the child
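The `unsafe { fork() }` wrapper tracks the nix crate, which marks `fork` as an `unsafe fn` in the newer releases this codebase moves to (forking a multithreaded process is only safe under narrow conditions). A simplified daemonize-style sketch of the pattern; the real code additionally uses a pipe so the parent can wait until the child signals readiness:

```rust
use nix::unistd::{fork, ForkResult};

fn daemonize() -> nix::Result<()> {
    // Safety: called before any async runtime or extra threads exist, so
    // the child continues from a single-threaded state.
    match unsafe { fork() }? {
        ForkResult::Parent { child } => {
            println!("forked child {}", child);
            std::process::exit(0);
        }
        ForkResult::Child => Ok(()), // continue as the daemon process
    }
}

fn main() -> nix::Result<()> {
    daemonize()?;
    println!("running as daemon child");
    Ok(())
}
```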
@ -4,6 +4,8 @@
pub const CONFIGDIR: &str = "/etc/proxmox-backup";
pub const JS_DIR: &str = "/usr/share/javascript/proxmox-backup";

pub const API_ACCESS_LOG_FN: &str = "/var/log/proxmox-backup/api/access.log";

/// Prepend configuration directory to a file name
///
/// This is a simply way to get the full path for configuration files.
@ -38,6 +38,9 @@ pub struct BackupStats {
    pub csum: [u8; 32],
}

type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>;
type UploadResultReceiver = oneshot::Receiver<Result<(), Error>>;

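The `UploadQueueSender`/`UploadResultReceiver` aliases shorten a signature that previously spelled out the full channel types inline. A self-contained sketch of the queue-plus-completion-channel pattern (placeholder payloads; assumes the tokio 1.x channel API):

```rust
use tokio::sync::{mpsc, oneshot};

// Aliases in the spirit of UploadQueueSender / UploadResultReceiver,
// with placeholder payload types.
type QueueSender = mpsc::Sender<u64>;
type ResultReceiver = oneshot::Receiver<Result<(), String>>;

fn make_queue() -> (QueueSender, ResultReceiver) {
    let (queue_tx, mut queue_rx) = mpsc::channel::<u64>(64);
    let (result_tx, result_rx) = oneshot::channel();
    tokio::spawn(async move {
        // drain the queue, then report overall success on the oneshot
        while let Some(item) = queue_rx.recv().await {
            let _ = item; // process the item here
        }
        let _ = result_tx.send(Ok(()));
    });
    (queue_tx, result_rx)
}

#[tokio::main]
async fn main() {
    let (tx, result) = make_queue();
    tx.send(1).await.unwrap();
    drop(tx); // closing the queue lets the worker finish
    assert!(result.await.unwrap().is_ok());
}
```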
|
|
||||||
impl BackupWriter {
|
impl BackupWriter {
|
||||||
|
|
||||||
fn new(h2: H2Client, abort: AbortHandle, crypt_config: Option<Arc<CryptConfig>>, verbose: bool) -> Arc<Self> {
|
fn new(h2: H2Client, abort: AbortHandle, crypt_config: Option<Arc<CryptConfig>>, verbose: bool) -> Arc<Self> {
|
||||||
@ -262,7 +265,7 @@ impl BackupWriter {
|
|||||||
let archive = if self.verbose {
|
let archive = if self.verbose {
|
||||||
archive_name.to_string()
|
archive_name.to_string()
|
||||||
} else {
|
} else {
|
||||||
crate::tools::format::strip_server_file_extension(archive_name.clone())
|
crate::tools::format::strip_server_file_extension(archive_name)
|
||||||
};
|
};
|
||||||
if archive_name != CATALOG_NAME {
|
if archive_name != CATALOG_NAME {
|
||||||
let speed: HumanByte = ((uploaded * 1_000_000) / (duration.as_micros() as usize)).into();
|
let speed: HumanByte = ((uploaded * 1_000_000) / (duration.as_micros() as usize)).into();
|
||||||
@@ -335,15 +338,15 @@ impl BackupWriter {
         (verify_queue_tx, verify_result_rx)
     }

-    fn append_chunk_queue(h2: H2Client, wid: u64, path: String, verbose: bool) -> (
-        mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>,
-        oneshot::Receiver<Result<(), Error>>,
-    ) {
+    fn append_chunk_queue(
+        h2: H2Client,
+        wid: u64,
+        path: String,
+        verbose: bool,
+    ) -> (UploadQueueSender, UploadResultReceiver) {
         let (verify_queue_tx, verify_queue_rx) = mpsc::channel(64);
         let (verify_result_tx, verify_result_rx) = oneshot::channel();

-        let h2_2 = h2.clone();

         // FIXME: async-block-ify this code!
         tokio::spawn(
             verify_queue_rx
@@ -381,7 +384,7 @@ impl BackupWriter {
                     let request = H2Client::request_builder("localhost", "PUT", &path, None, Some("application/json")).unwrap();
                     let param_data = bytes::Bytes::from(param.to_string().into_bytes());
                     let upload_data = Some(param_data);
-                    h2_2.send_request(request, upload_data)
+                    h2.send_request(request, upload_data)
                         .and_then(move |response| {
                             response
                                 .map_err(Error::from)
@@ -489,6 +492,10 @@ impl BackupWriter {
         Ok(manifest)
     }

+    // We have no `self` here for `h2` and `verbose`, the only other arg "common" with 1 other
+    // funciton in the same path is `wid`, so those 3 could be in a struct, but there's no real use
+    // since this is a private method.
+    #[allow(clippy::too_many_arguments)]
     fn upload_chunk_info_stream(
         h2: H2Client,
         wid: u64,
@@ -515,7 +522,7 @@ impl BackupWriter {
         let is_fixed_chunk_size = prefix == "fixed";

         let (upload_queue, upload_result) =
-            Self::append_chunk_queue(h2.clone(), wid, append_chunk_path.to_owned(), verbose);
+            Self::append_chunk_queue(h2.clone(), wid, append_chunk_path, verbose);

         let start_time = std::time::Instant::now();

@@ -574,10 +581,12 @@ impl BackupWriter {
                     let digest = chunk_info.digest;
                     let digest_str = digest_to_hex(&digest);

-                    if false && verbose { // TO verbose, needs finer verbosity setting granularity
+                    /* too verbose, needs finer verbosity setting granularity
+                    if verbose {
                         println!("upload new chunk {} ({} bytes, offset {})", digest_str,
                             chunk_info.chunk_len, offset);
                     }
+                    */

                     let chunk_data = chunk_info.chunk.into_inner();
                     let param = json!({
@@ -1,5 +1,4 @@
 use std::io::Write;
-use std::task::{Context, Poll};
 use std::sync::{Arc, Mutex, RwLock};
 use std::time::Duration;

@@ -18,15 +17,17 @@ use xdg::BaseDirectories;
 use proxmox::{
     api::error::HttpError,
     sys::linux::tty,
-    tools::{
-        fs::{file_get_json, replace_file, CreateOptions},
-    }
+    tools::fs::{file_get_json, replace_file, CreateOptions},
 };

 use super::pipe_to_stream::PipeToSendStream;
 use crate::api2::types::Userid;
-use crate::tools::async_io::EitherStream;
-use crate::tools::{self, BroadcastFuture, DEFAULT_ENCODE_SET};
+use crate::tools::{
+    self,
+    BroadcastFuture,
+    DEFAULT_ENCODE_SET,
+    http::HttpsConnector,
+};

 #[derive(Clone)]
 pub struct AuthInfo {
@@ -181,10 +182,8 @@ fn load_fingerprint(prefix: &str, server: &str) -> Option<String> {

     for line in raw.split('\n') {
         let items: Vec<String> = line.split_whitespace().map(String::from).collect();
-        if items.len() == 2 {
-            if &items[0] == server {
-                return Some(items[1].clone());
-            }
+        if items.len() == 2 && &items[0] == server {
+            return Some(items[1].clone());
         }
     }

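Note: this and several later hunks in this changeset collapse a nested `if` into a single `&&` condition, the rewrite suggested by clippy's `collapsible_if` lint; behavior is unchanged. Schematically:

    // before: triggers clippy::collapsible_if
    if items.len() == 2 {
        if &items[0] == server {
            return Some(items[1].clone());
        }
    }
    // after: same behavior, one nesting level less
    if items.len() == 2 && &items[0] == server {
        return Some(items[1].clone());
    }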
@@ -212,11 +211,11 @@ fn store_ticket_info(prefix: &str, server: &str, username: &str, ticket: &str, t

     let empty = serde_json::map::Map::new();
     for (server, info) in data.as_object().unwrap_or(&empty) {
-        for (_user, uinfo) in info.as_object().unwrap_or(&empty) {
+        for (user, uinfo) in info.as_object().unwrap_or(&empty) {
             if let Some(timestamp) = uinfo["timestamp"].as_i64() {
                 let age = now - timestamp;
                 if age < ticket_lifetime {
-                    new_data[server][username] = uinfo.clone();
+                    new_data[server][user] = uinfo.clone();
                 }
             }
         }
@@ -294,10 +293,11 @@ impl HttpClient {
             ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE);
         }

-        let mut httpc = hyper::client::HttpConnector::new();
+        let mut httpc = HttpConnector::new();
         httpc.set_nodelay(true); // important for h2 download performance!
         httpc.enforce_http(false); // we want https...

+        httpc.set_connect_timeout(Some(std::time::Duration::new(10, 0)));
         let https = HttpsConnector::with_connector(httpc, ssl_connector_builder.build());

         let client = Client::builder()
@@ -609,7 +609,7 @@ impl HttpClient {
             .await?;

         let connection = connection
-            .map_err(|_| panic!("HTTP/2.0 connection failed"));
+            .map_err(|_| eprintln!("HTTP/2.0 connection failed"));

         let (connection, abort) = futures::future::abortable(connection);
         // A cancellable future returns an Option which is None when cancelled and
@@ -921,61 +921,3 @@ impl H2Client {
         }
     }
 }
-
-#[derive(Clone)]
-pub struct HttpsConnector {
-    http: HttpConnector,
-    ssl_connector: std::sync::Arc<SslConnector>,
-}
-
-impl HttpsConnector {
-    pub fn with_connector(mut http: HttpConnector, ssl_connector: SslConnector) -> Self {
-        http.enforce_http(false);
-
-        Self {
-            http,
-            ssl_connector: std::sync::Arc::new(ssl_connector),
-        }
-    }
-}
-
-type MaybeTlsStream = EitherStream<
-    tokio::net::TcpStream,
-    tokio_openssl::SslStream<tokio::net::TcpStream>,
->;
-
-impl hyper::service::Service<Uri> for HttpsConnector {
-    type Response = MaybeTlsStream;
-    type Error = Error;
-    type Future = std::pin::Pin<Box<
-        dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static
-    >>;
-
-    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
-        // This connector is always ready, but others might not be.
-        Poll::Ready(Ok(()))
-    }
-
-    fn call(&mut self, dst: Uri) -> Self::Future {
-        let mut this = self.clone();
-        async move {
-            let is_https = dst
-                .scheme()
-                .ok_or_else(|| format_err!("missing URL scheme"))?
-                == "https";
-            let host = dst
-                .host()
-                .ok_or_else(|| format_err!("missing hostname in destination url?"))?
-                .to_string();
-
-            let config = this.ssl_connector.configure();
-            let conn = this.http.call(dst).await?;
-            if is_https {
-                let conn = tokio_openssl::connect(config?, &host, conn).await?;
-                Ok(MaybeTlsStream::Right(conn))
-            } else {
-                Ok(MaybeTlsStream::Left(conn))
-            }
-        }.boxed()
-    }
-}
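Note: the connector is removed here but not dropped; per the new `crate::tools::{ ..., http::HttpsConnector }` import earlier in this changeset it now lives under the tools module. A hedged sketch of how a caller builds a hyper client on top of it (constructor name taken from the deleted code above, module path from the new import; everything else is illustrative):

    use hyper::{client::HttpConnector, Body, Client};
    use openssl::ssl::{SslConnector, SslMethod};

    // hypothetical helper: build a TLS-capable client via the relocated connector
    fn build_client() -> Result<Client<crate::tools::http::HttpsConnector, Body>, openssl::error::ErrorStack> {
        let ssl = SslConnector::builder(SslMethod::tls())?;
        let mut httpc = HttpConnector::new();
        httpc.enforce_http(false); // let "https" URIs reach the connector
        let https = crate::tools::http::HttpsConnector::with_connector(httpc, ssl.build());
        Ok(Client::builder().build::<_, Body>(https))
    }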
@@ -531,20 +531,22 @@ pub async fn pull_store(
                     item.backup_type, item.backup_id, userid, owner));
                 errors = true; // do not stop here, instead continue

-            } else {
-                if let Err(err) = pull_group(
-                    worker,
-                    client,
-                    src_repo,
-                    tgt_store.clone(),
-                    &group,
-                    delete,
-                    Some((groups_done, group_count)),
-                ).await {
-                    worker.log(format!("sync group {}/{} failed - {}", item.backup_type, item.backup_id, err));
-                    errors = true; // do not stop here, instead continue
-                }
+            } else if let Err(err) = pull_group(
+                worker,
+                client,
+                src_repo,
+                tgt_store.clone(),
+                &group,
+                delete,
+                Some((groups_done, group_count)),
+            ).await {
+                worker.log(format!(
+                    "sync group {}/{} failed - {}",
+                    item.backup_type,
+                    item.backup_id,
+                    err,
+                ));
+                errors = true; // do not stop here, instead continue
             }
         }

@@ -43,8 +43,8 @@ pub async fn display_task_log(
             } else {
                 break;
             }
-        } else {
-            if lines != limit { bail!("got wrong number of lines from server ({} != {})", lines, limit); }
+        } else if lines != limit {
+            bail!("got wrong number of lines from server ({} != {})", lines, limit);
         }
     }

@@ -18,11 +18,11 @@ use crate::buildcfg;
 pub mod acl;
 pub mod cached_user_info;
 pub mod datastore;
-pub mod jobstate;
 pub mod network;
 pub mod remote;
 pub mod sync;
 pub mod user;
+pub mod verify;

 /// Check configuration directory permissions
 ///
@@ -64,10 +64,8 @@ impl CachedUserInfo {
             return false;
         }
         if let Some(expire) = info.expire {
-            if expire > 0 {
-                if expire <= now() {
-                    return false;
-                }
+            if expire > 0 && expire <= now() {
+                return false;
             }
         }
         return true;
@@ -44,10 +44,6 @@ pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name").schema()
            optional: true,
            schema: PRUNE_SCHEDULE_SCHEMA,
        },
-        "verify-schedule": {
-            optional: true,
-            schema: VERIFY_SCHEDULE_SCHEMA,
-        },
        "keep-last": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_LAST,
@@ -72,6 +68,10 @@ pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name").schema()
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_YEARLY,
        },
+        "verify-new": {
+            optional: true,
+            type: bool,
+        },
    }
 )]
 #[serde(rename_all="kebab-case")]
@@ -87,8 +87,6 @@ pub struct DataStoreConfig {
     #[serde(skip_serializing_if="Option::is_none")]
     pub prune_schedule: Option<String>,
     #[serde(skip_serializing_if="Option::is_none")]
-    pub verify_schedule: Option<String>,
-    #[serde(skip_serializing_if="Option::is_none")]
     pub keep_last: Option<u64>,
     #[serde(skip_serializing_if="Option::is_none")]
     pub keep_hourly: Option<u64>,
@@ -100,6 +98,9 @@ pub struct DataStoreConfig {
     pub keep_monthly: Option<u64>,
     #[serde(skip_serializing_if="Option::is_none")]
     pub keep_yearly: Option<u64>,
+    /// If enabled, all backups will be verified right after completion.
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub verify_new: Option<bool>,
 }

 fn init() -> SectionConfig {
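For illustration, a hypothetical `datastore.cfg` section using the new flag could look like this (section-config syntax assumed from the crate's other config files; all values invented):

    datastore: store1
            path /mnt/datastore/store1
            comment local backup store
            verify-new true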
@@ -157,12 +158,12 @@ pub fn complete_acl_path(_arg: &str, _param: &HashMap<String, String>) -> Vec<St
     let mut list = Vec::new();

     list.push(String::from("/"));
-    list.push(String::from("/storage"));
-    list.push(String::from("/storage/"));
+    list.push(String::from("/datastore"));
+    list.push(String::from("/datastore/"));

     if let Ok((data, _digest)) = config() {
         for id in data.sections.keys() {
-            list.push(format!("/storage/{}", id));
+            list.push(format!("/datastore/{}", id));
         }
     }

@@ -289,8 +289,12 @@ impl Interface {

         if let Some(method6) = self.method6 {
             let mut skip_v6 = false; // avoid empty inet6 manual entry
-            if self.method.is_some() && method6 == NetworkConfigMethod::Manual {
-                if self.comments6.is_none() && self.options6.is_empty() { skip_v6 = true; }
+            if self.method.is_some()
+                && method6 == NetworkConfigMethod::Manual
+                && self.comments6.is_none()
+                && self.options6.is_empty()
+            {
+                skip_v6 = true;
             }

             if !skip_v6 {
@@ -10,7 +10,7 @@ use regex::Regex;

 use proxmox::*; // for IP macros

-pub static IPV4_REVERSE_MASK: &[&'static str] = &[
+pub static IPV4_REVERSE_MASK: &[&str] = &[
     "0.0.0.0",
     "128.0.0.0",
     "192.0.0.0",
src/config/verify.rs (new file, 205 lines)
@@ -0,0 +1,205 @@
+use anyhow::{Error};
+use lazy_static::lazy_static;
+use std::collections::HashMap;
+use serde::{Serialize, Deserialize};
+
+use proxmox::api::{
+    api,
+    schema::*,
+    section_config::{
+        SectionConfig,
+        SectionConfigData,
+        SectionConfigPlugin,
+    }
+};
+
+use proxmox::tools::{fs::replace_file, fs::CreateOptions};
+
+use crate::api2::types::*;
+
+lazy_static! {
+    static ref CONFIG: SectionConfig = init();
+}
+
+
+#[api(
+    properties: {
+        id: {
+            schema: JOB_ID_SCHEMA,
+        },
+        store: {
+            schema: DATASTORE_SCHEMA,
+        },
+        "ignore-verified": {
+            optional: true,
+            schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
+        },
+        "outdated-after": {
+            optional: true,
+            schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
+        },
+        comment: {
+            optional: true,
+            schema: SINGLE_LINE_COMMENT_SCHEMA,
+        },
+        schedule: {
+            optional: true,
+            schema: VERIFICATION_SCHEDULE_SCHEMA,
+        },
+    }
+)]
+#[serde(rename_all="kebab-case")]
+#[derive(Serialize,Deserialize)]
+/// Verification Job
+pub struct VerificationJobConfig {
+    /// unique ID to address this job
+    pub id: String,
+    /// the datastore ID this verificaiton job affects
+    pub store: String,
+    #[serde(skip_serializing_if="Option::is_none")]
+    /// if not set to false, check the age of the last snapshot verification to filter
+    /// out recent ones, depending on 'outdated_after' configuration.
+    pub ignore_verified: Option<bool>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    /// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false.
+    pub outdated_after: Option<i64>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub comment: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    /// when to schedule this job in calendar event notation
+    pub schedule: Option<String>,
+}
+
+
+#[api(
+    properties: {
+        id: {
+            schema: JOB_ID_SCHEMA,
+        },
+        store: {
+            schema: DATASTORE_SCHEMA,
+        },
+        "ignore-verified": {
+            optional: true,
+            schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
+        },
+        "outdated-after": {
+            optional: true,
+            schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
+        },
+        comment: {
+            optional: true,
+            schema: SINGLE_LINE_COMMENT_SCHEMA,
+        },
+        schedule: {
+            optional: true,
+            schema: VERIFICATION_SCHEDULE_SCHEMA,
+        },
+        "next-run": {
+            description: "Estimated time of the next run (UNIX epoch).",
+            optional: true,
+            type: Integer,
+        },
+        "last-run-state": {
+            description: "Result of the last run.",
+            optional: true,
+            type: String,
+        },
+        "last-run-upid": {
+            description: "Task UPID of the last run.",
+            optional: true,
+            type: String,
+        },
+        "last-run-endtime": {
+            description: "Endtime of the last run.",
+            optional: true,
+            type: Integer,
+        },
+    }
+)]
+#[serde(rename_all="kebab-case")]
+#[derive(Serialize,Deserialize)]
+/// Status of Verification Job
+pub struct VerificationJobStatus {
+    /// unique ID to address this job
+    pub id: String,
+    /// the datastore ID this verificaiton job affects
+    pub store: String,
+    #[serde(skip_serializing_if="Option::is_none")]
+    /// if not set to false, check the age of the last snapshot verification to filter
+    /// out recent ones, depending on 'outdated_after' configuration.
+    pub ignore_verified: Option<bool>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    /// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false.
+    pub outdated_after: Option<i64>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub comment: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    /// when to schedule this job in calendar event notation
+    pub schedule: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    /// The timestamp when this job runs the next time.
+    pub next_run: Option<i64>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    /// The state of the last scheduled run, if any
+    pub last_run_state: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    /// The task UPID of the last scheduled run, if any
+    pub last_run_upid: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    /// When the last run was finished, combined with UPID.starttime one can calculate the duration
+    pub last_run_endtime: Option<i64>,
+}
+
+
+fn init() -> SectionConfig {
+    let obj_schema = match VerificationJobConfig::API_SCHEMA {
+        Schema::Object(ref obj_schema) => obj_schema,
+        _ => unreachable!(),
+    };
+
+    let plugin = SectionConfigPlugin::new("verification".to_string(), Some(String::from("id")), obj_schema);
+    let mut config = SectionConfig::new(&JOB_ID_SCHEMA);
+    config.register_plugin(plugin);
+
+    config
+}
+
+pub const VERIFICATION_CFG_FILENAME: &str = "/etc/proxmox-backup/verification.cfg";
+pub const VERIFICATION_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.verification.lck";
+
+pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
+
+    let content = proxmox::tools::fs::file_read_optional_string(VERIFICATION_CFG_FILENAME)?;
+    let content = content.unwrap_or_else(String::new);
+
+    let digest = openssl::sha::sha256(content.as_bytes());
+    let data = CONFIG.parse(VERIFICATION_CFG_FILENAME, &content)?;
+    Ok((data, digest))
+}
+
+pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
+    let raw = CONFIG.write(VERIFICATION_CFG_FILENAME, &config)?;
+
+    let backup_user = crate::backup::backup_user()?;
+    let mode = nix::sys::stat::Mode::from_bits_truncate(0o0640);
+    // set the correct owner/group/permissions while saving file
+    // owner(rw) = root, group(r)= backup
+
+    let options = CreateOptions::new()
+        .perm(mode)
+        .owner(nix::unistd::ROOT)
+        .group(backup_user.gid);
+
+    replace_file(VERIFICATION_CFG_FILENAME, raw.as_bytes(), options)?;
+
+    Ok(())
+}
+
+// shell completion helper
+pub fn complete_verification_job_id(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
+    match config() {
+        Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
+        Err(_) => return vec![],
+    }
+}
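For illustration, a hypothetical `/etc/proxmox-backup/verification.cfg` entry parsed by this module — the section type "verification" and the `id` as section name follow from `init()` above, all values are invented:

    verification: daily-store1
            store store1
            ignore-verified true
            outdated-after 30
            schedule daily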
@@ -1,3 +1,8 @@
+//! See the different modules for documentation on their usage.
+//!
+//! The [backup](backup/index.html) module contains some detailed information
+//! on the inner workings of the backup server regarding data storage.
+
 pub mod task;

 #[macro_use]
@@ -12,7 +12,7 @@ use nix::errno::Errno;
 use nix::fcntl::OFlag;
 use nix::sys::stat::{FileStat, Mode};

-use pathpatterns::{MatchEntry, MatchList, MatchType, PatternFlag};
+use pathpatterns::{MatchEntry, MatchFlag, MatchList, MatchType, PatternFlag};
 use pxar::Metadata;
 use pxar::encoder::LinkOffset;

@@ -291,59 +291,68 @@ impl<'a, 'b> Archiver<'a, 'b> {
     }

     fn read_pxar_excludes(&mut self, parent: RawFd) -> Result<(), Error> {
-        let fd = self.open_file(parent, c_str!(".pxarexclude"), OFlag::O_RDONLY, false)?;
+        let fd = match self.open_file(parent, c_str!(".pxarexclude"), OFlag::O_RDONLY, false)? {
+            Some(fd) => fd,
+            None => return Ok(()),
+        };
+
         let old_pattern_count = self.patterns.len();

         let path_bytes = self.path.as_os_str().as_bytes();

-        if let Some(fd) = fd {
-            let file = unsafe { std::fs::File::from_raw_fd(fd.into_raw_fd()) };
+        let file = unsafe { std::fs::File::from_raw_fd(fd.into_raw_fd()) };

-            use io::BufRead;
-            for line in io::BufReader::new(file).split(b'\n') {
-                let line = match line {
-                    Ok(line) => line,
-                    Err(err) => {
-                        let _ = writeln!(
-                            self.errors,
-                            "ignoring .pxarexclude after read error in {:?}: {}",
-                            self.path,
-                            err,
-                        );
-                        self.patterns.truncate(old_pattern_count);
-                        return Ok(());
-                    }
-                };
-
-                let line = crate::tools::strip_ascii_whitespace(&line);
-
-                if line.is_empty() || line[0] == b'#' {
-                    continue;
-                }
-
-                let mut buf;
-                let (line, mode) = if line[0] == b'/' {
-                    buf = Vec::with_capacity(path_bytes.len() + 1 + line.len());
-                    buf.extend(path_bytes);
-                    buf.extend(line);
-                    (&buf[..], MatchType::Exclude)
-                } else if line.starts_with(b"!/") {
-                    // inverted case with absolute path
-                    buf = Vec::with_capacity(path_bytes.len() + line.len());
-                    buf.extend(path_bytes);
-                    buf.extend(&line[1..]); // without the '!'
-                    (&buf[..], MatchType::Include)
-                } else {
-                    (line, MatchType::Exclude)
-                };
-
-                match MatchEntry::parse_pattern(line, PatternFlag::PATH_NAME, mode) {
-                    Ok(pattern) => self.patterns.push(pattern),
-                    Err(err) => {
-                        let _ = writeln!(self.errors, "bad pattern in {:?}: {}", self.path, err);
-                    }
+        use io::BufRead;
+        for line in io::BufReader::new(file).split(b'\n') {
+            let line = match line {
+                Ok(line) => line,
+                Err(err) => {
+                    let _ = writeln!(
+                        self.errors,
+                        "ignoring .pxarexclude after read error in {:?}: {}",
+                        self.path,
+                        err,
+                    );
+                    self.patterns.truncate(old_pattern_count);
+                    return Ok(());
+                }
+            };
+
+            let line = crate::tools::strip_ascii_whitespace(&line);
+
+            if line.is_empty() || line[0] == b'#' {
+                continue;
+            }
+
+            let mut buf;
+            let (line, mode, anchored) = if line[0] == b'/' {
+                buf = Vec::with_capacity(path_bytes.len() + 1 + line.len());
+                buf.extend(path_bytes);
+                buf.extend(line);
+                (&buf[..], MatchType::Exclude, true)
+            } else if line.starts_with(b"!/") {
+                // inverted case with absolute path
+                buf = Vec::with_capacity(path_bytes.len() + line.len());
+                buf.extend(path_bytes);
+                buf.extend(&line[1..]); // without the '!'
+                (&buf[..], MatchType::Include, true)
+            } else if line.starts_with(b"!") {
+                (&line[1..], MatchType::Include, false)
+            } else {
+                (line, MatchType::Exclude, false)
+            };
+
+            match MatchEntry::parse_pattern(line, PatternFlag::PATH_NAME, mode) {
+                Ok(pattern) => {
+                    if anchored {
+                        self.patterns.push(pattern.add_flags(MatchFlag::ANCHORED));
+                    } else {
+                        self.patterns.push(pattern);
+                    }
+                }
+                Err(err) => {
+                    let _ = writeln!(self.errors, "bad pattern in {:?}: {}", self.path, err);
                 }
             }
         }

|
|||||||
/// Since we are generating an *exclude* list, we need to invert this, so includes get a `'!'`
|
/// Since we are generating an *exclude* list, we need to invert this, so includes get a `'!'`
|
||||||
/// prefix.
|
/// prefix.
|
||||||
fn generate_pxar_excludes_cli(patterns: &[MatchEntry]) -> Vec<u8> {
|
fn generate_pxar_excludes_cli(patterns: &[MatchEntry]) -> Vec<u8> {
|
||||||
use pathpatterns::{MatchFlag, MatchPattern};
|
use pathpatterns::MatchPattern;
|
||||||
|
|
||||||
let mut content = Vec::new();
|
let mut content = Vec::new();
|
||||||
|
|
||||||
|
@@ -266,10 +266,8 @@ impl SessionImpl {
     ) {
         let final_result = match err.downcast::<io::Error>() {
             Ok(err) => {
-                if err.kind() == io::ErrorKind::Other {
-                    if self.verbose {
-                        eprintln!("an IO error occurred: {}", err);
-                    }
+                if err.kind() == io::ErrorKind::Other && self.verbose {
+                    eprintln!("an IO error occurred: {}", err);
                 }

                 // fail the request
@@ -30,3 +30,10 @@ pub mod formatter;
 #[macro_use]
 pub mod rest;
+
+pub mod jobstate;
+
+mod verify_job;
+pub use verify_job::*;
+
+mod email_notifications;
+pub use email_notifications::*;
@@ -2,7 +2,7 @@ use std::collections::HashMap;
 use std::path::PathBuf;
 use std::time::SystemTime;
 use std::fs::metadata;
-use std::sync::RwLock;
+use std::sync::{Mutex, RwLock};

 use anyhow::{bail, Error, format_err};
 use hyper::Method;
|
|||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
|
|
||||||
use proxmox::api::{ApiMethod, Router, RpcEnvironmentType};
|
use proxmox::api::{ApiMethod, Router, RpcEnvironmentType};
|
||||||
|
use proxmox::tools::fs::{create_path, CreateOptions};
|
||||||
|
|
||||||
|
use crate::tools::{FileLogger, FileLogOptions};
|
||||||
|
|
||||||
pub struct ApiConfig {
|
pub struct ApiConfig {
|
||||||
basedir: PathBuf,
|
basedir: PathBuf,
|
||||||
@ -18,6 +21,7 @@ pub struct ApiConfig {
|
|||||||
env_type: RpcEnvironmentType,
|
env_type: RpcEnvironmentType,
|
||||||
templates: RwLock<Handlebars<'static>>,
|
templates: RwLock<Handlebars<'static>>,
|
||||||
template_files: RwLock<HashMap<String, (SystemTime, PathBuf)>>,
|
template_files: RwLock<HashMap<String, (SystemTime, PathBuf)>>,
|
||||||
|
request_log: Option<Mutex<FileLogger>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ApiConfig {
|
impl ApiConfig {
|
||||||
@ -30,6 +34,7 @@ impl ApiConfig {
|
|||||||
env_type,
|
env_type,
|
||||||
templates: RwLock::new(Handlebars::new()),
|
templates: RwLock::new(Handlebars::new()),
|
||||||
template_files: RwLock::new(HashMap::new()),
|
template_files: RwLock::new(HashMap::new()),
|
||||||
|
request_log: None,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -118,4 +123,30 @@ impl ApiConfig {
|
|||||||
templates.render(name, data).map_err(|err| format_err!("{}", err))
|
templates.render(name, data).map_err(|err| format_err!("{}", err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn enable_file_log<P>(&mut self, path: P) -> Result<(), Error>
|
||||||
|
where
|
||||||
|
P: Into<PathBuf>
|
||||||
|
{
|
||||||
|
let path: PathBuf = path.into();
|
||||||
|
if let Some(base) = path.parent() {
|
||||||
|
if !base.exists() {
|
||||||
|
let backup_user = crate::backup::backup_user()?;
|
||||||
|
let opts = CreateOptions::new().owner(backup_user.uid).group(backup_user.gid);
|
||||||
|
create_path(base, None, Some(opts)).map_err(|err| format_err!("{}", err))?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let logger_options = FileLogOptions {
|
||||||
|
append: true,
|
||||||
|
owned_by_backup: true,
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
self.request_log = Some(Mutex::new(FileLogger::new(&path, logger_options)?));
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
pub fn get_file_log(&self) -> Option<&Mutex<FileLogger>> {
|
||||||
|
self.request_log.as_ref()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
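A hedged usage sketch: wiring the access log into the daemon's startup, assuming `ApiConfig::new(basedir, router, env_type)` as the existing constructor (its exact signature is not shown in this diff) and the `API_ACCESS_LOG_FN` constant added to buildcfg.rs earlier in this changeset:

    let mut config = ApiConfig::new(buildcfg::JS_DIR, &ROUTER, RpcEnvironmentType::PUBLIC)?;
    config.enable_file_log(buildcfg::API_ACCESS_LOG_FN)?;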
src/server/email_notifications.rs (new file, 250 lines)
@@ -0,0 +1,250 @@
+use anyhow::Error;
+use serde_json::json;
+
+use handlebars::{Handlebars, Helper, Context, RenderError, RenderContext, Output, HelperResult};
+
+use proxmox::tools::email::sendmail;
+
+use crate::{
+    config::verify::VerificationJobConfig,
+    api2::types::{
+        Userid,
+        GarbageCollectionStatus,
+    },
+    tools::format::HumanByte,
+};
+
+const GC_OK_TEMPLATE: &str = r###"
+
+Datastore: {{datastore}}
+Task ID: {{status.upid}}
+Index file count: {{status.index-file-count}}
+
+Removed garbage: {{human-bytes status.removed-bytes}}
+Removed chunks: {{status.removed-chunks}}
+Remove bad files: {{status.removed-bad}}
+
+Pending removals: {{human-bytes status.pending-bytes}} (in {{status.pending-chunks}} chunks)
+
+Original Data usage: {{human-bytes status.index-data-bytes}}
+On Disk usage: {{human-bytes status.disk-bytes}} ({{relative-percentage status.disk-bytes status.index-data-bytes}})
+On Disk chunks: {{status.disk-chunks}}
+
+Garbage collection successful.
+
+"###;
+
+
+const GC_ERR_TEMPLATE: &str = r###"
+
+Datastore: {{datastore}}
+
+Garbage collection failed: {{error}}
+
+"###;
+
+const VERIFY_OK_TEMPLATE: &str = r###"
+
+Job ID: {{job.id}}
+Datastore: {{job.store}}
+
+Verification successful.
+
+"###;
+
+const VERIFY_ERR_TEMPLATE: &str = r###"
+
+Job ID: {{job.id}}
+Datastore: {{job.store}}
+
+Verification failed on these snapshots:
+
+{{#each errors}}
+{{this}}
+{{/each}}
+
+"###;
+
+lazy_static::lazy_static!{
+
+    static ref HANDLEBARS: Handlebars<'static> = {
+        let mut hb = Handlebars::new();
+
+        hb.set_strict_mode(true);
+
+        hb.register_helper("human-bytes", Box::new(handlebars_humam_bytes_helper));
+        hb.register_helper("relative-percentage", Box::new(handlebars_relative_percentage_helper));
+
+        hb.register_template_string("gc_ok_template", GC_OK_TEMPLATE).unwrap();
+        hb.register_template_string("gc_err_template", GC_ERR_TEMPLATE).unwrap();
+
+        hb.register_template_string("verify_ok_template", VERIFY_OK_TEMPLATE).unwrap();
+        hb.register_template_string("verify_err_template", VERIFY_ERR_TEMPLATE).unwrap();
+
+        hb
+    };
+}
+
+fn send_job_status_mail(
+    email: &str,
+    subject: &str,
+    text: &str,
+) -> Result<(), Error> {
+
+    // Note: OX has serious problems displaying text mails,
+    // so we include html as well
+    let html = format!("<html><body><pre>\n{}\n<pre>", text);
+
+    let nodename = proxmox::tools::nodename();
+
+    let author = format!("Proxmox Backup Server - {}", nodename);
+
+    sendmail(
+        &[email],
+        &subject,
+        Some(&text),
+        Some(&html),
+        None,
+        Some(&author),
+    )?;
+
+    Ok(())
+}
+
+pub fn send_gc_status(
+    email: &str,
+    datastore: &str,
+    status: &GarbageCollectionStatus,
+    result: &Result<(), Error>,
+) -> Result<(), Error> {
+
+    let text = match result {
+        Ok(()) => {
+            let data = json!({
+                "status": status,
+                "datastore": datastore,
+            });
+            HANDLEBARS.render("gc_ok_template", &data)?
+        }
+        Err(err) => {
+            let data = json!({
+                "error": err.to_string(),
+                "datastore": datastore,
+            });
+            HANDLEBARS.render("gc_err_template", &data)?
+        }
+    };
+
+    let subject = match result {
+        Ok(()) => format!(
+            "Garbage Collect Datastore '{}' successful",
+            datastore,
+        ),
+        Err(_) => format!(
+            "Garbage Collect Datastore '{}' failed",
+            datastore,
+        ),
+    };
+
+    send_job_status_mail(email, &subject, &text)?;
+
+    Ok(())
+}
+
+pub fn send_verify_status(
+    email: &str,
+    job: VerificationJobConfig,
+    result: &Result<Vec<String>, Error>,
+) -> Result<(), Error> {
+
+
+    let text = match result {
+        Ok(errors) if errors.is_empty() => {
+            let data = json!({ "job": job });
+            HANDLEBARS.render("verify_ok_template", &data)?
+        }
+        Ok(errors) => {
+            let data = json!({ "job": job, "errors": errors });
+            HANDLEBARS.render("verify_err_template", &data)?
+        }
+        Err(_) => {
+            // aboreted job - do not send any email
+            return Ok(());
+        }
+    };
+
+    let subject = match result {
+        Ok(errors) if errors.is_empty() => format!(
+            "Verify Datastore '{}' successful",
+            job.store,
+        ),
+        _ => format!(
+            "Verify Datastore '{}' failed",
+            job.store,
+        ),
+    };
+
+    send_job_status_mail(email, &subject, &text)?;
+
+    Ok(())
+}
+
+/// Lookup users email address
+///
+/// For "backup@pam", this returns the address from "root@pam".
+pub fn lookup_user_email(userid: &Userid) -> Option<String> {
+
+    use crate::config::user::{self, User};
+
+    if userid == Userid::backup_userid() {
+        return lookup_user_email(Userid::root_userid());
+    }
+
+    if let Ok(user_config) = user::cached_config() {
+        if let Ok(user) = user_config.lookup::<User>("user", userid.as_str()) {
+            return user.email.clone();
+        }
+    }
+
+    None
+}
+
+// Handlerbar helper functions
+
+fn handlebars_humam_bytes_helper(
+    h: &Helper,
+    _: &Handlebars,
+    _: &Context,
+    _rc: &mut RenderContext,
+    out: &mut dyn Output
+) -> HelperResult {
+    let param = h.param(0).map(|v| v.value().as_u64())
+        .flatten()
+        .ok_or(RenderError::new("human-bytes: param not found"))?;
+
+    out.write(&HumanByte::from(param).to_string())?;
+
+    Ok(())
+}
+
+fn handlebars_relative_percentage_helper(
+    h: &Helper,
+    _: &Handlebars,
+    _: &Context,
+    _rc: &mut RenderContext,
+    out: &mut dyn Output
+) -> HelperResult {
+    let param0 = h.param(0).map(|v| v.value().as_f64())
+        .flatten()
+        .ok_or(RenderError::new("relative-percentage: param0 not found"))?;
+    let param1 = h.param(1).map(|v| v.value().as_f64())
+        .flatten()
+        .ok_or(RenderError::new("relative-percentage: param1 not found"))?;
+
+    if param1 == 0.0 {
+        out.write("-")?;
+    } else {
+        out.write(&format!("{:.2}%", (param0*100.0)/param1))?;
+    }
+    Ok(())
+}
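A hedged sketch of how a worker might use these helpers together — the wrapper function is hypothetical, while `lookup_user_email` and `send_gc_status` come from this file:

    // hypothetical wrapper: mail the GC outcome to the job owner, if they have an address
    fn notify_gc(datastore: &str, status: &GarbageCollectionStatus, result: &Result<(), Error>, userid: &Userid) {
        if let Some(email) = lookup_user_email(userid) {
            // a failed notification should not fail the GC task itself
            if let Err(err) = send_gc_status(&email, datastore, status, result) {
                eprintln!("sending GC notification failed: {}", err);
            }
        }
    }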
@@ -7,6 +7,7 @@ pub struct RestEnvironment {
     env_type: RpcEnvironmentType,
     result_attributes: Value,
     user: Option<String>,
+    client_ip: Option<std::net::SocketAddr>,
 }

 impl RestEnvironment {
@@ -14,6 +15,7 @@ impl RestEnvironment {
         Self {
             result_attributes: json!({}),
             user: None,
+            client_ip: None,
             env_type,
         }
     }
@@ -40,4 +42,12 @@ impl RpcEnvironment for RestEnvironment {
     fn get_user(&self) -> Option<String> {
         self.user.clone()
     }
+
+    fn set_client_ip(&mut self, client_ip: Option<std::net::SocketAddr>) {
+        self.client_ip = client_ip;
+    }
+
+    fn get_client_ip(&self) -> Option<std::net::SocketAddr> {
+        self.client_ip.clone()
+    }
 }
@@ -15,7 +15,7 @@
 //! ```no_run
 //! # use anyhow::{bail, Error};
 //! # use proxmox_backup::server::TaskState;
-//! # use proxmox_backup::config::jobstate::*;
+//! # use proxmox_backup::server::jobstate::*;
 //! # fn some_code() -> TaskState { TaskState::OK { endtime: 0 } }
 //! # fn code() -> Result<(), Error> {
 //! // locks the correct file under /var/lib
@@ -3,19 +3,22 @@ use std::future::Future;
 use std::hash::BuildHasher;
 use std::path::{Path, PathBuf};
 use std::pin::Pin;
-use std::sync::Arc;
+use std::sync::{Arc, Mutex};
 use std::task::{Context, Poll};

 use anyhow::{bail, format_err, Error};
 use futures::future::{self, FutureExt, TryFutureExt};
 use futures::stream::TryStreamExt;
-use hyper::header;
+use hyper::header::{self, HeaderMap};
+use hyper::body::HttpBody;
 use hyper::http::request::Parts;
 use hyper::{Body, Request, Response, StatusCode};
+use lazy_static::lazy_static;
 use serde_json::{json, Value};
 use tokio::fs::File;
 use tokio::time::Instant;
 use url::form_urlencoded;
+use regex::Regex;

 use proxmox::http_err;
 use proxmox::api::{
@@ -41,6 +44,7 @@ use super::ApiConfig;
 use crate::auth_helpers::*;
 use crate::api2::types::Userid;
 use crate::tools;
+use crate::tools::FileLogger;
 use crate::tools::ticket::Ticket;
 use crate::config::cached_user_info::CachedUserInfo;

@@ -50,6 +54,8 @@ pub struct RestServer {
     pub api_config: Arc<ApiConfig>,
 }

+const MAX_URI_QUERY_LENGTH: usize = 3072;
+
 impl RestServer {

     pub fn new(api_config: ApiConfig) -> Self {
@@ -105,14 +111,20 @@ pub struct ApiService {
 }

 fn log_response(
+    logfile: Option<&Mutex<FileLogger>>,
     peer: &std::net::SocketAddr,
     method: hyper::Method,
-    path: &str,
+    path_query: &str,
     resp: &Response<Body>,
+    user_agent: Option<String>,
 ) {

     if resp.extensions().get::<NoLogExtension>().is_some() { return; };

+    // we also log URL-to-long requests, so avoid message bigger than PIPE_BUF (4k on Linux)
+    // to profit from atomicty guarantees for O_APPEND opened logfiles
+    let path = &path_query[..MAX_URI_QUERY_LENGTH.min(path_query.len())];
+
     let status = resp.status();

     if !(status.is_success() || status.is_informational()) {
@@ -125,6 +137,51 @@ fn log_response(
         log::error!("{} {}: {} {}: [client {}] {}", method.as_str(), path, status.as_str(), reason, peer, message);
     }
+    if let Some(logfile) = logfile {
+        let user = match resp.extensions().get::<Userid>() {
+            Some(userid) => userid.as_str(),
+            None => "-",
+        };
+        let now = proxmox::tools::time::epoch_i64();
+        // time format which apache/nginx use (by default), copied from pve-http-server
+        let datetime = proxmox::tools::time::strftime_local("%d/%m/%Y:%H:%M:%S %z", now)
+            .unwrap_or("-".into());
+
+        logfile
+            .lock()
+            .unwrap()
+            .log(format!(
+                "{} - {} [{}] \"{} {}\" {} {} {}",
+                peer.ip(),
+                user,
+                datetime,
+                method.as_str(),
+                path,
+                status.as_str(),
+                resp.body().size_hint().lower(),
+                user_agent.unwrap_or("-".into()),
+            ));
+    }
+}
+
+fn get_proxied_peer(headers: &HeaderMap) -> Option<std::net::SocketAddr> {
+    lazy_static! {
+        static ref RE: Regex = Regex::new(r#"for="([^"]+)""#).unwrap();
+    }
+    let forwarded = headers.get(header::FORWARDED)?.to_str().ok()?;
+    let capture = RE.captures(&forwarded)?;
+    let rhost = capture.get(1)?.as_str();
+
+    rhost.parse().ok()
+}
+
+fn get_user_agent(headers: &HeaderMap) -> Option<String> {
+    let agent = headers.get(header::USER_AGENT)?.to_str();
+    agent.map(|s| {
+        let mut s = s.to_owned();
+        s.truncate(128);
+        s
+    }).ok()
 }

 impl tower_service::Service<Request<Body>> for ApiService {
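With the format string above, a logged request comes out in the familiar access-log shape; an invented sample line (IP, user, and sizes are made up):

    192.0.2.10 - admin@pbs [21/10/2020:18:35:01 +0200] "GET /api2/json/version" 200 127 Mozilla/5.0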
@@ -137,31 +194,29 @@ impl tower_service::Service<Request<Body>> for ApiService {
     }

     fn call(&mut self, req: Request<Body>) -> Self::Future {
-        let path = req.uri().path().to_owned();
+        let path = req.uri().path_and_query().unwrap().as_str().to_owned();
         let method = req.method().clone();
+        let user_agent = get_user_agent(req.headers());

         let config = Arc::clone(&self.api_config);
-        let peer = self.peer;
+        let peer = match get_proxied_peer(req.headers()) {
+            Some(proxied_peer) => proxied_peer,
+            None => self.peer,
+        };
         async move {
-            match handle_request(config, req).await {
-                Ok(res) => {
-                    log_response(&peer, method, &path, &res);
-                    Ok::<_, Self::Error>(res)
-                }
+            let response = match handle_request(Arc::clone(&config), req, &peer).await {
+                Ok(response) => response,
                 Err(err) => {
-                    if let Some(apierr) = err.downcast_ref::<HttpError>() {
-                        let mut resp = Response::new(Body::from(apierr.message.clone()));
-                        *resp.status_mut() = apierr.code;
-                        log_response(&peer, method, &path, &resp);
-                        Ok(resp)
-                    } else {
-                        let mut resp = Response::new(Body::from(err.to_string()));
-                        *resp.status_mut() = StatusCode::BAD_REQUEST;
-                        log_response(&peer, method, &path, &resp);
-                        Ok(resp)
-                    }
+                    let (err, code) = match err.downcast_ref::<HttpError>() {
+                        Some(apierr) => (apierr.message.clone(), apierr.code),
+                        _ => (err.to_string(), StatusCode::BAD_REQUEST),
+                    };
+                    Response::builder().status(code).body(err.into())?
                 }
-            }
+            };
+            let logger = config.get_file_log();
+            log_response(logger, &peer, method, &path, &response, user_agent);
+            Ok(response)
         }
         .boxed()
     }
@@ -253,6 +308,7 @@ async fn proxy_protected_request(
     info: &'static ApiMethod,
     mut parts: Parts,
     req_body: Body,
+    peer: &std::net::SocketAddr,
 ) -> Result<Response<Body>, Error> {

     let mut uri_parts = parts.uri.clone().into_parts();
@@ -263,7 +319,10 @@ async fn proxy_protected_request(

     parts.uri = new_uri;

-    let request = Request::from_parts(parts, req_body);
+    let mut request = Request::from_parts(parts, req_body);
+    request
+        .headers_mut()
+        .insert(header::FORWARDED, format!("for=\"{}\";", peer).parse().unwrap());

     let reload_timezone = info.reload_timezone;

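This is the counterpart to `get_proxied_peer()` above: the unprivileged proxy forwards the original client address to the privileged daemon in an RFC 7239 `Forwarded` header, which the regex `for="([^"]+)"` extracts again. An invented example of the header as produced by the `format!` above:

    Forwarded: for="192.0.2.10:34567";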
@@ -375,11 +434,17 @@ fn get_index(
         }
     };

-    Response::builder()
+    let mut resp = Response::builder()
         .status(StatusCode::OK)
         .header(header::CONTENT_TYPE, ct)
         .body(index.into())
-        .unwrap()
+        .unwrap();
+
+    if let Some(userid) = userid {
+        resp.extensions_mut().insert(userid);
+    }
+
+    resp
 }

 fn extension_to_content_type(filename: &Path) -> (&'static str, bool) {
@@ -477,18 +542,18 @@ fn extract_auth_data(headers: &http::HeaderMap) -> (Option<String>, Option<Strin
         }
     }

-    let token = match headers.get("CSRFPreventionToken").map(|v| v.to_str()) {
+    let csrf_token = match headers.get("CSRFPreventionToken").map(|v| v.to_str()) {
        Some(Ok(v)) => Some(v.to_owned()),
        _ => None,
    };

-    (ticket, token, language)
+    (ticket, csrf_token, language)
 }

 fn check_auth(
     method: &hyper::Method,
     ticket: &Option<String>,
-    token: &Option<String>,
+    csrf_token: &Option<String>,
     user_info: &CachedUserInfo,
 ) -> Result<Userid, Error> {
     let ticket_lifetime = tools::ticket::TICKET_LIFETIME;
@@ -502,8 +567,8 @@ fn check_auth(
     }

     if method != hyper::Method::GET {
-        if let Some(token) = token {
-            verify_csrf_prevention_token(csrf_secret(), &userid, &token, -300, ticket_lifetime)?;
+        if let Some(csrf_token) = csrf_token {
+            verify_csrf_prevention_token(csrf_secret(), &userid, &csrf_token, -300, ticket_lifetime)?;
         } else {
             bail!("missing CSRF prevention token");
         }
@@ -512,21 +577,31 @@ fn check_auth(
     Ok(userid)
 }

-async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<Response<Body>, Error> {
+async fn handle_request(
+    api: Arc<ApiConfig>,
+    req: Request<Body>,
+    peer: &std::net::SocketAddr,
+) -> Result<Response<Body>, Error> {
+
     let (parts, body) = req.into_parts();

     let method = parts.method.clone();
     let (path, components) = tools::normalize_uri_path(parts.uri.path())?;

     let comp_len = components.len();

-    //println!("REQUEST {} {}", method, path);
-    //println!("COMPO {:?}", components);
+    let query = parts.uri.query().unwrap_or_default();
+    if path.len() + query.len() > MAX_URI_QUERY_LENGTH {
+        return Ok(Response::builder()
+            .status(StatusCode::URI_TOO_LONG)
+            .body("".into())
+            .unwrap());
+    }

     let env_type = api.env_type();
     let mut rpcenv = RestEnvironment::new(env_type);

+    rpcenv.set_client_ip(Some(*peer));
+
     let user_info = CachedUserInfo::new()?;

     let delay_unauth_time = std::time::Instant::now() + std::time::Duration::from_millis(3000);
@@ -555,8 +630,8 @@ async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<Respo
             }

             if auth_required {
-                let (ticket, token, _) = extract_auth_data(&parts.headers);
-                match check_auth(&method, &ticket, &token, &user_info) {
+                let (ticket, csrf_token, _) = extract_auth_data(&parts.headers);
+                match check_auth(&method, &ticket, &csrf_token, &user_info) {
                     Ok(userid) => rpcenv.set_user(Some(userid.to_string())),
                     Err(err) => {
                         // always delay unauthorized calls by 3 seconds (from start of request)
@@ -581,15 +656,22 @@ async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<Respo
             }

             let result = if api_method.protected && env_type == RpcEnvironmentType::PUBLIC {
-                proxy_protected_request(api_method, parts, body).await
+                proxy_protected_request(api_method, parts, body, peer).await
             } else {
                 handle_api_request(rpcenv, api_method, formatter, parts, body, uri_param).await
             };

-            if let Err(err) = result {
-                return Ok((formatter.format_error)(err));
+            let mut response = match result {
+                Ok(resp) => resp,
+                Err(err) => (formatter.format_error)(err),
+            };
+
+            if let Some(user) = user {
+                let userid: Userid = user.parse()?;
+                response.extensions_mut().insert(userid);
             }
-            return result;
+
+            return Ok(response);
         }
     }

@ -602,12 +684,12 @@ async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<Respo
|
|||||||
}
|
}
|
||||||
|
|
||||||
if comp_len == 0 {
|
if comp_len == 0 {
|
||||||
let (ticket, token, language) = extract_auth_data(&parts.headers);
|
let (ticket, csrf_token, language) = extract_auth_data(&parts.headers);
|
||||||
if ticket != None {
|
if ticket != None {
|
||||||
match check_auth(&method, &ticket, &token, &user_info) {
|
match check_auth(&method, &ticket, &csrf_token, &user_info) {
|
||||||
Ok(userid) => {
|
Ok(userid) => {
|
||||||
let new_token = assemble_csrf_prevention_token(csrf_secret(), &userid);
|
let new_csrf_token = assemble_csrf_prevention_token(csrf_secret(), &userid);
|
||||||
return Ok(get_index(Some(userid), Some(new_token), language, &api, parts));
|
return Ok(get_index(Some(userid), Some(new_csrf_token), language, &api, parts));
|
||||||
}
|
}
|
||||||
_ => {
|
_ => {
|
||||||
tokio::time::delay_until(Instant::from_std(delay_unauth_time)).await;
|
tokio::time::delay_until(Instant::from_std(delay_unauth_time)).await;
|
||||||
|
@@ -75,11 +75,6 @@ impl UPID {
         if worker_type.contains(bad) {
             bail!("illegal characters in worker type '{}'", worker_type);
         }
-        if let Some(ref worker_id) = worker_id {
-            if worker_id.contains(bad) {
-                bail!("illegal characters in worker id '{}'", worker_id);
-            }
-        }

         static WORKER_TASK_NEXT_ID: AtomicUsize = AtomicUsize::new(0);

@@ -112,13 +107,21 @@ impl std::str::FromStr for UPID {

     fn from_str(s: &str) -> Result<Self, Self::Err> {
         if let Some(cap) = PROXMOX_UPID_REGEX.captures(s) {
+
+            let worker_id = if cap["wid"].is_empty() {
+                None
+            } else {
+                let wid = crate::tools::systemd::unescape_unit(&cap["wid"])?;
+                Some(wid)
+            };
+
             Ok(UPID {
                 pid: i32::from_str_radix(&cap["pid"], 16).unwrap(),
                 pstart: u64::from_str_radix(&cap["pstart"], 16).unwrap(),
                 starttime: i64::from_str_radix(&cap["starttime"], 16).unwrap(),
                 task_id: usize::from_str_radix(&cap["task_id"], 16).unwrap(),
                 worker_type: cap["wtype"].to_string(),
-                worker_id: if cap["wid"].is_empty() { None } else { Some(cap["wid"].to_string()) },
+                worker_id,
                 userid: cap["userid"].parse()?,
                 node: cap["node"].to_string(),
             })

@@ -133,7 +136,11 @@ impl std::fmt::Display for UPID {

     fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {

-        let wid = if let Some(ref id) = self.worker_id { id } else { "" };
+        let wid = if let Some(ref id) = self.worker_id {
+            crate::tools::systemd::escape_unit(id, false)
+        } else {
+            String::new()
+        };

         // Note: pstart can be > 32bit if uptime > 497 days, so this can result in
         // more than 8 characters for pstart
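
Worker IDs are now run through systemd unit escaping instead of being rejected for "bad" characters. A minimal round-trip sketch (not part of the diff; the sample worker ID is made up):

// escape_unit maps anything outside [A-Za-z0-9_.] to "-" (for "/") or a
// "\xNN" escape, so the result embeds safely into the colon-separated UPID
let worker_id = "store1:vm-100"; // hypothetical datastore/group pair
let escaped = crate::tools::systemd::escape_unit(worker_id, false);
let roundtrip = crate::tools::systemd::unescape_unit(&escaped)?;
assert_eq!(roundtrip, worker_id);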
src/server/verify_job.rs (new file, 102 lines)
@@ -0,0 +1,102 @@
+use anyhow::{format_err, Error};
+
+use crate::{
+    server::WorkerTask,
+    api2::types::*,
+    server::jobstate::Job,
+    config::verify::VerificationJobConfig,
+    backup::{
+        DataStore,
+        BackupInfo,
+        verify_all_backups,
+    },
+    task_log,
+};
+
+/// Runs a verification job.
+pub fn do_verification_job(
+    mut job: Job,
+    verification_job: VerificationJobConfig,
+    userid: &Userid,
+    schedule: Option<String>,
+) -> Result<String, Error> {
+
+    let datastore = DataStore::lookup_datastore(&verification_job.store)?;
+
+    let datastore2 = datastore.clone();
+
+    let outdated_after = verification_job.outdated_after.clone();
+    let ignore_verified_snapshots = verification_job.ignore_verified.unwrap_or(true);
+
+    let filter = move |backup_info: &BackupInfo| {
+        if !ignore_verified_snapshots {
+            return true;
+        }
+        let manifest = match datastore2.load_manifest(&backup_info.backup_dir) {
+            Ok((manifest, _)) => manifest,
+            Err(_) => return true, // include, so task picks this up as error
+        };
+
+        let raw_verify_state = manifest.unprotected["verify_state"].clone();
+        match serde_json::from_value::<SnapshotVerifyState>(raw_verify_state) {
+            Err(_) => return true, // no last verification, always include
+            Ok(last_verify) => {
+                match outdated_after {
+                    None => false, // never re-verify if ignored and no max age
+                    Some(max_age) => {
+                        let now = proxmox::tools::time::epoch_i64();
+                        let days_since_last_verify = (now - last_verify.upid.starttime) / 86400;
+
+                        days_since_last_verify > max_age
+                    }
+                }
+            }
+        }
+    };
+
+    let email = crate::server::lookup_user_email(userid);
+
+    let job_id = job.jobname().to_string();
+    let worker_type = job.jobtype().to_string();
+    let upid_str = WorkerTask::new_thread(
+        &worker_type,
+        Some(job.jobname().to_string()),
+        userid.clone(),
+        false,
+        move |worker| {
+            job.start(&worker.upid().to_string())?;
+
+            task_log!(worker, "Starting datastore verify job '{}'", job_id);
+            if let Some(event_str) = schedule {
+                task_log!(worker, "task triggered by schedule '{}'", event_str);
+            }
+
+            let result = verify_all_backups(datastore, worker.clone(), worker.upid(), &filter);
+            let job_result = match result {
+                Ok(ref errors) if errors.is_empty() => Ok(()),
+                Ok(_) => Err(format_err!("verification failed - please check the log for details")),
+                Err(_) => Err(format_err!("verification failed - job aborted")),
+            };
+
+            let status = worker.create_state(&job_result);
+
+            match job.finish(status) {
+                Err(err) => eprintln!(
+                    "could not finish job state for {}: {}",
+                    job.jobtype().to_string(),
+                    err
+                ),
+                Ok(_) => (),
+            }
+
+            if let Some(email) = email {
+                if let Err(err) = crate::server::send_verify_status(&email, verification_job, &result) {
+                    eprintln!("send verify notification failed: {}", err);
+                }
+            }
+
+            job_result
+        },
+    )?;
+    Ok(upid_str)
+}
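
A worked example of the filter's re-verification arithmetic, with hypothetical values (not taken from the diff):

// ignore_verified = true and outdated_after = Some(30):
let now = 1_600_000_000i64;
let last_verify_starttime = now - 45 * 86400; // last verified 45 days ago
let days_since_last_verify = (now - last_verify_starttime) / 86400;
assert_eq!(days_since_last_verify, 45);
// 45 > 30, so the filter returns true and the snapshot is re-verified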
@@ -1,6 +1,5 @@
 use std::collections::{HashMap, VecDeque};
 use std::fs::File;
-use std::path::Path;
 use std::io::{Read, Write, BufRead, BufReader};
 use std::panic::UnwindSafe;
 use std::sync::atomic::{AtomicBool, Ordering};

@@ -21,7 +20,7 @@ use proxmox::tools::fs::{create_path, open_file_locked, replace_file, CreateOptions};
 use super::UPID;

 use crate::tools::logrotate::{LogRotate, LogRotateFiles};
-use crate::tools::FileLogger;
+use crate::tools::{FileLogger, FileLogOptions};
 use crate::api2::types::Userid;

 macro_rules! PROXMOX_BACKUP_VAR_RUN_DIR_M { () => ("/run/proxmox-backup") }

@@ -93,22 +92,27 @@ pub fn create_task_control_socket() -> Result<(), Error> {
         "\0{}/proxmox-task-control-{}.sock", PROXMOX_BACKUP_VAR_RUN_DIR, *MY_PID);

     let control_future = super::create_control_socket(socketname, |param| {
-        let param = param.as_object()
+        let param = param
+            .as_object()
             .ok_or_else(|| format_err!("unable to parse parameters (expected json object)"))?;
         if param.keys().count() != 2 { bail!("wrong number of parameters"); }

-        let command = param["command"].as_str()
+        let command = param["command"]
+            .as_str()
            .ok_or_else(|| format_err!("unable to parse parameters (missing command)"))?;

         // we have only two commands for now
-        if !(command == "abort-task" || command == "status") { bail!("got unknown command '{}'", command); }
+        if !(command == "abort-task" || command == "status") {
+            bail!("got unknown command '{}'", command);
+        }

-        let upid_str = param["upid"].as_str()
+        let upid_str = param["upid"]
+            .as_str()
            .ok_or_else(|| format_err!("unable to parse parameters (missing upid)"))?;

         let upid = upid_str.parse::<UPID>()?;

-        if !((upid.pid == *MY_PID) && (upid.pstart == *MY_PID_PSTART)) {
+        if !(upid.pid == *MY_PID && upid.pstart == *MY_PID_PSTART) {
             bail!("upid does not belong to this process");
         }

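A hypothetical abort request as the control socket expects it (a sketch, not part of the diff; `worker` stands in for an existing WorkerTask):

// exactly two keys; "command" must be "abort-task" or "status",
// and the UPID must belong to the running process
let upid_str = worker.upid().to_string();
let param = serde_json::json!({
    "command": "abort-task",
    "upid": upid_str,
});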
@@ -344,26 +348,11 @@ fn lock_task_list_files(exclusive: bool) -> Result<std::fs::File, Error> {
 /// rotates it if it is
 pub fn rotate_task_log_archive(size_threshold: u64, compress: bool, max_files: Option<usize>) -> Result<bool, Error> {
     let _lock = lock_task_list_files(true)?;
-    let path = Path::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN);
-    let metadata = match path.metadata() {
-        Ok(metadata) => metadata,
-        Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(false),
-        Err(err) => bail!("unable to open task archive - {}", err),
-    };

-    if metadata.len() > size_threshold {
-        let mut logrotate = LogRotate::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN, compress).ok_or_else(|| format_err!("could not get archive file names"))?;
-        let backup_user = crate::backup::backup_user()?;
-        logrotate.rotate(
-            CreateOptions::new()
-                .owner(backup_user.uid)
-                .group(backup_user.gid),
-            max_files,
-        )?;
-        Ok(true)
-    } else {
-        Ok(false)
-    }
+    let mut logrotate = LogRotate::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN, compress)
+        .ok_or(format_err!("could not get archive file names"))?;
+
+    logrotate.rotate(size_threshold, None, max_files)
 }

 // atomically read/update the task list, update status of finished tasks

@@ -560,7 +549,8 @@ impl TaskListInfoIterator {
         let archive = if active_only {
             None
         } else {
-            let logrotate = LogRotate::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN, true).ok_or_else(|| format_err!("could not get archive file names"))?;
+            let logrotate = LogRotate::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN, true)
+                .ok_or_else(|| format_err!("could not get archive file names"))?;
             Some(logrotate.files())
         };

@@ -672,7 +662,14 @@ impl WorkerTask {

         println!("FILE: {:?}", path);

-        let logger = FileLogger::new(&path, to_stdout)?;
+        let logger_options = FileLogOptions {
+            to_stdout: to_stdout,
+            exclusive: true,
+            prefix_time: true,
+            read: true,
+            ..Default::default()
+        };
+        let logger = FileLogger::new(&path, logger_options)?;
         nix::unistd::chown(&path, Some(backup_user.uid), Some(backup_user.gid))?;

         let worker = Arc::new(Self {
src/tools.rs (16 lines changed)
@@ -35,6 +35,10 @@ pub mod nom;
 pub mod logrotate;
 pub mod loopdev;
 pub mod fuse_loop;
+pub mod socket;
+pub mod subscription;
+pub mod zip;
+pub mod http;

 mod parallel_handler;
 pub use parallel_handler::*;

@@ -42,6 +46,10 @@ pub use parallel_handler::*;
 mod wrapped_reader_stream;
 pub use wrapped_reader_stream::*;

+mod async_channel_writer;
+pub use async_channel_writer::*;
+
+
 mod std_channel_writer;
 pub use std_channel_writer::*;

@@ -317,10 +325,12 @@ pub fn md5sum(data: &[u8]) -> Result<DigestBytes, Error> {
 pub fn get_hardware_address() -> Result<String, Error> {
     static FILENAME: &str = "/etc/ssh/ssh_host_rsa_key.pub";

-    let contents = proxmox::tools::fs::file_get_contents(FILENAME)?;
-    let digest = md5sum(&contents)?;
+    let contents = proxmox::tools::fs::file_get_contents(FILENAME)
+        .map_err(|e| format_err!("Error getting host key - {}", e))?;
+    let digest = md5sum(&contents)
+        .map_err(|e| format_err!("Error digesting host key - {}", e))?;

-    Ok(proxmox::tools::bin_to_hex(&digest))
+    Ok(proxmox::tools::bin_to_hex(&digest).to_uppercase())
 }

 pub fn assert_if_modified(digest1: &str, digest2: &str) -> Result<(), Error> {
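A small sketch of what the reworked helper now returns (the key material below is made up):

// the hardware address / server ID is the uppercase hex MD5 digest
// of the host's public RSA SSH key
let contents = b"ssh-rsa AAAAB3Nza... root@example"; // hypothetical host key
let digest = md5sum(contents)?;
let server_id = proxmox::tools::bin_to_hex(&digest).to_uppercase();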
@@ -43,8 +43,8 @@ pub const ACL_NEXT_ENTRY: c_int = 1;

 // acl to extended attribute names constants
 // from: acl/include/acl_ea.h
-pub const ACL_EA_ACCESS: &'static str = "system.posix_acl_access";
-pub const ACL_EA_DEFAULT: &'static str = "system.posix_acl_default";
+pub const ACL_EA_ACCESS: &str = "system.posix_acl_access";
+pub const ACL_EA_DEFAULT: &str = "system.posix_acl_default";
 pub const ACL_EA_VERSION: u32 = 0x0002;

 #[link(name = "acl")]
src/tools/async_channel_writer.rs (new file, 106 lines)
@@ -0,0 +1,106 @@
+use std::future::Future;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+use anyhow::{Error, Result};
+use futures::{future::FutureExt, ready};
+use tokio::io::AsyncWrite;
+use tokio::sync::mpsc::Sender;
+
+use proxmox::io_format_err;
+use proxmox::tools::byte_buffer::ByteBuffer;
+use proxmox::sys::error::io_err_other;
+
+/// Wrapper around tokio::sync::mpsc::Sender, which implements Write
+pub struct AsyncChannelWriter {
+    sender: Option<Sender<Result<Vec<u8>, Error>>>,
+    buf: ByteBuffer,
+    state: WriterState,
+}
+
+type SendResult = io::Result<Sender<Result<Vec<u8>>>>;
+
+enum WriterState {
+    Ready,
+    Sending(Pin<Box<dyn Future<Output = SendResult> + Send + 'static>>),
+}
+
+impl AsyncChannelWriter {
+    pub fn new(sender: Sender<Result<Vec<u8>, Error>>, buf_size: usize) -> Self {
+        Self {
+            sender: Some(sender),
+            buf: ByteBuffer::with_capacity(buf_size),
+            state: WriterState::Ready,
+        }
+    }
+
+    fn poll_write_impl(
+        &mut self,
+        cx: &mut Context,
+        buf: &[u8],
+        flush: bool,
+    ) -> Poll<io::Result<usize>> {
+        loop {
+            match &mut self.state {
+                WriterState::Ready => {
+                    if flush {
+                        if self.buf.is_empty() {
+                            return Poll::Ready(Ok(0));
+                        }
+                    } else {
+                        let free_size = self.buf.free_size();
+                        if free_size > buf.len() || self.buf.is_empty() {
+                            let count = free_size.min(buf.len());
+                            self.buf.get_free_mut_slice()[..count].copy_from_slice(&buf[..count]);
+                            self.buf.add_size(count);
+                            return Poll::Ready(Ok(count));
+                        }
+                    }
+
+                    let mut sender = match self.sender.take() {
+                        Some(sender) => sender,
+                        None => return Poll::Ready(Err(io_err_other("no sender"))),
+                    };
+
+                    let data = self.buf.remove_data(self.buf.len()).to_vec();
+                    let future = async move {
+                        sender
+                            .send(Ok(data))
+                            .await
+                            .map(move |_| sender)
+                            .map_err(|err| io_format_err!("could not send: {}", err))
+                    };
+
+                    self.state = WriterState::Sending(future.boxed());
+                }
+                WriterState::Sending(ref mut future) => match ready!(future.as_mut().poll(cx)) {
+                    Ok(sender) => {
+                        self.sender = Some(sender);
+                        self.state = WriterState::Ready;
+                    }
+                    Err(err) => return Poll::Ready(Err(err)),
+                },
+            }
+        }
+    }
+}
+
+impl AsyncWrite for AsyncChannelWriter {
+    fn poll_write(self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll<io::Result<usize>> {
+        let this = self.get_mut();
+        this.poll_write_impl(cx, buf, false)
+    }
+
+    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
+        let this = self.get_mut();
+        match ready!(this.poll_write_impl(cx, &[], true)) {
+            Ok(_) => Poll::Ready(Ok(())),
+            Err(err) => Poll::Ready(Err(err)),
+        }
+    }
+
+    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
+        self.poll_flush(cx)
+    }
+}
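
A usage sketch for the new writer (assuming the tokio 0.2-style mpsc API the file itself uses; buffer size and payload are arbitrary):

use tokio::io::AsyncWriteExt;

let (tx, mut rx) = tokio::sync::mpsc::channel(16);
let mut writer = AsyncChannelWriter::new(tx, 64 * 1024);

tokio::spawn(async move {
    // each received item is one flushed buffer as Result<Vec<u8>, Error>
    while let Some(chunk) = rx.recv().await {
        let _bytes = chunk?;
    }
    Ok::<_, anyhow::Error>(())
});

writer.write_all(b"some data").await?;
writer.flush().await?;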
@@ -101,10 +101,10 @@ impl Reloader {

     // Start ourselves in the background:
     use nix::unistd::{fork, ForkResult};
-    match fork() {
+    match unsafe { fork() } {
         Ok(ForkResult::Child) => {
             // Double fork so systemd can supervise us without nagging...
-            match fork() {
+            match unsafe { fork() } {
                 Ok(ForkResult::Child) => {
                     std::mem::drop(pold);
                     // At this point we call pre-exec helpers. We must be certain that if they fail for
@@ -1,7 +1,7 @@
-use anyhow::{Error};
+use anyhow::Error;
 use std::io::Write;

-/// Log messages with timestamps into files
+/// Log messages with optional automatically added timestamps into files
 ///
 /// Logs messages to file, and optionally to standard output.
 ///

@@ -10,18 +10,44 @@ use std::io::Write;
 /// ```
 /// #[macro_use] extern crate proxmox_backup;
 /// # use anyhow::{bail, format_err, Error};
-/// use proxmox_backup::tools::FileLogger;
+/// use proxmox_backup::tools::{FileLogger, FileLogOptions};
 ///
 /// # std::fs::remove_file("test.log");
-/// let mut log = FileLogger::new("test.log", true).unwrap();
+/// let options = FileLogOptions {
+///     to_stdout: true,
+///     exclusive: true,
+///     ..Default::default()
+/// };
+/// let mut log = FileLogger::new("test.log", options).unwrap();
 /// flog!(log, "A simple log: {}", "Hello!");
 /// ```

+#[derive(Debug, Default)]
+/// Options to control the behavior of a ['FileLogger'] instance
+pub struct FileLogOptions {
+    /// Open underlying log file in append mode, useful when multiple concurrent processes
+    /// want to log to the same file (e.g., HTTP access log). Note that it is only atomic
+    /// for writes smaller than PIPE_BUF (4 KiB on Linux).
+    /// Inside the same process you may still need a mutex for shared access.
+    pub append: bool,
+    /// Open underlying log file as readable
+    pub read: bool,
+    /// If set, ensure that the file is newly created or error out if already existing.
+    pub exclusive: bool,
+    /// Duplicate logged messages to STDOUT, like tee
+    pub to_stdout: bool,
+    /// Prefix messages logged to the file with the current local time as RFC 3339
+    pub prefix_time: bool,
+    /// If set, the file is chowned to the backup:backup user/group.
+    /// Note, this is not designed race free as anybody could change ownership afterwards
+    /// anyway. It must thus be used by all processes which do not run as backup uid/gid.
+    pub owned_by_backup: bool,
+}

 #[derive(Debug)]
 pub struct FileLogger {
     file: std::fs::File,
-    to_stdout: bool,
+    options: FileLogOptions,
 }

 /// Log messages to [FileLogger](tools/struct.FileLogger.html)

@@ -33,24 +59,31 @@ macro_rules! flog {
 }

 impl FileLogger {
-    pub fn new<P: AsRef<std::path::Path>>(file_name: P, to_stdout: bool) -> Result<Self, Error> {
+    pub fn new<P: AsRef<std::path::Path>>(
+        file_name: P,
+        options: FileLogOptions,
+    ) -> Result<Self, Error> {
         let file = std::fs::OpenOptions::new()
-            .read(true)
+            .read(options.read)
             .write(true)
-            .create_new(true)
-            .open(file_name)?;
+            .append(options.append)
+            .create_new(options.exclusive)
+            .create(!options.exclusive)
+            .open(&file_name)?;

-        Ok(Self { file , to_stdout })
+        if options.owned_by_backup {
+            let backup_user = crate::backup::backup_user()?;
+            nix::unistd::chown(file_name.as_ref(), Some(backup_user.uid), Some(backup_user.gid))?;
+        }
+
+        Ok(Self { file, options })
     }

     pub fn log<S: AsRef<str>>(&mut self, msg: S) {
         let msg = msg.as_ref();

-        let mut stdout = std::io::stdout();
-        if self.to_stdout {
+        if self.options.to_stdout {
+            let mut stdout = std::io::stdout();
             stdout.write_all(msg.as_bytes()).unwrap();
             stdout.write_all(b"\n").unwrap();
         }

@@ -58,19 +91,27 @@ impl FileLogger {
         let now = proxmox::tools::time::epoch_i64();
         let rfc3339 = proxmox::tools::time::epoch_to_rfc3339(now).unwrap();

-        let line = format!("{}: {}\n", rfc3339, msg);
+        let line = if self.options.prefix_time {
+            format!("{}: {}\n", rfc3339, msg)
+        } else {
+            format!("{}\n", msg)
+        };
         self.file.write_all(line.as_bytes()).unwrap();
     }
 }

 impl std::io::Write for FileLogger {
     fn write(&mut self, buf: &[u8]) -> Result<usize, std::io::Error> {
-        if self.to_stdout { let _ = std::io::stdout().write(buf); }
+        if self.options.to_stdout {
+            let _ = std::io::stdout().write(buf);
+        }
         self.file.write(buf)
     }

     fn flush(&mut self) -> Result<(), std::io::Error> {
-        if self.to_stdout { let _ = std::io::stdout().flush(); }
+        if self.options.to_stdout {
+            let _ = std::io::stdout().flush();
+        }
         self.file.flush()
     }
 }
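
A sketch of an access-log style configuration the new options enable (the log path is hypothetical):

let options = FileLogOptions {
    append: true,         // multiple processes may share the file
    prefix_time: false,   // the logged line carries its own format
    owned_by_backup: true,
    ..Default::default()
};
let mut access_log = FileLogger::new("/var/log/proxmox-backup/api/access.log", options)?;
flog!(access_log, "GET /api2/json/version 200");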
@@ -50,6 +50,19 @@ pub fn render_bool_with_default_true(value: &Value, _record: &Value) -> Result<String, Error> {
     Ok((if value { "1" } else { "0" }).to_string())
 }

+pub fn render_bytes_human_readable(value: &Value, _record: &Value) -> Result<String, Error> {
+    if value.is_null() { return Ok(String::new()); }
+    let text = match value.as_u64() {
+        Some(bytes) => {
+            HumanByte::from(bytes).to_string()
+        }
+        None => {
+            value.to_string()
+        }
+    };
+    Ok(text)
+}
+
 pub struct HumanByte {
     b: usize,
 }
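A quick sketch of the renderer's behavior (the exact output text depends on HumanByte's Display implementation, which is not shown here):

// numbers render via HumanByte, e.g. something like "2 KiB";
// non-numbers fall back to the raw JSON text; null renders as ""
let rendered = render_bytes_human_readable(&serde_json::json!(2_048u64), &serde_json::Value::Null)?;
println!("{}", rendered);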
@@ -265,11 +265,31 @@ impl Default for FSXAttr {
     }
 }

+/// Attempt to acquire a shared flock on the given path, 'what' and
+/// 'would_block_message' are used for error formatting.
+pub fn lock_dir_noblock_shared(
+    path: &std::path::Path,
+    what: &str,
+    would_block_msg: &str,
+) -> Result<DirLockGuard, Error> {
+    do_lock_dir_noblock(path, what, would_block_msg, false)
+}
+
+/// Attempt to acquire an exclusive flock on the given path, 'what' and
+/// 'would_block_message' are used for error formatting.
 pub fn lock_dir_noblock(
     path: &std::path::Path,
     what: &str,
     would_block_msg: &str,
+) -> Result<DirLockGuard, Error> {
+    do_lock_dir_noblock(path, what, would_block_msg, true)
+}
+
+fn do_lock_dir_noblock(
+    path: &std::path::Path,
+    what: &str,
+    would_block_msg: &str,
+    exclusive: bool,
 ) -> Result<DirLockGuard, Error> {
     let mut handle = Dir::open(path, OFlag::O_RDONLY, Mode::empty())
         .map_err(|err| {

@@ -278,7 +298,7 @@ pub fn lock_dir_noblock(

     // acquire in non-blocking mode, no point in waiting here since other
     // backups could still take a very long time
-    proxmox::tools::fs::lock_file(&mut handle, true, Some(std::time::Duration::from_nanos(0)))
+    proxmox::tools::fs::lock_file(&mut handle, exclusive, Some(std::time::Duration::from_nanos(0)))
         .map_err(|err| {
             format_err!(
                 "unable to acquire lock on {} directory {:?} - {}", what, path,
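A usage sketch for the new shared variant (the snapshot path is a made-up example of the datastore layout):

// readers take the shared flock, so they can coexist; writers keep
// using lock_dir_noblock, which still requests the exclusive lock
let path = std::path::Path::new("/datastore/store1/vm/100/2020-10-22T10:00:00Z");
let _guard = lock_dir_noblock_shared(path, "snapshot", "possibly running backup")?;
// the flock is released when _guard is dropped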
@@ -21,7 +21,7 @@ use proxmox_fuse::{*, requests::FuseRequest};
 use super::loopdev;
 use super::fs;

-const RUN_DIR: &'static str = "/run/pbs-loopdev";
+const RUN_DIR: &str = "/run/pbs-loopdev";

 const_regex! {
     pub LOOPDEV_REGEX = r"^loop\d+$";

src/tools/http.rs (new file, 130 lines)
@@ -0,0 +1,130 @@
+use anyhow::{Error, format_err, bail};
+use lazy_static::lazy_static;
+use std::task::{Context, Poll};
+use std::os::unix::io::AsRawFd;
+
+use hyper::{Uri, Body};
+use hyper::client::{Client, HttpConnector};
+use http::{Request, Response};
+use openssl::ssl::{SslConnector, SslMethod};
+use futures::*;
+
+use crate::tools::{
+    async_io::EitherStream,
+    socket::{
+        set_tcp_keepalive,
+        PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
+    },
+};
+
+lazy_static! {
+    static ref HTTP_CLIENT: Client<HttpsConnector, Body> = {
+        let connector = SslConnector::builder(SslMethod::tls()).unwrap().build();
+        let httpc = HttpConnector::new();
+        let https = HttpsConnector::with_connector(httpc, connector);
+        Client::builder().build(https)
+    };
+}
+
+pub async fn get_string(uri: &str) -> Result<String, Error> {
+    let res = HTTP_CLIENT.get(uri.parse()?).await?;
+
+    let status = res.status();
+    if !status.is_success() {
+        bail!("Got bad status '{}' from server", status)
+    }
+
+    response_body_string(res).await
+}
+
+pub async fn response_body_string(res: Response<Body>) -> Result<String, Error> {
+    let buf = hyper::body::to_bytes(res).await?;
+    String::from_utf8(buf.to_vec())
+        .map_err(|err| format_err!("Error converting HTTP result data: {}", err))
+}
+
+pub async fn post(
+    uri: &str,
+    body: Option<String>,
+    content_type: Option<&str>,
+) -> Result<Response<Body>, Error> {
+    let body = if let Some(body) = body {
+        Body::from(body)
+    } else {
+        Body::empty()
+    };
+    let content_type = content_type.unwrap_or("application/json");
+
+    let request = Request::builder()
+        .method("POST")
+        .uri(uri)
+        .header("User-Agent", "proxmox-backup-client/1.0")
+        .header(hyper::header::CONTENT_TYPE, content_type)
+        .body(body)?;
+
+    HTTP_CLIENT.request(request)
+        .map_err(Error::from)
+        .await
+}
+
+#[derive(Clone)]
+pub struct HttpsConnector {
+    http: HttpConnector,
+    ssl_connector: std::sync::Arc<SslConnector>,
+}
+
+impl HttpsConnector {
+    pub fn with_connector(mut http: HttpConnector, ssl_connector: SslConnector) -> Self {
+        http.enforce_http(false);
+
+        Self {
+            http,
+            ssl_connector: std::sync::Arc::new(ssl_connector),
+        }
+    }
+}
+
+type MaybeTlsStream = EitherStream<
+    tokio::net::TcpStream,
+    tokio_openssl::SslStream<tokio::net::TcpStream>,
+>;
+
+impl hyper::service::Service<Uri> for HttpsConnector {
+    type Response = MaybeTlsStream;
+    type Error = Error;
+    type Future = std::pin::Pin<Box<
+        dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static
+    >>;
+
+    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+        // This connector is always ready, but others might not be.
+        Poll::Ready(Ok(()))
+    }
+
+    fn call(&mut self, dst: Uri) -> Self::Future {
+        let mut this = self.clone();
+        async move {
+            let is_https = dst
+                .scheme()
+                .ok_or_else(|| format_err!("missing URL scheme"))?
+                == "https";
+            let host = dst
+                .host()
+                .ok_or_else(|| format_err!("missing hostname in destination url?"))?
+                .to_string();
+
+            let config = this.ssl_connector.configure();
+            let conn = this.http.call(dst).await?;
+
+            let _ = set_tcp_keepalive(conn.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
+
+            if is_https {
+                let conn = tokio_openssl::connect(config?, &host, conn).await?;
+                Ok(MaybeTlsStream::Right(conn))
+            } else {
+                Ok(MaybeTlsStream::Left(conn))
+            }
+        }.boxed()
+    }
+}
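A usage sketch for the two helpers (the URLs are placeholders; this must run inside a tokio runtime):

let page = crate::tools::http::get_string("https://example.com/status").await?;
println!("{}", page);

let response = crate::tools::http::post(
    "https://example.com/api",
    Some(r#"{"hello":"world"}"#.to_string()),
    None, // content type defaults to application/json
).await?;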
@@ -6,7 +6,7 @@ use std::io::Read;
 use anyhow::{bail, Error};
 use nix::unistd;

-use proxmox::tools::fs::{CreateOptions, make_tmp_file, replace_file};
+use proxmox::tools::fs::{CreateOptions, make_tmp_file};

 /// Used for rotating log files and iterating over them
 pub struct LogRotate {

@@ -46,73 +46,74 @@ impl LogRotate {
         }
     }

+    fn compress(source_path: &PathBuf, target_path: &PathBuf, options: &CreateOptions) -> Result<(), Error> {
+        let mut source = File::open(source_path)?;
+        let (fd, tmp_path) = make_tmp_file(target_path, options.clone())?;
+        let target = unsafe { File::from_raw_fd(fd) };
+        let mut encoder = match zstd::stream::write::Encoder::new(target, 0) {
+            Ok(encoder) => encoder,
+            Err(err) => {
+                let _ = unistd::unlink(&tmp_path);
+                bail!("creating zstd encoder failed - {}", err);
+            }
+        };
+
+        if let Err(err) = std::io::copy(&mut source, &mut encoder) {
+            let _ = unistd::unlink(&tmp_path);
+            bail!("zstd encoding failed for file {:?} - {}", target_path, err);
+        }
+
+        if let Err(err) = encoder.finish() {
+            let _ = unistd::unlink(&tmp_path);
+            bail!("zstd finish failed for file {:?} - {}", target_path, err);
+        }
+
+        if let Err(err) = rename(&tmp_path, target_path) {
+            let _ = unistd::unlink(&tmp_path);
+            bail!("rename failed for file {:?} - {}", target_path, err);
+        }
+
+        if let Err(err) = unistd::unlink(source_path) {
+            bail!("unlink failed for file {:?} - {}", source_path, err);
+        }
+
+        Ok(())
+    }
+
     /// Rotates the files up to 'max_files'
     /// if the 'compress' option was given it will compress the newest file
     ///
     /// e.g. rotates
     /// foo.2.zst => foo.3.zst
-    /// foo.1.zst => foo.2.zst
-    /// foo       => foo.1.zst
-    ///           => foo
-    pub fn rotate(&mut self, options: CreateOptions, max_files: Option<usize>) -> Result<(), Error> {
+    /// foo.1     => foo.2.zst
+    /// foo       => foo.1
+    pub fn do_rotate(&mut self, options: CreateOptions, max_files: Option<usize>) -> Result<(), Error> {
         let mut filenames: Vec<PathBuf> = self.file_names().collect();
         if filenames.is_empty() {
             return Ok(()); // no file means nothing to rotate
         }

         let mut next_filename = self.base_path.clone().canonicalize()?.into_os_string();
-        if self.compress {
-            next_filename.push(format!(".{}.zst", filenames.len()));
-        } else {
-            next_filename.push(format!(".{}", filenames.len()));
-        }
+        next_filename.push(format!(".{}", filenames.len()));

         filenames.push(PathBuf::from(next_filename));
         let count = filenames.len();

-        // rotate all but the first, that we maybe have to compress
-        for i in (1..count-1).rev() {
+        for i in (0..count-1).rev() {
             rename(&filenames[i], &filenames[i+1])?;
         }

         if self.compress {
-            let mut source = File::open(&filenames[0])?;
-            let (fd, tmp_path) = make_tmp_file(&filenames[1], options.clone())?;
-            let target = unsafe { File::from_raw_fd(fd) };
-            let mut encoder = match zstd::stream::write::Encoder::new(target, 0) {
-                Ok(encoder) => encoder,
-                Err(err) => {
-                    let _ = unistd::unlink(&tmp_path);
-                    bail!("creating zstd encoder failed - {}", err);
-                }
-            };
-
-            if let Err(err) = std::io::copy(&mut source, &mut encoder) {
-                let _ = unistd::unlink(&tmp_path);
-                bail!("zstd encoding failed for file {:?} - {}", &filenames[1], err);
-            }
-
-            if let Err(err) = encoder.finish() {
-                let _ = unistd::unlink(&tmp_path);
-                bail!("zstd finish failed for file {:?} - {}", &filenames[1], err);
-            }
-
-            if let Err(err) = rename(&tmp_path, &filenames[1]) {
-                let _ = unistd::unlink(&tmp_path);
-                bail!("rename failed for file {:?} - {}", &filenames[1], err);
-            }
-
-            unistd::unlink(&filenames[0])?;
-        } else {
-            rename(&filenames[0], &filenames[1])?;
+            for i in 2..count {
+                if filenames[i].extension().unwrap_or(std::ffi::OsStr::new("")) != "zst" {
+                    let mut target = filenames[i].clone().into_os_string();
+                    target.push(".zstd");
+                    Self::compress(&filenames[i], &target.into(), &options)?;
+                }
+            }
         }

-        // create empty original file
-        replace_file(&filenames[0], b"", options)?;
-
         if let Some(max_files) = max_files {
-            // delete all files > max_files
             for file in filenames.iter().skip(max_files) {
                 if let Err(err) = unistd::unlink(file) {
                     eprintln!("could not remove {:?}: {}", &file, err);

@@ -122,6 +123,35 @@ impl LogRotate {

         Ok(())
     }
+
+    pub fn rotate(
+        &mut self,
+        max_size: u64,
+        options: Option<CreateOptions>,
+        max_files: Option<usize>
+    ) -> Result<bool, Error> {
+
+        let options = match options {
+            Some(options) => options,
+            None => {
+                let backup_user = crate::backup::backup_user()?;
+                CreateOptions::new().owner(backup_user.uid).group(backup_user.gid)
+            },
+        };
+
+        let metadata = match self.base_path.metadata() {
+            Ok(metadata) => metadata,
+            Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(false),
+            Err(err) => bail!("unable to open task archive - {}", err),
+        };
+
+        if metadata.len() > max_size {
+            self.do_rotate(options, max_files)?;
+            Ok(true)
+        } else {
+            Ok(false)
+        }
+    }
 }

 /// Iterator over logrotated file names
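A sketch of the new size-gated entry point, which the task archive rotation above now calls (path and limits are made up):

let mut logrotate = LogRotate::new("/var/log/proxmox-backup/tasks/archive", true)
    .ok_or_else(|| format_err!("could not get archive file names"))?;
// rotates only once the base file exceeds 1 MiB;
// passing None for options picks backup:backup ownership
let rotated: bool = logrotate.rotate(1024 * 1024, None, Some(8))?;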
src/tools/socket.rs (new file, 23 lines)
@@ -0,0 +1,23 @@
+use std::os::unix::io::RawFd;
+
+use nix::sys::socket::sockopt::{KeepAlive, TcpKeepIdle};
+use nix::sys::socket::setsockopt;
+
+pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120;
+
+/// Set TCP keepalive time on a socket
+///
+/// See "man 7 tcp" for details.
+///
+/// The default on Linux is 7200 (2 hours) which is far too long for
+/// our backup tools.
+pub fn set_tcp_keepalive(
+    socket_fd: RawFd,
+    tcp_keepalive_time: u32,
+) -> nix::Result<()> {
+
+    setsockopt(socket_fd, KeepAlive, &true)?;
+    setsockopt(socket_fd, TcpKeepIdle, &tcp_keepalive_time)?;
+
+    Ok(())
+}
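A usage sketch (the bind address is arbitrary):

use std::os::unix::io::AsRawFd;

let listener = std::net::TcpListener::bind("127.0.0.1:8007")?;
let (conn, _peer) = listener.accept()?;
// failures are ignored; keepalive is best-effort, as in the HTTPS connector above
let _ = set_tcp_keepalive(conn.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);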
325
src/tools/subscription.rs
Normal file
325
src/tools/subscription.rs
Normal file
@ -0,0 +1,325 @@
|
|||||||
|
use anyhow::{Error, format_err, bail};
|
||||||
|
use lazy_static::lazy_static;
|
||||||
|
use serde_json::json;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use regex::Regex;
|
||||||
|
|
||||||
|
use proxmox::api::api;
|
||||||
|
|
||||||
|
use crate::tools;
|
||||||
|
use crate::tools::http;
|
||||||
|
use proxmox::tools::fs::{replace_file, CreateOptions};
|
||||||
|
|
||||||
|
/// How long the local key is valid for in between remote checks
|
||||||
|
pub const MAX_LOCAL_KEY_AGE: i64 = 15 * 24 * 3600;
|
||||||
|
const MAX_KEY_CHECK_FAILURE_AGE: i64 = 5 * 24 * 3600;
|
||||||
|
|
||||||
|
const SHARED_KEY_DATA: &str = "kjfdlskfhiuewhfk947368";
|
||||||
|
const SUBSCRIPTION_FN: &str = "/etc/proxmox-backup/subscription";
|
||||||
|
const APT_AUTH_FN: &str = "/etc/apt/auth.conf.d/pbs.conf";
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
/// Subscription status
|
||||||
|
pub enum SubscriptionStatus {
|
||||||
|
// FIXME: remove?
|
||||||
|
/// newly set subscription, not yet checked
|
||||||
|
NEW,
|
||||||
|
/// no subscription set
|
||||||
|
NOTFOUND,
|
||||||
|
/// subscription set and active
|
||||||
|
ACTIVE,
|
||||||
|
/// subscription set but invalid for this server
|
||||||
|
INVALID,
|
||||||
|
}
|
||||||
|
impl Default for SubscriptionStatus {
|
||||||
|
fn default() -> Self { SubscriptionStatus::NOTFOUND }
|
||||||
|
}
|
||||||
|
impl std::fmt::Display for SubscriptionStatus {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
match self {
|
||||||
|
SubscriptionStatus::NEW => write!(f, "New"),
|
||||||
|
SubscriptionStatus::NOTFOUND => write!(f, "NotFound"),
|
||||||
|
SubscriptionStatus::ACTIVE => write!(f, "Active"),
|
||||||
|
SubscriptionStatus::INVALID => write!(f, "Invalid"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
status: {
|
||||||
|
type: SubscriptionStatus,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Debug, Default, PartialEq, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Proxmox subscription information
|
||||||
|
pub struct SubscriptionInfo {
|
||||||
|
/// Subscription status from the last check
|
||||||
|
pub status: SubscriptionStatus,
|
||||||
|
/// the server ID, if permitted to access
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub serverid: Option<String>,
|
||||||
|
/// timestamp of the last check done
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub checktime: Option<i64>,
|
||||||
|
/// the subscription key, if set and permitted to access
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub key: Option<String>,
|
||||||
|
/// a more human readable status message
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub message: Option<String>,
|
||||||
|
/// human readable productname of the set subscription
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub productname: Option<String>,
|
||||||
|
/// register date of the set subscription
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub regdate: Option<String>,
|
||||||
|
/// next due date of the set subscription
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub nextduedate: Option<String>,
|
||||||
|
/// URL to the web shop
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub url: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn register_subscription(
|
||||||
|
key: &String,
|
||||||
|
server_id: &String,
|
||||||
|
checktime: i64
|
||||||
|
) -> Result<(String, String), Error> {
|
||||||
|
// WHCMS sample code feeds the key into this, but it's just a challenge, so keep it simple
|
||||||
|
let rand = proxmox::tools::bin_to_hex(&proxmox::sys::linux::random_data(16)?);
|
||||||
|
let challenge = format!("{}{}", checktime, rand);
|
||||||
|
|
||||||
|
let params = json!({
|
||||||
|
"licensekey": key,
|
||||||
|
"dir": server_id,
|
||||||
|
"domain": "www.proxmox.com",
|
||||||
|
"ip": "localhost",
|
||||||
|
"check_token": challenge,
|
||||||
|
});
|
||||||
|
let uri = "https://shop.maurer-it.com/modules/servers/licensing/verify.php";
|
||||||
|
let query = tools::json_object_to_query(params)?;
|
||||||
|
let response = http::post(uri, Some(query), Some("application/x-www-form-urlencoded")).await?;
|
||||||
|
let body = http::response_body_string(response).await?;
|
||||||
|
|
||||||
|
Ok((body, challenge))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_status(value: &str) -> SubscriptionStatus {
|
||||||
|
match value.to_lowercase().as_str() {
|
||||||
|
"active" => SubscriptionStatus::ACTIVE,
|
||||||
|
"new" => SubscriptionStatus::NEW,
|
||||||
|
"notfound" => SubscriptionStatus::NOTFOUND,
|
||||||
|
"invalid" => SubscriptionStatus::INVALID,
|
||||||
|
_ => SubscriptionStatus::INVALID,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_register_response(
|
||||||
|
body: &str,
|
||||||
|
key: String,
|
||||||
|
server_id: String,
|
||||||
|
checktime: i64,
|
||||||
|
challenge: &str,
|
||||||
|
) -> Result<SubscriptionInfo, Error> {
|
||||||
|
lazy_static! {
|
||||||
|
static ref ATTR_RE: Regex = Regex::new(r"<([^>]+)>([^<]+)</[^>]+>").unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut info = SubscriptionInfo {
|
||||||
|
key: Some(key),
|
||||||
|
status: SubscriptionStatus::NOTFOUND,
|
||||||
|
checktime: Some(checktime),
|
||||||
|
url: Some("https://www.proxmox.com/en/proxmox-backup-server/pricing".into()),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
let mut md5hash = String::new();
|
||||||
|
let is_server_id = |id: &&str| *id == server_id;
|
||||||
|
|
||||||
|
for caps in ATTR_RE.captures_iter(body) {
|
||||||
|
let (key, value) = (&caps[1], &caps[2]);
|
||||||
|
match key {
|
||||||
|
"status" => info.status = parse_status(value),
|
||||||
|
"productname" => info.productname = Some(value.into()),
|
||||||
|
"regdate" => info.regdate = Some(value.into()),
|
||||||
|
"nextduedate" => info.nextduedate = Some(value.into()),
|
||||||
|
"message" if value == "Directory Invalid" =>
|
||||||
|
info.message = Some("Invalid Server ID".into()),
|
||||||
|
"message" => info.message = Some(value.into()),
|
||||||
|
"validdirectory" => {
|
||||||
|
if value.split(",").find(is_server_id) == None {
|
||||||
|
bail!("Server ID does not match");
|
||||||
|
}
|
||||||
|
info.serverid = Some(server_id.to_owned());
|
||||||
|
},
|
||||||
|
"md5hash" => md5hash = value.to_owned(),
|
||||||
|
_ => (),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let SubscriptionStatus::ACTIVE = info.status {
|
||||||
|
let response_raw = format!("{}{}", SHARED_KEY_DATA, challenge);
|
||||||
|
let expected = proxmox::tools::bin_to_hex(&tools::md5sum(response_raw.as_bytes())?);
|
||||||
|
if expected != md5hash {
|
||||||
|
bail!("Subscription API challenge failed, expected {} != got {}", expected, md5hash);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(info)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_parse_register_response() -> Result<(), Error> {
|
||||||
|
let response = r#"
|
||||||
|
<status>Active</status>
|
||||||
|
<companyname>Proxmox</companyname>
|
||||||
|
<serviceid>41108</serviceid>
|
||||||
|
<productid>71</productid>
|
||||||
|
<productname>Proxmox Backup Server Test Subscription -1 year</productname>
|
||||||
|
<regdate>2020-09-19 00:00:00</regdate>
|
||||||
|
<nextduedate>2021-09-19</nextduedate>
|
||||||
|
<billingcycle>Annually</billingcycle>
|
||||||
|
<validdomain>proxmox.com,www.proxmox.com</validdomain>
|
||||||
|
<validdirectory>830000000123456789ABCDEF00000042</validdirectory>
|
||||||
|
<customfields>Notes=Test Key!</customfields>
|
||||||
|
<addons></addons>
|
||||||
|
<md5hash>969f4df84fe157ee4f5a2f71950ad154</md5hash>
|
||||||
|
"#;
|
||||||
|
let key = "pbst-123456789a".to_string();
|
||||||
|
let server_id = "830000000123456789ABCDEF00000042".to_string();
|
||||||
|
let checktime = 1600000000;
|
||||||
|
let salt = "cf44486bddb6ad0145732642c45b2957";
|
||||||
|
|
||||||
|
let info = parse_register_response(response, key.to_owned(), server_id.to_owned(), checktime, salt)?;
|
||||||
|
|
||||||
|
assert_eq!(info, SubscriptionInfo {
|
||||||
|
key: Some(key),
|
||||||
|
serverid: Some(server_id),
|
||||||
|
status: SubscriptionStatus::ACTIVE,
|
||||||
|
checktime: Some(checktime),
|
||||||
|
url: Some("https://www.proxmox.com/en/proxmox-backup-server/pricing".into()),
|
||||||
|
message: None,
|
||||||
|
nextduedate: Some("2021-09-19".into()),
|
||||||
|
regdate: Some("2020-09-19 00:00:00".into()),
|
||||||
|
productname: Some("Proxmox Backup Server Test Subscription -1 year".into()),
|
||||||
|
});
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// querys the up to date subscription status and parses the response
|
||||||
|
pub fn check_subscription(key: String, server_id: String) -> Result<SubscriptionInfo, Error> {
|
||||||
|
|
||||||
|
let now = proxmox::tools::time::epoch_i64();
|
    let (response, challenge) = tools::runtime::block_on(register_subscription(&key, &server_id, now))
        .map_err(|err| format_err!("Error checking subscription: {}", err))?;

    parse_register_response(&response, key, server_id, now, &challenge)
        .map_err(|err| format_err!("Error parsing subscription check response: {}", err))
}

/// reads in subscription information and does a basic integrity verification
pub fn read_subscription() -> Result<Option<SubscriptionInfo>, Error> {

    let cfg = proxmox::tools::fs::file_read_optional_string(&SUBSCRIPTION_FN)?;
    let cfg = if let Some(cfg) = cfg { cfg } else { return Ok(None); };

    let mut cfg = cfg.lines();

    // first line is key in plain
    let _key = if let Some(key) = cfg.next() { key } else { return Ok(None) };
    // second line is checksum of encoded data
    let checksum = if let Some(csum) = cfg.next() { csum } else { return Ok(None) };

    let encoded: String = cfg.collect::<String>();
    let decoded = base64::decode(encoded.to_owned())?;
    let decoded = std::str::from_utf8(&decoded)?;

    let info: SubscriptionInfo = serde_json::from_str(decoded)?;

    let new_checksum = format!("{}{}{}", info.checktime.unwrap_or(0), encoded, SHARED_KEY_DATA);
    let new_checksum = base64::encode(tools::md5sum(new_checksum.as_bytes())?);

    if checksum != new_checksum {
        bail!("stored checksum doesn't match computed one: '{}' != '{}'", checksum, new_checksum);
    }

    let age = proxmox::tools::time::epoch_i64() - info.checktime.unwrap_or(0);
    if age < -5400 { // allow some delta for DST changes or time syncs, 1.5h
        bail!("Last check time too far in the future.");
    } else if age > MAX_LOCAL_KEY_AGE + MAX_KEY_CHECK_FAILURE_AGE {
        if let SubscriptionStatus::ACTIVE = info.status {
            bail!("subscription information too old");
        }
    }

    Ok(Some(info))
}

/// writes out subscription status
pub fn write_subscription(info: SubscriptionInfo) -> Result<(), Error> {
    let key = info.key.to_owned();
    let server_id = info.serverid.to_owned();

    let raw = if info.key == None || info.checktime == None {
        String::new()
    } else if let SubscriptionStatus::NEW = info.status {
        format!("{}\n", info.key.unwrap())
    } else {
        let encoded = base64::encode(serde_json::to_string(&info)?);
        let csum = format!("{}{}{}", info.checktime.unwrap_or(0), encoded, SHARED_KEY_DATA);
        let csum = base64::encode(tools::md5sum(csum.as_bytes())?);
        format!("{}\n{}\n{}\n", info.key.unwrap(), csum, encoded)
    };

    let backup_user = crate::backup::backup_user()?;
    let mode = nix::sys::stat::Mode::from_bits_truncate(0o0640);
    let file_opts = CreateOptions::new()
        .perm(mode)
        .owner(nix::unistd::ROOT)
        .group(backup_user.gid);

    let subscription_file = std::path::Path::new(SUBSCRIPTION_FN);
    replace_file(subscription_file, raw.as_bytes(), file_opts)?;

    update_apt_auth(key, server_id)?;

    Ok(())
}

/// deletes subscription from server
pub fn delete_subscription() -> Result<(), Error> {
    let subscription_file = std::path::Path::new(SUBSCRIPTION_FN);
    nix::unistd::unlink(subscription_file)?;
    update_apt_auth(None, None)?;
    Ok(())
}

/// updates apt authentication for repo access
pub fn update_apt_auth(key: Option<String>, password: Option<String>) -> Result<(), Error> {
    let auth_conf = std::path::Path::new(APT_AUTH_FN);
    match (key, password) {
        (Some(key), Some(password)) => {
            let conf = format!(
                "machine enterprise.proxmox.com/debian/pbs\n login {}\n password {}\n",
                key,
                password,
            );
            let mode = nix::sys::stat::Mode::from_bits_truncate(0o0640);
            let file_opts = CreateOptions::new()
                .perm(mode)
                .owner(nix::unistd::ROOT);

            // we use a namespaced .conf file, so just overwrite..
            replace_file(auth_conf, conf.as_bytes(), file_opts)
                .map_err(|e| format_err!("Error saving apt auth config - {}", e))?;
        }
        _ => nix::unistd::unlink(auth_conf)
            .map_err(|e| format_err!("Error clearing apt auth config - {}", e))?,
    }
    Ok(())
}
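The three-line subscription file written above (plain key, checksum, base64-encoded JSON) can be reproduced offline. Below is a minimal sketch of the checksum scheme, assuming the generic md5 and base64 crates in place of the crate's own tools::md5sum helper; the shared key and the record contents are made-up placeholders, not the real server-side values:

    fn compute_checksum(checktime: i64, encoded_info: &str, shared_key: &str) -> String {
        // checksum = base64(md5(checktime ++ base64(json(info)) ++ shared key))
        let input = format!("{}{}{}", checktime, encoded_info, shared_key);
        base64::encode(md5::compute(input.as_bytes()).0)
    }

    fn main() {
        // hypothetical subscription record, serialized the same way as above
        let encoded = base64::encode(r#"{"status":"ACTIVE","checktime":1600000000}"#);
        let csum = compute_checksum(1600000000, &encoded, "placeholder-shared-key");
        // the three-line file format: key, checksum, encoded data
        println!("some-key\n{}\n{}", csum, encoded);
    }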
@ -25,7 +25,12 @@ pub fn escape_unit(mut unit: &str, is_path: bool) -> String {
             escaped.push('-');
             continue;
         }
-        if (i == 0 && *c == b'.') || !((*c >= b'0' && *c <= b'9') || (*c >= b'A' && *c <= b'Z') || (*c >= b'a' && *c <= b'z')) {
+        if (i == 0 && *c == b'.')
+            || !(*c == b'_' ||
+                 *c == b'.' ||
+                 (*c >= b'0' && *c <= b'9') ||
+                 (*c >= b'A' && *c <= b'Z') ||
+                 (*c >= b'a' && *c <= b'z')) {
             escaped.push_str(&format!("\\x{:0x}", c));
         } else {
             escaped.push(*c as char);
@ -125,3 +130,32 @@ pub fn stop_unit(unit: &str) -> Result<(), Error> {
 
     Ok(())
 }
+
+#[test]
+fn test_escape_unit() -> Result<(), Error> {
+
+    fn test_escape(i: &str, expected: &str, is_path: bool) {
+        let escaped = escape_unit(i, is_path);
+        assert_eq!(escaped, expected);
+        let unescaped = unescape_unit(&escaped).unwrap();
+        if is_path {
+            let mut p = i.trim_matches('/');
+            if p.is_empty() { p = "/"; }
+            assert_eq!(p, unescaped);
+        } else {
+            assert_eq!(i, unescaped);
+        }
+    }
+
+    test_escape(".test", "\\x2etest", false);
+    test_escape("t.est", "t.est", false);
+    test_escape("_test_", "_test_", false);
+
+    test_escape("/", "-", false);
+    test_escape("//", "--", false);
+
+    test_escape("/", "-", true);
+    test_escape("//", "-", true);
+
+    Ok(())
+}
src/tools/zip.rs | 520 (new file)
@ -0,0 +1,520 @@
//! ZIP Helper
//!
//! Provides an interface to create a ZIP file from ZipEntries.
//! For a more detailed description of the ZIP format, see:
//! https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT

use std::convert::TryInto;
use std::ffi::OsString;
use std::io;
use std::mem::size_of;
use std::os::unix::ffi::OsStrExt;
use std::path::{Component, Path, PathBuf};

use anyhow::{Error, Result};
use endian_trait::Endian;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};

use crc32fast::Hasher;
use proxmox::tools::time::gmtime;
use proxmox::tools::byte_buffer::ByteBuffer;

const LOCAL_FH_SIG: u32 = 0x04034B50;
const LOCAL_FF_SIG: u32 = 0x08074B50;
const CENTRAL_DIRECTORY_FH_SIG: u32 = 0x02014B50;
const END_OF_CENTRAL_DIR: u32 = 0x06054B50;
const VERSION_NEEDED: u16 = 0x002d;
const VERSION_MADE_BY: u16 = 0x032d;

const ZIP64_EOCD_RECORD: u32 = 0x06064B50;
const ZIP64_EOCD_LOCATOR: u32 = 0x07064B50;

// bits for date:
// 0-4: day of the month (1-31)
// 5-8: month (1 = jan, etc.)
// 9-15: year offset from 1980
//
// bits for time:
// 0-4: second / 2
// 5-10: minute (0-59)
// 11-15: hour (0-23)
//
// see https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-filetimetodosdatetime
fn epoch_to_dos(epoch: i64) -> (u16, u16) {
    let gmtime = match gmtime(epoch) {
        Ok(gmtime) => gmtime,
        Err(_) => return (0, 0),
    };

    let seconds = (gmtime.tm_sec / 2) & 0b11111;
    let minutes = gmtime.tm_min & 0b111111;
    let hours = gmtime.tm_hour & 0b11111;
    let time: u16 = ((hours << 11) | (minutes << 5) | (seconds)) as u16;

    let date: u16 = if gmtime.tm_year > (2108 - 1900) || gmtime.tm_year < (1980 - 1900) {
        0
    } else {
        let day = gmtime.tm_mday & 0b11111;
        let month = (gmtime.tm_mon + 1) & 0b1111;
        let year = (gmtime.tm_year + 1900 - 1980) & 0b1111111;
        ((year << 9) | (month << 5) | (day)) as u16
    };

    (date, time)
}
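To make the bit packing above concrete, here is a small worked example that can be checked by hand against the comment: 2000-01-01 00:00:00 UTC is epoch 946684800, which falls inside the representable 1980-2107 window:

    fn main() {
        // 2000-01-01: year offset 2000 - 1980 = 20, month 1, day 1
        let date: u16 = (20 << 9) | (1 << 5) | 1;
        assert_eq!(date, 0x2821);
        // midnight: hour 0, minute 0, second/2 = 0
        let time: u16 = 0;
        // epoch_to_dos(946_684_800) should therefore yield (0x2821, 0)
        assert_eq!((date, time), (0x2821, 0));
    }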
#[derive(Endian)]
#[repr(C, packed)]
struct Zip64Field {
    field_type: u16,
    field_size: u16,
    uncompressed_size: u64,
    compressed_size: u64,
}

#[derive(Endian)]
#[repr(C, packed)]
struct Zip64FieldWithOffset {
    field_type: u16,
    field_size: u16,
    uncompressed_size: u64,
    compressed_size: u64,
    offset: u64,
}

#[derive(Endian)]
#[repr(C, packed)]
struct LocalFileHeader {
    signature: u32,
    version_needed: u16,
    flags: u16,
    compression: u16,
    time: u16,
    date: u16,
    crc32: u32,
    compressed_size: u32,
    uncompressed_size: u32,
    filename_len: u16,
    extra_field_len: u16,
}

#[derive(Endian)]
#[repr(C, packed)]
struct LocalFileFooter {
    signature: u32,
    crc32: u32,
    compressed_size: u64,
    uncompressed_size: u64,
}

#[derive(Endian)]
#[repr(C, packed)]
struct CentralDirectoryFileHeader {
    signature: u32,
    version_made_by: u16,
    version_needed: u16,
    flags: u16,
    compression: u16,
    time: u16,
    date: u16,
    crc32: u32,
    compressed_size: u32,
    uncompressed_size: u32,
    filename_len: u16,
    extra_field_len: u16,
    comment_len: u16,
    start_disk: u16,
    internal_flags: u16,
    external_flags: u32,
    offset: u32,
}

#[derive(Endian)]
#[repr(C, packed)]
struct EndOfCentralDir {
    signature: u32,
    disk_number: u16,
    start_disk: u16,
    disk_record_count: u16,
    total_record_count: u16,
    directory_size: u32,
    directory_offset: u32,
    comment_len: u16,
}

#[derive(Endian)]
#[repr(C, packed)]
struct Zip64EOCDRecord {
    signature: u32,
    field_size: u64,
    version_made_by: u16,
    version_needed: u16,
    disk_number: u32,
    disk_number_central_dir: u32,
    disk_record_count: u64,
    total_record_count: u64,
    directory_size: u64,
    directory_offset: u64,
}

#[derive(Endian)]
#[repr(C, packed)]
struct Zip64EOCDLocator {
    signature: u32,
    disk_number: u32,
    offset: u64,
    disk_count: u32,
}

async fn write_struct<E, T>(output: &mut T, data: E) -> io::Result<()>
where
    T: AsyncWrite + ?Sized + Unpin,
    E: Endian,
{
    let data = data.to_le();

    let data = unsafe {
        std::slice::from_raw_parts(
            &data as *const E as *const u8,
            core::mem::size_of_val(&data),
        )
    };
    output.write_all(data).await
}
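The #[repr(C, packed)] layout is what makes the unsafe byte view in write_struct valid as a serializer: the structs contain no padding, so their in-memory size equals the on-disk record size. A self-contained sanity check (the header struct is repeated here, without the Endian derive, so the snippet compiles on its own; the 30-byte figure comes from APPNOTE.TXT):

    use std::mem::size_of;

    #[allow(dead_code)]
    #[repr(C, packed)]
    struct LocalFileHeader { // same field layout as in the module above
        signature: u32,
        version_needed: u16,
        flags: u16,
        compression: u16,
        time: u16,
        date: u16,
        crc32: u32,
        compressed_size: u32,
        uncompressed_size: u32,
        filename_len: u16,
        extra_field_len: u16,
    }

    fn main() {
        // the ZIP local file header is specified as exactly 30 bytes
        assert_eq!(size_of::<LocalFileHeader>(), 30);
    }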
/// Represents an Entry in a ZIP File
///
/// used to add to a ZipEncoder
pub struct ZipEntry {
    filename: OsString,
    mtime: i64,
    mode: u16,
    crc32: u32,
    uncompressed_size: u64,
    compressed_size: u64,
    offset: u64,
    is_file: bool,
}

impl ZipEntry {
    /// Creates a new ZipEntry
    ///
    /// if is_file is false the path will contain a trailing separator,
    /// so that the zip file understands that it is a directory
    pub fn new<P: AsRef<Path>>(path: P, mtime: i64, mode: u16, is_file: bool) -> Self {
        let mut relpath = PathBuf::new();

        for comp in path.as_ref().components() {
            if let Component::Normal(_) = comp {
                relpath.push(comp);
            }
        }

        if !is_file {
            relpath.push(""); // adds trailing slash
        }

        Self {
            filename: relpath.into(),
            crc32: 0,
            mtime,
            mode,
            uncompressed_size: 0,
            compressed_size: 0,
            offset: 0,
            is_file,
        }
    }

    async fn write_local_header<W>(&self, mut buf: &mut W) -> io::Result<usize>
    where
        W: AsyncWrite + Unpin + ?Sized,
    {
        let filename = self.filename.as_bytes();
        let filename_len = filename.len();
        let header_size = size_of::<LocalFileHeader>();
        let zip_field_size = size_of::<Zip64Field>();
        let size: usize = header_size + filename_len + zip_field_size;

        let (date, time) = epoch_to_dos(self.mtime);

        write_struct(
            &mut buf,
            LocalFileHeader {
                signature: LOCAL_FH_SIG,
                version_needed: 0x2d,
                flags: 1 << 3,
                compression: 0,
                time,
                date,
                crc32: 0,
                compressed_size: 0xFFFFFFFF,
                uncompressed_size: 0xFFFFFFFF,
                filename_len: filename_len as u16,
                extra_field_len: zip_field_size as u16,
            },
        )
        .await?;

        buf.write_all(filename).await?;

        write_struct(
            &mut buf,
            Zip64Field {
                field_type: 0x0001,
                field_size: 2 * 8,
                uncompressed_size: 0,
                compressed_size: 0,
            },
        )
        .await?;

        Ok(size)
    }

    async fn write_data_descriptor<W: AsyncWrite + Unpin + ?Sized>(
        &self,
        mut buf: &mut W,
    ) -> io::Result<usize> {
        let size = size_of::<LocalFileFooter>();

        write_struct(
            &mut buf,
            LocalFileFooter {
                signature: LOCAL_FF_SIG,
                crc32: self.crc32,
                compressed_size: self.compressed_size,
                uncompressed_size: self.uncompressed_size,
            },
        )
        .await?;

        Ok(size)
    }

    async fn write_central_directory_header<W: AsyncWrite + Unpin + ?Sized>(
        &self,
        mut buf: &mut W,
    ) -> io::Result<usize> {
        let filename = self.filename.as_bytes();
        let filename_len = filename.len();
        let header_size = size_of::<CentralDirectoryFileHeader>();
        let zip_field_size = size_of::<Zip64FieldWithOffset>();
        let size: usize = header_size + filename_len + zip_field_size;

        let (date, time) = epoch_to_dos(self.mtime);

        write_struct(
            &mut buf,
            CentralDirectoryFileHeader {
                signature: CENTRAL_DIRECTORY_FH_SIG,
                version_made_by: VERSION_MADE_BY,
                version_needed: VERSION_NEEDED,
                flags: 1 << 3,
                compression: 0,
                time,
                date,
                crc32: self.crc32,
                compressed_size: 0xFFFFFFFF,
                uncompressed_size: 0xFFFFFFFF,
                filename_len: filename_len as u16,
                extra_field_len: zip_field_size as u16,
                comment_len: 0,
                start_disk: 0,
                internal_flags: 0,
                external_flags: (self.mode as u32) << 16 | (!self.is_file as u32) << 4,
                offset: 0xFFFFFFFF,
            },
        )
        .await?;

        buf.write_all(filename).await?;

        write_struct(
            &mut buf,
            Zip64FieldWithOffset {
                field_type: 1,
                field_size: 3 * 8,
                uncompressed_size: self.uncompressed_size,
                compressed_size: self.compressed_size,
                offset: self.offset,
            },
        )
        .await?;

        Ok(size)
    }
}
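A side effect worth noting: ZipEntry::new keeps only Normal path components, so absolute or parent-relative paths cannot place an entry outside the archive root, and directory entries get their trailing slash from PathBuf::push(""). A standalone sketch of that behavior:

    use std::path::{Component, Path, PathBuf};

    fn sanitize(path: &str, is_file: bool) -> PathBuf {
        let mut relpath = PathBuf::new();
        for comp in Path::new(path).components() {
            if let Component::Normal(_) = comp {
                relpath.push(comp);
            }
        }
        if !is_file {
            relpath.push(""); // trailing slash marks a directory entry
        }
        relpath
    }

    fn main() {
        // RootDir and ParentDir components are simply dropped
        assert_eq!(sanitize("/abs/../file.txt", true).to_str(), Some("abs/file.txt"));
        // directories carry a trailing separator in the stored filename
        assert_eq!(sanitize("some/dir", false).to_str(), Some("some/dir/"));
    }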
/// Wraps a writer that implements AsyncWrite for creating a ZIP archive
///
/// This will create a ZIP archive on the fly with files added with
/// 'add_entry'. To finish the file, call 'finish'
/// Example:
/// ```no_run
/// use proxmox_backup::tools::zip::*;
/// use tokio::fs::File;
/// use tokio::prelude::*;
/// use anyhow::{Error, Result};
///
/// #[tokio::main]
/// async fn main() -> Result<(), Error> {
///     let target = File::create("foo.zip").await?;
///     let mut source = File::open("foo.txt").await?;
///
///     let mut zip = ZipEncoder::new(target);
///     zip.add_entry(ZipEntry::new(
///         "foo.txt",
///         0,
///         0o100755,
///         true,
///     ), Some(source)).await?;
///
///     zip.finish().await?;
///
///     Ok(())
/// }
/// ```
pub struct ZipEncoder<W>
where
    W: AsyncWrite + Unpin,
{
    byte_count: usize,
    files: Vec<ZipEntry>,
    target: W,
    buf: ByteBuffer,
}

impl<W: AsyncWrite + Unpin> ZipEncoder<W> {
    pub fn new(target: W) -> Self {
        Self {
            byte_count: 0,
            files: Vec::new(),
            target,
            buf: ByteBuffer::with_capacity(1024*1024),
        }
    }

    pub async fn add_entry<R: AsyncRead + Unpin>(
        &mut self,
        mut entry: ZipEntry,
        content: Option<R>,
    ) -> Result<(), Error> {
        entry.offset = self.byte_count.try_into()?;
        self.byte_count += entry.write_local_header(&mut self.target).await?;
        if let Some(mut content) = content {
            let mut hasher = Hasher::new();
            let mut size = 0;
            loop {
                let count = self.buf.read_from_async(&mut content).await?;

                // end of file
                if count == 0 {
                    break;
                }

                size += count;
                hasher.update(&self.buf);
                self.target.write_all(&self.buf).await?;
                self.buf.consume(count);
            }

            self.byte_count += size;
            entry.compressed_size = size.try_into()?;
            entry.uncompressed_size = size.try_into()?;
            entry.crc32 = hasher.finalize();
        }
        self.byte_count += entry.write_data_descriptor(&mut self.target).await?;

        self.files.push(entry);

        Ok(())
    }

    async fn write_eocd(
        &mut self,
        central_dir_size: usize,
        central_dir_offset: usize,
    ) -> Result<(), Error> {
        let entrycount = self.files.len();

        let mut count = entrycount as u16;
        let mut directory_size = central_dir_size as u32;
        let mut directory_offset = central_dir_offset as u32;

        if central_dir_size > u32::MAX as usize
            || central_dir_offset > u32::MAX as usize
            || entrycount > u16::MAX as usize
        {
            count = 0xFFFF;
            directory_size = 0xFFFFFFFF;
            directory_offset = 0xFFFFFFFF;

            write_struct(
                &mut self.target,
                Zip64EOCDRecord {
                    signature: ZIP64_EOCD_RECORD,
                    field_size: 44,
                    version_made_by: VERSION_MADE_BY,
                    version_needed: VERSION_NEEDED,
                    disk_number: 0,
                    disk_number_central_dir: 0,
                    disk_record_count: entrycount.try_into()?,
                    total_record_count: entrycount.try_into()?,
                    directory_size: central_dir_size.try_into()?,
                    directory_offset: central_dir_offset.try_into()?,
                },
            )
            .await?;

            let locator_offset = central_dir_offset + central_dir_size;

            write_struct(
                &mut self.target,
                Zip64EOCDLocator {
                    signature: ZIP64_EOCD_LOCATOR,
                    disk_number: 0,
                    offset: locator_offset.try_into()?,
                    disk_count: 1,
                },
            )
            .await?;
        }

        write_struct(
            &mut self.target,
            EndOfCentralDir {
                signature: END_OF_CENTRAL_DIR,
                disk_number: 0,
                start_disk: 0,
                disk_record_count: count,
                total_record_count: count,
                directory_size,
                directory_offset,
                comment_len: 0,
            },
        )
        .await?;

        Ok(())
    }

    pub async fn finish(&mut self) -> Result<(), Error> {
        let central_dir_offset = self.byte_count;
        let mut central_dir_size = 0;

        for file in &self.files {
            central_dir_size += file
                .write_central_directory_header(&mut self.target)
                .await?;
        }

        self.write_eocd(central_dir_size, central_dir_offset)
            .await?;

        self.target.flush().await?;

        Ok(())
    }
}
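write_eocd above only emits the ZIP64 end-of-central-directory record and locator once a legacy EOCD field would overflow; the 16/32-bit fields are then pinned to their sentinel values (0xFFFF and 0xFFFFFFFF) so readers consult the ZIP64 record instead. A condensed sketch of that decision, with hypothetical inputs:

    // mirrors the overflow check in write_eocd; illustrative only
    fn needs_zip64(entries: usize, dir_size: usize, dir_offset: usize) -> bool {
        entries > u16::MAX as usize
            || dir_size > u32::MAX as usize
            || dir_offset > u32::MAX as usize
    }

    fn main() {
        // a small archive fits the legacy EOCD
        assert!(!needs_zip64(10, 1 << 10, 1 << 20));
        // more than 65535 entries forces the ZIP64 records
        assert!(needs_zip64(70_000, 1 << 10, 1 << 20));
    }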
@ -8,6 +8,7 @@ Ext.define('pbs-data-store-snapshots', {
 	    type: 'date',
 	    dateFormat: 'timestamp',
 	},
+	'comment',
 	'files',
 	'owner',
 	'verification',
@ -285,6 +286,23 @@ Ext.define('PBS.DataStoreContent', {
 	    win.show();
 	},
+
+	verifyAll: function() {
+	    var view = this.getView();
+
+	    Proxmox.Utils.API2Request({
+		url: `/admin/datastore/${view.datastore}/verify`,
+		method: 'POST',
+		failure: function(response) {
+		    Ext.Msg.alert(gettext('Error'), response.htmlStatus);
+		},
+		success: function(response, options) {
+		    Ext.create('Proxmox.window.TaskViewer', {
+			upid: response.result.data,
+		    }).show();
+		},
+	    });
+	},
+
 	onVerify: function(view, rI, cI, item, e, rec) {
 	    let me = this;
 	    view = me.getView();
@ -325,6 +343,22 @@ Ext.define('PBS.DataStoreContent', {
 	    });
 	},
+
+	onNotesEdit: function(view, data) {
+	    let me = this;
+
+	    let url = `/admin/datastore/${view.datastore}/notes`;
+	    Ext.create('PBS.window.NotesEdit', {
+		url: url,
+		autoShow: true,
+		apiCallDone: () => me.reload(), // FIXME: do something more efficient?
+		extraRequestParams: {
+		    "backup-type": data["backup-type"],
+		    "backup-id": data["backup-id"],
+		    "backup-time": (data['backup-time'].getTime()/1000).toFixed(0),
+		},
+	    });
+	},
+
 	onForget: function(view, rI, cI, item, e, rec) {
 	    let me = this;
 	    view = this.getView();
@ -495,6 +529,49 @@ Ext.define('PBS.DataStoreContent', {
 	    dataIndex: 'text',
 	    flex: 1,
 	},
+	{
+	    text: gettext('Comment'),
+	    dataIndex: 'comment',
+	    flex: 1,
+	    renderer: (v, meta, record) => {
+		let data = record.data;
+		if (!data || data.leaf || record.parentNode.id === 'root') {
+		    return '';
+		}
+		if (v === undefined || v === null) {
+		    v = '';
+		}
+		v = Ext.String.htmlEncode(v);
+		let icon = 'fa fa-fw fa-pencil';
+
+		return `<span class="snapshot-comment-column">${v}</span>
+		    <i data-qtip="${gettext('Edit')}" style="float: right;" class="${icon}"></i>`;
+	    },
+	    listeners: {
+		afterrender: function(component) {
+		    // a bit of a hack, but relatively easy, cheap and works out well.
+		    // more efficient to use one handler for the whole column than for each icon
+		    component.on('click', function(tree, cell, rowI, colI, e, rec) {
+			let el = e.target;
+			if (el.tagName !== "I" || !el.classList.contains("fa-pencil")) {
+			    return;
+			}
+			let view = tree.up();
+			let controller = view.controller;
+			controller.onNotesEdit(view, rec.data);
+		    });
+		},
+		dblclick: function(tree, el, row, col, ev, rec) {
+		    let data = rec.data || {};
+		    if (data.leaf || rec.parentNode.id === 'root') {
+			return;
+		    }
+		    let view = tree.up();
+		    let controller = view.controller;
+		    controller.onNotesEdit(view, rec.data);
+		},
+	    },
+	},
 	{
 	    header: gettext('Actions'),
 	    xtype: 'actioncolumn',
@ -502,25 +579,25 @@ Ext.define('PBS.DataStoreContent', {
 	    items: [
 		{
 		    handler: 'onVerify',
-		    tooltip: gettext('Verify'),
-		    getClass: (v, m, rec) => rec.data.leaf ? 'pmx-hidden' : 'fa fa-search',
+		    getTip: (v, m, rec) => Ext.String.format(gettext("Verify '{0}'"), v),
+		    getClass: (v, m, rec) => rec.data.leaf ? 'pmx-hidden' : 'pve-icon-verify-lettering',
 		    isDisabled: (v, r, c, i, rec) => !!rec.data.leaf,
 		},
 		{
 		    handler: 'onPrune',
-		    tooltip: gettext('Prune'),
+		    getTip: (v, m, rec) => Ext.String.format(gettext("Prune '{0}'"), v),
 		    getClass: (v, m, rec) => rec.parentNode.id ==='root' ? 'fa fa-scissors' : 'pmx-hidden',
 		    isDisabled: (v, r, c, i, rec) => rec.parentNode.id !=='root',
 		},
 		{
 		    handler: 'onForget',
-		    tooltip: gettext('Forget Snapshot'),
+		    getTip: (v, m, rec) => Ext.String.format(gettext("Permanently forget snapshot '{0}'"), v),
 		    getClass: (v, m, rec) => !rec.data.leaf && rec.parentNode.id !== 'root' ? 'fa critical fa-trash-o' : 'pmx-hidden',
 		    isDisabled: (v, r, c, i, rec) => rec.data.leaf || rec.parentNode.id === 'root',
 		},
 		{
 		    handler: 'downloadFile',
-		    tooltip: gettext('Download'),
+		    getTip: (v, m, rec) => Ext.String.format(gettext("Download '{0}'"), v),
 		    getClass: (v, m, rec) => rec.data.leaf && rec.data.filename ? 'fa fa-download' : 'pmx-hidden',
 		    isDisabled: (v, r, c, i, rec) => !rec.data.leaf || !rec.data.filename || rec.data['crypt-mode'] > 2,
 		},
@ -679,6 +756,13 @@ Ext.define('PBS.DataStoreContent', {
 	    iconCls: 'fa fa-refresh',
 	    handler: 'reload',
 	},
+	'-',
+	{
+	    xtype: 'proxmoxButton',
+	    text: gettext('Verify All'),
+	    confirmMsg: gettext('Do you want to verify all snapshots now?'),
+	    handler: 'verifyAll',
+	},
 	'->',
 	{
 	    xtype: 'tbtext',
www/DataStoreNotes.js | 104 (new file)
@ -0,0 +1,104 @@
Ext.define('PBS.DataStoreNotes', {
    extend: 'Ext.panel.Panel',
    xtype: 'pbsDataStoreNotes',
    mixins: ['Proxmox.Mixin.CBind'],

    title: gettext("Comment"),
    bodyStyle: 'white-space:pre',
    bodyPadding: 10,
    scrollable: true,
    animCollapse: false,

    cbindData: function(initialConfig) {
	let me = this;
	me.url = `/api2/extjs/config/datastore/${me.datastore}`;
	return { };
    },

    run_editor: function() {
	let me = this;
	let win = Ext.create('Proxmox.window.Edit', {
	    title: gettext('Comment'),
	    width: 600,
	    resizable: true,
	    layout: 'fit',
	    defaultButton: undefined,
	    items: {
		xtype: 'textfield',
		name: 'comment',
		value: '',
		hideLabel: true,
	    },
	    url: me.url,
	    listeners: {
		destroy: function() {
		    me.load();
		},
	    },
	}).show();
	win.load();
    },

    setNotes: function(value) {
	let me = this;
	var data = value || '';
	me.update(Ext.htmlEncode(data));

	if (me.collapsible && me.collapseMode === 'auto') {
	    me.setCollapsed(data === '');
	}
    },

    load: function() {
	var me = this;

	Proxmox.Utils.API2Request({
	    url: me.url,
	    waitMsgTarget: me,
	    failure: function(response, opts) {
		me.update(gettext('Error') + " " + response.htmlStatus);
		me.setCollapsed(false);
	    },
	    success: function(response, opts) {
		me.setNotes(response.result.data.comment);
	    },
	});
    },

    listeners: {
	render: function(c) {
	    var me = this;
	    me.getEl().on('dblclick', me.run_editor, me);
	},
	afterlayout: function() {
	    let me = this;
	    if (me.collapsible && !me.getCollapsed() && me.collapseMode === 'always') {
		me.setCollapsed(true);
		me.collapseMode = ''; // only once, on initial load!
	    }
	},
    },

    tools: [{
	type: 'gear',
	handler: function() {
	    this.up('panel').run_editor();
	},
    }],

    collapsible: true,
    collapseDirection: 'right',

    initComponent: function() {
	var me = this;

	me.callParent();

	let sp = Ext.state.Manager.getProvider();
	me.collapseMode = sp.get('notes-collapse', 'never');

	if (me.collapseMode === 'auto') {
	    me.setCollapsed(true);
	}
    },
});
@ -10,6 +10,25 @@ Ext.define('PBS.DataStorePanel', {
 	};
     },
+
+    stateId: 'pbs-datastore-panel',
+    stateful: true,
+
+    stateEvents: ['tabchange'],
+
+    applyState: function(state) {
+	let me = this;
+	if (state.tab !== undefined) {
+	    me.setActiveTab(state.tab);
+	}
+    },
+
+    getState: function() {
+	let me = this;
+	return {
+	    tab: me.getActiveTab().getItemId(),
+	};
+    },
+
     border: false,
     defaults: {
 	border: false,
@ -17,13 +36,43 @@ Ext.define('PBS.DataStorePanel', {
 
     items: [
 	{
-	    xtype: 'pbsDataStoreContent',
+	    xtype: 'pbsDataStoreSummary',
+	    title: gettext('Summary'),
+	    itemId: 'summary',
+	    iconCls: 'fa fa-book',
 	    cbind: {
 		datastore: '{datastore}',
 	    },
 	},
 	{
-	    xtype: 'pbsDataStoreStatistic',
+	    xtype: 'pbsDataStoreContent',
+	    itemId: 'content',
+	    iconCls: 'fa fa-th',
+	    cbind: {
+		datastore: '{datastore}',
+	    },
+	},
+	{
+	    title: gettext('Prune & GC'),
+	    xtype: 'pbsDataStorePruneAndGC',
+	    itemId: 'prunegc',
+	    iconCls: 'fa fa-trash-o',
+	    cbind: {
+		datastore: '{datastore}',
+	    },
+	},
+	{
+	    iconCls: 'fa fa-refresh',
+	    itemId: 'syncjobs',
+	    xtype: 'pbsSyncJobView',
+	    cbind: {
+		datastore: '{datastore}',
+	    },
+	},
+	{
+	    iconCls: 'fa fa-check-circle',
+	    itemId: 'verifyjobs',
+	    xtype: 'pbsVerifyJobView',
 	    cbind: {
 		datastore: '{datastore}',
 	    },
 	},
@ -31,6 +80,7 @@ Ext.define('PBS.DataStorePanel', {
 	{
 	    itemId: 'acl',
 	    xtype: 'pbsACLView',
+	    iconCls: 'fa fa-unlock',
 	    aclExact: true,
 	    cbind: {
 		aclPath: '{aclPath}',
www/DataStorePruneAndGC.js | 164 (new file)
@ -0,0 +1,164 @@
Ext.define('PBS.DataStorePruneAndGC', {
    extend: 'Proxmox.grid.ObjectGrid',
    alias: 'widget.pbsDataStorePruneAndGC',
    mixins: ['Proxmox.Mixin.CBind'],

    cbindData: function(initial) {
	let me = this;

	me.datastore = encodeURIComponent(me.datastore);
	me.url = `/api2/json/config/datastore/${me.datastore}`;
	me.editorConfig = {
	    url: `/api2/extjs/config/datastore/${me.datastore}`,
	};
	return {};
    },

    controller: {
	xclass: 'Ext.app.ViewController',

	edit: function() { this.getView().run_editor(); },

	garbageCollect: function() {
	    let me = this;
	    let view = me.getView();
	    Proxmox.Utils.API2Request({
		url: `/admin/datastore/${view.datastore}/gc`,
		method: 'POST',
		failure: function(response) {
		    Ext.Msg.alert(gettext('Error'), response.htmlStatus);
		},
		success: function(response, options) {
		    Ext.create('Proxmox.window.TaskViewer', {
			upid: response.result.data,
		    }).show();
		},
	    });
	},
    },

    tbar: [
	{
	    xtype: 'proxmoxButton',
	    text: gettext('Edit'),
	    disabled: true,
	    handler: 'edit',
	},
	'-',
	{
	    xtype: 'proxmoxButton',
	    text: gettext('Start Garbage Collection'),
	    selModel: null,
	    handler: 'garbageCollect',
	},
    ],

    listeners: {
	activate: function() { this.rstore.startUpdate(); },
	destroy: function() { this.rstore.stopUpdate(); },
	deactivate: function() { this.rstore.stopUpdate(); },
	itemdblclick: 'edit',
    },

    rows: {
	"gc-schedule": {
	    required: true,
	    defaultValue: Proxmox.Utils.NoneText,
	    header: gettext('Garbage Collection Schedule'),
	    editor: {
		xtype: 'proxmoxWindowEdit',
		title: gettext('GC Schedule'),
		items: {
		    xtype: 'pbsCalendarEvent',
		    name: 'gc-schedule',
		    fieldLabel: gettext("GC Schedule"),
		    emptyText: Proxmox.Utils.noneText,
		    deleteEmpty: true,
		},
	    },
	},
	"prune-schedule": {
	    required: true,
	    defaultValue: Proxmox.Utils.NoneText,
	    header: gettext('Prune Schedule'),
	    editor: {
		xtype: 'proxmoxWindowEdit',
		title: gettext('Prune Schedule'),
		items: {
		    xtype: 'pbsCalendarEvent',
		    name: 'prune-schedule',
		    fieldLabel: gettext("Prune Schedule"),
		    emptyText: Proxmox.Utils.noneText,
		    deleteEmpty: true,
		},
	    },
	},
	"keep-last": {
	    required: true,
	    header: gettext('Keep Last'),
	    editor: {
		xtype: 'proxmoxWindowEdit',
		title: gettext('Prune Options'),
		items: {
		    xtype: 'pbsPruneInputPanel',
		    isCreate: false,
		},
	    },
	},
	"keep-hourly": {
	    required: true,
	    header: gettext('Keep Hourly'),
	    editor: {
		xtype: 'proxmoxWindowEdit',
		title: gettext('Prune Options'),
		items: {
		    xtype: 'pbsPruneInputPanel',
		},
	    },
	},
	"keep-daily": {
	    required: true,
	    header: gettext('Keep Daily'),
	    editor: {
		xtype: 'proxmoxWindowEdit',
		title: gettext('Prune Options'),
		items: {
		    xtype: 'pbsPruneInputPanel',
		},
	    },
	},
	"keep-weekly": {
	    required: true,
	    header: gettext('Keep Weekly'),
	    editor: {
		xtype: 'proxmoxWindowEdit',
		title: gettext('Prune Options'),
		items: {
		    xtype: 'pbsPruneInputPanel',
		},
	    },
	},
	"keep-monthly": {
	    required: true,
	    header: gettext('Keep Monthly'),
	    editor: {
		xtype: 'proxmoxWindowEdit',
		title: gettext('Prune Options'),
		items: {
		    xtype: 'pbsPruneInputPanel',
		},
	    },
	},
	"keep-yearly": {
	    required: true,
	    header: gettext('Keep Yearly'),
	    editor: {
		xtype: 'proxmoxWindowEdit',
		title: gettext('Prune Options'),
		items: {
		    xtype: 'pbsPruneInputPanel',
		},
	    },
	},
    },
});
@ -1,104 +0,0 @@ (file deleted)
Ext.define('pve-rrd-datastore', {
    extend: 'Ext.data.Model',
    fields: [
	'used',
	'total',
	'read_ios',
	'read_bytes',
	'write_ios',
	'write_bytes',
	'io_ticks',
	{
	    name: 'io_delay', calculate: function(data) {
		let ios = 0;
		if (data.read_ios !== undefined) { ios += data.read_ios; }
		if (data.write_ios !== undefined) { ios += data.write_ios; }
		if (data.io_ticks === undefined) {
		    return undefined;
		} else if (ios === 0) {
		    return 0;
		}
		return (data.io_ticks*1000.0)/ios;
	    },
	},
	{ type: 'date', dateFormat: 'timestamp', name: 'time' },
    ],
});

Ext.define('PBS.DataStoreStatistic', {
    extend: 'Ext.panel.Panel',
    alias: 'widget.pbsDataStoreStatistic',

    title: gettext('Statistics'),

    scrollable: true,

    initComponent: function() {
	var me = this;

	if (!me.datastore) {
	    throw "no datastore specified";
	}

	me.tbar = ['->', { xtype: 'proxmoxRRDTypeSelector' }];

	var rrdstore = Ext.create('Proxmox.data.RRDStore', {
	    rrdurl: "/api2/json/admin/datastore/" + me.datastore + "/rrd",
	    model: 'pve-rrd-datastore',
	});

	me.items = {
	    xtype: 'container',
	    itemId: 'itemcontainer',
	    layout: 'column',
	    minWidth: 700,
	    defaults: {
		minHeight: 320,
		padding: 5,
		columnWidth: 1,
	    },
	    items: [
		{
		    xtype: 'proxmoxRRDChart',
		    title: gettext('Storage usage (bytes)'),
		    fields: ['total', 'used'],
		    fieldTitles: [gettext('Total'), gettext('Storage usage')],
		    store: rrdstore,
		},
		{
		    xtype: 'proxmoxRRDChart',
		    title: gettext('Transfer Rate (bytes/second)'),
		    fields: ['read_bytes', 'write_bytes'],
		    fieldTitles: [gettext('Read'), gettext('Write')],
		    store: rrdstore,
		},
		{
		    xtype: 'proxmoxRRDChart',
		    title: gettext('Input/Output Operations per Second (IOPS)'),
		    fields: ['read_ios', 'write_ios'],
		    fieldTitles: [gettext('Read'), gettext('Write')],
		    store: rrdstore,
		},
		{
		    xtype: 'proxmoxRRDChart',
		    title: gettext('IO Delay (ms)'),
		    fields: ['io_delay'],
		    fieldTitles: [gettext('IO Delay')],
		    store: rrdstore,
		},
	    ],
	};

	me.listeners = {
	    activate: function() {
		rrdstore.startUpdate();
	    },
	    destroy: function() {
		rrdstore.stopUpdate();
	    },
	};

	me.callParent();
    },
});
www/DataStoreSummary.js | 296 (new file)
@ -0,0 +1,296 @@
Ext.define('pve-rrd-datastore', {
    extend: 'Ext.data.Model',
    fields: [
	'used',
	'total',
	'read_ios',
	'read_bytes',
	'write_ios',
	'write_bytes',
	'io_ticks',
	{
	    name: 'io_delay', calculate: function(data) {
		let ios = 0;
		if (data.read_ios !== undefined) { ios += data.read_ios; }
		if (data.write_ios !== undefined) { ios += data.write_ios; }
		if (data.io_ticks === undefined) {
		    return undefined;
		} else if (ios === 0) {
		    return 0;
		}
		return (data.io_ticks*1000.0)/ios;
	    },
	},
	{ type: 'date', dateFormat: 'timestamp', name: 'time' },
    ],
});

Ext.define('PBS.DataStoreInfo', {
    extend: 'Ext.panel.Panel',
    alias: 'widget.pbsDataStoreInfo',

    viewModel: {
	data: {
	    countstext: '',
	    usage: {},
	    stillbad: 0,
	    removedbytes: 0,
	    mountpoint: "",
	},
    },

    controller: {
	xclass: 'Ext.app.ViewController',

	onLoad: function(store, data, success) {
	    if (!success) return;
	    let me = this;
	    let vm = me.getViewModel();

	    let counts = store.getById('counts').data.value;
	    let storage = store.getById('storage').data.value;

	    let used = Proxmox.Utils.format_size(storage.used);
	    let total = Proxmox.Utils.format_size(storage.total);
	    let percent = 100*storage.used/storage.total;
	    if (storage.total === 0) {
		percent = 0;
	    }
	    let used_percent = `${percent.toFixed(2)}%`;

	    let usage = used_percent + ' (' +
		Ext.String.format(gettext('{0} of {1}'),
				  used, total) + ')';
	    vm.set('usagetext', usage);
	    vm.set('usage', storage.used/storage.total);

	    let gcstatus = store.getById('gc-status').data.value;

	    let dedup = (gcstatus['index-data-bytes'] || 0)/
			(gcstatus['disk-bytes'] || Infinity);

	    let countstext = function(count) {
		return `${count[0]} ${gettext('Groups')}, ${count[1]} ${gettext('Snapshots')}`;
	    };

	    vm.set('ctcount', countstext(counts.ct || [0, 0]));
	    vm.set('vmcount', countstext(counts.vm || [0, 0]));
	    vm.set('hostcount', countstext(counts.host || [0, 0]));
	    vm.set('deduplication', dedup.toFixed(2));
	    vm.set('stillbad', gcstatus['still-bad']);
	    vm.set('removedbytes', Proxmox.Utils.format_size(gcstatus['removed-bytes']));
	},

	startStore: function() { this.store.startUpdate(); },
	stopStore: function() { this.store.stopUpdate(); },

	init: function(view) {
	    let me = this;
	    let datastore = encodeURIComponent(view.datastore);
	    me.store = Ext.create('Proxmox.data.ObjectStore', {
		interval: 5*1000,
		url: `/api2/json/admin/datastore/${datastore}/status`,
	    });
	    me.store.on('load', me.onLoad, me);
	},
    },

    listeners: {
	activate: 'startStore',
	destroy: 'stopStore',
	deactivate: 'stopStore',
    },

    defaults: {
	xtype: 'pmxInfoWidget',
    },

    bodyPadding: 20,

    items: [
	{
	    iconCls: 'fa fa-fw fa-hdd-o',
	    title: gettext('Usage'),
	    bind: {
		data: {
		    usage: '{usage}',
		    text: '{usagetext}',
		},
	    },
	},
	{
	    xtype: 'box',
	    html: `<b>${gettext('Backup Count')}</b>`,
	    padding: '10 0 5 0',
	},
	{
	    iconCls: 'fa fa-fw fa-cube',
	    title: gettext('CT'),
	    printBar: false,
	    bind: {
		data: {
		    text: '{ctcount}',
		},
	    },
	},
	{
	    iconCls: 'fa fa-fw fa-building',
	    title: gettext('Host'),
	    printBar: false,
	    bind: {
		data: {
		    text: '{hostcount}',
		},
	    },
	},
	{
	    iconCls: 'fa fa-fw fa-desktop',
	    title: gettext('VM'),
	    printBar: false,
	    bind: {
		data: {
		    text: '{vmcount}',
		},
	    },
	},
	{
	    xtype: 'box',
	    html: `<b>${gettext('Stats from last Garbage Collection')}</b>`,
	    padding: '10 0 5 0',
	},
	{
	    iconCls: 'fa fa-fw fa-compress',
	    title: gettext('Deduplication Factor'),
	    printBar: false,
	    bind: {
		data: {
		    text: '{deduplication}',
		},
	    },
	},
	{
	    iconCls: 'fa fa-fw fa-trash-o',
	    title: gettext('Removed Bytes'),
	    printBar: false,
	    bind: {
		data: {
		    text: '{removedbytes}',
		},
	    },
	},
	{
	    iconCls: 'fa critical fa-fw fa-exclamation-triangle',
	    title: gettext('Bad Chunks'),
	    printBar: false,
	    bind: {
		data: {
		    text: '{stillbad}',
		},
		visible: '{stillbad}',
	    },
	},
    ],
});

Ext.define('PBS.DataStoreSummary', {
    extend: 'Ext.panel.Panel',
    alias: 'widget.pbsDataStoreSummary',
    mixins: ['Proxmox.Mixin.CBind'],

    layout: 'column',
    scrollable: true,

    bodyPadding: 5,
    defaults: {
	columnWidth: 1,
	padding: 5,
    },

    tbar: ['->', { xtype: 'proxmoxRRDTypeSelector' }],

    items: [
	{
	    xtype: 'container',
	    height: 300,
	    layout: {
		type: 'hbox',
		align: 'stretch',
	    },
	    items: [
		{
		    xtype: 'pbsDataStoreInfo',
		    flex: 1,
		    padding: '0 10 0 0',
		    cbind: {
			title: '{datastore}',
			datastore: '{datastore}',
		    },
		},
		{
		    xtype: 'pbsDataStoreNotes',
		    flex: 1,
		    cbind: {
			datastore: '{datastore}',
		    },
		},
	    ],
	},
	{
	    xtype: 'proxmoxRRDChart',
	    title: gettext('Storage usage (bytes)'),
	    fields: ['total', 'used'],
	    fieldTitles: [gettext('Total'), gettext('Storage usage')],
	},
	{
	    xtype: 'proxmoxRRDChart',
	    title: gettext('Transfer Rate (bytes/second)'),
	    fields: ['read_bytes', 'write_bytes'],
	    fieldTitles: [gettext('Read'), gettext('Write')],
	},
	{
	    xtype: 'proxmoxRRDChart',
	    title: gettext('Input/Output Operations per Second (IOPS)'),
	    fields: ['read_ios', 'write_ios'],
	    fieldTitles: [gettext('Read'), gettext('Write')],
	},
	{
	    xtype: 'proxmoxRRDChart',
	    title: gettext('IO Delay (ms)'),
	    fields: ['io_delay'],
	    fieldTitles: [gettext('IO Delay')],
	},
    ],

    listeners: {
	activate: function() { this.rrdstore.startUpdate(); },
	deactivate: function() { this.rrdstore.stopUpdate(); },
	destroy: function() { this.rrdstore.stopUpdate(); },
    },

    initComponent: function() {
	let me = this;

	me.rrdstore = Ext.create('Proxmox.data.RRDStore', {
	    rrdurl: "/api2/json/admin/datastore/" + me.datastore + "/rrd",
	    model: 'pve-rrd-datastore',
	});

	me.callParent();

	Proxmox.Utils.API2Request({
	    url: `/config/datastore/${me.datastore}`,
	    waitMsgTarget: me.down('pbsDataStoreInfo'),
	    success: function(response) {
		let path = Ext.htmlEncode(response.result.data.path);
		me.down('pbsDataStoreInfo').setTitle(`${me.datastore} (${path})`);
		me.down('pbsDataStoreNotes').setNotes(response.result.data.comment);
	    },
	});

	me.query('proxmoxRRDChart').forEach((chart) => {
	    chart.setStore(me.rrdstore);
	});

	me.down('pbsDataStoreInfo').relayEvents(me, ['activate', 'deactivate']);
    },
});
@ -67,46 +67,48 @@ Ext.define('PBS.MainView', {
 	    var contentpanel = me.lookupReference('contentpanel');
 	    var lastpanel = contentpanel.getLayout().getActiveItem();
+
+	    let tabChangeListener = function(tp, newc, oldc) {
+		let newpath = path;
+
+		// only add the subpath part for the
+		// non-default tabs
+		if (tp.items.findIndex('id', newc.id) !== 0) {
+		    newpath += `:${newc.getItemId()}`;
+		}
+
+		me.redirectTo(newpath);
+	    };
+
+	    let xtype = path;
 	    var obj;
+	    let datastore;
 	    if (PBS.Utils.isDataStorePath(path)) {
-		let datastore = PBS.Utils.getDataStoreFromPath(path);
-		obj = contentpanel.add({
-		    xtype: 'pbsDataStorePanel',
-		    nodename: 'localhost',
-		    datastore,
-		});
-	    } else {
-		obj = contentpanel.add({
-		    xtype: path,
-		    nodename: 'localhost',
-		    border: false,
-		});
+		datastore = PBS.Utils.getDataStoreFromPath(path);
+		if (lastpanel && lastpanel.xtype === 'pbsDataStorePanel' && !subpath) {
+		    let activeTab = lastpanel.getActiveTab();
+		    let newpath = path;
+		    if (lastpanel.items.indexOf(activeTab) !== 0) {
+			subpath = activeTab.getItemId();
+			newpath += `:${subpath}`;
+		    }
+		    me.redirectTo(newpath);
+		}
+		xtype = 'pbsDataStorePanel';
 	    }
+
+	    obj = contentpanel.add({
+		xtype,
+		datastore,
+		nodename: 'localhost',
+		border: false,
+		activeTab: subpath || 0,
+		listeners: {
+		    tabchange: tabChangeListener,
+		},
+	    });
+
 	    var treelist = me.lookupReference('navtree');
 
-	    treelist.suspendEvents();
-	    if (subpath === undefined) {
-		treelist.select(path);
-	    } else {
-		treelist.select(path + ':' + subpath);
-	    }
-	    treelist.resumeEvents();
-
-	    if (Ext.isFunction(obj.setActiveTab)) {
-		obj.setActiveTab(subpath || 0);
-		obj.addListener('tabchange', function(tabpanel, newc, oldc) {
-		    var newpath = path;
-
-		    // only add the subpath part for the
-		    // non-default tabs
-		    if (tabpanel.items.findIndex('id', newc.id) !== 0) {
-			newpath += ":" + newc.getItemId();
-		    }
-
-		    me.redirectTo(newpath);
-		});
-	    }
+	    treelist.select(path, true);
 
 	    contentpanel.setActiveItem(obj);
www/Makefile | 22
@ -16,17 +16,19 @@ JSSRC= \
 	config/RemoteView.js \
 	config/ACLView.js \
 	config/SyncView.js \
-	config/DataStoreConfig.js \
-	window/UserEdit.js \
-	window/UserPassword.js \
+	config/VerifyView.js \
+	window/ACLEdit.js \
+	window/BackupFileDownloader.js \
+	window/CreateDirectory.js \
+	window/DataStoreEdit.js \
+	window/FileBrowser.js \
+	window/NotesEdit.js \
 	window/RemoteEdit.js \
 	window/SyncJobEdit.js \
-	window/ACLEdit.js \
-	window/DataStoreEdit.js \
-	window/CreateDirectory.js \
+	window/UserEdit.js \
+	window/UserPassword.js \
+	window/VerifyJobEdit.js \
 	window/ZFSCreate.js \
-	window/FileBrowser.js \
-	window/BackupFileDownloader.js \
 	dashboard/DataStoreStatistics.js \
 	dashboard/LongestTasks.js \
 	dashboard/RunningTasks.js \
@ -38,8 +40,10 @@ JSSRC= \
 	VersionInfo.js \
 	SystemConfiguration.js \
 	Subscription.js \
+	DataStoreSummary.js \
+	DataStoreNotes.js \
+	DataStorePruneAndGC.js \
 	DataStorePrune.js \
-	DataStoreStatistic.js \
 	DataStoreContent.js \
 	DataStorePanel.js \
 	ServerStatus.js \
@@ -1,3 +1,13 @@
+Ext.define('pbs-datastore-list', {
+    extend: 'Ext.data.Model',
+    fields: ['name', 'comment'],
+    proxy: {
+        type: 'proxmox',
+        url: "/api2/json/admin/datastore",
+    },
+    idProperty: 'store',
+});
+
 Ext.define('PBS.store.NavigationStore', {
     extend: 'Ext.data.TreeStore',
 
@@ -36,12 +46,6 @@ Ext.define('PBS.store.NavigationStore', {
                 path: 'pbsRemoteView',
                 leaf: true,
             },
-            {
-                text: gettext('Sync Jobs'),
-                iconCls: 'fa fa-refresh',
-                path: 'pbsSyncJobView',
-                leaf: true,
-            },
             {
                 text: gettext('Subscription'),
                 iconCls: 'fa fa-support',
@@ -82,9 +86,18 @@ Ext.define('PBS.store.NavigationStore', {
             {
                 text: gettext('Datastore'),
                 iconCls: 'fa fa-archive',
-                path: 'pbsDataStoreConfig',
+                id: 'datastores',
                 expanded: true,
+                expandable: false,
                 leaf: false,
+                children: [
+                    {
+                        text: gettext('Add Datastore'),
+                        iconCls: 'fa fa-plus-circle',
+                        leaf: true,
+                        id: 'addbutton',
+                    },
+                ],
             },
         ],
     },
@@ -116,21 +129,23 @@ Ext.define('PBS.view.main.NavigationTree', {
 
         let root = view.getStore().getRoot();
 
-        // FIXME: newly added always get appended to the end..
-        records.sort((a, b) => {
-            if (a.id > b.id) return 1;
-            if (a.id < b.id) return -1;
-            return 0;
-        });
+        records.sort((a, b) => a.id.localeCompare(b.id));
 
-        var list = root.findChild('path', 'pbsDataStoreConfig', false);
+        var list = root.findChild('id', 'datastores', false);
         var length = records.length;
         var lookup_hash = {};
-        for (var i = 0; i < length; i++) {
+        let j = 0;
+        for (let i = 0; i < length; i++) {
             let name = records[i].id;
             lookup_hash[name] = true;
-            if (!list.findChild('text', name, false)) {
-                list.appendChild({
+
+            while (name.localeCompare(list.getChildAt(j).data.text) > 0 &&
+                   (j + 1) < list.childNodes.length) {
+                j++;
+            }
+
+            if (list.getChildAt(j).data.text.localeCompare(name) !== 0) {
+                list.insertChild(j, {
                     text: name,
                     path: `DataStore-${name}`,
                     iconCls: 'fa fa-database',
@@ -142,19 +157,55 @@ Ext.define('PBS.view.main.NavigationTree', {
         var erase_list = [];
         list.eachChild(function(node) {
             let name = node.data.text;
-            if (!lookup_hash[name]) {
+            if (!lookup_hash[name] && node.data.id !== 'addbutton') {
                 erase_list.push(node);
             }
         });
 
-        Ext.Array.forEach(erase_list, function(node) { node.erase(); });
+        Ext.Array.forEach(erase_list, function(node) { list.removeChild(node, true); });
+
+        if (view.pathToSelect !== undefined) {
+            let path = view.pathToSelect;
+            delete view.pathToSelect;
+            view.select(path, true);
+        }
     },
     },
 
-    select: function(path) {
+    listeners: {
+        itemclick: function(tl, info) {
+            if (info.node.data.id === 'datastores') {
+                return false;
+            }
+            if (info.node.data.id === 'addbutton') {
+                let me = this;
+                Ext.create('PBS.DataStoreEdit', {
+                    listeners: {
+                        destroy: function() {
+                            me.rstore.reload();
+                        },
+                    },
+                }).show();
+                return false;
+            }
+            return true;
+        },
+    },
+
+    select: function(path, silent) {
         var me = this;
-        var item = me.getStore().findRecord('path', path, 0, false, true, true);
-        me.setSelection(item);
+        if (me.rstore.isLoaded()) {
+            if (silent) {
+                me.suspendEvents(false);
+            }
+            var item = me.getStore().findRecord('path', path, 0, false, true, true);
+            me.setSelection(item);
+            if (silent) {
+                me.resumeEvents(true);
+            }
+        } else {
+            me.pathToSelect = path;
+        }
     },
 
     animation: false,

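Note on the onLoad rework above: instead of appending new datastores and living with unsorted order, the handler now advances an index j past every existing child that compares lower, then inserts only when no child with that name exists yet. A minimal standalone sketch of the same walk on a plain array (datastore names below are made up):

// Sorted-insert sketch on a plain array; list.getChildAt(j) and
// list.insertChild(j, ...) play the same role on the Ext tree node.
let children = ['backup1', 'backup3'];
function insertSorted(name) {
    let j = 0;
    while (name.localeCompare(children[j]) > 0 && (j + 1) < children.length) {
        j++;
    }
    if (children[j].localeCompare(name) !== 0) {
        children.splice(j, 0, name);
    }
}
insertSorted('backup2'); // children: ['backup1', 'backup2', 'backup3']
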
@@ -20,7 +20,6 @@ Ext.define('PBS.Subscription', {
     xtype: 'pbsSubscription',
 
     title: gettext('Subscription'),
 
     border: true,
-
     onlineHelp: 'getting_help',
@@ -30,24 +29,12 @@ Ext.define('PBS.Subscription', {
     },
 
     initComponent: function() {
-        var me = this;
+        let me = this;
 
-        var reload = function() {
-            me.rstore.load();
-        };
-
-        var baseurl = '/nodes/localhost/subscription';
-
-        var render_status = function(value) {
-            var message = me.getObjectValue('message');
-
-            if (message) {
-                return value + ": " + message;
-            }
-            return value;
-        };
-
-        var rows = {
+        let reload = () => me.rstore.load();
+        let baseurl = '/nodes/localhost/subscription';
+
+        let rows = {
             productname: {
                 header: gettext('Type'),
             },
@@ -56,7 +43,14 @@ Ext.define('PBS.Subscription', {
             },
             status: {
                 header: gettext('Status'),
-                renderer: render_status,
+                renderer: (value) => {
+                    value = Ext.String.capitalize(value);
+                    let message = me.getObjectValue('message');
+                    if (message) {
+                        return value + ": " + message;
+                    }
+                    return value;
+                },
             },
             message: {
                 visible: false,
@@ -64,9 +58,6 @@ Ext.define('PBS.Subscription', {
             serverid: {
                 header: gettext('Server ID'),
             },
-            sockets: {
-                header: gettext('Sockets'),
-            },
             checktime: {
                 header: gettext('Last checked'),
                 renderer: Proxmox.Utils.render_timestamp,
@@ -77,13 +68,13 @@ Ext.define('PBS.Subscription', {
         };
 
         Ext.apply(me, {
-            url: '/api2/json' + baseurl,
+            url: `/api2/json${baseurl}`,
             cwidth1: 170,
             tbar: [
                 {
                     text: gettext('Upload Subscription Key'),
                     handler: function() {
-                        var win = Ext.create('PBS.SubscriptionKeyEdit', {
+                        let win = Ext.create('PBS.SubscriptionKeyEdit', {
                             url: '/api2/extjs/' + baseurl,
                         });
                         win.show();
@@ -105,6 +96,16 @@ Ext.define('PBS.Subscription', {
                         });
                     },
                 },
+                {
+                    text: gettext('Remove Subscription'),
+                    xtype: 'proxmoxStdRemoveButton',
+                    confirmMsg: gettext('Are you sure you want to remove the subscription key?'),
+                    baseurl: baseurl,
+                    dangerous: true,
+                    selModel: false,
+                    callback: reload,
+                },
+                //'-',
             ],
             rows: rows,
         });

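One detail worth noting above: the status renderer became an inline arrow function so it can keep using the enclosing initComponent's `me` to read the sibling 'message' row at render time. A sketch of the resulting output (message text below is made up):

// Sketch of the renderer's behaviour; the message value is invented.
let render_status = function(value, message) {
    value = Ext.String.capitalize(value); // e.g. 'new' -> 'New'
    return message ? value + ": " + message : value;
};
// render_status('new', 'There is no subscription key')
//   -> "New: There is no subscription key"
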
@@ -91,9 +91,12 @@ Ext.define('PBS.Utils', {
         Proxmox.Utils.override_task_descriptions({
             garbage_collection: ['Datastore', gettext('Garbage collect')],
             sync: ['Datastore', gettext('Remote Sync')],
+            verify: ['Datastore', gettext('Verification')],
+            verify_group: ['Group', gettext('Verification')],
+            verify_snapshot: ['Snapshot', gettext('Verification')],
             syncjob: [gettext('Sync Job'), gettext('Remote Sync')],
+            verifyjob: [gettext('Verify Job'), gettext('Scheduled Verification')],
             prune: (type, id) => PBS.Utils.render_datastore_worker_id(id, gettext('Prune')),
-            verify: (type, id) => PBS.Utils.render_datastore_worker_id(id, gettext('Verify')),
             backup: (type, id) => PBS.Utils.render_datastore_worker_id(id, gettext('Backup')),
             reader: (type, id) => PBS.Utils.render_datastore_worker_id(id, gettext('Read objects')),
             logrotate: [gettext('Log'), gettext('Rotation')],

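The task-description map above mixes two entry shapes: plain [prefix, text] pairs, and functions receiving (type, id) for workers whose ID encodes the datastore. A sketch of both shapes, not a verbatim copy of the file:

// Sketch of the two entry shapes accepted by override_task_descriptions().
Proxmox.Utils.override_task_descriptions({
    // pair form: static prefix plus translated description
    verifyjob: [gettext('Verify Job'), gettext('Scheduled Verification')],
    // function form: format the datastore-encoded worker ID yourself
    prune: (type, id) => PBS.Utils.render_datastore_worker_id(id, gettext('Prune')),
});
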
@@ -1,227 +0,0 @@
-Ext.define('pbs-datastore-list', {
-    extend: 'Ext.data.Model',
-    fields: ['name', 'comment'],
-    proxy: {
-        type: 'proxmox',
-        url: "/api2/json/admin/datastore",
-    },
-    idProperty: 'store',
-});
-
-Ext.define('pbs-data-store-config', {
-    extend: 'Ext.data.Model',
-    fields: [
-        'name', 'path', 'comment', 'gc-schedule', 'prune-schedule',
-        'verify-schedule', 'keep-last', 'keep-hourly', 'keep-daily',
-        'keep-weekly', 'keep-monthly', 'keep-yearly',
-    ],
-    proxy: {
-        type: 'proxmox',
-        url: "/api2/json/config/datastore",
-    },
-    idProperty: 'name',
-});
-
-Ext.define('PBS.DataStoreConfig', {
-    extend: 'Ext.grid.GridPanel',
-    alias: 'widget.pbsDataStoreConfig',
-
-    title: gettext('Datastore Configuration'),
-
-    controller: {
-        xclass: 'Ext.app.ViewController',
-
-        createDataStore: function() {
-            let me = this;
-            Ext.create('PBS.DataStoreEdit', {
-                listeners: {
-                    destroy: function() {
-                        me.reload();
-                    },
-                },
-            }).show();
-        },
-
-        editDataStore: function() {
-            let me = this;
-            let view = me.getView();
-            let selection = view.getSelection();
-            if (selection.length < 1) return;
-
-            let name = encodeURIComponent(selection[0].data.name);
-            Ext.create('PBS.DataStoreEdit', {
-                name: name,
-                listeners: {
-                    destroy: function() {
-                        me.reload();
-                    },
-                },
-            }).show();
-        },
-
-        onVerify: function() {
-            var view = this.getView();
-
-            let rec = view.selModel.getSelection()[0];
-            if (!(rec && rec.data)) return;
-            let data = rec.data;
-
-            Proxmox.Utils.API2Request({
-                url: `/admin/datastore/${data.name}/verify`,
-                method: 'POST',
-                failure: function(response) {
-                    Ext.Msg.alert(gettext('Error'), response.htmlStatus);
-                },
-                success: function(response, options) {
-                    Ext.create('Proxmox.window.TaskViewer', {
-                        upid: response.result.data,
-                    }).show();
-                },
-            });
-        },
-
-        garbageCollect: function() {
-            let me = this;
-            let view = me.getView();
-            let selection = view.getSelection();
-            if (selection.length < 1) return;
-
-            let name = encodeURIComponent(selection[0].data.name);
-            Proxmox.Utils.API2Request({
-                url: `/admin/datastore/${name}/gc`,
-                method: 'POST',
-                failure: function(response) {
-                    Ext.Msg.alert(gettext('Error'), response.htmlStatus);
-                },
-                success: function(response, options) {
-                    Ext.create('Proxmox.window.TaskViewer', {
-                        upid: response.result.data,
-                    }).show();
-                },
-            });
-        },
-
-        reload: function() { this.getView().getStore().rstore.load(); },
-
-        init: function(view) {
-            Proxmox.Utils.monStoreErrors(view, view.getStore().rstore);
-        },
-    },
-
-    store: {
-        type: 'diff',
-        autoDestroy: true,
-        autoDestroyRstore: true,
-        sorters: 'name',
-        rstore: {
-            type: 'update',
-            storeid: 'pbs-data-store-config',
-            model: 'pbs-data-store-config',
-            autoStart: true,
-            interval: 10000,
-        },
-    },
-
-    tbar: [
-        {
-            xtype: 'proxmoxButton',
-            selModel: false,
-            text: gettext('Create'),
-            handler: 'createDataStore',
-        },
-        {
-            xtype: 'proxmoxButton',
-            text: gettext('Edit'),
-            disabled: true,
-            handler: 'editDataStore',
-        },
-        // remove_btn
-        '-',
-        {
-            xtype: 'proxmoxButton',
-            text: gettext('Verify'),
-            disabled: true,
-            handler: 'onVerify',
-        },
-        {
-            xtype: 'proxmoxButton',
-            text: gettext('Start GC'),
-            disabled: true,
-            handler: 'garbageCollect',
-        },
-    ],
-
-    columns: [
-        {
-            header: gettext('Name'),
-            sortable: true,
-            dataIndex: 'name',
-            flex: 1,
-        },
-        {
-            header: gettext('Path'),
-            sortable: true,
-            dataIndex: 'path',
-            flex: 1,
-        },
-        {
-            header: gettext('GC Schedule'),
-            sortable: false,
-            width: 120,
-            dataIndex: 'gc-schedule',
-        },
-        {
-            header: gettext('Prune Schedule'),
-            sortable: false,
-            width: 120,
-            dataIndex: 'prune-schedule',
-        },
-        {
-            header: gettext('Keep'),
-            columns: [
-                {
-                    text: gettext('Last'),
-                    dataIndex: 'keep-last',
-                    width: 70,
-                },
-                {
-                    text: gettext('Hourly'),
-                    dataIndex: 'keep-hourly',
-                    width: 70,
-                },
-                {
-                    text: gettext('Daily'),
-                    dataIndex: 'keep-daily',
-                    width: 70,
-                },
-                {
-                    text: gettext('Weekly'),
-                    dataIndex: 'keep-weekly',
-                    width: 70,
-                },
-                {
-                    text: gettext('Monthly'),
-                    dataIndex: 'keep-monthly',
-                    width: 70,
-                },
-                {
-                    text: gettext('Yearly'),
-                    dataIndex: 'keep-yearly',
-                    width: 70,
-                },
-            ],
-        },
-        {
-            header: gettext('Comment'),
-            sortable: false,
-            dataIndex: 'comment',
-            renderer: Ext.String.htmlEncode,
-            flex: 2,
-        },
-    ],
-
-    listeners: {
-        activate: 'reload',
-        itemdblclick: 'editDataStore',
-    },
-});

@@ -12,6 +12,7 @@ Ext.define('pbs-sync-jobs-status', {
                 return endtime - task.starttime;
             },
         },
+        'comment',
     ],
     idProperty: 'id',
     proxy: {
@@ -34,7 +35,9 @@ Ext.define('PBS.config.SyncJobView', {
 
         addSyncJob: function() {
             let me = this;
+            let view = me.getView();
             Ext.create('PBS.window.SyncJobEdit', {
+                datastore: view.datastore,
                 listeners: {
                     destroy: function() {
                         me.reload();
@@ -50,6 +53,7 @@ Ext.define('PBS.config.SyncJobView', {
             if (selection.length < 1) return;
 
             Ext.create('PBS.window.SyncJobEdit', {
+                datastore: view.datastore,
                 id: selection[0].data.id,
                 listeners: {
                     destroy: function() {
@@ -147,15 +151,22 @@ Ext.define('PBS.config.SyncJobView', {
             return Proxmox.Utils.render_timestamp(value);
         },
 
+        startStore: function() { this.getView().getStore().rstore.startUpdate(); },
+        stopStore: function() { this.getView().getStore().rstore.stopUpdate(); },
+
         reload: function() { this.getView().getStore().rstore.load(); },
 
         init: function(view) {
+            view.getStore().rstore.getProxy().setExtraParams({
+                store: view.datastore,
+            });
             Proxmox.Utils.monStoreErrors(view, view.getStore().rstore);
         },
     },
 
     listeners: {
-        activate: 'reload',
+        activate: 'startStore',
+        deactivate: 'stopStore',
         itemdblclick: 'editSyncJob',
     },
 
@@ -168,7 +179,6 @@ Ext.define('PBS.config.SyncJobView', {
             type: 'update',
             storeid: 'pbs-sync-jobs-status',
             model: 'pbs-sync-jobs-status',
-            autoStart: true,
             interval: 5000,
         },
     },
@@ -194,7 +204,7 @@ Ext.define('PBS.config.SyncJobView', {
         '-',
         {
             xtype: 'proxmoxButton',
-            text: gettext('Log'),
+            text: gettext('Show Log'),
             handler: 'openTaskLog',
             enableFn: (rec) => !!rec.data['last-run-upid'],
             disabled: true,
@@ -214,66 +224,68 @@ Ext.define('PBS.config.SyncJobView', {
     columns: [
         {
             header: gettext('Sync Job'),
-            width: 100,
-            sortable: true,
-            renderer: Ext.String.htmlEncode,
             dataIndex: 'id',
+            renderer: Ext.String.htmlEncode,
+            flex: 2,
+            sortable: true,
+            hidden: true,
         },
         {
             header: gettext('Remote'),
-            width: 100,
-            sortable: true,
             dataIndex: 'remote',
+            flex: 2,
+            sortable: true,
         },
         {
             header: gettext('Remote Store'),
-            width: 100,
-            sortable: true,
             dataIndex: 'remote-store',
+            flex: 2,
+            sortable: true,
        },
        {
             header: gettext('Local Store'),
-            width: 100,
-            sortable: true,
             dataIndex: 'store',
+            flex: 2,
+            sortable: true,
        },
        {
             header: gettext('Schedule'),
-            sortable: true,
             dataIndex: 'schedule',
+            flex: 2,
+            sortable: true,
        },
        {
             header: gettext('Status'),
             dataIndex: 'last-run-state',
-            flex: 1,
             renderer: 'render_sync_status',
+            flex: 4,
        },
        {
             header: gettext('Last Sync'),
-            sortable: true,
-            minWidth: 200,
-            renderer: 'render_optional_timestamp',
             dataIndex: 'last-run-endtime',
+            renderer: 'render_optional_timestamp',
+            flex: 3,
+            sortable: true,
        },
        {
             text: gettext('Duration'),
             dataIndex: 'duration',
-            width: 60,
             renderer: Proxmox.Utils.render_duration,
+            flex: 2,
        },
        {
             header: gettext('Next Run'),
-            sortable: true,
-            minWidth: 200,
-            renderer: 'render_next_run',
             dataIndex: 'next-run',
+            renderer: 'render_next_run',
+            flex: 3,
+            sortable: true,
        },
        {
             header: gettext('Comment'),
-            hidden: true,
-            sortable: true,
-            renderer: Ext.String.htmlEncode,
             dataIndex: 'comment',
+            renderer: Ext.String.htmlEncode,
+            flex: 4,
+            sortable: true,
        },
     ],
 });

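The activate/startStore and deactivate/stopStore wiring above is the standard pattern for grids backed by a Proxmox update store: polling only runs while the panel is actually visible, instead of autoStart keeping it alive in the background. A condensed sketch of just that wiring (the class name below is made up):

// Condensed sketch of the poll-while-visible pattern; class name is invented.
Ext.define('Sketch.JobGrid', {
    extend: 'Ext.grid.GridPanel',
    controller: {
        xclass: 'Ext.app.ViewController',
        startStore: function() { this.getView().getStore().rstore.startUpdate(); },
        stopStore: function() { this.getView().getStore().rstore.stopUpdate(); },
    },
    listeners: {
        activate: 'startStore',  // tab shown: begin periodic reloads
        deactivate: 'stopStore', // tab hidden: stop polling
    },
});
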
www/config/VerifyView.js (new file, 287 lines)
@@ -0,0 +1,287 @@
+Ext.define('pbs-verify-jobs-status', {
+    extend: 'Ext.data.Model',
+    fields: [
+        'id', 'store', 'outdated-after', 'ignore-verified', 'schedule',
+        'next-run', 'last-run-upid', 'last-run-state', 'last-run-endtime',
+        {
+            name: 'duration',
+            calculate: function(data) {
+                let endtime = data['last-run-endtime'];
+                if (!endtime) return undefined;
+                let task = Proxmox.Utils.parse_task_upid(data['last-run-upid']);
+                return endtime - task.starttime;
+            },
+        },
+        'comment',
+    ],
+    idProperty: 'id',
+    proxy: {
+        type: 'proxmox',
+        url: '/api2/json/admin/verify',
+    },
+});
+
+Ext.define('PBS.config.VerifyJobView', {
+    extend: 'Ext.grid.GridPanel',
+    alias: 'widget.pbsVerifyJobView',
+
+    stateful: true,
+    stateId: 'grid-verify-jobs',
+
+    title: gettext('Verify Jobs'),
+
+    controller: {
+        xclass: 'Ext.app.ViewController',
+
+        addVerifyJob: function() {
+            let me = this;
+            let view = me.getView();
+            Ext.create('PBS.window.VerifyJobEdit', {
+                datastore: view.datastore,
+                listeners: {
+                    destroy: function() {
+                        me.reload();
+                    },
+                },
+            }).show();
+        },
+
+        editVerifyJob: function() {
+            let me = this;
+            let view = me.getView();
+            let selection = view.getSelection();
+            if (selection.length < 1) return;
+
+            Ext.create('PBS.window.VerifyJobEdit', {
+                datastore: view.datastore,
+                id: selection[0].data.id,
+                listeners: {
+                    destroy: function() {
+                        me.reload();
+                    },
+                },
+            }).show();
+        },
+
+        openTaskLog: function() {
+            let me = this;
+            let view = me.getView();
+            let selection = view.getSelection();
+            if (selection.length < 1) return;
+
+            let upid = selection[0].data['last-run-upid'];
+            if (!upid) return;
+
+            Ext.create('Proxmox.window.TaskViewer', {
+                upid,
+            }).show();
+        },
+
+        runVerifyJob: function() {
+            let me = this;
+            let view = me.getView();
+            let selection = view.getSelection();
+            if (selection.length < 1) return;
+
+            let id = selection[0].data.id;
+            Proxmox.Utils.API2Request({
+                method: 'POST',
+                url: `/admin/verify/${id}/run`,
+                success: function(response, opt) {
+                    Ext.create('Proxmox.window.TaskViewer', {
+                        upid: response.result.data,
+                        taskDone: function(success) {
+                            me.reload();
+                        },
+                    }).show();
+                },
+                failure: function(response, opt) {
+                    Ext.Msg.alert(gettext('Error'), response.htmlStatus);
+                },
+            });
+        },
+
+        render_verify_status: function(value, metadata, record) {
+            if (!record.data['last-run-upid']) {
+                return '-';
+            }
+
+            if (!record.data['last-run-endtime']) {
+                metadata.tdCls = 'x-grid-row-loading';
+                return '';
+            }
+
+            let parsed = Proxmox.Utils.parse_task_status(value);
+            let text = value;
+            let icon = '';
+            switch (parsed) {
+                case 'unknown':
+                    icon = 'question faded';
+                    text = Proxmox.Utils.unknownText;
+                    break;
+                case 'error':
+                    icon = 'times critical';
+                    text = Proxmox.Utils.errorText + ': ' + value;
+                    break;
+                case 'warning':
+                    icon = 'exclamation warning';
+                    break;
+                case 'ok':
+                    icon = 'check good';
+                    text = gettext("OK");
+            }
+
+            return `<i class="fa fa-${icon}"></i> ${text}`;
+        },
+
+        render_next_run: function(value, metadat, record) {
+            if (!value) return '-';
+
+            let now = new Date();
+            let next = new Date(value*1000);
+
+            if (next < now) {
+                return gettext('pending');
+            }
+            return Proxmox.Utils.render_timestamp(value);
+        },
+
+        render_optional_timestamp: function(value, metadata, record) {
+            if (!value) return '-';
+            return Proxmox.Utils.render_timestamp(value);
+        },
+
+        startStore: function() { this.getView().getStore().rstore.startUpdate(); },
+        stopStore: function() { this.getView().getStore().rstore.stopUpdate(); },
+
+        reload: function() { this.getView().getStore().rstore.load(); },
+
+        init: function(view) {
+            view.getStore().rstore.getProxy().setExtraParams({
+                store: view.datastore,
+            });
+            Proxmox.Utils.monStoreErrors(view, view.getStore().rstore);
+        },
+    },
+
+    listeners: {
+        activate: 'startStore',
+        deactivate: 'stopStore',
+        itemdblclick: 'editVerifyJob',
+    },
+
+    store: {
+        type: 'diff',
+        autoDestroy: true,
+        autoDestroyRstore: true,
+        sorters: 'id',
+        rstore: {
+            type: 'update',
+            storeid: 'pbs-verify-jobs-status',
+            model: 'pbs-verify-jobs-status',
+            interval: 5000,
+        },
+    },
+
+    tbar: [
+        {
+            xtype: 'proxmoxButton',
+            text: gettext('Add'),
+            handler: 'addVerifyJob',
+            selModel: false,
+        },
+        {
+            xtype: 'proxmoxButton',
+            text: gettext('Edit'),
+            handler: 'editVerifyJob',
+            disabled: true,
+        },
+        {
+            xtype: 'proxmoxStdRemoveButton',
+            baseurl: '/config/verify/',
+            callback: 'reload',
+        },
+        '-',
+        {
+            xtype: 'proxmoxButton',
+            text: gettext('Show Log'),
+            handler: 'openTaskLog',
+            enableFn: (rec) => !!rec.data['last-run-upid'],
+            disabled: true,
+        },
+        {
+            xtype: 'proxmoxButton',
+            text: gettext('Run now'),
+            handler: 'runVerifyJob',
+            disabled: true,
+        },
+    ],
+
+    viewConfig: {
+        trackOver: false,
+    },
+
+    columns: [
+        {
+            header: gettext('Job ID'),
+            dataIndex: 'id',
+            renderer: Ext.String.htmlEncode,
+            flex: 2,
+            sortable: true,
+            hidden: true,
+        },
+        {
+            header: gettext('Skip Verified'),
+            dataIndex: 'ignore-verified',
+            renderer: Proxmox.Utils.format_boolean,
+            flex: 2,
+            sortable: true,
+        },
+        {
+            header: gettext('Re-Verfiy Age'),
+            dataIndex: 'outdated-after',
+            renderer: v => v ? v +' '+ gettext('Days') : gettext('Never'),
+            flex: 2,
+            sortable: true,
+        },
+        {
+            header: gettext('Schedule'),
+            dataIndex: 'schedule',
+            sortable: true,
+            flex: 2,
+        },
+        {
+            header: gettext('Status'),
+            dataIndex: 'last-run-state',
+            renderer: 'render_verify_status',
+            flex: 4,
+        },
+        {
+            header: gettext('Last Verification'),
+            dataIndex: 'last-run-endtime',
+            renderer: 'render_optional_timestamp',
+            flex: 3,
+            sortable: true,
+        },
+        {
+            text: gettext('Duration'),
+            dataIndex: 'duration',
+            renderer: Proxmox.Utils.render_duration,
+            flex: 2,
+        },
+        {
+            header: gettext('Next Run'),
+            dataIndex: 'next-run',
+            renderer: 'render_next_run',
+            flex: 3,
+            sortable: true,
+        },
+        {
+            header: gettext('Comment'),
+            dataIndex: 'comment',
+            renderer: Ext.String.htmlEncode,
+            flex: 4,
+            sortable: true,
+        },
+    ],
+});

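A small point about render_next_run in the new file: a next-run timestamp that already lies in the past is rendered as 'pending', since the scheduler simply has not fired yet. Reduced to plain JavaScript, with toString() standing in for the Proxmox timestamp renderer:

// Plain-JS sketch of render_next_run's contract.
function render_next_run(value) {
    if (!value) return '-';
    let next = new Date(value * 1000); // API timestamps are in seconds
    return next < new Date() ? 'pending' : next.toString();
}
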
@@ -213,6 +213,13 @@ p.logs {
     cursor: default;
 }
 
+span.snapshot-comment-column {
+    text-overflow: ellipsis;
+    overflow: hidden;
+    display: inline-block;
+    width: calc(100% - 18px);
+}
+
 .x-action-col-icon.good:before {
     color: #21BF4B;
 }
@@ -224,3 +231,9 @@ p.logs {
 .x-action-col-icon.critical:before {
     color: #FF6C59;
 }
+
+.pve-icon-verify-lettering:after {
+    color: #464d4d;
+    font-weight: bold;
+    content: "V.";
+}

@@ -1,3 +1,81 @@
+Ext.define('PBS.panel.PruneInputPanel', {
+    extend: 'Proxmox.panel.InputPanel',
+    xtype: 'pbsPruneInputPanel',
+
+    mixins: ['Proxmox.Mixin.CBind'],
+
+    cbindData: function() {
+        let me = this;
+        me.isCreate = !!me.isCreate;
+        return {};
+    },
+
+    column1: [
+        {
+            xtype: 'proxmoxintegerfield',
+            fieldLabel: gettext('Keep Last'),
+            name: 'keep-last',
+            cbind: {
+                deleteEmpty: '{!isCreate}',
+            },
+            minValue: 1,
+            allowBlank: true,
+        },
+        {
+            xtype: 'proxmoxintegerfield',
+            fieldLabel: gettext('Keep Daily'),
+            name: 'keep-daily',
+            cbind: {
+                deleteEmpty: '{!isCreate}',
+            },
+            minValue: 1,
+            allowBlank: true,
+        },
+        {
+            xtype: 'proxmoxintegerfield',
+            fieldLabel: gettext('Keep Monthly'),
+            name: 'keep-monthly',
+            cbind: {
+                deleteEmpty: '{!isCreate}',
+            },
+            minValue: 1,
+            allowBlank: true,
+        },
+    ],
+    column2: [
+        {
+            xtype: 'proxmoxintegerfield',
+            fieldLabel: gettext('Keep Hourly'),
+            name: 'keep-hourly',
+            cbind: {
+                deleteEmpty: '{!isCreate}',
+            },
+            minValue: 1,
+            allowBlank: true,
+        },
+        {
+            xtype: 'proxmoxintegerfield',
+            fieldLabel: gettext('Keep Weekly'),
+            name: 'keep-weekly',
+            cbind: {
+                deleteEmpty: '{!isCreate}',
+            },
+            minValue: 1,
+            allowBlank: true,
+        },
+        {
+            xtype: 'proxmoxintegerfield',
+            fieldLabel: gettext('Keep Yearly'),
+            name: 'keep-yearly',
+            cbind: {
+                deleteEmpty: '{!isCreate}',
+            },
+            minValue: 1,
+            allowBlank: true,
+        },
+    ],
+
+});
 Ext.define('PBS.DataStoreEdit', {
     extend: 'Proxmox.window.Edit',
     alias: 'widget.pbsDataStoreEdit',
@@ -77,15 +155,6 @@ Ext.define('PBS.DataStoreEdit', {
                     deleteEmpty: '{!isCreate}',
                 },
             },
-            {
-                xtype: 'pbsCalendarEvent',
-                name: 'verify-schedule',
-                fieldLabel: gettext("Verify Schedule"),
-                emptyText: gettext('none'),
-                cbind: {
-                    deleteEmpty: '{!isCreate}',
-                },
-            },
         ],
         columnB: [
             {
@@ -97,72 +166,11 @@ Ext.define('PBS.DataStoreEdit', {
         },
         {
             title: gettext('Prune Options'),
-            xtype: 'inputpanel',
+            xtype: 'pbsPruneInputPanel',
+            cbind: {
+                isCreate: '{isCreate}',
+            },
             onlineHelp: 'backup_pruning',
-            column1: [
-                {
-                    xtype: 'proxmoxintegerfield',
-                    fieldLabel: gettext('Keep Last'),
-                    name: 'keep-last',
-                    cbind: {
-                        deleteEmpty: '{!isCreate}',
-                    },
-                    minValue: 1,
-                    allowBlank: true,
-                },
-                {
-                    xtype: 'proxmoxintegerfield',
-                    fieldLabel: gettext('Keep Daily'),
-                    name: 'keep-daily',
-                    cbind: {
-                        deleteEmpty: '{!isCreate}',
-                    },
-                    minValue: 1,
-                    allowBlank: true,
-                },
-                {
-                    xtype: 'proxmoxintegerfield',
-                    fieldLabel: gettext('Keep Monthly'),
-                    name: 'keep-monthly',
-                    cbind: {
-                        deleteEmpty: '{!isCreate}',
-                    },
-                    minValue: 1,
-                    allowBlank: true,
-                },
-            ],
-            column2: [
-                {
-                    xtype: 'proxmoxintegerfield',
-                    fieldLabel: gettext('Keep Hourly'),
-                    name: 'keep-hourly',
-                    cbind: {
-                        deleteEmpty: '{!isCreate}',
-                    },
-                    minValue: 1,
-                    allowBlank: true,
-                },
-                {
-                    xtype: 'proxmoxintegerfield',
-                    fieldLabel: gettext('Keep Weekly'),
-                    name: 'keep-weekly',
-                    cbind: {
-                        deleteEmpty: '{!isCreate}',
-                    },
-                    minValue: 1,
-                    allowBlank: true,
-                },
-                {
-                    xtype: 'proxmoxintegerfield',
-                    fieldLabel: gettext('Keep Yearly'),
-                    name: 'keep-yearly',
-                    cbind: {
-                        deleteEmpty: '{!isCreate}',
-                    },
-                    minValue: 1,
-                    allowBlank: true,
-                },
-            ],
         },
     ],
     },

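With the keep-* fields factored out into PBS.panel.PruneInputPanel, other dialogs can embed the same panel; isCreate only decides whether cleared fields are submitted as deletions via deleteEmpty. A hypothetical standalone use (the window title, URL, and datastore name below are made up):

// Hypothetical reuse of the factored-out panel; title, URL, and store name
// are invented for illustration.
Ext.create('Proxmox.window.Edit', {
    title: 'Prune Options',
    url: '/api2/extjs/config/datastore/example-store',
    items: {
        xtype: 'pbsPruneInputPanel',
        isCreate: false, // editing: empty keep-* fields delete the option
    },
}).show();
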
@@ -87,6 +87,9 @@ Ext.define("PBS.window.FileBrowser", {
             };
             params.filepath = data.filepath;
             atag.download = data.text;
+            if (data.type === 'd') {
+                atag.download += ".zip";
+            }
             atag.href = me
                 .buildUrl(`/api2/json/admin/datastore/${view.datastore}/pxar-file-download`, params);
             atag.click();
@@ -106,6 +109,11 @@ Ext.define("PBS.window.FileBrowser", {
                 case 'f':
                     canDownload = true;
                     break;
+                case 'd':
+                    if (data.depth > 1) {
+                        canDownload = true;
+                    }
+                    break;
                 default: break;
             }
 

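The FileBrowser change reuses the existing anchor-element download trick: directories become downloadable too, and since they are delivered as zip archives, the suggested file name gets a .zip suffix. The underlying DOM pattern, reduced to its core (node data and URL below are made up):

// Core of the download trick; node data and URL are invented.
let data = { type: 'd', text: 'etc' };
let atag = document.createElement('a');
atag.download = data.type === 'd' ? data.text + ".zip" : data.text;
atag.href = '/api2/json/admin/datastore/example-store/pxar-file-download';
atag.click(); // browser starts the download under the suggested name
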
www/window/NotesEdit.js (new file, 38 lines)
@@ -0,0 +1,38 @@
+Ext.define('PBS.window.NotesEdit', {
+    extend: 'Proxmox.window.Edit',
+    mixins: ['Proxmox.Mixin.CBind'],
+
+    title: gettext('Notes'),
+
+    width: 600,
+    height: '400px',
+    resizable: true,
+    layout: 'fit',
+
+    autoLoad: true,
+
+    defaultButton: undefined,
+
+    notesFieldName: 'notes',
+
+    setValues: function(values) {
+        let me = this;
+        if (typeof values === "string") {
+            let v = values;
+            values = {};
+            values[me.notesFieldName] = v;
+        }
+        me.callParent([values]);
+    },
+
+    items: {
+        xtype: 'textarea',
+        name: 'notes',
+        cbind: {
+            name: '{notesFieldName}',
+        },
+        height: '100%',
+        value: '',
+        hideLabel: true,
+    },
+});

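NotesEdit's setValues override exists because the backing API returns the note as a bare string, while Proxmox.window.Edit expects an object keyed by field name. The same transformation in isolation (the note text below is made up):

// What the setValues override does, in isolation; note text is invented.
let notesFieldName = 'notes';
let values = "snapshot taken before upgrade";
if (typeof values === "string") {
    let v = values;
    values = {};
    values[notesFieldName] = v;
}
// values is now { notes: "snapshot taken before upgrade" }
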
@@ -28,16 +28,23 @@ Ext.define('PBS.window.SyncJobEdit', {
 
     items: {
         xtype: 'inputpanel',
+        onGetValues: function(values) {
+            let me = this;
+
+            if (!values.id && me.up('pbsSyncJobEdit').isCreate) {
+                values.id = 'auto-' + Ext.data.identifier.Uuid.Global.generate().slice(0, 23);
+            }
+            return values;
+        },
         column1: [
             {
-                fieldLabel: gettext('Sync Job ID'),
-                xtype: 'pmxDisplayEditField',
-                name: 'id',
-                renderer: Ext.htmlEncode,
+                xtype: 'displayfield',
+                name: 'store',
+                fieldLabel: gettext('Local Datastore'),
                 allowBlank: false,
-                minLength: 4,
+                submitValue: true,
                 cbind: {
-                    editable: '{isCreate}',
+                    value: '{datastore}',
                 },
             },
             {
@@ -52,11 +59,19 @@ Ext.define('PBS.window.SyncJobEdit', {
                 allowBlank: false,
                 name: 'remote-store',
             },
+        ],
+        advancedColumn1: [
             {
-                fieldLabel: gettext('Local Datastore'),
-                xtype: 'pbsDataStoreSelector',
-                allowBlank: false,
-                name: 'store',
+                xtype: 'pmxDisplayEditField',
+                name: 'id',
+                fieldLabel: gettext('Sync Job ID'),
+                emptyText: gettext('Automatic'),
+                renderer: Ext.htmlEncode,
+                allowBlank: true,
+                minLength: 4,
+                cbind: {
+                    editable: '{isCreate}',
+                },
             },
         ],
 
@@ -72,7 +87,8 @@ Ext.define('PBS.window.SyncJobEdit', {
             fieldLabel: gettext('Schedule'),
             xtype: 'pbsCalendarEvent',
             name: 'schedule',
-            emptyText: gettext('none'),
+            value: 'hourly',
+            emptyText: gettext('none (disabled)'),
             cbind: {
                 deleteEmpty: '{!isCreate}',
             },

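The new onGetValues hook gives every sync job an ID even when the field is left empty: 'auto-' plus the first 23 characters of a freshly generated UUID. A sketch of the generated value (the example output is made up):

// Sketch of the generated fallback ID; the example value is invented.
let id = 'auto-' + Ext.data.identifier.Uuid.Global.generate().slice(0, 23);
// e.g. "auto-8f1b2c3d-4e5f-6a7b-8c9d"
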
www/window/VerifyJobEdit.js (new file, 124 lines)
@@ -0,0 +1,124 @@
+Ext.define('PBS.window.VerifyJobEdit', {
+    extend: 'Proxmox.window.Edit',
+    alias: 'widget.pbsVerifyJobEdit',
+    mixins: ['Proxmox.Mixin.CBind'],
+
+    userid: undefined,
+
+    onlineHelp: 'verifyjobs',
+
+    isAdd: true,
+
+    subject: gettext('VerifyJob'),
+
+    fieldDefaults: { labelWidth: 120 },
+    defaultFocus: 'field[name="ignore-verified"]',
+
+    cbindData: function(initialConfig) {
+        let me = this;
+
+        let baseurl = '/api2/extjs/config/verify';
+        let id = initialConfig.id;
+
+        me.isCreate = !id;
+        me.url = id ? `${baseurl}/${id}` : baseurl;
+        me.method = id ? 'PUT' : 'POST';
+        me.autoLoad = !!id;
+        return { };
+    },
+
+    viewModel: {
+        data: {
+            'ignore-verified': true,
+        },
+    },
+
+    items: {
+        xtype: 'inputpanel',
+        onGetValues: function(values) {
+            let me = this;
+
+            if (!values.id && me.up('pbsVerifyJobEdit').isCreate) {
+                values.id = 'auto-' + Ext.data.identifier.Uuid.Global.generate().slice(0, 23);
+            }
+            return values;
+        },
+        column1: [
+            {
+                xtype: 'displayfield',
+                name: 'store',
+                fieldLabel: gettext('Datastore'),
+                allowBlank: false,
+                submitValue: true,
+                cbind: {
+                    value: '{datastore}',
+                },
+            },
+            {
+                xtype: 'pbsCalendarEvent',
+                name: 'schedule',
+                fieldLabel: gettext('Schedule'),
+                emptyText: gettext('none (disabled)'),
+                value: 'daily',
+                cbind: {
+                    deleteEmpty: '{!isCreate}',
+                },
+            },
+        ],
+        advancedColumn1: [
+            {
+                xtype: 'pmxDisplayEditField',
+                name: 'id',
+                fieldLabel: gettext('Verify Job ID'),
+                emptyText: gettext('Automatic'),
+                renderer: Ext.htmlEncode,
+                allowBlank: true,
+                minLength: 4,
+                cbind: {
+                    editable: '{isCreate}',
+                },
+            },
+        ],
+
+        column2: [
+            {
+                xtype: 'proxmoxcheckbox',
+                name: 'ignore-verified',
+                fieldLabel: gettext('Skip verified snapshots'),
+                labelWidth: 150,
+                uncheckedValue: false,
+                value: true,
+                bind: {
+                    value: '{ignore-verified}',
+                },
+            },
+            {
+                xtype: 'proxmoxintegerfield',
+                name: 'outdated-after',
+                fieldLabel: gettext('Re-Verify After (days)'),
+                labelWidth: 150,
+                minValue: 1,
+                value: 30,
+                allowBlank: true,
+                emptyText: gettext('Never'),
+                bind: {
+                    disabled: '{!ignore-verified}',
+                },
+                cbind: {
+                    deleteEmpty: '{!isCreate}',
+                },
+            },
+        ],
+
+        columnB: [
+            {
+                fieldLabel: gettext('Comment'),
+                xtype: 'proxmoxtextfield',
+                name: 'comment',
+                cbind: {
+                    deleteEmpty: '{!isCreate}',
+                },
+            },
+        ],
+    },
+});