Compare commits
172 Commits

c9299e76fc
2f1a46f748
2b38dfb456
f487a622ce
906ef6c5bd
ea1853a17b
221177ba41
184a37635b
b2da7fbd1c
7fe76d3491
e6b5bf69a3
4615325f9e
2156dec5a9
16245d540c
bff8557298
34aa8e13b6
babab85b56
6746bbb1a2
942078c40b
c30816c1f8
e6dc35acb8
e10c5c74f6
f8adf8f83f
e0538349e2
0903403ce7
b6563f48ad
932390bd46
6b7688aa98
ab0cf7e6a1
264779e704
7f3d91003c
14e0862509
9e733dae48
bfea476be2
385cf2bd9d
d6373f3525
01f37e01c3
b4fb262335
5499bd3dee
d771a608f5
227a39b34b
f9beae9cc9
4430f199c4
eef18365e8
319fe45261
f26080fab1
0cbdeed96b
8b4f4d9ee4
b9cc905761
c9725bb829
40492a562f
db67e4fe06
b4b14dc16e
c4a45ec744
5428f5ca29
328df3b507
a4915dfc2b
d642802d8c
a20fcab060
b9e7bcc272
acc3d9df5a
1298618a83
a12388d177
1f092c7802
cd82870015
8d6b6a045f
1dceaed1e9
2565fdd075
7ece65a01e
028d0a1352
68931742cb
3ea148598a
cd92fd7336
d58e6313e1
16f9f244cf
b683fd589c
a2285525be
f23497b088
b57b3c9bfc
d3444c0891
d28e688666
72c0e102ff
7b22fb257f
2e201e7da6
ee89416319
2357744bd0
52fe9e8ece
eed1bae554
6eb41487ce
9e61c01ce4
91c9b42da3
52d2ae48f0
1872050564
efeb92efee
4ebda996e5
5eb9dd0c8a
12bcbf0734
dc2876f6bb
bdc208af48
2ef1b6290f
df0bdf6be7
8b47a23002
29615fe838
133042b5d8
73df9c515b
8d1beca7e8
9b2bad7af0
78efafc2d0
2d3d91b1db
030c5c6d8a
53a561a222
e832860a3c
804f61432d
943479f5f6
fdce52aa99
4e32d1c590
afef7f3bba
b428af9781
c8774067ee
23440482d4
6f757b8458
95ade8fdb5
9e870b5f39
7827e3b93e
e6ca9c3235
0698f78df5
bcc2880461
115d927c15
df729017b4
455f2ad228
e4f5f59eea
16cdb9563b
02479720c0
97168f920e
9809772b23
4940012d0d
0c2f9621d5
e7372972b5
e5adbc3419
41255b4d95
0c4c6a7b1c
c7e18ba08a
bb14d46796
e6475b09e0
d39d095fa4
86f3c2363c
8e7e2223d8
081c37cccf
c0df91f8bd
400c568f8e
4703ba81ce
29633e2fe9
b64e9a97f3
254b1f2213
1a374fcfd6
e07620028d
b947b1e7ee
1e80fb8e92
8d841f81ee
d9f365d79f
32a4695c46
2081327428
4c0ae82e23
883aa6d5a4
bfa54f2e85
238a872d1f
7d6c4c39e9
f153930066
836c4a278d
6cd8496008
61c6eafc08
8db1468952
@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.9.1"
+version = "0.9.4"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
@@ -29,7 +29,7 @@ hyper = "0.13.6"
 lazy_static = "1.4"
 libc = "0.2"
 log = "0.4"
-nix = "0.16"
+nix = "0.19"
 num-traits = "0.2"
 once_cell = "1.3.1"
 openssl = "0.10"
@@ -38,7 +38,7 @@ pam-sys = "0.5"
 percent-encoding = "2.1"
 pin-utils = "0.1.0"
 pathpatterns = "0.1.2"
-proxmox = { version = "0.4.3", features = [ "sortable-macro", "api-macro", "websocket" ] }
+proxmox = { version = "0.6.0", features = [ "sortable-macro", "api-macro", "websocket" ] }
 #proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
 proxmox-fuse = "0.1.0"

debian/changelog (vendored, 116 lines changed)
@@ -1,3 +1,115 @@
+rust-proxmox-backup (0.9.4-1) unstable; urgency=medium
+
+  * implement API-token
+
+  * client/remote: allow using API-token + secret
+
+  * ui/cli: implement API-token management interface and commands
+
+  * ui: add widget to view the effective permissions of a user or token
+
+  * ui: datastore summary: handle error when having zero snapshots of any type
+
+  * ui: move user, token and permissions into an access control tab panel
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 29 Oct 2020 17:19:13 +0100
+
+rust-proxmox-backup (0.9.3-1) unstable; urgency=medium
+
+  * fix #2998: encode mtime as i64 instead of u64
+
+  * GC: log the number of leftover bad chunks we could not yet clean up, as no
+    valid one replaced them. Also log deduplication factor.
+
+  * send sync job status emails
+
+  * api: datastore status: introduce proper structs and restore compatibility
+    to 0.9.1
+
+  * ui: drop id field from verify/sync add window, they are now seen as internal
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 29 Oct 2020 14:58:13 +0100
+
+rust-proxmox-backup (0.9.2-2) unstable; urgency=medium
+
+  * rework server web-interface, move more datastore related panels as tabs
+    inside the datastore view
+
+  * prune: never fail, just warn about failed removals
+
+  * prune/forget: skip snapshots with open readers (restore, verification)
+
+  * datastore: always ensure to remove individual snapshots before their group
+
+  * pxar: fix relative '!' rules in .pxarexclude
+
+  * pxar: anchor pxarexcludes starting with a slash
+
+  * GC: mark phase: ignore vanished index files
+
+  * server/rest: forward real client IP on proxied request and log it in
+    failed authentication requests
+
+  * server: rest: implement max URI path and query length request limits
+
+  * server/rest: implement request access log and log the query part of
+    URL and the user agent
+
+  * api: access: log to separate file, use syslog for errors only to reduce
+    syslog spam
+
+  * client: set HTTP connect timeout to 10 seconds
+
+  * client: send TCP keep-alive after 2 minutes instead of the Linux default
+    of two hours.
+
+  * CLI completion: fix ACL path completion
+
+  * fix #2988: allow one to enable automatic verification after finishing a
+    snapshot, can be controlled as a per-datastore option
+
+  * various log-rotation improvements
+
+  * proxmox-backup-client: use HumanByte to render snapshot size
+
+  * paperkey: use svg as image format to provide better scalability
+
+  * backup: avoid "Transport endpoint is not connected" error
+
+  * fix #3038: check user before renewing ticket
+
+  * ui/tools: add zip module and allow to download an archive directory as a zip
+
+  * ui and api: add verification job config, allowing to schedule more
+    flexible jobs, filtering out already and/or recently verified snapshots
+    NOTE: the previous simple "verify all" schedule was dropped from the
+    datastore content, and does *not* get migrated to the new job config.
+
+  * tasks: use systemd escape to decode/encode the task worker ID, avoiding
+    some display problems with problematic characters
+
+  * fix #2934: list also new to-be-installed packages in updates
+
+  * apt: add /changelog API call similar to PVE
+
+  * api: add world accessible ping dummy endpoint, to cheaply check for a
+    running PBS instance.
+
+  * ui: add datastore summary panel and move Statistics into it
+
+  * ui: navigation: add 'Add Datastore' button below datastore list
+
+  * ui: datastore panel: save and restore selected tab statefully
+
+  * send notification mails to email of root@pam account for GC and verify
+    jobs
+
+  * ui: datastore: use simple V. for verify action button
+
+  * ui: datastore: show snapshot manifest comment and allow to edit them
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 28 Oct 2020 23:05:41 +0100
+
 rust-proxmox-backup (0.9.1-1) unstable; urgency=medium
 
   * TLS speedups (use SslAcceptor::mozilla_intermediate_v5)
@@ -16,7 +128,7 @@ rust-proxmox-backup (0.9.1-1) unstable; urgency=medium
 
   * add "Build" section to README.rst
 
-  * reader: actually allow users to downlod their own backups
+  * reader: actually allow users to download their own backups
 
   * reader: track index chunks and limit access
 
@@ -38,7 +150,7 @@ rust-proxmox-backup (0.9.1-1) unstable; urgency=medium
 
   * ui: Dashboard/TaskSummary: add Verifies to the Summary
 
-  * ui: implment task history limit and make it configurable
+  * ui: implement task history limit and make it configurable
 
   * docs: installation: add system requirements section
 
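The API-token entries above mean a client can now authenticate with an Authid of the form user@realm!tokenname plus a secret, instead of a user password. A minimal sketch of what that might look like with this repository's client types (the token name and secret are made-up placeholders; the HttpClient/Authid API follows the examples further down in this diff):

    use anyhow::Error;

    use proxmox_backup::api2::types::Authid;
    use proxmox_backup::client::{HttpClient, HttpClientOptions};

    async fn token_login() -> Result<(), Error> {
        // hypothetical token identifier: <user>@<realm>!<tokenname>
        let auth_id: Authid = "root@pam!mytoken".parse()?;

        // the token secret would normally come from the environment
        // (e.g. PBS_PASSWORD) rather than being hard-coded
        let options = HttpClientOptions::new()
            .password(Some("hypothetical-secret".to_string()))
            .interactive(false);

        let client = HttpClient::new("localhost", 8007, &auth_id, options)?;
        let version = client.get("api2/json/version", None).await?;
        println!("{}", version);
        Ok(())
    }

As in the bundled examples, such an async fn would be driven by the crate's usual runtime helper.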

debian/control (vendored, 12 lines changed)
@@ -24,7 +24,7 @@ Build-Depends: debhelper (>= 11),
  librust-lazy-static-1+default-dev (>= 1.4-~~),
  librust-libc-0.2+default-dev,
  librust-log-0.4+default-dev,
- librust-nix-0.16+default-dev,
+ librust-nix-0.19+default-dev,
  librust-nom-5+default-dev (>= 5.1-~~),
  librust-num-traits-0.2+default-dev,
  librust-once-cell-1+default-dev (>= 1.3.1-~~),
@@ -34,10 +34,10 @@ Build-Depends: debhelper (>= 11),
  librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
  librust-percent-encoding-2+default-dev (>= 2.1-~~),
  librust-pin-utils-0.1+default-dev,
- librust-proxmox-0.4+api-macro-dev (>= 0.4.3-~~),
- librust-proxmox-0.4+default-dev (>= 0.4.3-~~),
- librust-proxmox-0.4+sortable-macro-dev (>= 0.4.3-~~),
- librust-proxmox-0.4+websocket-dev (>= 0.4.3-~~),
+ librust-proxmox-0.6+api-macro-dev,
+ librust-proxmox-0.6+default-dev,
+ librust-proxmox-0.6+sortable-macro-dev,
+ librust-proxmox-0.6+websocket-dev,
  librust-proxmox-fuse-0.1+default-dev,
  librust-pxar-0.6+default-dev (>= 0.6.1-~~),
  librust-pxar-0.6+futures-io-dev (>= 0.6.1-~~),
@@ -107,7 +107,7 @@ Depends: fonts-font-awesome,
  pbs-i18n,
  proxmox-backup-docs,
  proxmox-mini-journalreader,
- proxmox-widget-toolkit (>= 2.3-1),
+ proxmox-widget-toolkit (>= 2.3-6),
  pve-xtermjs (>= 4.7.0-1),
  smartmontools,
  ${misc:Depends},

debian/control.in (vendored, 2 lines changed)
@@ -7,7 +7,7 @@ Depends: fonts-font-awesome,
  pbs-i18n,
  proxmox-backup-docs,
  proxmox-mini-journalreader,
- proxmox-widget-toolkit (>= 2.3-1),
+ proxmox-widget-toolkit (>= 2.3-6),
  pve-xtermjs (>= 4.7.0-1),
  smartmontools,
  ${misc:Depends},

debian/postinst (vendored, 2 lines changed)
@@ -15,6 +15,8 @@ case "$1" in
     fi
     deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
 
+    flock -w 30 /etc/proxmox-backup/.datastore.lck sed -i '/^\s\+verify-schedule /d' /etc/proxmox-backup/datastore.cfg
+
     # FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
     if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
         echo "Fixing up termproxy user id in task log..."
@@ -246,6 +246,8 @@ Restoring this backup will result in:
 . .. file2
 
 
+.. _encryption:
+
 Encryption
 ----------
 
@@ -1,8 +1,8 @@
 Introduction
 ============
 
-What is Proxmox Backup Server
------------------------------
+What is Proxmox Backup Server?
+------------------------------
 
 Proxmox Backup Server is an enterprise-class, client-server backup software
 package that backs up :term:`virtual machine`\ s, :term:`container`\ s, and
@@ -10,12 +10,14 @@ physical hosts. It is specially optimized for the `Proxmox Virtual Environment`_
 platform and allows you to back up your data securely, even between remote
 sites, providing easy management with a web-based user interface.
 
-Proxmox Backup Server supports deduplication, compression, and authenticated
+It supports deduplication, compression, and authenticated
 encryption (AE_). Using :term:`Rust` as the implementation language guarantees high
 performance, low resource usage, and a safe, high-quality codebase.
 
-It features strong client-side encryption. Thus, it's possible to
-backup data to targets that are not fully trusted.
+Proxmox Backup uses state of the art cryptography for client communication and
+backup content :ref:`encryption <encryption>`. Encryption is done on the
+client side, making it safer to back up data to targets that are not fully
+trusted.
 
 
 Architecture
@@ -179,29 +181,28 @@ along with this program. If not, see AGPL3_.
 History
 -------
 
-Backup is, and always was, as central aspect of IT administration.
-The need to recover from data loss is fundamental and increases with
+Backup is, and always has been, a central aspect of IT administration.
+The need to recover from data loss is fundamental and only increases with
 virtualization.
 
-Not surprisingly, we shipped a backup tool with Proxmox VE from the
-beginning. The tool is called ``vzdump`` and is able to make
+For this reason, we've been shipping a backup tool with Proxmox VE, from the
+beginning. This tool is called ``vzdump`` and is able to make
 consistent snapshots of running LXC containers and KVM virtual
 machines.
 
-But ``vzdump`` only allowed for full backups. While this is perfect
+However, ``vzdump`` only allows for full backups. While this is fine
 for small backups, it becomes a burden for users with large VMs. Both
-backup time and space usage was too large for this case, specially
-when Users want to keep many backups of the same VMs. We need
-deduplication and incremental backups to solve those problems.
+backup duration and storage usage are too high for this case, especially
+for users who want to keep many backups of the same VMs. To solve these
+problems, we needed to offer deduplication and incremental backups.
 
-Back in October 2018 development started. We had been looking into
+Back in October 2018, development started. We investigated
 several technologies and frameworks and finally decided to use
-:term:`Rust` as implementation language to provide high speed and
-memory efficiency. The 2018-edition of Rust seemed to be promising and
-useful for our requirements.
+:term:`Rust` as the implementation language, in order to provide high speed and
+memory efficiency. The 2018-edition of Rust seemed promising for our
+requirements.
 
-In July 2020 we released the first beta version of Proxmox Backup
-Server, followed by a first stable version in November 2020. With the
-support of incremental, fully deduplicated backups, Proxmox Backup
-significantly reduces the network load and saves valuable storage
-space.
+In July 2020, we released the first beta version of Proxmox Backup
+Server, followed by the first stable version in November 2020. With support for
+incremental, fully deduplicated backups, Proxmox Backup significantly reduces
+network load and saves valuable storage space.
@@ -2,7 +2,7 @@ use std::io::Write;
 
 use anyhow::{Error};
 
-use proxmox_backup::api2::types::Userid;
+use proxmox_backup::api2::types::Authid;
 use proxmox_backup::client::{HttpClient, HttpClientOptions, BackupReader};
 
 pub struct DummyWriter {
@@ -26,13 +26,13 @@ async fn run() -> Result<(), Error> {
 
     let host = "localhost";
 
-    let username = Userid::root_userid();
+    let auth_id = Authid::root_auth_id();
 
     let options = HttpClientOptions::new()
         .interactive(true)
         .ticket_cache(true);
 
-    let client = HttpClient::new(host, 8007, username, options)?;
+    let client = HttpClient::new(host, 8007, auth_id, options)?;
 
     let backup_time = proxmox::tools::time::parse_rfc3339("2019-06-28T10:49:48Z")?;
 
@@ -1,6 +1,6 @@
 use anyhow::{Error};
 
-use proxmox_backup::api2::types::Userid;
+use proxmox_backup::api2::types::Authid;
 use proxmox_backup::client::*;
 
 async fn upload_speed() -> Result<f64, Error> {
@@ -8,13 +8,13 @@ async fn upload_speed() -> Result<f64, Error> {
     let host = "localhost";
     let datastore = "store2";
 
-    let username = Userid::root_userid();
+    let auth_id = Authid::root_auth_id();
 
     let options = HttpClientOptions::new()
         .interactive(true)
         .ticket_cache(true);
 
-    let client = HttpClient::new(host, 8007, username, options)?;
+    let client = HttpClient::new(host, 8007, auth_id, options)?;
 
     let backup_time = proxmox::tools::time::epoch_i64();
 
@@ -7,6 +7,7 @@ pub mod reader;
 pub mod status;
 pub mod types;
 pub mod version;
+pub mod ping;
 pub mod pull;
 mod helpers;
 
@@ -22,6 +23,7 @@ pub const SUBDIRS: SubdirMap = &[
     ("backup", &backup::ROUTER),
     ("config", &config::ROUTER),
     ("nodes", &NODES_ROUTER),
+    ("ping", &ping::ROUTER),
     ("pull", &pull::ROUTER),
     ("reader", &reader::ROUTER),
     ("status", &status::ROUTER),
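The "ping" entry registered above is the world-accessible dummy endpoint from the 0.9.2 changelog. A rough sketch of probing it from the client side, reusing the HttpClient pattern from the examples in this diff (the api2/json/ping path is inferred from the router layout, so treat it as an assumption):

    use anyhow::Error;

    use proxmox_backup::api2::types::Authid;
    use proxmox_backup::client::{HttpClient, HttpClientOptions};

    async fn ping() -> Result<(), Error> {
        let auth_id = Authid::root_auth_id();
        let options = HttpClientOptions::new()
            .interactive(true)
            .ticket_cache(true);

        let client = HttpClient::new("localhost", 8007, auth_id, options)?;

        // ("ping", &ping::ROUTER) under the api2 root should map to this path
        let result = client.get("api2/json/ping", None).await?;
        println!("{}", result);
        Ok(())
    }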
@@ -1,6 +1,8 @@
 use anyhow::{bail, format_err, Error};
 
 use serde_json::{json, Value};
+use std::collections::HashMap;
+use std::collections::HashSet;
 
 use proxmox::api::{api, RpcEnvironment, Permission};
 use proxmox::api::router::{Router, SubdirMap};
@@ -10,9 +12,11 @@ use proxmox::{http_err, list_subdirs_api_method};
 use crate::tools::ticket::{self, Empty, Ticket};
 use crate::auth_helpers::*;
 use crate::api2::types::*;
+use crate::tools::{FileLogOptions, FileLogger};
 
+use crate::config::acl as acl_config;
+use crate::config::acl::{PRIVILEGES, PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
 use crate::config::cached_user_info::CachedUserInfo;
-use crate::config::acl::{PRIVILEGES, PRIV_PERMISSIONS_MODIFY};
 
 pub mod user;
 pub mod domain;
@@ -30,7 +34,8 @@ fn authenticate_user(
 ) -> Result<bool, Error> {
     let user_info = CachedUserInfo::new()?;
 
-    if !user_info.is_active_user(&userid) {
+    let auth_id = Authid::from(userid.clone());
+    if !user_info.is_active_auth_id(&auth_id) {
         bail!("user account disabled or expired.");
     }
 
@@ -68,8 +73,7 @@ fn authenticate_user(
                 path_vec.push(part);
             }
         }
 
-        user_info.check_privs(userid, &path_vec, *privilege, false)?;
+        user_info.check_privs(&auth_id, &path_vec, *privilege, false)?;
         return Ok(false);
     }
 }
@@ -138,14 +142,22 @@ fn create_ticket(
     path: Option<String>,
     privs: Option<String>,
     port: Option<u16>,
+    rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
+    let logger_options = FileLogOptions {
+        append: true,
+        prefix_time: true,
+        ..Default::default()
+    };
+    let mut auth_log = FileLogger::new("/var/log/proxmox-backup/api/auth.log", logger_options)?;
 
     match authenticate_user(&username, &password, path, privs, port) {
         Ok(true) => {
             let ticket = Ticket::new("PBS", &username)?.sign(private_auth_key(), None)?;
 
             let token = assemble_csrf_prevention_token(csrf_secret(), &username);
 
             log::info!("successful auth for user '{}'", username);
+            auth_log.log(format!("successful auth for user '{}'", username));
 
             Ok(json!({
                 "username": username,
@@ -157,8 +169,20 @@ fn create_ticket(
             "username": username,
         })),
         Err(err) => {
-            let client_ip = "unknown"; // $rpcenv->get_client_ip() || '';
-            log::error!("authentication failure; rhost={} user={} msg={}", client_ip, username, err.to_string());
+            let client_ip = match rpcenv.get_client_ip().map(|addr| addr.ip()) {
+                Some(ip) => format!("{}", ip),
+                None => "unknown".into(),
+            };
+
+            let msg = format!(
+                "authentication failure; rhost={} user={} msg={}",
+                client_ip,
+                username,
+                err.to_string()
+            );
+            auth_log.log(&msg);
+            log::error!("{}", msg);
+
             Err(http_err!(UNAUTHORIZED, "permission check failed."))
         }
     }
@@ -192,9 +216,10 @@ fn change_password(
 ) -> Result<Value, Error> {
 
     let current_user: Userid = rpcenv
-        .get_user()
+        .get_auth_id()
         .ok_or_else(|| format_err!("unknown user"))?
         .parse()?;
+    let current_auth = Authid::from(current_user.clone());
 
     let mut allowed = userid == current_user;
 
@@ -202,7 +227,7 @@ fn change_password(
 
     if !allowed {
         let user_info = CachedUserInfo::new()?;
-        let privs = user_info.lookup_privs(&current_user, &[]);
+        let privs = user_info.lookup_privs(&current_auth, &[]);
         if (privs & PRIV_PERMISSIONS_MODIFY) != 0 { allowed = true; }
     }
 
@@ -216,6 +241,128 @@ fn change_password(
 
     Ok(Value::Null)
 }
 
+#[api(
+    input: {
+        properties: {
+            auth_id: {
+                type: Authid,
+                optional: true,
+            },
+            path: {
+                schema: ACL_PATH_SCHEMA,
+                optional: true,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Anybody,
+        description: "Requires Sys.Audit on '/access', limited to own privileges otherwise.",
+    },
+    returns: {
+        description: "Map of ACL path to Map of privilege to propagate bit",
+        type: Object,
+        properties: {},
+        additional_properties: true,
+    },
+)]
+/// List permissions of given or currently authenticated user / API token.
+///
+/// Optionally limited to specific path.
+pub fn list_permissions(
+    auth_id: Option<Authid>,
+    path: Option<String>,
+    rpcenv: &dyn RpcEnvironment,
+) -> Result<HashMap<String, HashMap<String, bool>>, Error> {
+    let current_auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+
+    let user_info = CachedUserInfo::new()?;
+    let user_privs = user_info.lookup_privs(&current_auth_id, &["access"]);
+
+    let auth_id = if user_privs & PRIV_SYS_AUDIT == 0 {
+        match auth_id {
+            Some(auth_id) => {
+                if auth_id == current_auth_id {
+                    auth_id
+                } else if auth_id.is_token()
+                    && !current_auth_id.is_token()
+                    && auth_id.user() == current_auth_id.user() {
+                    auth_id
+                } else {
+                    bail!("not allowed to list permissions of {}", auth_id);
+                }
+            },
+            None => current_auth_id,
+        }
+    } else {
+        match auth_id {
+            Some(auth_id) => auth_id,
+            None => current_auth_id,
+        }
+    };
+
+    fn populate_acl_paths(
+        mut paths: HashSet<String>,
+        node: acl_config::AclTreeNode,
+        path: &str
+    ) -> HashSet<String> {
+        for (sub_path, child_node) in node.children {
+            let sub_path = format!("{}/{}", path, &sub_path);
+            paths = populate_acl_paths(paths, child_node, &sub_path);
+            paths.insert(sub_path);
+        }
+        paths
+    }
+
+    let paths = match path {
+        Some(path) => {
+            let mut paths = HashSet::new();
+            paths.insert(path);
+            paths
+        },
+        None => {
+            let mut paths = HashSet::new();
+
+            let (acl_tree, _) = acl_config::config()?;
+            paths = populate_acl_paths(paths, acl_tree.root, "");
+
+            // default paths, returned even if no ACL exists
+            paths.insert("/".to_string());
+            paths.insert("/access".to_string());
+            paths.insert("/datastore".to_string());
+            paths.insert("/remote".to_string());
+            paths.insert("/system".to_string());
+
+            paths
+        },
+    };
+
+    let map = paths
+        .into_iter()
+        .fold(HashMap::new(), |mut map: HashMap<String, HashMap<String, bool>>, path: String| {
+            let split_path = acl_config::split_acl_path(path.as_str());
+            let (privs, propagated_privs) = user_info.lookup_privs_details(&auth_id, &split_path);
+
+            match privs {
+                0 => map, // Don't leak ACL paths where we don't have any privileges
+                _ => {
+                    let priv_map = PRIVILEGES
+                        .iter()
+                        .fold(HashMap::new(), |mut priv_map, (name, value)| {
+                            if value & privs != 0 {
+                                priv_map.insert(name.to_string(), value & propagated_privs != 0);
+                            }
+                            priv_map
+                        });
+
+                    map.insert(path, priv_map);
+                    map
+                },
+            }});
+
+    Ok(map)
+}
+
 #[sortable]
 const SUBDIRS: SubdirMap = &sorted!([
     ("acl", &acl::ROUTER),
@@ -223,6 +370,10 @@ const SUBDIRS: SubdirMap = &sorted!([
         "password", &Router::new()
             .put(&API_METHOD_CHANGE_PASSWORD)
     ),
+    (
+        "permissions", &Router::new()
+            .get(&API_METHOD_LIST_PERMISSIONS)
+    ),
     (
         "ticket", &Router::new()
             .post(&API_METHOD_CREATE_TICKET)
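A hedged sketch of calling the new permissions endpoint registered above; the response is the "map of ACL path to map of privilege to propagate bit" described in the schema, and the client setup mirrors the repository examples (passing GET parameters as a JSON value is an assumption about HttpClient::get):

    use anyhow::Error;
    use serde_json::json;

    use proxmox_backup::api2::types::Authid;
    use proxmox_backup::client::{HttpClient, HttpClientOptions};

    async fn show_permissions() -> Result<(), Error> {
        let auth_id = Authid::root_auth_id();
        let options = HttpClientOptions::new()
            .interactive(true)
            .ticket_cache(true);
        let client = HttpClient::new("localhost", 8007, auth_id, options)?;

        // GET on access/permissions, optionally restricted to one ACL path;
        // an "auth-id" parameter (omitted here) would select another user/token
        let result = client
            .get("api2/json/access/permissions", Some(json!({ "path": "/datastore" })))
            .await?;
        println!("{}", result);
        Ok(())
    }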
@@ -7,6 +7,7 @@ use proxmox::tools::fs::open_file_locked;
 use crate::api2::types::*;
 use crate::config::acl;
 use crate::config::acl::{Role, PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
+use crate::config::cached_user_info::CachedUserInfo;
 
 #[api(
     properties: {
@@ -43,8 +44,23 @@ fn extract_acl_node_data(
     path: &str,
     list: &mut Vec<AclListItem>,
     exact: bool,
+    token_user: &Option<Authid>,
 ) {
+    // tokens can't have tokens, so we can early return
+    if let Some(token_user) = token_user {
+        if token_user.is_token() {
+            return;
+        }
+    }
+
     for (user, roles) in &node.users {
+        if let Some(token_user) = token_user {
+            if !user.is_token()
+                || user.user() != token_user.user() {
+                continue;
+            }
+        }
+
         for (role, propagate) in roles {
             list.push(AclListItem {
                 path: if path.is_empty() { String::from("/") } else { path.to_string() },
@@ -56,6 +72,10 @@ fn extract_acl_node_data(
         }
     }
     for (group, roles) in &node.groups {
+        if let Some(_) = token_user {
+            continue;
+        }
+
         for (role, propagate) in roles {
             list.push(AclListItem {
                 path: if path.is_empty() { String::from("/") } else { path.to_string() },
@@ -71,7 +91,7 @@ fn extract_acl_node_data(
         }
     }
     for (comp, child) in &node.children {
         let new_path = format!("{}/{}", path, comp);
-        extract_acl_node_data(child, &new_path, list, exact);
+        extract_acl_node_data(child, &new_path, list, exact, token_user);
     }
 }
@@ -98,7 +118,8 @@ fn extract_acl_node_data(
         }
     },
     access: {
-        permission: &Permission::Privilege(&["access", "acl"], PRIV_SYS_AUDIT, false),
+        permission: &Permission::Anybody,
+        description: "Returns all ACLs if user has Sys.Audit on '/access/acl', or just the ACLs containing the user's API tokens.",
     },
 )]
 /// Read Access Control List (ACLs).
@@ -107,18 +128,26 @@ pub fn read_acl(
     exact: bool,
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<AclListItem>, Error> {
-    //let auth_user = rpcenv.get_user().unwrap();
+    let auth_id = rpcenv.get_auth_id().unwrap().parse()?;
+
     let user_info = CachedUserInfo::new()?;
 
+    let top_level_privs = user_info.lookup_privs(&auth_id, &["access", "acl"]);
+    let auth_id_filter = if (top_level_privs & PRIV_SYS_AUDIT) == 0 {
+        Some(auth_id)
+    } else {
+        None
+    };
+
     let (mut tree, digest) = acl::config()?;
 
     let mut list: Vec<AclListItem> = Vec::new();
     if let Some(path) = &path {
         if let Some(node) = &tree.find_node(path) {
-            extract_acl_node_data(&node, path, &mut list, exact);
+            extract_acl_node_data(&node, path, &mut list, exact, &auth_id_filter);
         }
     } else {
-        extract_acl_node_data(&tree.root, "", &mut list, exact);
+        extract_acl_node_data(&tree.root, "", &mut list, exact, &auth_id_filter);
     }
 
     rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
@@ -140,9 +169,9 @@ pub fn read_acl(
             optional: true,
             schema: ACL_PROPAGATE_SCHEMA,
         },
-        userid: {
+        auth_id: {
             optional: true,
-            type: Userid,
+            type: Authid,
         },
         group: {
             optional: true,
@@ -160,7 +189,8 @@ pub fn read_acl(
         },
     },
     access: {
-        permission: &Permission::Privilege(&["access", "acl"], PRIV_PERMISSIONS_MODIFY, false),
+        permission: &Permission::Anybody,
+        description: "Requires Permissions.Modify on '/access/acl', limited to updating ACLs of the user's API tokens otherwise."
     },
 )]
 /// Update Access Control List (ACLs).
@@ -168,12 +198,35 @@ pub fn update_acl(
     path: String,
     role: String,
     propagate: Option<bool>,
-    userid: Option<Userid>,
+    auth_id: Option<Authid>,
     group: Option<String>,
     delete: Option<bool>,
    digest: Option<String>,
-    _rpcenv: &mut dyn RpcEnvironment,
+    rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
+    let current_auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+
+    let user_info = CachedUserInfo::new()?;
+
+    let top_level_privs = user_info.lookup_privs(&current_auth_id, &["access", "acl"]);
+    if top_level_privs & PRIV_PERMISSIONS_MODIFY == 0 {
+        if let Some(_) = group {
+            bail!("Unprivileged users are not allowed to create group ACL item.");
+        }
+
+        match &auth_id {
+            Some(auth_id) => {
+                if current_auth_id.is_token() {
+                    bail!("Unprivileged API tokens can't set ACL items.");
+                } else if !auth_id.is_token() {
+                    bail!("Unprivileged users can only set ACL items for API tokens.");
+                } else if auth_id.user() != current_auth_id.user() {
+                    bail!("Unprivileged users can only set ACL items for their own API tokens.");
+                }
+            },
+            None => { bail!("Unprivileged user needs to provide auth_id to update ACL item."); },
+        };
+    }
+
     let _lock = open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
 
@@ -190,11 +243,12 @@ pub fn update_acl(
 
     if let Some(ref _group) = group {
         bail!("parameter 'group' - groups are currently not supported.");
-    } else if let Some(ref userid) = userid {
+    } else if let Some(ref auth_id) = auth_id {
         if !delete { // Note: we allow to delete non-existent users
             let user_cfg = crate::config::user::cached_config()?;
-            if user_cfg.sections.get(&userid.to_string()).is_none() {
-                bail!("no such user.");
+            if user_cfg.sections.get(&auth_id.to_string()).is_none() {
+                bail!(format!("no such {}.",
+                    if auth_id.is_token() { "API token" } else { "user" }));
             }
         }
     } else {
@@ -205,11 +259,11 @@ pub fn update_acl(
         acl::check_acl_path(&path)?;
     }
 
-    if let Some(userid) = userid {
+    if let Some(auth_id) = auth_id {
        if delete {
-            tree.delete_user_role(&path, &userid, &role);
+            tree.delete_user_role(&path, &auth_id, &role);
        } else {
-            tree.insert_user_role(&path, &userid, &role, propagate);
+            tree.insert_user_role(&path, &auth_id, &role, propagate);
        }
     } else if let Some(group) = group {
         if delete {
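To exercise the reworked update_acl handler, an unprivileged user would now pass an auth-id naming one of their own API tokens. A sketch under stated assumptions (token id, datastore and role are illustrative values; the "auth-id" spelling assumes the api-macro's usual underscore-to-dash parameter mapping):

    use anyhow::Error;
    use serde_json::json;

    use proxmox_backup::api2::types::Authid;
    use proxmox_backup::client::{HttpClient, HttpClientOptions};

    async fn grant_token_access() -> Result<(), Error> {
        let auth_id = Authid::root_auth_id();
        let options = HttpClientOptions::new()
            .interactive(true)
            .ticket_cache(true);
        let client = HttpClient::new("localhost", 8007, auth_id, options)?;

        // give one of our own tokens DatastoreBackup on a datastore path
        client
            .put("api2/json/access/acl", Some(json!({
                "path": "/datastore/store2",
                "role": "DatastoreBackup",
                "auth-id": "root@pam!mytoken",
            })))
            .await?;
        Ok(())
    }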
@@ -1,12 +1,16 @@
 use anyhow::{bail, Error};
-use serde_json::Value;
+use serde::{Serialize, Deserialize};
+use serde_json::{json, Value};
+use std::collections::HashMap;
 
 use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
+use proxmox::api::router::SubdirMap;
 use proxmox::api::schema::{Schema, StringSchema};
 use proxmox::tools::fs::open_file_locked;
 
 use crate::api2::types::*;
 use crate::config::user;
+use crate::config::token_shadow;
 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
 use crate::config::cached_user_info::CachedUserInfo;
@@ -16,14 +20,96 @@ pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
     .max_length(64)
     .schema();
 
+#[api(
+    properties: {
+        userid: {
+            type: Userid,
+        },
+        comment: {
+            optional: true,
+            schema: SINGLE_LINE_COMMENT_SCHEMA,
+        },
+        enable: {
+            optional: true,
+            schema: user::ENABLE_USER_SCHEMA,
+        },
+        expire: {
+            optional: true,
+            schema: user::EXPIRE_USER_SCHEMA,
+        },
+        firstname: {
+            optional: true,
+            schema: user::FIRST_NAME_SCHEMA,
+        },
+        lastname: {
+            schema: user::LAST_NAME_SCHEMA,
+            optional: true,
+        },
+        email: {
+            schema: user::EMAIL_SCHEMA,
+            optional: true,
+        },
+        tokens: {
+            type: Array,
+            optional: true,
+            description: "List of user's API tokens.",
+            items: {
+                type: user::ApiToken
+            },
+        },
+    }
+)]
+#[derive(Serialize,Deserialize)]
+/// User properties with added list of ApiTokens
+pub struct UserWithTokens {
+    pub userid: Userid,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub comment: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub enable: Option<bool>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub expire: Option<i64>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub firstname: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub lastname: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub email: Option<String>,
+    #[serde(skip_serializing_if="Vec::is_empty")]
+    pub tokens: Vec<user::ApiToken>,
+}
+
+impl UserWithTokens {
+    fn new(user: user::User) -> Self {
+        Self {
+            userid: user.userid,
+            comment: user.comment,
+            enable: user.enable,
+            expire: user.expire,
+            firstname: user.firstname,
+            lastname: user.lastname,
+            email: user.email,
+            tokens: Vec::new(),
+        }
+    }
+}
+
+
 #[api(
     input: {
-        properties: {},
+        properties: {
+            include_tokens: {
+                type: bool,
+                description: "Include user's API tokens in returned list.",
+                optional: true,
+                default: false,
+            },
+        },
     },
     returns: {
         description: "List users (with config digest).",
         type: Array,
-        items: { type: user::User },
+        items: { type: UserWithTokens },
     },
     access: {
         permission: &Permission::Anybody,
@@ -32,28 +118,60 @@ pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
 )]
 /// List users
 pub fn list_users(
-    _param: Value,
+    include_tokens: bool,
     _info: &ApiMethod,
     mut rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Vec<user::User>, Error> {
+) -> Result<Vec<UserWithTokens>, Error> {
 
     let (config, digest) = user::config()?;
 
-    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+    // intentionally user only for now
+    let userid: Userid = rpcenv.get_auth_id().unwrap().parse()?;
+    let auth_id = Authid::from(userid.clone());
 
     let user_info = CachedUserInfo::new()?;
 
-    let top_level_privs = user_info.lookup_privs(&userid, &["access", "users"]);
+    let top_level_privs = user_info.lookup_privs(&auth_id, &["access", "users"]);
     let top_level_allowed = (top_level_privs & PRIV_SYS_AUDIT) != 0;
 
     let filter_by_privs = |user: &user::User| {
         top_level_allowed || user.userid == userid
     };
 
-
     let list:Vec<user::User> = config.convert_to_typed_array("user")?;
 
     rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
 
-    Ok(list.into_iter().filter(filter_by_privs).collect())
+    let iter = list.into_iter().filter(filter_by_privs);
+    let list = if include_tokens {
+        let tokens: Vec<user::ApiToken> = config.convert_to_typed_array("token")?;
+        let mut user_to_tokens = tokens
+            .into_iter()
+            .fold(
+                HashMap::new(),
+                |mut map: HashMap<Userid, Vec<user::ApiToken>>, token: user::ApiToken| {
+                    if token.tokenid.is_token() {
+                        map
+                            .entry(token.tokenid.user().clone())
+                            .or_default()
+                            .push(token);
+                    }
+                    map
+                });
+        iter
+            .map(|user: user::User| {
+                let mut user = UserWithTokens::new(user);
+                user.tokens = user_to_tokens.remove(&user.userid).unwrap_or_default();
+                user
+            })
+            .collect()
+    } else {
+        iter.map(|user: user::User| UserWithTokens::new(user))
+            .collect()
+    };
+
+    Ok(list)
 }
 
 #[api(
@@ -304,12 +422,340 @@ pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error>
     Ok(())
 }
 
-const ITEM_ROUTER: Router = Router::new()
+#[api(
+    input: {
+        properties: {
+            userid: {
+                type: Userid,
+            },
+            tokenname: {
+                type: Tokenname,
+            },
+        },
+    },
+    returns: {
+        description: "Get API token metadata (with config digest).",
+        type: user::ApiToken,
+    },
+    access: {
+        permission: &Permission::Or(&[
+            &Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
+            &Permission::UserParam("userid"),
+        ]),
+    },
+)]
+/// Read user's API token metadata
+pub fn read_token(
+    userid: Userid,
+    tokenname: Tokenname,
+    _info: &ApiMethod,
+    mut rpcenv: &mut dyn RpcEnvironment,
+) -> Result<user::ApiToken, Error> {
+
+    let (config, digest) = user::config()?;
+
+    let tokenid = Authid::from((userid, Some(tokenname)));
+
+    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+
+    config.lookup("token", &tokenid.to_string())
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            userid: {
+                type: Userid,
+            },
+            tokenname: {
+                type: Tokenname,
+            },
+            comment: {
+                optional: true,
+                schema: SINGLE_LINE_COMMENT_SCHEMA,
+            },
+            enable: {
+                schema: user::ENABLE_USER_SCHEMA,
+                optional: true,
+            },
+            expire: {
+                schema: user::EXPIRE_USER_SCHEMA,
+                optional: true,
+            },
+            digest: {
+                optional: true,
+                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Or(&[
+            &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
+            &Permission::UserParam("userid"),
+        ]),
+    },
+    returns: {
+        description: "API token identifier + generated secret.",
+        properties: {
+            value: {
+                type: String,
+                description: "The API token secret",
+            },
+            tokenid: {
+                type: String,
+                description: "The API token identifier",
+            },
+        },
+    },
+)]
+/// Generate a new API token with given metadata
+pub fn generate_token(
+    userid: Userid,
+    tokenname: Tokenname,
+    comment: Option<String>,
+    enable: Option<bool>,
+    expire: Option<i64>,
+    digest: Option<String>,
+) -> Result<Value, Error> {
+
+    let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
+
+    let (mut config, expected_digest) = user::config()?;
+
+    if let Some(ref digest) = digest {
+        let digest = proxmox::tools::hex_to_digest(digest)?;
+        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
+    }
+
+    let tokenid = Authid::from((userid.clone(), Some(tokenname.clone())));
+    let tokenid_string = tokenid.to_string();
+
+    if let Some(_) = config.sections.get(&tokenid_string) {
+        bail!("token '{}' for user '{}' already exists.", tokenname.as_str(), userid);
+    }
+
+    let secret = format!("{:x}", proxmox::tools::uuid::Uuid::generate());
+    token_shadow::set_secret(&tokenid, &secret)?;
+
+    let token = user::ApiToken {
+        tokenid: tokenid.clone(),
+        comment,
+        enable,
+        expire,
+    };
+
+    config.set_data(&tokenid_string, "token", &token)?;
+
+    user::save_config(&config)?;
+
+    Ok(json!({
+        "tokenid": tokenid_string,
+        "value": secret
+    }))
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            userid: {
+                type: Userid,
+            },
+            tokenname: {
+                type: Tokenname,
+            },
+            comment: {
+                optional: true,
+                schema: SINGLE_LINE_COMMENT_SCHEMA,
+            },
+            enable: {
+                schema: user::ENABLE_USER_SCHEMA,
+                optional: true,
+            },
+            expire: {
+                schema: user::EXPIRE_USER_SCHEMA,
+                optional: true,
+            },
+            digest: {
+                optional: true,
+                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Or(&[
+            &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
+            &Permission::UserParam("userid"),
+        ]),
+    },
+)]
+/// Update user's API token metadata
+pub fn update_token(
+    userid: Userid,
+    tokenname: Tokenname,
+    comment: Option<String>,
+    enable: Option<bool>,
+    expire: Option<i64>,
+    digest: Option<String>,
+) -> Result<(), Error> {
+
+    let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
+
+    let (mut config, expected_digest) = user::config()?;
+
+    if let Some(ref digest) = digest {
+        let digest = proxmox::tools::hex_to_digest(digest)?;
+        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
+    }
+
+    let tokenid = Authid::from((userid, Some(tokenname)));
+    let tokenid_string = tokenid.to_string();
+
+    let mut data: user::ApiToken = config.lookup("token", &tokenid_string)?;
+
+    if let Some(comment) = comment {
+        let comment = comment.trim().to_string();
+        if comment.is_empty() {
+            data.comment = None;
+        } else {
+            data.comment = Some(comment);
+        }
+    }
+
+    if let Some(enable) = enable {
+        data.enable = if enable { None } else { Some(false) };
+    }
+
+    if let Some(expire) = expire {
+        data.expire = if expire > 0 { Some(expire) } else { None };
+    }
+
+    config.set_data(&tokenid_string, "token", &data)?;
+
+    user::save_config(&config)?;
+
+    Ok(())
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            userid: {
+                type: Userid,
+            },
+            tokenname: {
+                type: Tokenname,
+            },
+            digest: {
+                optional: true,
+                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Or(&[
+            &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
+            &Permission::UserParam("userid"),
+        ]),
+    },
+)]
+/// Delete a user's API token
+pub fn delete_token(
+    userid: Userid,
+    tokenname: Tokenname,
+    digest: Option<String>,
+) -> Result<(), Error> {
+
+    let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
+
+    let (mut config, expected_digest) = user::config()?;
+
+    if let Some(ref digest) = digest {
+        let digest = proxmox::tools::hex_to_digest(digest)?;
+        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
+    }
+
+    let tokenid = Authid::from((userid.clone(), Some(tokenname.clone())));
+    let tokenid_string = tokenid.to_string();
+
+    match config.sections.get(&tokenid_string) {
+        Some(_) => { config.sections.remove(&tokenid_string); },
+        None => bail!("token '{}' of user '{}' does not exist.", tokenname.as_str(), userid),
+    }
+
+    token_shadow::delete_secret(&tokenid)?;
+
+    user::save_config(&config)?;
+
+    Ok(())
+}
+
+#[api(
+    input: {
+        properties: {
+            userid: {
+                type: Userid,
+            },
+        },
+    },
+    returns: {
+        description: "List user's API tokens (with config digest).",
+        type: Array,
+        items: { type: user::ApiToken },
+    },
+    access: {
+        permission: &Permission::Or(&[
+            &Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
+            &Permission::UserParam("userid"),
+        ]),
+    },
+)]
+/// List user's API tokens
+pub fn list_tokens(
+    userid: Userid,
+    _info: &ApiMethod,
+    mut rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<user::ApiToken>, Error> {
+
+    let (config, digest) = user::config()?;
+
+    let list:Vec<user::ApiToken> = config.convert_to_typed_array("token")?;
+
+    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+
+    let filter_by_owner = |token: &user::ApiToken| {
+        if token.tokenid.is_token() {
+            token.tokenid.user() == &userid
+        } else {
+            false
+        }
+    };
+
+    Ok(list.into_iter().filter(filter_by_owner).collect())
+}
+
+const TOKEN_ITEM_ROUTER: Router = Router::new()
+    .get(&API_METHOD_READ_TOKEN)
+    .put(&API_METHOD_UPDATE_TOKEN)
+    .post(&API_METHOD_GENERATE_TOKEN)
+    .delete(&API_METHOD_DELETE_TOKEN);
+
+const TOKEN_ROUTER: Router = Router::new()
+    .get(&API_METHOD_LIST_TOKENS)
+    .match_all("tokenname", &TOKEN_ITEM_ROUTER);
+
+const USER_SUBDIRS: SubdirMap = &[
+    ("token", &TOKEN_ROUTER),
+];
+
+const USER_ROUTER: Router = Router::new()
     .get(&API_METHOD_READ_USER)
     .put(&API_METHOD_UPDATE_USER)
-    .delete(&API_METHOD_DELETE_USER);
+    .delete(&API_METHOD_DELETE_USER)
+    .subdirs(USER_SUBDIRS);
 
 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_USERS)
     .post(&API_METHOD_CREATE_USER)
-    .match_all("userid", &ITEM_ROUTER);
+    .match_all("userid", &USER_ROUTER);
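The token routers above hang the new endpoints beneath each user entry. Assuming the usual nesting of these routers (access/users/{userid}/token/{tokenname}; the user and token names below are placeholders), creating a token could look like:

    use anyhow::Error;

    use proxmox_backup::api2::types::Authid;
    use proxmox_backup::client::{HttpClient, HttpClientOptions};

    async fn make_token() -> Result<(), Error> {
        let auth_id = Authid::root_auth_id();
        let options = HttpClientOptions::new()
            .interactive(true)
            .ticket_cache(true);
        let client = HttpClient::new("localhost", 8007, auth_id, options)?;

        // POST maps to generate_token(); the returned JSON carries "tokenid"
        // and the one-time secret in "value"
        let result = client
            .post("api2/json/access/users/root@pam/token/mytoken", None)
            .await?;
        println!("{}", result);
        Ok(())
    }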
@@ -3,10 +3,12 @@ use proxmox::list_subdirs_api_method;
 
 pub mod datastore;
 pub mod sync;
+pub mod verify;
 
 const SUBDIRS: SubdirMap = &[
     ("datastore", &datastore::ROUTER),
-    ("sync", &sync::ROUTER)
+    ("sync", &sync::ROUTER),
+    ("verify", &verify::ROUTER)
 ];
 
 pub const ROUTER: Router = Router::new()
@ -2,6 +2,8 @@ use std::collections::{HashSet, HashMap};
|
||||
use std::ffi::OsStr;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::pin::Pin;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use futures::*;
|
||||
@ -16,10 +18,9 @@ use proxmox::api::{
|
||||
use proxmox::api::router::SubdirMap;
|
||||
use proxmox::api::schema::*;
|
||||
use proxmox::tools::fs::{replace_file, CreateOptions};
|
||||
use proxmox::try_block;
|
||||
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
|
||||
|
||||
use pxar::accessor::aio::Accessor;
|
||||
use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
|
||||
use pxar::EntryKind;
|
||||
|
||||
use crate::api2::types::*;
|
||||
@ -29,7 +30,12 @@ use crate::config::datastore;
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
|
||||
use crate::server::WorkerTask;
|
||||
use crate::tools::{self, AsyncReaderStream, WrappedReaderStream};
|
||||
use crate::tools::{
|
||||
self,
|
||||
zip::{ZipEncoder, ZipEntry},
|
||||
AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
|
||||
};
|
||||
|
||||
use crate::config::acl::{
|
||||
PRIV_DATASTORE_AUDIT,
|
||||
PRIV_DATASTORE_MODIFY,
|
||||
@ -38,14 +44,30 @@ use crate::config::acl::{
|
||||
PRIV_DATASTORE_BACKUP,
|
||||
};
|
||||
|
||||
fn check_backup_owner(
|
||||
fn check_priv_or_backup_owner(
|
||||
store: &DataStore,
|
||||
group: &BackupGroup,
|
||||
userid: &Userid,
|
||||
auth_id: &Authid,
|
||||
required_privs: u64,
|
||||
) -> Result<(), Error> {
|
||||
let owner = store.get_owner(group)?;
|
||||
if &owner != userid {
|
||||
bail!("backup owner check failed ({} != {})", userid, owner);
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]);
|
||||
|
||||
if privs & required_privs == 0 {
|
||||
let owner = store.get_owner(group)?;
|
||||
check_backup_owner(&owner, auth_id)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn check_backup_owner(
|
||||
owner: &Authid,
|
||||
auth_id: &Authid,
|
||||
) -> Result<(), Error> {
|
||||
let correct_owner = owner == auth_id
|
||||
|| (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id);
|
||||
if !correct_owner {
|
||||
bail!("backup owner check failed ({} != {})", auth_id, owner);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@ -143,9 +165,9 @@ fn list_groups(
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Vec<GroupListItem>, Error> {
|
||||
|
||||
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
|
||||
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
|
||||
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
@ -165,8 +187,8 @@ fn list_groups(
|
||||
|
||||
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
|
||||
let owner = datastore.get_owner(group)?;
|
||||
if !list_all {
|
||||
if owner != userid { continue; }
|
||||
if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let result_item = GroupListItem {
|
||||
@ -224,16 +246,12 @@ pub fn list_snapshot_files(
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Vec<BackupContent>, Error> {
|
||||
|
||||
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
|
||||
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
|
||||
check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?;
|
||||
|
||||
let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
|
||||
|
||||
@ -276,16 +294,12 @@ fn delete_snapshot(
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
|
||||
check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
|
||||
|
||||
datastore.remove_backup_dir(&snapshot, false)?;
|
||||
|
||||
@ -332,9 +346,9 @@ pub fn list_snapshots (
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Vec<SnapshotListItem>, Error> {
|
||||
|
||||
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
|
||||
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
|
||||
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
@ -356,8 +370,8 @@ pub fn list_snapshots (
|
||||
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
|
||||
let owner = datastore.get_owner(group)?;
|
||||
|
||||
if !list_all {
|
||||
if owner != userid { continue; }
|
||||
if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut size = None;
|
||||
@ -417,6 +431,53 @@ pub fn list_snapshots (
|
||||
Ok(snapshots)
|
||||
}
|
||||
|
||||
fn get_snapshots_count(store: &DataStore) -> Result<Counts, Error> {
|
||||
let base_path = store.base_path();
|
||||
let backup_list = BackupInfo::list_backups(&base_path)?;
|
||||
let mut groups = HashSet::new();
|
||||
|
||||
let mut result = Counts {
|
||||
ct: None,
|
||||
host: None,
|
||||
vm: None,
|
||||
other: None,
|
||||
};
|
||||
|
||||
for info in backup_list {
|
||||
let group = info.backup_dir.group();
|
||||
|
||||
let id = group.backup_id();
|
||||
let backup_type = group.backup_type();
|
||||
|
||||
let mut new_id = false;
|
||||
|
||||
if groups.insert(format!("{}-{}", &backup_type, &id)) {
|
||||
new_id = true;
|
||||
}
|
||||
|
||||
let mut counts = match backup_type {
|
||||
"ct" => result.ct.take().unwrap_or(Default::default()),
|
||||
"host" => result.host.take().unwrap_or(Default::default()),
|
||||
"vm" => result.vm.take().unwrap_or(Default::default()),
|
||||
_ => result.other.take().unwrap_or(Default::default()),
|
||||
};
|
||||
|
||||
counts.snapshots += 1;
|
||||
if new_id {
|
||||
counts.groups +=1;
|
||||
}
|
||||
|
||||
match backup_type {
|
||||
"ct" => result.ct = Some(counts),
|
||||
"host" => result.host = Some(counts),
|
||||
"vm" => result.vm = Some(counts),
|
||||
_ => result.other = Some(counts),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}

#[api(
input: {
properties: {
@@ -426,7 +487,7 @@ pub fn list_snapshots (
},
},
returns: {
type: StorageStatus,
type: DataStoreStatus,
},
access: {
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
@@ -437,9 +498,19 @@ pub fn status(
store: String,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<StorageStatus, Error> {
) -> Result<DataStoreStatus, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
crate::tools::disks::disk_usage(&datastore.base_path())
let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
let counts = get_snapshots_count(&datastore)?;
let gc_status = datastore.last_gc_status();

Ok(DataStoreStatus {
total: storage.total,
used: storage.used,
avail: storage.avail,
gc_status,
counts,
})
}

#[api(
@@ -486,17 +557,20 @@ pub fn verify(

let mut backup_dir = None;
let mut backup_group = None;
let mut worker_type = "verify";

match (backup_type, backup_id, backup_time) {
(Some(backup_type), Some(backup_id), Some(backup_time)) => {
worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
backup_dir = Some(dir);
worker_type = "verify_snapshot";
}
(Some(backup_type), Some(backup_id), None) => {
worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
let group = BackupGroup::new(backup_type, backup_id);
backup_group = Some(group);
worker_type = "verify_group";
}
(None, None, None) => {
worker_id = store.clone();
@@ -504,13 +578,13 @@ pub fn verify(
_ => bail!("parameters do not specify a backup group or snapshot"),
}

let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

let upid_str = WorkerTask::new_thread(
"verify",
worker_type,
Some(worker_id.clone()),
userid,
auth_id,
to_stdout,
move |worker| {
let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
@@ -525,6 +599,7 @@ pub fn verify(
corrupt_chunks,
worker.clone(),
worker.upid().clone(),
None,
)? {
res.push(backup_dir.to_string());
}
@@ -538,10 +613,11 @@ pub fn verify(
None,
worker.clone(),
worker.upid(),
None,
)?;
failed_dirs
} else {
verify_all_backups(datastore, worker.clone(), worker.upid())?
verify_all_backups(datastore, worker.clone(), worker.upid(), None)?
};
if failed_dirs.len() > 0 {
worker.log("Failed to verify following snapshots:");
@@ -637,9 +713,7 @@ fn prune(
let backup_type = tools::required_string_param(&param, "backup-type")?;
let backup_id = tools::required_string_param(&param, "backup-id")?;

let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

let dry_run = param["dry-run"].as_bool().unwrap_or(false);

@@ -647,8 +721,7 @@ fn prune(

let datastore = DataStore::lookup_datastore(&store)?;

let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
if !allowed { check_backup_owner(&datastore, &group, &userid)?; }
check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

let prune_options = PruneOptions {
keep_last: param["keep-last"].as_u64(),
@@ -659,7 +732,7 @@ fn prune(
keep_yearly: param["keep-yearly"].as_u64(),
};

let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);

let mut prune_result = Vec::new();

@@ -690,55 +763,54 @@ fn prune(

// We use a WorkerTask just to have a task log, but run synchronously
let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;
let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?;

let result = try_block! {
if keep_all {
worker.log("No prune selection - keeping all files.");
} else {
worker.log(format!("retention options: {}", prune_options.cli_options_string()));
worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
store, backup_type, backup_id));
}
if keep_all {
worker.log("No prune selection - keeping all files.");
} else {
worker.log(format!("retention options: {}", prune_options.cli_options_string()));
worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
store, backup_type, backup_id));
}

for (info, mut keep) in prune_info {
if keep_all { keep = true; }
for (info, mut keep) in prune_info {
if keep_all { keep = true; }

let backup_time = info.backup_dir.backup_time();
let timestamp = info.backup_dir.backup_time_string();
let group = info.backup_dir.group();
let backup_time = info.backup_dir.backup_time();
let timestamp = info.backup_dir.backup_time_string();
let group = info.backup_dir.group();

let msg = format!(
"{}/{}/{} {}",
group.backup_type(),
group.backup_id(),
timestamp,
if keep { "keep" } else { "remove" },
);
let msg = format!(
"{}/{}/{} {}",
group.backup_type(),
group.backup_id(),
timestamp,
if keep { "keep" } else { "remove" },
);

worker.log(msg);
worker.log(msg);

prune_result.push(json!({
"backup-type": group.backup_type(),
"backup-id": group.backup_id(),
"backup-time": backup_time,
"keep": keep,
}));
prune_result.push(json!({
"backup-type": group.backup_type(),
"backup-id": group.backup_id(),
"backup-time": backup_time,
"keep": keep,
}));

if !(dry_run || keep) {
datastore.remove_backup_dir(&info.backup_dir, true)?;
if !(dry_run || keep) {
if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
worker.warn(
format!(
"failed to remove dir {:?}: {}",
info.backup_dir.relative_path(), err
)
);
}
}
}

Ok(())
};

worker.log_result(&result);

if let Err(err) = result {
bail!("prune failed - {}", err);
};
worker.log_result(&Ok(()));

Ok(json!(prune_result))
}
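
Note the behavior change in the loop above: removal failures are now logged with worker.warn instead of aborting the whole prune task through `?`. A reduced sketch of that fail-soft pattern (std::fs::remove_dir_all stands in for the datastore call):

use std::fs;

fn prune_dirs(dirs: &[&str], dry_run: bool, keep_flags: &[bool]) -> Vec<String> {
    let mut warnings = Vec::new();
    for (dir, &keep) in dirs.iter().zip(keep_flags) {
        if !(dry_run || keep) {
            // collect errors instead of propagating them, so one bad
            // snapshot cannot abort the remaining removals
            if let Err(err) = fs::remove_dir_all(dir) {
                warnings.push(format!("failed to remove dir {:?}: {}", dir, err));
            }
        }
    }
    warnings
}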
@@ -766,6 +838,7 @@ fn start_garbage_collection(
) -> Result<Value, Error> {

let datastore = DataStore::lookup_datastore(&store)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

println!("Starting garbage collection on store {}", store);

@@ -774,7 +847,7 @@ fn start_garbage_collection(

let upid_str = WorkerTask::new_thread(
"garbage_collection",
Some(store.clone()),
Userid::root_userid().clone(),
auth_id.clone(),
to_stdout,
move |worker| {
worker.log(format!("starting garbage collection on store {}", store));
@@ -844,13 +917,13 @@ fn get_datastore_list(

let (config, _digest) = datastore::config()?;

let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;

let mut list = Vec::new();

for (store, (_, data)) in &config.sections {
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
if allowed {
let mut entry = json!({ "store": store });
@@ -895,9 +968,7 @@ fn download_file(
let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(store)?;

let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

@@ -907,8 +978,7 @@ fn download_file(

let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

@@ -968,9 +1038,7 @@ fn download_file_decoded(
let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(store)?;

let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

@@ -980,8 +1048,7 @@ fn download_file_decoded(

let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
for file in files {
@@ -1093,8 +1160,9 @@ fn upload_backup_log(

let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

let userid: Userid = rpcenv.get_user().unwrap().parse()?;
check_backup_owner(&datastore, backup_dir.group(), &userid)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let owner = datastore.get_owner(backup_dir.group())?;
check_backup_owner(&owner, &auth_id)?;

let mut path = datastore.base_path();
path.push(backup_dir.relative_path());
@@ -1163,14 +1231,11 @@ fn catalog(
) -> Result<Value, Error> {
let datastore = DataStore::lookup_datastore(&store)?;

let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

let file_name = CATALOG_NAME;

@@ -1243,6 +1308,66 @@ fn catalog(
Ok(res.into())
}

fn recurse_files<'a, T, W>(
zip: &'a mut ZipEncoder<W>,
decoder: &'a mut Accessor<T>,
prefix: &'a Path,
file: FileEntry<T>,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
where
T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
Box::pin(async move {
let metadata = file.entry().metadata();
let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();

match file.kind() {
EntryKind::File { .. } => {
let entry = ZipEntry::new(
path,
metadata.stat.mtime.secs,
metadata.stat.mode as u16,
true,
);
zip.add_entry(entry, Some(file.contents().await?))
.await
.map_err(|err| format_err!("could not send file entry: {}", err))?;
}
EntryKind::Hardlink(_) => {
let realfile = decoder.follow_hardlink(&file).await?;
let entry = ZipEntry::new(
path,
metadata.stat.mtime.secs,
metadata.stat.mode as u16,
true,
);
zip.add_entry(entry, Some(realfile.contents().await?))
.await
.map_err(|err| format_err!("could not send file entry: {}", err))?;
}
EntryKind::Directory => {
let dir = file.enter_directory().await?;
let mut readdir = dir.read_dir();
let entry = ZipEntry::new(
path,
metadata.stat.mtime.secs,
metadata.stat.mode as u16,
false,
);
zip.add_entry::<FileContents<T>>(entry, None).await?;
while let Some(entry) = readdir.next().await {
let entry = entry?.decode_entry().await?;
recurse_files(zip, decoder, prefix, entry).await?;
}
}
_ => {} // ignore all else
};

Ok(())
})
}
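
recurse_files returns a Pin<Box<dyn Future>> because an async function cannot recurse directly: its anonymous future type would have to contain itself and would be infinitely sized. A minimal sketch of the same boxing technique, independent of the pxar and zip types:

use std::future::Future;
use std::pin::Pin;

fn visit(depth: u32) -> Pin<Box<dyn Future<Output = u32> + Send>> {
    Box::pin(async move {
        if depth == 0 {
            return 0;
        }
        // the recursive call goes through a Box, so the future has a known size
        1 + visit(depth - 1).await
    })
}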

#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
&ApiHandler::AsyncHttp(&pxar_file_download),
@@ -1274,9 +1399,7 @@ fn pxar_file_download(
let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(&store)?;

let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

let filepath = tools::required_string_param(&param, "filepath")?.to_owned();

@@ -1286,8 +1409,7 @@ fn pxar_file_download(

let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

let mut components = base64::decode(&filepath)?;
if components.len() > 0 && components[0] == '/' as u8 {
@@ -1325,23 +1447,55 @@ fn pxar_file_download(
.lookup(OsStr::from_bytes(file_path)).await?
.ok_or(format_err!("error opening '{:?}'", file_path))?;

let file = match file.kind() {
EntryKind::File { .. } => file,
EntryKind::Hardlink(_) => {
decoder.follow_hardlink(&file).await?
},
// TODO symlink
let body = match file.kind() {
EntryKind::File { .. } => Body::wrap_stream(
AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
err
}),
),
EntryKind::Hardlink(_) => Body::wrap_stream(
AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
.map_err(move |err| {
eprintln!(
"error during streaming of hardlink '{:?}' - {}",
filepath, err
);
err
}),
),
EntryKind::Directory => {
let (sender, receiver) = tokio::sync::mpsc::channel(100);
let mut prefix = PathBuf::new();
let mut components = file.entry().path().components();
components.next_back(); // discard the last component
for comp in components {
prefix.push(comp);
}

let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);

crate::server::spawn_internal_task(async move {
let mut zipencoder = ZipEncoder::new(channelwriter);
let mut decoder = decoder;
recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
.await
.map_err(|err| eprintln!("error during creation of zip: {}", err))?;

zipencoder
.finish()
.await
.map_err(|err| eprintln!("error during finishing of zip: {}", err))
});

Body::wrap_stream(receiver.map_err(move |err| {
eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
err
}))
}
other => bail!("cannot download file of type {:?}", other),
};

let body = Body::wrap_stream(
AsyncReaderStream::new(file.contents().await?)
.map_err(move |err| {
eprintln!("error during streaming of '{:?}' - {}", filepath, err);
err
})
);

// fixme: set other headers ?
Ok(Response::builder()
.status(StatusCode::OK)

@@ -1421,18 +1575,14 @@ fn get_notes(
) -> Result<String, Error> {
let datastore = DataStore::lookup_datastore(&store)?;

let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

let manifest = datastore.load_manifest_json(&backup_dir)?;
let (manifest, _) = datastore.load_manifest(&backup_dir)?;

let notes = manifest["unprotected"]["notes"]
let notes = manifest.unprotected["notes"]
.as_str()
.unwrap_or("");

@@ -1474,26 +1624,20 @@ fn set_notes(
) -> Result<(), Error> {
let datastore = DataStore::lookup_datastore(&store)?;

let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

let mut manifest = datastore.load_manifest_json(&backup_dir)?;

manifest["unprotected"]["notes"] = notes.into();

datastore.store_manifest(&backup_dir, manifest)?;
datastore.update_manifest(&backup_dir, |manifest| {
manifest.unprotected["notes"] = notes.into();
}).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

Ok(())
}

#[api(
input: {
input: {
properties: {
store: {
schema: DATASTORE_SCHEMA,
@@ -1505,31 +1649,83 @@ fn set_notes(
schema: BACKUP_ID_SCHEMA,
},
"new-owner": {
type: Userid,
type: Authid,
},
},
},
access: {
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
},
},
access: {
permission: &Permission::Anybody,
description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup"
},
)]
/// Change owner of a backup group
fn set_backup_owner(
store: String,
backup_type: String,
backup_id: String,
new_owner: Userid,
_rpcenv: &mut dyn RpcEnvironment,
new_owner: Authid,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

let datastore = DataStore::lookup_datastore(&store)?;

let backup_group = BackupGroup::new(backup_type, backup_id);

let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

let user_info = CachedUserInfo::new()?;

if !user_info.is_active_user(&new_owner) {
bail!("user '{}' is inactive or non-existent", new_owner);
let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
// High-privilege user/token
true
} else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
let owner = datastore.get_owner(&backup_group)?;

match (owner.is_token(), new_owner.is_token()) {
(true, true) => {
// API token to API token, owned by same user
let owner = owner.user();
let new_owner = new_owner.user();
owner == new_owner && Authid::from(owner.clone()) == auth_id
},
(true, false) => {
// API token to API token owner
Authid::from(owner.user().clone()) == auth_id
&& new_owner == auth_id
},
(false, true) => {
// API token owner to API token
owner == auth_id
&& Authid::from(new_owner.user().clone()) == auth_id
},
(false, false) => {
// User to User, not allowed for unprivileged users
false
},
}
} else {
false
};

if !allowed {
return Err(http_err!(UNAUTHORIZED,
"{} does not have permission to change owner of backup group '{}' to {}",
auth_id,
backup_group,
new_owner,
));
}

if !user_info.is_active_auth_id(&new_owner) {
bail!("{} '{}' is inactive or non-existent",
if new_owner.is_token() {
"API token".to_string()
} else {
"user".to_string()
},
new_owner);
}

datastore.set_owner(&backup_group, &new_owner, true)?;
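
The allowed check above encodes a four-case ownership matrix for unprivileged callers: transfers are only permitted between a user and that same user's API tokens. A condensed sketch of the decision, using plain strings instead of the crate's Authid type:

// owner_user/new_user are the backing users; the caller is a plain user here.
fn ownership_change_allowed(
    owner_user: &str, owner_is_token: bool,
    new_user: &str, new_is_token: bool,
    caller_user: &str,
) -> bool {
    match (owner_is_token, new_is_token) {
        // token -> token: both tokens must belong to the caller
        (true, true) => owner_user == new_user && owner_user == caller_user,
        // token -> user: only back to the token's own user
        (true, false) => owner_user == caller_user && new_user == caller_user,
        // user -> token: only to one of the caller's own tokens
        (false, true) => owner_user == caller_user && new_user == caller_user,
        // user -> user transfers always require Datastore.Modify
        (false, false) => false,
    }
}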

@@ -9,13 +9,18 @@ use crate::api2::types::*;
use crate::api2::pull::do_sync_job;
use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
use crate::server::UPID;
use crate::config::jobstate::{Job, JobState};
use crate::server::jobstate::{Job, JobState};
use crate::tools::systemd::time::{
parse_calendar_event, compute_next_event};

#[api(
input: {
properties: {},
properties: {
store: {
schema: DATASTORE_SCHEMA,
optional: true,
},
},
},
returns: {
description: "List configured jobs and their status.",
@@ -25,13 +30,23 @@ use crate::tools::systemd::time::{
)]
/// List all sync jobs
pub fn list_sync_jobs(
store: Option<String>,
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SyncJobStatus>, Error> {

let (config, digest) = sync::config()?;

let mut list: Vec<SyncJobStatus> = config.convert_to_typed_array("sync")?;
let mut list: Vec<SyncJobStatus> = config
.convert_to_typed_array("sync")?
.into_iter()
.filter(|job: &SyncJobStatus| {
if let Some(store) = &store {
&job.store == store
} else {
true
}
}).collect();

for job in &mut list {
let last_state = JobState::load("syncjob", &job.id)
@@ -86,11 +101,11 @@ fn run_sync_job(
let (config, _digest) = sync::config()?;
let sync_job: SyncJobConfig = config.lookup("sync", &id)?;

let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

let job = Job::new("syncjob", &id)?;

let upid_str = do_sync_job(job, sync_job, &userid, None)?;
let upid_str = do_sync_job(job, sync_job, &auth_id, None)?;

Ok(upid_str)
}
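
The listing endpoint above gains an optional store parameter and filters the typed array with it. The same pattern in isolation (JobStatus is a stand-in struct, not the crate type):

struct JobStatus { store: String }

fn filter_jobs(jobs: Vec<JobStatus>, store: Option<&str>) -> Vec<JobStatus> {
    jobs.into_iter()
        .filter(|job| match store {
            // keep only jobs targeting the requested datastore
            Some(store) => job.store == store,
            // no filter given: keep everything
            None => true,
        })
        .collect()
}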

src/api2/admin/verify.rs (new file, 122 lines)
@@ -0,0 +1,122 @@
use anyhow::{format_err, Error};

use proxmox::api::router::SubdirMap;
use proxmox::{list_subdirs_api_method, sortable};
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};

use crate::api2::types::*;
use crate::server::do_verification_job;
use crate::server::jobstate::{Job, JobState};
use crate::config::verify;
use crate::config::verify::{VerificationJobConfig, VerificationJobStatus};
use serde_json::Value;
use crate::tools::systemd::time::{parse_calendar_event, compute_next_event};
use crate::server::UPID;

#[api(
input: {
properties: {
store: {
schema: DATASTORE_SCHEMA,
optional: true,
},
},
},
returns: {
description: "List configured jobs and their status.",
type: Array,
items: { type: verify::VerificationJobStatus },
},
)]
/// List all verification jobs
pub fn list_verification_jobs(
store: Option<String>,
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<VerificationJobStatus>, Error> {

let (config, digest) = verify::config()?;

let mut list: Vec<VerificationJobStatus> = config
.convert_to_typed_array("verification")?
.into_iter()
.filter(|job: &VerificationJobStatus| {
if let Some(store) = &store {
&job.store == store
} else {
true
}
}).collect();

for job in &mut list {
let last_state = JobState::load("verificationjob", &job.id)
.map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;

let (upid, endtime, state, starttime) = match last_state {
JobState::Created { time } => (None, None, None, time),
JobState::Started { upid } => {
let parsed_upid: UPID = upid.parse()?;
(Some(upid), None, None, parsed_upid.starttime)
},
JobState::Finished { upid, state } => {
let parsed_upid: UPID = upid.parse()?;
(Some(upid), Some(state.endtime()), Some(state.to_string()), parsed_upid.starttime)
},
};

job.last_run_upid = upid;
job.last_run_state = state;
job.last_run_endtime = endtime;

let last = job.last_run_endtime.unwrap_or_else(|| starttime);

job.next_run = (|| -> Option<i64> {
let schedule = job.schedule.as_ref()?;
let event = parse_calendar_event(&schedule).ok()?;
// ignore errors
compute_next_event(&event, last, false).unwrap_or_else(|_| None)
})();
}

rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

Ok(list)
}

#[api(
input: {
properties: {
id: {
schema: JOB_ID_SCHEMA,
}
}
}
)]
/// Runs a verification job manually.
fn run_verification_job(
id: String,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let (config, _digest) = verify::config()?;
let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;

let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

let job = Job::new("verificationjob", &id)?;

let upid_str = do_verification_job(job, verification_job, &auth_id, None)?;

Ok(upid_str)
}

#[sortable]
const VERIFICATION_INFO_SUBDIRS: SubdirMap = &[("run", &Router::new().post(&API_METHOD_RUN_VERIFICATION_JOB))];

const VERIFICATION_INFO_ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(VERIFICATION_INFO_SUBDIRS))
.subdirs(VERIFICATION_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_VERIFICATION_JOBS)
.match_all("id", &VERIFICATION_INFO_ROUTER);
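
job.next_run above is computed with an immediately invoked closure so that every missing piece (no schedule, unparsable calendar event) short-circuits to None through the ? operator. The same shape with hypothetical stand-ins for parse_calendar_event and compute_next_event:

struct Event;

fn parse_event(_spec: &str) -> Result<Event, ()> { Ok(Event) }                  // assumed parser
fn compute_next(_event: &Event, last: i64) -> Option<i64> { Some(last + 3600) } // assumed helper

fn next_run(schedule: Option<&str>, last: i64) -> Option<i64> {
    (|| -> Option<i64> {
        let schedule = schedule?;                 // no schedule configured -> None
        let event = parse_event(schedule).ok()?;  // parse failure -> None
        compute_next(&event, last)                // may itself yield None
    })()
}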

@@ -16,7 +16,7 @@ use crate::backup::*;
use crate::api2::types::*;
use crate::config::acl::PRIV_DATASTORE_BACKUP;
use crate::config::cached_user_info::CachedUserInfo;
use crate::tools::fs::lock_dir_noblock;
use crate::tools::fs::lock_dir_noblock_shared;

mod environment;
use environment::*;
@@ -59,12 +59,12 @@ async move {
let debug = param["debug"].as_bool().unwrap_or(false);
let benchmark = param["benchmark"].as_bool().unwrap_or(false);

let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

let store = tools::required_string_param(&param, "store")?.to_owned();

let user_info = CachedUserInfo::new()?;
user_info.check_privs(&userid, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
user_info.check_privs(&auth_id, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;

let datastore = DataStore::lookup_datastore(&store)?;

@@ -86,7 +86,7 @@ async move {
bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
}

let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);

let env_type = rpcenv.env_type();

@@ -105,12 +105,15 @@ async move {
};

// lock backup group to only allow one backup per group at a time
let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &userid)?;
let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &auth_id)?;

// permission check
if owner != userid && worker_type != "benchmark" {
let correct_owner = owner == auth_id
|| (owner.is_token()
&& Authid::from(owner.user().clone()) == auth_id);
if !correct_owner && worker_type != "benchmark" {
// only the owner is allowed to create additional snapshots
bail!("backup owner check failed ({} != {})", userid, owner);
bail!("backup owner check failed ({} != {})", auth_id, owner);
}

let last_backup = {
@@ -144,18 +147,18 @@ async move {

// lock last snapshot to prevent forgetting/pruning it during backup
let full_path = datastore.snapshot_path(&last.backup_dir);
Some(lock_dir_noblock(&full_path, "snapshot", "base snapshot is already locked by another operation")?)
Some(lock_dir_noblock_shared(&full_path, "snapshot", "base snapshot is already locked by another operation")?)
} else {
None
};

let (path, is_new, _snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
if !is_new { bail!("backup directory already exists."); }

WorkerTask::spawn(worker_type, Some(worker_id), userid.clone(), true, move |worker| {
WorkerTask::spawn(worker_type, Some(worker_id), auth_id.clone(), true, move |worker| {
let mut env = BackupEnvironment::new(
env_type, userid, worker.clone(), datastore, backup_dir);
env_type, auth_id, worker.clone(), datastore, backup_dir);

env.debug = debug;
env.last_backup = last_backup;
@@ -182,8 +185,22 @@ async move {
http.http2_initial_connection_window_size(window_size);
http.http2_max_frame_size(4*1024*1024);

let env3 = env2.clone();
http.serve_connection(conn, service)
.map_err(Error::from)
.map(move |result| {
match result {
Err(err) => {
// Avoid Transport endpoint is not connected (os error 107)
// fixme: find a better way to test for that error
if err.to_string().starts_with("connection error") && env3.finished() {
Ok(())
} else {
Err(Error::from(err))
}
}
Ok(()) => Ok(()),
}
})
});
let mut abort_future = abort_future
.map(|_| Err(format_err!("task aborted")));
@@ -191,7 +208,7 @@ async move {
async move {
// keep flock until task ends
let _group_guard = _group_guard;
let _snap_guard = _snap_guard;
let snap_guard = snap_guard;
let _last_guard = _last_guard;

let res = select!{
@@ -203,20 +220,32 @@ async move {
tools::runtime::block_in_place(|| env.remove_backup())?;
return Ok(());
}

let verify = |env: BackupEnvironment| {
if let Err(err) = env.verify_after_complete(snap_guard) {
env.log(format!(
"backup finished, but starting the requested verify task failed: {}",
err
));
}
};

match (res, env.ensure_finished()) {
(Ok(_), Ok(())) => {
env.log("backup finished successfully");
verify(env);
Ok(())
},
(Err(err), Ok(())) => {
// ignore errors after finish
env.log(format!("backup had errors but finished: {}", err));
verify(env);
Ok(())
},
(Ok(_), Err(err)) => {
env.log(format!("backup ended and finish failed: {}", err));
env.log("removing unfinished backup");
env.remove_backup()?;
tools::runtime::block_in_place(|| env.remove_backup())?;
Err(err)
},
(Err(err), Err(_)) => {

@@ -1,6 +1,7 @@
use anyhow::{bail, format_err, Error};
use std::sync::{Arc, Mutex};
use std::collections::HashMap;
use std::collections::{HashMap, HashSet};
use nix::dir::Dir;

use ::serde::{Serialize};
use serde_json::{json, Value};
@@ -9,7 +10,7 @@ use proxmox::tools::digest_to_hex;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::api::{RpcEnvironment, RpcEnvironmentType};

use crate::api2::types::Userid;
use crate::api2::types::Authid;
use crate::backup::*;
use crate::server::WorkerTask;
use crate::server::formatter::*;
@@ -103,7 +104,7 @@ impl SharedBackupState {
pub struct BackupEnvironment {
env_type: RpcEnvironmentType,
result_attributes: Value,
user: Userid,
auth_id: Authid,
pub debug: bool,
pub formatter: &'static OutputFormatter,
pub worker: Arc<WorkerTask>,
@@ -116,7 +117,7 @@ pub struct BackupEnvironment {
impl BackupEnvironment {
pub fn new(
env_type: RpcEnvironmentType,
user: Userid,
auth_id: Authid,
worker: Arc<WorkerTask>,
datastore: Arc<DataStore>,
backup_dir: BackupDir,
@@ -136,7 +137,7 @@ impl BackupEnvironment {
Self {
result_attributes: json!({}),
env_type,
user,
auth_id,
worker,
datastore,
debug: false,
@@ -472,16 +473,11 @@ impl BackupEnvironment {
bail!("backup does not contain valid files (file count == 0)");
}

// check manifest
let mut manifest = self.datastore.load_manifest_json(&self.backup_dir)
.map_err(|err| format_err!("unable to load manifest blob - {}", err))?;

// check for valid manifest and store stats
let stats = serde_json::to_value(state.backup_stat)?;

manifest["unprotected"]["chunk_upload_stats"] = stats;

self.datastore.store_manifest(&self.backup_dir, manifest)
.map_err(|err| format_err!("unable to store manifest blob - {}", err))?;
self.datastore.update_manifest(&self.backup_dir, |manifest| {
manifest.unprotected["chunk_upload_stats"] = stats;
}).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

if let Some(base) = &self.last_backup {
let path = self.datastore.snapshot_path(&base.backup_dir);
@@ -499,6 +495,55 @@ impl BackupEnvironment {
Ok(())
}

/// If verify-new is set on the datastore, this will run a new verify task
/// for the backup. If not, this will return and also drop the passed lock
/// immediately.
pub fn verify_after_complete(&self, snap_lock: Dir) -> Result<(), Error> {
self.ensure_finished()?;

if !self.datastore.verify_new() {
// no verify requested, do nothing
return Ok(());
}

let worker_id = format!("{}:{}/{}/{:08X}",
self.datastore.name(),
self.backup_dir.group().backup_type(),
self.backup_dir.group().backup_id(),
self.backup_dir.backup_time());

let datastore = self.datastore.clone();
let backup_dir = self.backup_dir.clone();

WorkerTask::new_thread(
"verify",
Some(worker_id),
self.auth_id.clone(),
false,
move |worker| {
worker.log("Automatically verifying newly added snapshot");

let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));

if !verify_backup_dir_with_lock(
datastore,
&backup_dir,
verified_chunks,
corrupt_chunks,
worker.clone(),
worker.upid().clone(),
None,
snap_lock,
)? {
bail!("verification failed - please check the log for details");
}

Ok(())
},
).map(|_| ())
}
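
verify_after_complete takes ownership of the snapshot lock: if verify-new is off it returns early and the lock is dropped, otherwise the lock moves into the spawned worker so the snapshot cannot be pruned mid-verification. A reduced sketch of that gate (File stands in for the nix Dir handle; spawn_verify_task is a hypothetical helper):

use std::fs::File;

fn verify_after_complete(verify_new: bool, snap_lock: File) -> Result<(), String> {
    if !verify_new {
        // no verification requested: the early return drops snap_lock,
        // releasing the snapshot for other operations
        return Ok(());
    }
    // the lock is moved into the background task, keeping the snapshot
    // protected for as long as the verification runs
    spawn_verify_task(snap_lock)
}

fn spawn_verify_task(_lock: File) -> Result<(), String> { Ok(()) } // stand-in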

pub fn log<S: AsRef<str>>(&self, msg: S) {
self.worker.log(msg);
}
@@ -523,6 +568,12 @@ impl BackupEnvironment {
Ok(())
}

/// Return true if the finished flag is set
pub fn finished(&self) -> bool {
let state = self.state.lock().unwrap();
state.finished
}

/// Remove complete backup
pub fn remove_backup(&self) -> Result<(), Error> {
let mut state = self.state.lock().unwrap();
@@ -548,12 +599,12 @@ impl RpcEnvironment for BackupEnvironment {
self.env_type
}

fn set_user(&mut self, _user: Option<String>) {
panic!("unable to change user");
fn set_auth_id(&mut self, _auth_id: Option<String>) {
panic!("unable to change auth_id");
}

fn get_user(&self) -> Option<String> {
Some(self.user.to_string())
fn get_auth_id(&self) -> Option<String> {
Some(self.auth_id.to_string())
}
}

@@ -4,11 +4,13 @@ use proxmox::list_subdirs_api_method;
pub mod datastore;
pub mod remote;
pub mod sync;
pub mod verify;

const SUBDIRS: SubdirMap = &[
("datastore", &datastore::ROUTER),
("remote", &remote::ROUTER),
("sync", &sync::ROUTER),
("verify", &verify::ROUTER)
];

pub const ROUTER: Router = Router::new()
@@ -12,6 +12,7 @@ use crate::backup::*;
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::datastore::{self, DataStoreConfig, DIR_NAME_SCHEMA};
use crate::config::acl::{PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY};
use crate::server::jobstate;

#[api(
input: {
@@ -34,14 +35,14 @@ pub fn list_datastores(

let (config, digest) = datastore::config()?;

let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;

rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

let list: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
let filter_by_privs = |store: &DataStoreConfig| {
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store.name]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store.name]);
(user_privs & PRIV_DATASTORE_AUDIT) != 0
};

@@ -75,10 +76,6 @@ pub fn list_datastores(
optional: true,
schema: PRUNE_SCHEDULE_SCHEMA,
},
"verify-schedule": {
optional: true,
schema: VERIFY_SCHEDULE_SCHEMA,
},
"keep-last": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_LAST,
@@ -131,9 +128,8 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {

datastore::save_config(&config)?;

crate::config::jobstate::create_state_file("prune", &datastore.name)?;
crate::config::jobstate::create_state_file("garbage_collection", &datastore.name)?;
crate::config::jobstate::create_state_file("verify", &datastore.name)?;
jobstate::create_state_file("prune", &datastore.name)?;
jobstate::create_state_file("garbage_collection", &datastore.name)?;

Ok(())
}
@@ -179,8 +175,6 @@ pub enum DeletableProperty {
gc_schedule,
/// Delete the prune job schedule.
prune_schedule,
/// Delete the verify schedule property
verify_schedule,
/// Delete the keep-last property
keep_last,
/// Delete the keep-hourly property
@@ -214,10 +208,6 @@ pub enum DeletableProperty {
optional: true,
schema: PRUNE_SCHEDULE_SCHEMA,
},
"verify-schedule": {
optional: true,
schema: VERIFY_SCHEDULE_SCHEMA,
},
"keep-last": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_LAST,
@@ -266,7 +256,6 @@ pub fn update_datastore(
comment: Option<String>,
gc_schedule: Option<String>,
prune_schedule: Option<String>,
verify_schedule: Option<String>,
keep_last: Option<u64>,
keep_hourly: Option<u64>,
keep_daily: Option<u64>,
@@ -295,7 +284,6 @@ pub fn update_datastore(
DeletableProperty::comment => { data.comment = None; },
DeletableProperty::gc_schedule => { data.gc_schedule = None; },
DeletableProperty::prune_schedule => { data.prune_schedule = None; },
DeletableProperty::verify_schedule => { data.verify_schedule = None; },
DeletableProperty::keep_last => { data.keep_last = None; },
DeletableProperty::keep_hourly => { data.keep_hourly = None; },
DeletableProperty::keep_daily => { data.keep_daily = None; },
@@ -327,12 +315,6 @@ pub fn update_datastore(
data.prune_schedule = prune_schedule;
}

let mut verify_schedule_changed = false;
if verify_schedule.is_some() {
verify_schedule_changed = data.verify_schedule != verify_schedule;
data.verify_schedule = verify_schedule;
}

if keep_last.is_some() { data.keep_last = keep_last; }
if keep_hourly.is_some() { data.keep_hourly = keep_hourly; }
if keep_daily.is_some() { data.keep_daily = keep_daily; }
@@ -347,15 +329,11 @@ pub fn update_datastore(
// we want to reset the statefiles, to avoid an immediate action in some cases
// (e.g. going from monthly to weekly in the second week of the month)
if gc_schedule_changed {
crate::config::jobstate::create_state_file("garbage_collection", &name)?;
jobstate::create_state_file("garbage_collection", &name)?;
}

if prune_schedule_changed {
crate::config::jobstate::create_state_file("prune", &name)?;
}

if verify_schedule_changed {
crate::config::jobstate::create_state_file("verify", &name)?;
jobstate::create_state_file("prune", &name)?;
}

Ok(())
@@ -398,9 +376,8 @@ pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> {
datastore::save_config(&config)?;

// ignore errors
let _ = crate::config::jobstate::remove_state_file("prune", &name);
let _ = crate::config::jobstate::remove_state_file("garbage_collection", &name);
let _ = crate::config::jobstate::remove_state_file("verify", &name);
let _ = jobstate::remove_state_file("prune", &name);
let _ = jobstate::remove_state_file("garbage_collection", &name);

Ok(())
}
@@ -1,7 +1,6 @@
use anyhow::{bail, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
use base64;

use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::tools::fs::open_file_locked;
@@ -67,7 +66,7 @@ pub fn list_remotes(
default: 8007,
},
userid: {
type: Userid,
type: Authid,
},
password: {
schema: remote::REMOTE_PASSWORD_SCHEMA,
@@ -168,7 +167,7 @@ pub enum DeletableProperty {
},
userid: {
optional: true,
type: Userid,
type: Authid,
},
password: {
optional: true,
@@ -202,7 +201,7 @@ pub fn update_remote(
comment: Option<String>,
host: Option<String>,
port: Option<u16>,
userid: Option<Userid>,
userid: Option<Authid>,
password: Option<String>,
fingerprint: Option<String>,
delete: Option<Vec<DeletableProperty>>,

@@ -83,7 +83,7 @@ pub fn create_sync_job(param: Value) -> Result<(), Error> {

sync::save_config(&config)?;

crate::config::jobstate::create_state_file("syncjob", &sync_job.id)?;
crate::server::jobstate::create_state_file("syncjob", &sync_job.id)?;

Ok(())
}
@@ -266,7 +266,7 @@ pub fn delete_sync_job(id: String, digest: Option<String>) -> Result<(), Error>

sync::save_config(&config)?;

crate::config::jobstate::remove_state_file("syncjob", &id)?;
crate::server::jobstate::remove_state_file("syncjob", &id)?;

Ok(())
}
src/api2/config/verify.rs (new file, 274 lines)
@@ -0,0 +1,274 @@
use anyhow::{bail, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};

use proxmox::api::{api, Router, RpcEnvironment};
use proxmox::tools::fs::open_file_locked;

use crate::api2::types::*;
use crate::config::verify::{self, VerificationJobConfig};

#[api(
input: {
properties: {},
},
returns: {
description: "List configured jobs.",
type: Array,
items: { type: verify::VerificationJobConfig },
},
)]
/// List all verification jobs
pub fn list_verification_jobs(
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<VerificationJobConfig>, Error> {

let (config, digest) = verify::config()?;

let list = config.convert_to_typed_array("verification")?;

rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

Ok(list)
}


#[api(
protected: true,
input: {
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
store: {
schema: DATASTORE_SCHEMA,
},
"ignore-verified": {
optional: true,
schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
},
"outdated-after": {
optional: true,
schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
schedule: {
optional: true,
schema: VERIFICATION_SCHEDULE_SCHEMA,
},
}
}
)]
/// Create a new verification job.
pub fn create_verification_job(param: Value) -> Result<(), Error> {

let _lock = open_file_locked(verify::VERIFICATION_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

let verification_job: verify::VerificationJobConfig = serde_json::from_value(param.clone())?;

let (mut config, _digest) = verify::config()?;

if let Some(_) = config.sections.get(&verification_job.id) {
bail!("job '{}' already exists.", verification_job.id);
}

config.set_data(&verification_job.id, "verification", &verification_job)?;

verify::save_config(&config)?;

crate::server::jobstate::create_state_file("verificationjob", &verification_job.id)?;

Ok(())
}

#[api(
input: {
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
},
},
returns: {
description: "The verification job configuration.",
type: verify::VerificationJobConfig,
},
)]
/// Read a verification job configuration.
pub fn read_verification_job(
id: String,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<VerificationJobConfig, Error> {
let (config, digest) = verify::config()?;

let verification_job = config.lookup("verification", &id)?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

Ok(verification_job)
}

#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
/// Deletable property name
pub enum DeletableProperty {
/// Delete the ignore verified property.
IgnoreVerified,
/// Delete the comment property.
Comment,
/// Delete the job schedule.
Schedule,
/// Delete outdated after property.
OutdatedAfter
}

#[api(
protected: true,
input: {
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
store: {
optional: true,
schema: DATASTORE_SCHEMA,
},
"ignore-verified": {
optional: true,
schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
},
"outdated-after": {
optional: true,
schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
schedule: {
optional: true,
schema: VERIFICATION_SCHEDULE_SCHEMA,
},
delete: {
description: "List of properties to delete.",
type: Array,
optional: true,
items: {
type: DeletableProperty,
}
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
)]
/// Update verification job config.
pub fn update_verification_job(
id: String,
store: Option<String>,
ignore_verified: Option<bool>,
outdated_after: Option<i64>,
comment: Option<String>,
schedule: Option<String>,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
) -> Result<(), Error> {

let _lock = open_file_locked(verify::VERIFICATION_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

// pass/compare digest
let (mut config, expected_digest) = verify::config()?;

if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}

let mut data: verify::VerificationJobConfig = config.lookup("verification", &id)?;

if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::IgnoreVerified => { data.ignore_verified = None; },
DeletableProperty::OutdatedAfter => { data.outdated_after = None; },
DeletableProperty::Comment => { data.comment = None; },
DeletableProperty::Schedule => { data.schedule = None; },
}
}
}

if let Some(comment) = comment {
let comment = comment.trim().to_string();
if comment.is_empty() {
data.comment = None;
} else {
data.comment = Some(comment);
}
}

if let Some(store) = store { data.store = store; }

if ignore_verified.is_some() { data.ignore_verified = ignore_verified; }
if outdated_after.is_some() { data.outdated_after = outdated_after; }
if schedule.is_some() { data.schedule = schedule; }

config.set_data(&id, "verification", &data)?;

verify::save_config(&config)?;

Ok(())
}
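
update_verification_job follows the usual digest-guarded update pattern: the client sends the digest of the configuration it last read, and the write is rejected if the on-disk config changed in the meantime. The guard in isolation:

fn guarded_update(client_digest: Option<[u8; 32]>, current_digest: [u8; 32]) -> Result<(), String> {
    if let Some(digest) = client_digest {
        // refuse the write if another user modified the config since it was read
        if digest != current_digest {
            return Err("detected modified configuration file".into());
        }
    }
    // ... apply the changes and save the config here ...
    Ok(())
}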

#[api(
protected: true,
input: {
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
)]
/// Remove a verification job configuration
pub fn delete_verification_job(id: String, digest: Option<String>) -> Result<(), Error> {

let _lock = open_file_locked(verify::VERIFICATION_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

let (mut config, expected_digest) = verify::config()?;

if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}

match config.sections.get(&id) {
Some(_) => { config.sections.remove(&id); },
None => bail!("job '{}' does not exist.", id),
}

verify::save_config(&config)?;

crate::server::jobstate::remove_state_file("verificationjob", &id)?;

Ok(())
}

const ITEM_ROUTER: Router = Router::new()
.get(&API_METHOD_READ_VERIFICATION_JOB)
.put(&API_METHOD_UPDATE_VERIFICATION_JOB)
.delete(&API_METHOD_DELETE_VERIFICATION_JOB);

pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_VERIFICATION_JOBS)
.post(&API_METHOD_CREATE_VERIFICATION_JOB)
.match_all("id", &ITEM_ROUTER);
|
@ -91,10 +91,12 @@ async fn termproxy(
|
||||
cmd: Option<String>,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
// intentionally user only for now
|
||||
let userid: Userid = rpcenv
|
||||
.get_user()
|
||||
.get_auth_id()
|
||||
.ok_or_else(|| format_err!("unknown user"))?
|
||||
.parse()?;
|
||||
let auth_id = Authid::from(userid.clone());
|
||||
|
||||
if userid.realm() != "pam" {
|
||||
bail!("only pam users can use the console");
|
||||
@ -137,7 +139,7 @@ async fn termproxy(
|
||||
let upid = WorkerTask::spawn(
|
||||
"termproxy",
|
||||
None,
|
||||
userid,
|
||||
auth_id,
|
||||
false,
|
||||
move |worker| async move {
|
||||
// move inside the worker so that it survives and does not close the port
|
||||
@ -272,7 +274,8 @@ fn upgrade_to_websocket(
|
||||
rpcenv: Box<dyn RpcEnvironment>,
|
||||
) -> ApiResponseFuture {
|
||||
async move {
|
||||
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
|
||||
// intentionally user only for now
|
||||
let userid: Userid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
let ticket = tools::required_string_param(¶m, "vncticket")?;
|
||||
let port: u16 = tools::required_integer_param(¶m, "port")? as u16;
|
||||
|
||||
|
@ -1,5 +1,7 @@
use std::collections::HashSet;

use apt_pkg_native::Cache;
use anyhow::{Error, bail};
use anyhow::{Error, bail, format_err};
use serde_json::{json, Value};

use proxmox::{list_subdirs_api_method, const_regex};
@ -7,23 +9,23 @@ use proxmox::api::{api, RpcEnvironment, RpcEnvironmentType, Permission};
use proxmox::api::router::{Router, SubdirMap};

use crate::server::WorkerTask;
use crate::tools::http;

use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::api2::types::{APTUpdateInfo, NODE_SCHEMA, Userid, UPID_SCHEMA};
use crate::api2::types::{Authid, APTUpdateInfo, NODE_SCHEMA, UPID_SCHEMA};

const_regex! {
    VERSION_EPOCH_REGEX = r"^\d+:";
    FILENAME_EXTRACT_REGEX = r"^.*/.*?_(.*)_Packages$";
}

// FIXME: Replace with call to 'apt changelog <pkg> --print-uris'. Currently
// not possible as our packages do not have a URI set in their Release file
// FIXME: once the 'changelog' API call switches over to 'apt-get changelog' only,
// consider removing this function entirely, as its value is never used anywhere
// then (widget-toolkit doesn't use the value either)
fn get_changelog_url(
    package: &str,
    filename: &str,
    source_pkg: &str,
    version: &str,
    source_version: &str,
    origin: &str,
    component: &str,
) -> Result<String, Error> {
@ -32,25 +34,24 @@ fn get_changelog_url(
    }

    if origin == "Debian" {
        let source_version = (VERSION_EPOCH_REGEX.regex_obj)().replace_all(source_version, "");

        let prefix = if source_pkg.starts_with("lib") {
            source_pkg.get(0..4)
        } else {
            source_pkg.get(0..1)
        let mut command = std::process::Command::new("apt-get");
        command.arg("changelog");
        command.arg("--print-uris");
        command.arg(package);
        let output = crate::tools::run_command(command, None)?; // format: 'http://foo/bar' package.changelog
        let output = match output.splitn(2, ' ').next() {
            Some(output) => {
                if output.len() < 2 {
                    bail!("invalid output (URI part too short) from 'apt-get changelog --print-uris': {}", output)
                }
                output[1..output.len()-1].to_owned()
            },
            None => bail!("invalid output from 'apt-get changelog --print-uris': {}", output)
        };

        let prefix = match prefix {
            Some(p) => p,
            None => bail!("cannot get starting characters of package name '{}'", package)
        };

        // note: security updates seem to not always upload a changelog for
        // their package version, so this only works *most* of the time
        return Ok(format!("https://metadata.ftp-master.debian.org/changelogs/main/{}/{}/{}_{}_changelog",
                          prefix, source_pkg, source_pkg, source_version));

        return Ok(output);
    } else if origin == "Proxmox" {
        // FIXME: Use above call to 'apt changelog <pkg> --print-uris' as well.
        // Currently not possible as our packages do not have a URI set in their Release file.
        let version = (VERSION_EPOCH_REGEX.regex_obj)().replace_all(version, "");

        let base = match (FILENAME_EXTRACT_REGEX.regex_obj)().captures(filename) {
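The Debian branch now shells out to `apt-get changelog --print-uris` and peels the quoted URI off the first whitespace-separated token. A standalone sketch of just that parsing step, assuming the usual output shape; the function name and sample line are illustrative, not part of the crate:

```rust
// Extract the URI from a line like:  'http://host/pkg.changelog' pkg.changelog
// (the format printed by `apt-get changelog --print-uris`).
fn extract_changelog_uri(output: &str) -> Option<String> {
    let first = output.split_whitespace().next()?; // the quoted URI token
    if first.len() < 2 || !first.starts_with('\'') || !first.ends_with('\'') {
        return None; // too short, or not quoted as expected
    }
    Some(first[1..first.len() - 1].to_owned())
}

fn main() {
    let line = "'http://deb.example.org/foo_1.0_changelog' foo.changelog";
    assert_eq!(
        extract_changelog_uri(line).as_deref(),
        Some("http://deb.example.org/foo_1.0_changelog")
    );
}
```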
@ -71,115 +72,229 @@ fn get_changelog_url(
    bail!("unknown origin ({}) or component ({})", origin, component)
}

fn list_installed_apt_packages<F: Fn(&str, &str, &str) -> bool>(filter: F)
    -> Vec<APTUpdateInfo> {
struct FilterData<'a> {
    // this is version info returned by APT
    installed_version: Option<&'a str>,
    candidate_version: &'a str,

    // this is the version info the filter is supposed to check
    active_version: &'a str,
}

enum PackagePreSelect {
    OnlyInstalled,
    OnlyNew,
    All,
}

fn list_installed_apt_packages<F: Fn(FilterData) -> bool>(
    filter: F,
    only_versions_for: Option<&str>,
) -> Vec<APTUpdateInfo> {

    let mut ret = Vec::new();
    let mut depends = HashSet::new();

    // note: this is not an 'apt update', it just re-reads the cache from disk
    let mut cache = Cache::get_singleton();
    cache.reload();

    let mut cache_iter = cache.iter();
    let mut cache_iter = match only_versions_for {
        Some(name) => cache.find_by_name(name),
        None => cache.iter()
    };

    loop {
        let view = match cache_iter.next() {
            Some(view) => view,
            None => break
        };

        let current_version = match view.current_version() {
            Some(vers) => vers,
            None => continue
        };
        let candidate_version = match view.candidate_version() {
            Some(vers) => vers,
            // if there's no candidate (i.e. no update) get info of currently
            // installed version instead
            None => current_version.clone()
        };
        match cache_iter.next() {
            Some(view) => {
                let di = if only_versions_for.is_some() {
                    query_detailed_info(
                        PackagePreSelect::All,
                        &filter,
                        view,
                        None
                    )
                } else {
                    query_detailed_info(
                        PackagePreSelect::OnlyInstalled,
                        &filter,
                        view,
                        Some(&mut depends)
                    )
                };
                if let Some(info) = di {
                    ret.push(info);
                }

                if only_versions_for.is_some() {
                    break;
                }
            },
            None => {
                drop(cache_iter);
                // also loop through missing dependencies, as they would be installed
                for pkg in depends.iter() {
                    let mut iter = cache.find_by_name(&pkg);
                    let view = match iter.next() {
                        Some(view) => view,
                        None => continue // package not found, ignore
                    };

                    let di = query_detailed_info(
                        PackagePreSelect::OnlyNew,
                        &filter,
                        view,
                        None
                    );
                    if let Some(info) = di {
                        ret.push(info);
                    }
                }
                break;
            }
        }
    }

    return ret;
}
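For reference, a self-contained mirror of the new `FilterData` contract; the struct shape follows the diff, while the `wants_update` helper restates the filter later used by `apt_update_available`, exercised on made-up sample data:

```rust
// Illustrative stand-in for the FilterData passed to the filter closure.
struct FilterData<'a> {
    installed_version: Option<&'a str>,
    candidate_version: &'a str,
    active_version: &'a str,
}

// "Updates available": look at the candidate version, but only when it
// differs from what is currently installed.
fn wants_update(d: FilterData) -> bool {
    d.candidate_version == d.active_version
        && d.installed_version != Some(d.candidate_version)
}

fn main() {
    let upgradeable = FilterData {
        installed_version: Some("0.9.1-1"),
        candidate_version: "0.9.4-1",
        active_version: "0.9.4-1",
    };
    let up_to_date = FilterData {
        installed_version: Some("0.9.4-1"),
        candidate_version: "0.9.4-1",
        active_version: "0.9.4-1",
    };
    assert!(wants_update(upgradeable));
    assert!(!wants_update(up_to_date));
}
```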
fn query_detailed_info<'a, F, V>(
    pre_select: PackagePreSelect,
    filter: F,
    view: V,
    depends: Option<&mut HashSet<String>>,
) -> Option<APTUpdateInfo>
where
    F: Fn(FilterData) -> bool,
    V: std::ops::Deref<Target = apt_pkg_native::sane::PkgView<'a>>
{
    let current_version = view.current_version();
    let candidate_version = view.candidate_version();

    let (current_version, candidate_version) = match pre_select {
        PackagePreSelect::OnlyInstalled => match (current_version, candidate_version) {
            (Some(cur), Some(can)) => (Some(cur), can), // package installed and there is an update
            (Some(cur), None) => (Some(cur.clone()), cur), // package installed and up-to-date
            (None, Some(_)) => return None, // package could be installed
            (None, None) => return None, // broken
        },
        PackagePreSelect::OnlyNew => match (current_version, candidate_version) {
            (Some(_), Some(_)) => return None,
            (Some(_), None) => return None,
            (None, Some(can)) => (None, can),
            (None, None) => return None,
        },
        PackagePreSelect::All => match (current_version, candidate_version) {
            (Some(cur), Some(can)) => (Some(cur), can),
            (Some(cur), None) => (Some(cur.clone()), cur),
            (None, Some(can)) => (None, can),
            (None, None) => return None,
        },
    };

    // get additional information via nested APT 'iterators'
    let mut view_iter = view.versions();
    while let Some(ver) = view_iter.next() {

    let package = view.name();
    if filter(&package, &current_version, &candidate_version) {
        let mut origin_res = "unknown".to_owned();
        let mut section_res = "unknown".to_owned();
        let mut priority_res = "unknown".to_owned();
        let mut change_log_url = "".to_owned();
        let mut short_desc = package.clone();
        let mut long_desc = "".to_owned();
        let version = ver.version();
        let mut origin_res = "unknown".to_owned();
        let mut section_res = "unknown".to_owned();
        let mut priority_res = "unknown".to_owned();
        let mut change_log_url = "".to_owned();
        let mut short_desc = package.clone();
        let mut long_desc = "".to_owned();

        // get additional information via nested APT 'iterators'
        let mut view_iter = view.versions();
        while let Some(ver) = view_iter.next() {
            if ver.version() == candidate_version {
                if let Some(section) = ver.section() {
                    section_res = section;
        let fd = FilterData {
            installed_version: current_version.as_deref(),
            candidate_version: &candidate_version,
            active_version: &version,
        };

        if filter(fd) {
            if let Some(section) = ver.section() {
                section_res = section;
            }

            if let Some(prio) = ver.priority_type() {
                priority_res = prio;
            }

            // assume every package has only one origin file (not
            // origin, but origin *file*, for some reason those seem to
            // be different concepts in APT)
            let mut origin_iter = ver.origin_iter();
            let origin = origin_iter.next();
            if let Some(origin) = origin {

                if let Some(sd) = origin.short_desc() {
                    short_desc = sd;
                }

                if let Some(ld) = origin.long_desc() {
                    long_desc = ld;
                }

                // the package files appear in priority order, meaning
                // the one for the candidate version is first - this is fine
                // however, as the source package should be the same for all
                // versions anyway
                let mut pkg_iter = origin.file();
                let pkg_file = pkg_iter.next();
                if let Some(pkg_file) = pkg_file {
                    if let Some(origin_name) = pkg_file.origin() {
                        origin_res = origin_name;
                    }

                if let Some(prio) = ver.priority_type() {
                    priority_res = prio;
                    let filename = pkg_file.file_name();
                    let component = pkg_file.component();

                    // build changelog URL from gathered information
                    // ignore errors, use empty changelog instead
                    let url = get_changelog_url(&package, &filename,
                        &version, &origin_res, &component);
                    if let Ok(url) = url {
                        change_log_url = url;
                    }

                // assume every package has only one origin file (not
                // origin, but origin *file*, for some reason those seem to
                // be different concepts in APT)
                let mut origin_iter = ver.origin_iter();
                let origin = origin_iter.next();
                if let Some(origin) = origin {

                    if let Some(sd) = origin.short_desc() {
                        short_desc = sd;
                    }

                    if let Some(ld) = origin.long_desc() {
                        long_desc = ld;
                    }

                    // the package files appear in priority order, meaning
                    // the one for the candidate version is first
                    let mut pkg_iter = origin.file();
                    let pkg_file = pkg_iter.next();
                    if let Some(pkg_file) = pkg_file {
                        if let Some(origin_name) = pkg_file.origin() {
                            origin_res = origin_name;
                        }

                        let filename = pkg_file.file_name();
                        let source_pkg = ver.source_package();
                        let source_ver = ver.source_version();
                        let component = pkg_file.component();

                        // build changelog URL from gathered information
                        // ignore errors, use empty changelog instead
                        let url = get_changelog_url(&package, &filename, &source_pkg,
                            &candidate_version, &source_ver, &origin_res, &component);
                        if let Ok(url) = url {
                            change_log_url = url;
                        }
                    }
                }

                break;
            }
        }

        let info = APTUpdateInfo {
        if let Some(depends) = depends {
            let mut dep_iter = ver.dep_iter();
            loop {
                let dep = match dep_iter.next() {
                    Some(dep) if dep.dep_type() != "Depends" => continue,
                    Some(dep) => dep,
                    None => break
                };

                let dep_pkg = dep.target_pkg();
                let name = dep_pkg.name();

                depends.insert(name);
            }
        }

        return Some(APTUpdateInfo {
            package,
            title: short_desc,
            arch: view.arch(),
            description: long_desc,
            change_log_url,
            origin: origin_res,
            version: candidate_version,
            old_version: current_version,
            version: candidate_version.clone(),
            old_version: match current_version {
                Some(vers) => vers,
                None => "".to_owned()
            },
            priority: priority_res,
            section: section_res,
        };
        ret.push(info);
        });
    }
}

    return ret;
    return None;
}

#[api(
@ -201,8 +316,11 @@ fn list_installed_apt_packages<F: Fn(&str, &str, &str) -> bool>(filter: F)
)]
/// List available APT updates
fn apt_update_available(_param: Value) -> Result<Value, Error> {
    let ret = list_installed_apt_packages(|_pkg, cur_ver, can_ver| cur_ver != can_ver);
    Ok(json!(ret))
    let all_upgradeable = list_installed_apt_packages(|data| {
        data.candidate_version == data.active_version &&
        data.installed_version != Some(data.candidate_version)
    }, None);
    Ok(json!(all_upgradeable))
}

#[api(
@ -233,11 +351,11 @@ pub fn apt_update_database(
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
    let quiet = quiet.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_QUIET);

    let upid_str = WorkerTask::new_thread("aptupdate", None, userid, to_stdout, move |worker| {
    let upid_str = WorkerTask::new_thread("aptupdate", None, auth_id, to_stdout, move |worker| {
        if !quiet { worker.log("starting apt-get update") }

        // TODO: set proxy /etc/apt/apt.conf.d/76pbsproxy like PVE
@ -256,7 +374,67 @@ pub fn apt_update_database(
    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            name: {
                description: "Package name to get changelog of.",
                type: String,
            },
            version: {
                description: "Package version to get changelog of. Omit to use candidate version.",
                type: String,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&[], PRIV_SYS_MODIFY, false),
    },
)]
/// Retrieve the changelog of the specified package.
fn apt_get_changelog(
    param: Value,
) -> Result<Value, Error> {

    let name = crate::tools::required_string_param(&param, "name")?.to_owned();
    let version = param["version"].as_str();

    let pkg_info = list_installed_apt_packages(|data| {
        match version {
            Some(version) => version == data.active_version,
            None => data.active_version == data.candidate_version
        }
    }, Some(&name));

    if pkg_info.len() == 0 {
        bail!("Package '{}' not found", name);
    }

    let changelog_url = &pkg_info[0].change_log_url;
    // FIXME: use 'apt-get changelog' for proxmox packages as well, once repo supports it
    if changelog_url.starts_with("http://download.proxmox.com/") {
        let changelog = crate::tools::runtime::block_on(http::get_string(changelog_url))
            .map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
        return Ok(json!(changelog));
    } else {
        let mut command = std::process::Command::new("apt-get");
        command.arg("changelog");
        command.arg("-qq"); // don't display download progress
        command.arg(name);
        let output = crate::tools::run_command(command, None)?;
        return Ok(json!(output));
    }
}

const SUBDIRS: SubdirMap = &[
    ("changelog", &Router::new().get(&API_METHOD_APT_GET_CHANGELOG)),
    ("update", &Router::new()
        .get(&API_METHOD_APT_UPDATE_AVAILABLE)
        .post(&API_METHOD_APT_UPDATE_DATABASE)
@ -13,7 +13,7 @@ use crate::tools::disks::{
};
use crate::server::WorkerTask;

use crate::api2::types::{Userid, UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};
use crate::api2::types::{Authid, UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};

pub mod directory;
pub mod zfs;
@ -140,7 +140,7 @@ pub fn initialize_disk(

    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let info = get_disk_usage_info(&disk, true)?;

@ -149,7 +149,7 @@ pub fn initialize_disk(
    }

    let upid_str = WorkerTask::new_thread(
        "diskinit", Some(disk.clone()), userid, to_stdout, move |worker|
        "diskinit", Some(disk.clone()), auth_id, to_stdout, move |worker|
    {
        worker.log(format!("initialize disk {}", disk));

@ -134,7 +134,7 @@ pub fn create_datastore_disk(

    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let info = get_disk_usage_info(&disk, true)?;

@ -143,7 +143,7 @@ pub fn create_datastore_disk(
    }

    let upid_str = WorkerTask::new_thread(
        "dircreate", Some(name.clone()), userid, to_stdout, move |worker|
        "dircreate", Some(name.clone()), auth_id, to_stdout, move |worker|
    {
        worker.log(format!("create datastore '{}' on disk {}", name, disk));

@ -1,6 +1,6 @@
use anyhow::{bail, Error};
use serde_json::{json, Value};
use ::serde::{Deserialize, Serialize};
use serde::{Deserialize, Serialize};

use proxmox::api::{
    api, Permission, RpcEnvironment, RpcEnvironmentType,
@ -256,7 +256,7 @@ pub fn create_zpool(

    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let add_datastore = add_datastore.unwrap_or(false);

@ -316,7 +316,7 @@ pub fn create_zpool(
    }

    let upid_str = WorkerTask::new_thread(
        "zfscreate", Some(name.clone()), userid, to_stdout, move |worker|
        "zfscreate", Some(name.clone()), auth_id, to_stdout, move |worker|
    {
        worker.log(format!("create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text));

@ -684,9 +684,9 @@ pub async fn reload_network_config(

    network::assert_ifupdown2_installed()?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), userid, true, |_worker| async {
    let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), auth_id, true, |_worker| async {

        let _ = std::fs::rename(network::NETWORK_INTERFACES_NEW_FILENAME, network::NETWORK_INTERFACES_FILENAME);

@ -31,10 +31,8 @@ pub fn create_value_from_rrd(
        } else {
            result.push(json!({ "time": t }));
        }
    } else {
        if let Some(value) = list[index] {
            result[index][name] = value.into();
        }
    } else if let Some(value) = list[index] {
        result[index][name] = value.into();
    }
    t += reso;
}

@ -182,7 +182,7 @@ fn get_service_state(
    Ok(json_service_state(&service, status))
}

fn run_service_command(service: &str, cmd: &str, userid: Userid) -> Result<Value, Error> {
fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Value, Error> {

    let workerid = format!("srv{}", &cmd);

@ -196,7 +196,7 @@ fn run_service_command(service: &str, cmd: &str, userid: Userid) -> Result<Value
    let upid = WorkerTask::new_thread(
        &workerid,
        Some(service.clone()),
        userid,
        auth_id,
        false,
        move |_worker| {

@ -244,11 +244,11 @@ fn start_service(
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    log::info!("starting service {}", service);

    run_service_command(&service, "start", userid)
    run_service_command(&service, "start", auth_id)
}

#[api(
@ -274,11 +274,11 @@ fn stop_service(
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    log::info!("stopping service {}", service);

    run_service_command(&service, "stop", userid)
    run_service_command(&service, "stop", auth_id)
}

#[api(
@ -304,15 +304,15 @@ fn restart_service(
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    log::info!("re-starting service {}", service);

    if &service == "proxmox-backup-proxy" {
        // special case, avoid aborting running tasks
        run_service_command(&service, "reload", userid)
        run_service_command(&service, "reload", auth_id)
    } else {
        run_service_command(&service, "restart", userid)
        run_service_command(&service, "restart", auth_id)
    }
}

@ -339,11 +339,11 @@ fn reload_service(
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    log::info!("reloading service {}", service);

    run_service_command(&service, "reload", userid)
    run_service_command(&service, "reload", auth_id)
}

@ -1,12 +1,69 @@
use anyhow::{Error};
use serde_json::{json, Value};
use anyhow::{Error, format_err, bail};
use serde_json::Value;

use proxmox::api::{api, Router, RpcEnvironment, Permission};

use crate::tools;
use crate::config::acl::PRIV_SYS_AUDIT;
use crate::tools::subscription::{self, SubscriptionStatus, SubscriptionInfo};
use crate::config::acl::{PRIV_SYS_AUDIT,PRIV_SYS_MODIFY};
use crate::config::cached_user_info::CachedUserInfo;
use crate::api2::types::{NODE_SCHEMA, Userid};
use crate::api2::types::{NODE_SCHEMA, Authid};

#[api(
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            force: {
                description: "Always connect to server, even if information in cache is up to date.",
                type: bool,
                optional: true,
                default: false,
            },
        },
    },
    protected: true,
    access: {
        permission: &Permission::Privilege(&["system"], PRIV_SYS_MODIFY, false),
    },
)]
/// Check and update subscription status.
fn check_subscription(
    force: bool,
) -> Result<(), Error> {
    // FIXME: drop once proxmox-api-macro is bumped to >> 5.0.0-1
    let _remove_me = API_METHOD_CHECK_SUBSCRIPTION_PARAM_DEFAULT_FORCE;

    let info = match subscription::read_subscription() {
        Err(err) => bail!("could not read subscription status: {}", err),
        Ok(Some(info)) => info,
        Ok(None) => return Ok(()),
    };

    let server_id = tools::get_hardware_address()?;
    let key = if let Some(key) = info.key {
        // always update apt auth if we have a key to ensure user can access enterprise repo
        subscription::update_apt_auth(Some(key.to_owned()), Some(server_id.to_owned()))?;
        key
    } else {
        String::new()
    };

    if !force && info.status == SubscriptionStatus::ACTIVE {
        let age = proxmox::tools::time::epoch_i64() - info.checktime.unwrap_or(i64::MAX);
        if age < subscription::MAX_LOCAL_KEY_AGE {
            return Ok(());
        }
    }

    let info = subscription::check_subscription(key, server_id)?;

    subscription::write_subscription(info)
        .map_err(|e| format_err!("Error writing updated subscription status - {}", e))?;

    Ok(())
}
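The freshness test above can be exercised in isolation. A minimal sketch, assuming a placeholder value for `MAX_LOCAL_KEY_AGE` (the crate defines the real constant) and plain `std` time instead of `proxmox::tools::time::epoch_i64()`:

```rust
use std::time::{SystemTime, UNIX_EPOCH};

// Assumption: placeholder age limit; the crate's constant may differ.
const MAX_LOCAL_KEY_AGE: i64 = 15 * 24 * 3600;

// Mirrors `epoch - info.checktime.unwrap_or(i64::MAX) < MAX_LOCAL_KEY_AGE`:
// a missing checktime yields a hugely negative age, which still counts as
// "fresh" here, exactly like the diff's unwrap_or(i64::MAX).
fn is_fresh(checktime: Option<i64>) -> bool {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_secs() as i64)
        .unwrap_or(0);
    let age = now - checktime.unwrap_or(i64::MAX);
    age < MAX_LOCAL_KEY_AGE
}

fn main() {
    let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() as i64;
    assert!(is_fresh(Some(now - 300)));                     // checked 5 minutes ago
    assert!(!is_fresh(Some(now - MAX_LOCAL_KEY_AGE - 1)));  // past the age limit
}
```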

#[api(
    input: {
@ -18,24 +75,7 @@ use crate::api2::types::{NODE_SCHEMA, Userid};
    },
    returns: {
        description: "Subscription status.",
        properties: {
            status: {
                type: String,
                description: "'NotFound', 'active' or 'inactive'."
            },
            message: {
                type: String,
                description: "Human readable problem description.",
            },
            serverid: {
                type: String,
                description: "The unique server ID, if permitted to access.",
            },
            url: {
                type: String,
                description: "URL to Web Shop.",
            },
        },
        type: SubscriptionInfo,
    },
    access: {
        permission: &Permission::Anybody,
@ -45,24 +85,95 @@ use crate::api2::types::{NODE_SCHEMA, Userid};
fn get_subscription(
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &[]);
    let server_id = if (user_privs & PRIV_SYS_AUDIT) != 0 {
        tools::get_hardware_address()?
    } else {
        "hidden".to_string()
) -> Result<SubscriptionInfo, Error> {
    let url = "https://www.proxmox.com/en/proxmox-backup-server/pricing";

    let info = match subscription::read_subscription() {
        Err(err) => bail!("could not read subscription status: {}", err),
        Ok(Some(info)) => info,
        Ok(None) => SubscriptionInfo {
            status: SubscriptionStatus::NOTFOUND,
            message: Some("There is no subscription key".into()),
            serverid: Some(tools::get_hardware_address()?),
            url: Some(url.into()),
            ..Default::default()
        },
    };

    let url = "https://www.proxmox.com/en/proxmox-backup-server/pricing";
    Ok(json!({
        "status": "NotFound",
        "message": "There is no subscription key",
        "serverid": server_id,
        "url": url,
    }))
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &[]);

    if (user_privs & PRIV_SYS_AUDIT) == 0 {
        // not enough privileges for full state
        return Ok(SubscriptionInfo {
            status: info.status,
            message: info.message,
            url: info.url,
            ..Default::default()
        });
    };

    Ok(info)
}

#[api(
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            key: {
                description: "Proxmox Backup Server subscription key",
                type: String,
                max_length: 32,
            },
        },
    },
    protected: true,
    access: {
        permission: &Permission::Privilege(&["system"], PRIV_SYS_MODIFY, false),
    },
)]
/// Set a subscription key and check it.
fn set_subscription(
    key: String,
) -> Result<(), Error> {

    let server_id = tools::get_hardware_address()?;

    let info = subscription::check_subscription(key, server_id.to_owned())?;

    subscription::write_subscription(info)
        .map_err(|e| format_err!("Error writing subscription status - {}", e))?;

    Ok(())
}

#[api(
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
        },
    },
    protected: true,
    access: {
        permission: &Permission::Privilege(&["system"], PRIV_SYS_MODIFY, false),
    },
)]
/// Delete subscription info.
fn delete_subscription() -> Result<(), Error> {

    subscription::delete_subscription()
        .map_err(|err| format_err!("Deleting subscription failed: {}", err))?;

    Ok(())
}

pub const ROUTER: Router = Router::new()
    .post(&API_METHOD_CHECK_SUBSCRIPTION)
    .put(&API_METHOD_SET_SUBSCRIPTION)
    .delete(&API_METHOD_DELETE_SUBSCRIPTION)
    .get(&API_METHOD_GET_SUBSCRIPTION);

@ -14,6 +14,16 @@ use crate::server::{self, UPID, TaskState, TaskListInfoIterator};
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::config::cached_user_info::CachedUserInfo;

fn check_task_access(auth_id: &Authid, upid: &UPID) -> Result<(), Error> {
    let task_auth_id = &upid.auth_id;
    if auth_id == task_auth_id
        || (task_auth_id.is_token() && &Authid::from(task_auth_id.user().clone()) == auth_id) {
        Ok(())
    } else {
        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false)
    }
}
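The rule encoded by `check_task_access` is: an auth id may read a task it started itself, or one started by its own API tokens; anything else needs Sys.Audit on /system/tasks. A toy restatement with plain strings standing in for `Authid` (the privilege fallback is not modeled):

```rust
// Illustrative only: "user@realm!token" belongs to "user@realm", so the
// owning user may also read tasks started by their tokens.
fn may_read_without_priv(auth_id: &str, task_auth_id: &str) -> bool {
    if auth_id == task_auth_id {
        return true;
    }
    match task_auth_id.split_once('!') {
        Some((owner, _token)) => owner == auth_id,
        None => false,
    }
}

fn main() {
    assert!(may_read_without_priv("alice@pbs", "alice@pbs"));
    assert!(may_read_without_priv("alice@pbs", "alice@pbs!automation"));
    assert!(!may_read_without_priv("alice@pbs", "bob@pbs!automation"));
}
```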

#[api(
    input: {
@ -27,7 +37,7 @@ use crate::config::cached_user_info::CachedUserInfo;
        },
    },
    returns: {
        description: "Task status nformation.",
        description: "Task status information.",
        properties: {
            node: {
                schema: NODE_SCHEMA,
@ -57,9 +67,13 @@ use crate::config::cached_user_info::CachedUserInfo;
                description: "Worker ID (arbitrary ASCII string)",
            },
            user: {
                type: String,
                type: Userid,
                description: "The user who started the task.",
            },
            tokenid: {
                type: Tokenname,
                optional: true,
            },
            status: {
                type: String,
                description: "'running' or 'stopped'",
@ -72,7 +86,7 @@ use crate::config::cached_user_info::CachedUserInfo;
        },
    },
    access: {
        description: "Users can access there own tasks, or need Sys.Audit on /system/tasks.",
        description: "Users can access their own tasks, or need Sys.Audit on /system/tasks.",
        permission: &Permission::Anybody,
    },
)]
@ -84,12 +98,8 @@ async fn get_task_status(

    let upid = extract_upid(&param)?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

    if userid != upid.userid {
        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
    }
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    check_task_access(&auth_id, &upid)?;

    let mut result = json!({
        "upid": param["upid"],
@ -99,9 +109,13 @@ async fn get_task_status(
        "starttime": upid.starttime,
        "type": upid.worker_type,
        "id": upid.worker_id,
        "user": upid.userid,
        "user": upid.auth_id.user(),
    });

    if upid.auth_id.is_token() {
        result["tokenid"] = Value::from(upid.auth_id.tokenname().unwrap().as_str());
    }

    if crate::server::worker_is_active(&upid).await? {
        result["status"] = Value::from("running");
    } else {
@ -161,12 +175,9 @@ async fn read_task_log(

    let upid = extract_upid(&param)?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    if userid != upid.userid {
        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
    }
    check_task_access(&auth_id, &upid)?;

    let test_status = param["test-status"].as_bool().unwrap_or(false);

@ -234,11 +245,11 @@ fn stop_task(

    let upid = extract_upid(&param)?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    if userid != upid.userid {
    if auth_id != upid.auth_id {
        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
        user_info.check_privs(&auth_id, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
    }

    server::abort_worker_async(upid);
@ -308,9 +319,9 @@ pub fn list_tasks(
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);
    let user_privs = user_info.lookup_privs(&auth_id, &["system", "tasks"]);

    let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;

@ -326,10 +337,12 @@ pub fn list_tasks(
            Err(_) => return None,
        };

        if !list_all && info.upid.userid != userid { return None; }
        if !list_all && check_task_access(&auth_id, &info.upid).is_err() {
            return None;
        }

        if let Some(userid) = &userfilter {
            if !info.upid.userid.as_str().contains(userid) { return None; }
        if let Some(needle) = &userfilter {
            if !info.upid.auth_id.to_string().contains(needle) { return None; }
        }

        if let Some(store) = store {
@ -342,7 +355,7 @@ pub fn list_tasks(
        if info.upid.worker_type == "backup" || info.upid.worker_type == "restore" ||
            info.upid.worker_type == "prune"
        {
            let prefix = format!("{}_", store);
            let prefix = format!("{}:", store);
            if !worker_id.starts_with(&prefix) { return None; }
        } else if info.upid.worker_type == "garbage_collection" {
            if worker_id != store { return None; }

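Both this store filter and the reader worker further down rely on the new worker-id scheme, `store:type/id` plus the backup time in upper-case hex, which makes `store:` a usable prefix filter. A small sketch:

```rust
// Sketch of the new worker-id format from the diff; the sample values are
// made up.
fn make_worker_id(store: &str, ty: &str, id: &str, backup_time: i64) -> String {
    format!("{}:{}/{}/{:08X}", store, ty, id, backup_time)
}

fn main() {
    let wid = make_worker_id("tank", "vm", "100", 0x5F8C_21A4);
    assert_eq!(wid, "tank:vm/100/5F8C21A4");
    // the store filter from list_tasks() matches on "store:"
    assert!(wid.starts_with(&format!("{}:", "tank")));
}
```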
src/api2/ping.rs (new file, 29 lines)
@ -0,0 +1,29 @@
use anyhow::{Error};
use serde_json::{json, Value};

use proxmox::api::{api, Router, Permission};

#[api(
    returns: {
        description: "Dummy ping",
        type: Object,
        properties: {
            pong: {
                description: "Always true",
                type: bool,
            }
        }
    },
    access: {
        description: "Anyone can access this, because it's used for a cheap check if the API daemon is online.",
        permission: &Permission::World,
    }
)]
/// Dummy method which replies with `{ "pong": True }`
fn ping() -> Result<Value, Error> {
    Ok(json!({
        "pong": true,
    }))
}
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_PING);
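A quick way to exercise the handler's contract; the `/api2/json/ping` path is assumed from the usual API prefix, and the check below only validates the documented response shape:

```rust
use serde_json::json; // serde_json, as used throughout the crate

fn main() {
    // The handler always returns the same document, so a cheap liveness
    // check only needs to compare against this constant shape.
    let expected = json!({ "pong": true });
    assert_eq!(expected["pong"], json!(true));
    println!("a GET to /api2/json/ping should return {}", expected);
}
```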
@ -7,21 +7,20 @@ use futures::{select, future::FutureExt};
use proxmox::api::api;
use proxmox::api::{ApiMethod, Router, RpcEnvironment, Permission};

use crate::server::{WorkerTask};
use crate::server::{WorkerTask, jobstate::Job};
use crate::backup::DataStore;
use crate::client::{HttpClient, HttpClientOptions, BackupRepository, pull::pull_store};
use crate::api2::types::*;
use crate::config::{
    remote,
    sync::SyncJobConfig,
    jobstate::Job,
    acl::{PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ},
    cached_user_info::CachedUserInfo,
};


pub fn check_pull_privs(
    userid: &Userid,
    auth_id: &Authid,
    store: &str,
    remote: &str,
    remote_store: &str,
@ -30,11 +29,11 @@ pub fn check_pull_privs(

    let user_info = CachedUserInfo::new()?;

    user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
    user_info.check_privs(userid, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;
    user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
    user_info.check_privs(auth_id, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;

    if delete {
        user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
        user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
    }

    Ok(())
@ -57,7 +56,7 @@ pub async fn get_pull_parameters(

    let src_repo = BackupRepository::new(Some(remote.userid.clone()), Some(remote.host.clone()), remote.port, remote_store.to_string());

    let client = HttpClient::new(&src_repo.host(), src_repo.port(), &src_repo.user(), options)?;
    let client = HttpClient::new(&src_repo.host(), src_repo.port(), &src_repo.auth_id(), options)?;
    let _auth_info = client.login() // make sure we can auth
        .await
        .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;
@ -69,23 +68,26 @@ pub async fn get_pull_parameters(
pub fn do_sync_job(
    mut job: Job,
    sync_job: SyncJobConfig,
    userid: &Userid,
    auth_id: &Authid,
    schedule: Option<String>,
) -> Result<String, Error> {

    let job_id = job.jobname().to_string();
    let worker_type = job.jobtype().to_string();

    let email = crate::server::lookup_user_email(auth_id.user());

    let upid_str = WorkerTask::spawn(
        &worker_type,
        Some(job.jobname().to_string()),
        userid.clone(),
        auth_id.clone(),
        false,
        move |worker| async move {

            job.start(&worker.upid().to_string())?;

            let worker2 = worker.clone();
            let sync_job2 = sync_job.clone();

            let worker_future = async move {

@ -99,7 +101,9 @@ pub fn do_sync_job(
                worker.log(format!("Sync datastore '{}' from '{}/{}'",
                    sync_job.store, sync_job.remote, sync_job.remote_store));

                crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, Userid::backup_userid().clone()).await?;
                let backup_auth_id = Authid::backup_auth_id();

                crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, backup_auth_id.clone()).await?;

                worker.log(format!("sync job '{}' end", &job_id));

@ -108,12 +112,12 @@ pub fn do_sync_job(

            let mut abort_future = worker2.abort_future().map(|_| Err(format_err!("sync aborted")));

            let res = select!{
            let result = select!{
                worker = worker_future.fuse() => worker,
                abort = abort_future => abort,
            };

            let status = worker2.create_state(&res);
            let status = worker2.create_state(&result);

            match job.finish(status) {
                Ok(_) => {},
@ -122,7 +126,13 @@ pub fn do_sync_job(
                }
            }

            res
            if let Some(email) = email {
                if let Err(err) = crate::server::send_sync_status(&email, &sync_job2, &result) {
                    eprintln!("send sync notification failed: {}", err);
                }
            }

            result
        })?;

    Ok(upid_str)
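The notification flow added here resolves the user's mail address before the worker starts and sends a status mail once the result is known. A rough sketch with stand-ins for the crate's `lookup_user_email`/`send_sync_status` helpers (names and signatures assumed):

```rust
// Illustrative stand-in: the real helper reads the user config.
fn lookup_user_email(user: &str) -> Option<String> {
    if user == "root@pam" { Some("root@localhost".into()) } else { None }
}

// Illustrative stand-in: the real helper renders and sends a mail.
fn send_sync_status(email: &str, job: &str, result: &Result<(), String>) {
    match result {
        Ok(()) => println!("mail to {}: sync job '{}' finished OK", email, job),
        Err(err) => println!("mail to {}: sync job '{}' failed: {}", email, job, err),
    }
}

fn main() {
    let result: Result<(), String> = Err("connection reset".into());
    // Only notify when the starting user actually has a mail address.
    if let Some(email) = lookup_user_email("root@pam") {
        send_sync_status(&email, "pull-remote1", &result);
    }
}
```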
@ -165,19 +175,19 @@ async fn pull (
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let delete = remove_vanished.unwrap_or(true);

    check_pull_privs(&userid, &store, &remote, &remote_store, delete)?;
    check_pull_privs(&auth_id, &store, &remote, &remote_store, delete)?;

    let (client, src_repo, tgt_store) = get_pull_parameters(&store, &remote, &remote_store).await?;

    // fixme: set to_stdout to false?
    let upid_str = WorkerTask::spawn("sync", Some(store.clone()), userid.clone(), true, move |worker| async move {
    let upid_str = WorkerTask::spawn("sync", Some(store.clone()), auth_id.clone(), true, move |worker| async move {

        worker.log(format!("sync datastore '{}' start", store));

        let pull_future = pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, userid);
        let pull_future = pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, auth_id);
        let future = select!{
            success = pull_future.fuse() => success,
            abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort,
@ -17,6 +17,7 @@ use crate::tools;
use crate::config::acl::{PRIV_DATASTORE_READ, PRIV_DATASTORE_BACKUP};
use crate::config::cached_user_info::CachedUserInfo;
use crate::api2::helpers;
use crate::tools::fs::lock_dir_noblock_shared;

mod environment;
use environment::*;
@ -54,11 +55,11 @@ fn upgrade_to_backup_reader_protocol(
    async move {
        let debug = param["debug"].as_bool().unwrap_or(false);

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = tools::required_string_param(&param, "store")?.to_owned();

        let user_info = CachedUserInfo::new()?;
        let privs = user_info.lookup_privs(&userid, &["datastore", &store]);
        let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

        let priv_read = privs & PRIV_DATASTORE_READ != 0;
        let priv_backup = privs & PRIV_DATASTORE_BACKUP != 0;
@ -93,21 +94,29 @@ fn upgrade_to_backup_reader_protocol(
        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
        if !priv_read {
            let owner = datastore.get_owner(backup_dir.group())?;
            if owner != userid {
            let correct_owner = owner == auth_id
                || (owner.is_token()
                    && Authid::from(owner.user().clone()) == auth_id);
            if !correct_owner {
                bail!("backup owner check failed!");
            }
        }

        let _guard = lock_dir_noblock_shared(
            &datastore.snapshot_path(&backup_dir),
            "snapshot",
            "locked by another operation")?;

        let path = datastore.base_path();

        //let files = BackupInfo::list_files(&path, &backup_dir)?;

        let worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_dir.backup_time());
        let worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_dir.backup_time());

        WorkerTask::spawn("reader", Some(worker_id), userid.clone(), true, move |worker| {
        WorkerTask::spawn("reader", Some(worker_id), auth_id.clone(), true, move |worker| {
            let mut env = ReaderEnvironment::new(
                env_type,
                userid,
                auth_id,
                worker.clone(),
                datastore,
                backup_dir,
@ -146,11 +155,14 @@ fn upgrade_to_backup_reader_protocol(

            use futures::future::Either;
            futures::future::select(req_fut, abort_future)
                .map(|res| match res {
                    Either::Left((Ok(res), _)) => Ok(res),
                    Either::Left((Err(err), _)) => Err(err),
                    Either::Right((Ok(res), _)) => Ok(res),
                    Either::Right((Err(err), _)) => Err(err),
                .map(move |res| {
                    let _guard = _guard;
                    match res {
                        Either::Left((Ok(res), _)) => Ok(res),
                        Either::Left((Err(err), _)) => Err(err),
                        Either::Right((Ok(res), _)) => Ok(res),
                        Either::Right((Err(err), _)) => Err(err),
                    }
                })
                .map_ok(move |_| env.log("reader finished successfully"))
        })?;
@ -5,7 +5,7 @@ use serde_json::{json, Value};

use proxmox::api::{RpcEnvironment, RpcEnvironmentType};

use crate::api2::types::Userid;
use crate::api2::types::Authid;
use crate::backup::*;
use crate::server::formatter::*;
use crate::server::WorkerTask;
@ -17,7 +17,7 @@ use crate::server::WorkerTask;
pub struct ReaderEnvironment {
    env_type: RpcEnvironmentType,
    result_attributes: Value,
    user: Userid,
    auth_id: Authid,
    pub debug: bool,
    pub formatter: &'static OutputFormatter,
    pub worker: Arc<WorkerTask>,
@ -29,7 +29,7 @@ pub struct ReaderEnvironment {
impl ReaderEnvironment {
    pub fn new(
        env_type: RpcEnvironmentType,
        user: Userid,
        auth_id: Authid,
        worker: Arc<WorkerTask>,
        datastore: Arc<DataStore>,
        backup_dir: BackupDir,
@ -39,7 +39,7 @@ impl ReaderEnvironment {
        Self {
            result_attributes: json!({}),
            env_type,
            user,
            auth_id,
            worker,
            datastore,
            debug: false,
@ -82,12 +82,12 @@ impl RpcEnvironment for ReaderEnvironment {
        self.env_type
    }

    fn set_user(&mut self, _user: Option<String>) {
        panic!("unable to change user");
    fn set_auth_id(&mut self, _auth_id: Option<String>) {
        panic!("unable to change auth_id");
    }

    fn get_user(&self) -> Option<String> {
        Some(self.user.to_string())
    fn get_auth_id(&self) -> Option<String> {
        Some(self.auth_id.to_string())
    }
}

@ -16,9 +16,9 @@ use crate::api2::types::{
    DATASTORE_SCHEMA,
    RRDMode,
    RRDTimeFrameResolution,
    Authid,
    TaskListItem,
    TaskStateType,
    Userid,
};

use crate::server;
@ -87,13 +87,13 @@ fn datastore_status(

    let (config, _digest) = datastore::config()?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, _)) in &config.sections {
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
        if !allowed {
            continue;
@ -221,9 +221,9 @@ pub fn list_tasks(
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);
    let user_privs = user_info.lookup_privs(&auth_id, &["system", "tasks"]);

    let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;
    let since = since.unwrap_or_else(|| 0);
@ -238,7 +238,7 @@ pub fn list_tasks(
        .filter_map(|info| {
            match info {
                Ok(info) => {
                    if list_all || info.upid.userid == userid {
                    if list_all || info.upid.auth_id == auth_id {
                        if let Some(filter) = &typefilter {
                            if !info.upid.worker_type.contains(filter) {
                                return None;
@ -14,9 +14,11 @@ mod macros;
#[macro_use]
mod userid;
pub use userid::{Realm, RealmRef};
pub use userid::{Tokenname, TokennameRef};
pub use userid::{Username, UsernameRef};
pub use userid::Userid;
pub use userid::PROXMOX_GROUP_ID_SCHEMA;
pub use userid::Authid;
pub use userid::{PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA, PROXMOX_GROUP_ID_SCHEMA};

// File names: may not contain slashes, may not start with "."
pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
@ -65,7 +67,7 @@ const_regex!{

    pub DNS_NAME_OR_IP_REGEX = concat!(r"^(?:", DNS_NAME!(), "|", IPRE!(), r")$");

    pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE_BRACKET!() ,"):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");
    pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), "|", APITOKEN_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE_BRACKET!() ,"):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");

    pub CERT_FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";

@ -302,7 +304,7 @@ pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
    .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
    .schema();

pub const VERIFY_SCHEDULE_SCHEMA: Schema = StringSchema::new(
pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new(
    "Run verify job at specified schedule.")
    .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
    .schema();
@ -324,6 +326,16 @@ pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
    .default(true)
    .schema();

pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
    "Do not verify backups that are already verified if their verification is not outdated.")
    .default(true)
    .schema();

pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new(
    "Days after that a verification becomes outdated")
    .minimum(1)
    .schema();

pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (single line).")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .schema();
@ -364,7 +376,7 @@ pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name
        },
    },
    owner: {
        type: Userid,
        type: Authid,
        optional: true,
    },
},
@ -382,7 +394,7 @@ pub struct GroupListItem {
    pub files: Vec<String>,
    /// The owner of group
    #[serde(skip_serializing_if="Option::is_none")]
    pub owner: Option<Userid>,
    pub owner: Option<Authid>,
}

#[api()]
@ -440,7 +452,7 @@ pub struct SnapshotVerifyState {
    },
},
owner: {
    type: Userid,
    type: Authid,
    optional: true,
},
},
@ -465,7 +477,7 @@ pub struct SnapshotListItem {
    pub size: Option<u64>,
    /// The owner of the snapshots group
    #[serde(skip_serializing_if="Option::is_none")]
    pub owner: Option<Userid>,
    pub owner: Option<Authid>,
}

#[api(
@ -577,6 +589,8 @@ pub struct GarbageCollectionStatus {
    pub pending_chunks: usize,
    /// Number of chunks marked as .bad by verify that have been removed by GC.
    pub removed_bad: usize,
    /// Number of chunks still marked as .bad after garbage collection.
    pub still_bad: usize,
}

impl Default for GarbageCollectionStatus {
@ -592,6 +606,7 @@ impl Default for GarbageCollectionStatus {
            pending_bytes: 0,
            pending_chunks: 0,
            removed_bad: 0,
            still_bad: 0,
        }
    }
}
@ -609,10 +624,75 @@ pub struct StorageStatus {
    pub avail: u64,
}

#[api()]
#[derive(Serialize, Deserialize, Default)]
/// Backup Type group/snapshot counts.
pub struct TypeCounts {
    /// The number of groups of the type.
    pub groups: u64,
    /// The number of snapshots of the type.
    pub snapshots: u64,
}

#[api(
    properties: {
        ct: {
            type: TypeCounts,
            optional: true,
        },
        host: {
            type: TypeCounts,
            optional: true,
        },
        vm: {
            type: TypeCounts,
            optional: true,
        },
        other: {
            type: TypeCounts,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
/// Counts of groups/snapshots per BackupType.
pub struct Counts {
    /// The counts for CT backups
    pub ct: Option<TypeCounts>,
    /// The counts for Host backups
    pub host: Option<TypeCounts>,
    /// The counts for VM backups
    pub vm: Option<TypeCounts>,
    /// The counts for other backup types
    pub other: Option<TypeCounts>,
}
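A hypothetical tally helper in the spirit of the new types; the struct shapes follow the diff, while `count_group` itself is illustrative and not the crate's implementation:

```rust
#[derive(Default)]
struct TypeCounts { groups: u64, snapshots: u64 }

#[derive(Default)]
struct Counts {
    ct: Option<TypeCounts>,
    host: Option<TypeCounts>,
    vm: Option<TypeCounts>,
    other: Option<TypeCounts>,
}

// Record one backup group of the given type; unknown types land in `other`.
fn count_group(counts: &mut Counts, backup_type: &str, snapshots: u64) {
    let slot = match backup_type {
        "ct" => &mut counts.ct,
        "host" => &mut counts.host,
        "vm" => &mut counts.vm,
        _ => &mut counts.other,
    };
    let entry = slot.get_or_insert_with(TypeCounts::default);
    entry.groups += 1;
    entry.snapshots += snapshots;
}

fn main() {
    let mut counts = Counts::default();
    count_group(&mut counts, "vm", 12);
    count_group(&mut counts, "vm", 3);
    assert_eq!(counts.vm.as_ref().map(|t| t.groups), Some(2));
    assert_eq!(counts.vm.as_ref().map(|t| t.snapshots), Some(15));
}
```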

#[api(
    properties: {
        "gc-status": { type: GarbageCollectionStatus, },
        counts: { type: Counts, }
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
/// Overall Datastore status and useful information.
pub struct DataStoreStatus {
    /// Total space (bytes).
    pub total: u64,
    /// Used space (bytes).
    pub used: u64,
    /// Available space (bytes).
    pub avail: u64,
    /// Status of last GC
    pub gc_status: GarbageCollectionStatus,
    /// Group/Snapshot counts
    pub counts: Counts,
}

#[api(
    properties: {
        upid: { schema: UPID_SCHEMA },
        user: { type: Userid },
        user: { type: Authid },
    },
)]
#[derive(Serialize, Deserialize)]
@ -631,8 +711,8 @@ pub struct TaskListItem {
    pub worker_type: String,
    /// Worker ID (arbitrary ASCII string)
    pub worker_id: Option<String>,
    /// The user who started the task
    pub user: Userid,
    /// The authenticated entity who started the task
    pub user: Authid,
    /// The task end time (Epoch)
    #[serde(skip_serializing_if="Option::is_none")]
    pub endtime: Option<i64>,
@ -655,7 +735,7 @@ impl From<crate::server::TaskListInfo> for TaskListItem {
            starttime: info.upid.starttime,
            worker_type: info.upid.worker_type,
            worker_id: info.upid.worker_id,
            user: info.upid.userid,
            user: info.upid.auth_id,
            endtime,
            status,
        }
@ -1,6 +1,7 @@
//! Types for user handling.
//!
//! We have [`Username`]s and [`Realm`]s. To uniquely identify a user, they must be combined into a [`Userid`].
//! We have [`Username`]s, [`Realm`]s and [`Tokenname`]s. To uniquely identify a user/API token, they
//! must be combined into a [`Userid`] or [`Authid`].
//!
//! Since they're all string types, they're organized as follows:
//!
@ -9,13 +10,16 @@
//!   with `String`, meaning you can only make references to it.
//! * [`Realm`]: an owned realm (`String` equivalent).
//! * [`RealmRef`]: a borrowed realm (`str` equivalent).
//! * [`Userid`]: an owned user id (`"user@realm"`). Note that this does not have a separate
//!   borrowed type.
//! * [`Tokenname`]: an owned API token name (`String` equivalent)
//! * [`TokennameRef`]: a borrowed `Tokenname` (`str` equivalent).
//! * [`Userid`]: an owned user id (`"user@realm"`).
//! * [`Authid`]: an owned Authentication ID (a `Userid` with an optional `Tokenname`).
//!   Note that `Userid` and `Authid` do not have a separate borrowed type.
//!
//! Note that `Username`s are not unique, therefore they do not implement `Eq` and cannot be
//! Note that `Username`s and `Tokenname`s are not unique, therefore they do not implement `Eq` and cannot be
//! compared directly. If a direct comparison is really required, they can be compared as strings
//! via the `as_str()` method. [`Realm`]s and [`Userid`]s on the other hand can be compared with
//! each other, as in those two cases the comparison has meaning.
//! via the `as_str()` method. [`Realm`]s, [`Userid`]s and [`Authid`]s on the other
//! hand can be compared with each other, as in those cases the comparison has meaning.

use std::borrow::Borrow;
use std::convert::TryFrom;
@ -36,19 +40,42 @@ use proxmox::const_regex;
// also see "man useradd"
macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }
macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }
macro_rules! TOKEN_NAME_REGEX_STR { () => (PROXMOX_SAFE_ID_REGEX_STR!()) }
macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) }
macro_rules! APITOKEN_ID_REGEX_STR { () => (concat!(USER_ID_REGEX_STR!() , r"!", TOKEN_NAME_REGEX_STR!())) }

const_regex! {
    pub PROXMOX_USER_NAME_REGEX = concat!(r"^", USER_NAME_REGEX_STR!(), r"$");
    pub PROXMOX_TOKEN_NAME_REGEX = concat!(r"^", TOKEN_NAME_REGEX_STR!(), r"$");
    pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_ID_REGEX_STR!(), r"$");
    pub PROXMOX_APITOKEN_ID_REGEX = concat!(r"^", APITOKEN_ID_REGEX_STR!(), r"$");
    pub PROXMOX_AUTH_ID_REGEX = concat!(r"^", r"(?:", USER_ID_REGEX_STR!(), r"|", APITOKEN_ID_REGEX_STR!(), r")$");
    pub PROXMOX_GROUP_ID_REGEX = concat!(r"^", GROUP_NAME_REGEX_STR!(), r"$");
}
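What `PROXMOX_AUTH_ID_REGEX` accepts can be sanity-checked with the `regex` crate directly; the pattern below is a simplified stand-in for the `USER_ID_REGEX_STR!`/`APITOKEN_ID_REGEX_STR!` macros, not the crate's exact definition:

```rust
use regex::Regex;

fn main() {
    // Simplified: user@realm, optionally followed by !tokenname.
    let auth_id = Regex::new(
        r"^[^\s:/@!]+@[A-Za-z0-9_][A-Za-z0-9._\-]*(!([A-Za-z0-9_][A-Za-z0-9._\-]*))?$"
    ).unwrap();

    assert!(auth_id.is_match("root@pam"));              // plain user id
    assert!(auth_id.is_match("backup@pbs!sync-token")); // API token id
    assert!(!auth_id.is_match("no-realm"));             // missing @realm
}
```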
|
||||
pub const PROXMOX_USER_NAME_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&PROXMOX_USER_NAME_REGEX);
|
||||
pub const PROXMOX_TOKEN_NAME_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&PROXMOX_TOKEN_NAME_REGEX);
|
||||
|
||||
pub const PROXMOX_USER_ID_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&PROXMOX_USER_ID_REGEX);
|
||||
pub const PROXMOX_TOKEN_ID_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&PROXMOX_APITOKEN_ID_REGEX);
|
||||
pub const PROXMOX_AUTH_ID_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&PROXMOX_AUTH_ID_REGEX);
|
||||
|
||||
pub const PROXMOX_TOKEN_ID_SCHEMA: Schema = StringSchema::new("API Token ID")
|
||||
.format(&PROXMOX_TOKEN_ID_FORMAT)
|
||||
.min_length(3)
|
||||
.max_length(64)
|
||||
.schema();
|
||||
|
||||
pub const PROXMOX_TOKEN_NAME_SCHEMA: Schema = StringSchema::new("API Token name")
|
||||
.format(&PROXMOX_TOKEN_NAME_FORMAT)
|
||||
.min_length(3)
|
||||
.max_length(64)
|
||||
.schema();
|
||||
|
||||
pub const PROXMOX_GROUP_ID_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&PROXMOX_GROUP_ID_REGEX);
|
||||
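// Illustration (not part of the diff): example strings accepted by the
// composed patterns above; "alice@pbs" and "automation" are made-up values.
assert!(PROXMOX_USER_ID_REGEX.is_match("alice@pbs"));
assert!(PROXMOX_APITOKEN_ID_REGEX.is_match("alice@pbs!automation"));
assert!(PROXMOX_AUTH_ID_REGEX.is_match("alice@pbs"));            // plain user id
assert!(PROXMOX_AUTH_ID_REGEX.is_match("alice@pbs!automation")); // API token id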
@ -91,26 +118,6 @@ pub struct Username(String);
|
||||
#[derive(Debug, Hash)]
|
||||
pub struct UsernameRef(str);
|
||||
|
||||
#[doc(hidden)]
|
||||
/// ```compile_fail
|
||||
/// let a: Username = unsafe { std::mem::zeroed() };
|
||||
/// let b: Username = unsafe { std::mem::zeroed() };
|
||||
/// let _ = <Username as PartialEq>::eq(&a, &b);
|
||||
/// ```
|
||||
///
|
||||
/// ```compile_fail
|
||||
/// let a: &UsernameRef = unsafe { std::mem::zeroed() };
|
||||
/// let b: &UsernameRef = unsafe { std::mem::zeroed() };
|
||||
/// let _ = <&UsernameRef as PartialEq>::eq(a, b);
|
||||
/// ```
|
||||
///
|
||||
/// ```compile_fail
|
||||
/// let a: &UsernameRef = unsafe { std::mem::zeroed() };
|
||||
/// let b: &UsernameRef = unsafe { std::mem::zeroed() };
|
||||
/// let _ = <&UsernameRef as PartialEq>::eq(&a, &b);
|
||||
/// ```
|
||||
struct _AssertNoEqImpl;
|
||||
|
||||
impl UsernameRef {
|
||||
fn new(s: &str) -> &Self {
|
||||
unsafe { &*(s as *const str as *const UsernameRef) }
|
||||
@ -286,7 +293,132 @@ impl PartialEq<Realm> for &RealmRef {
|
||||
}
|
||||
}
|
||||
|
||||
/// A complete user id consisting of a user name and a realm.
|
||||
#[api(
|
||||
type: String,
|
||||
format: &PROXMOX_TOKEN_NAME_FORMAT,
|
||||
)]
|
||||
/// The token ID part of an API token authentication id.
|
||||
///
|
||||
/// This alone does NOT uniquely identify the API token and therefore does not implement `Eq`. In
|
||||
/// order to compare token IDs directly, they need to be explicitly compared as strings by calling
|
||||
/// `.as_str()`.
|
||||
///
|
||||
/// ```compile_fail
|
||||
/// fn test(a: Tokenname, b: Tokenname) -> bool {
|
||||
/// a == b // illegal and does not compile
|
||||
/// }
|
||||
/// ```
|
||||
#[derive(Clone, Debug, Hash, Deserialize, Serialize)]
|
||||
pub struct Tokenname(String);
|
||||
|
||||
/// A reference to a token name part of an authentication id. This alone does NOT uniquely identify
|
||||
/// the user.
|
||||
///
|
||||
/// This is like a `str` to the `String` of a [`Tokenname`].
|
||||
#[derive(Debug, Hash)]
|
||||
pub struct TokennameRef(str);
|
||||
|
||||
#[doc(hidden)]
|
||||
/// ```compile_fail
|
||||
/// let a: Username = unsafe { std::mem::zeroed() };
|
||||
/// let b: Username = unsafe { std::mem::zeroed() };
|
||||
/// let _ = <Username as PartialEq>::eq(&a, &b);
|
||||
/// ```
|
||||
///
|
||||
/// ```compile_fail
|
||||
/// let a: &UsernameRef = unsafe { std::mem::zeroed() };
|
||||
/// let b: &UsernameRef = unsafe { std::mem::zeroed() };
|
||||
/// let _ = <&UsernameRef as PartialEq>::eq(a, b);
|
||||
/// ```
|
||||
///
|
||||
/// ```compile_fail
|
||||
/// let a: &UsernameRef = unsafe { std::mem::zeroed() };
|
||||
/// let b: &UsernameRef = unsafe { std::mem::zeroed() };
|
||||
/// let _ = <&UsernameRef as PartialEq>::eq(&a, &b);
|
||||
/// ```
|
||||
///
|
||||
/// ```compile_fail
|
||||
/// let a: Tokenname = unsafe { std::mem::zeroed() };
|
||||
/// let b: Tokenname = unsafe { std::mem::zeroed() };
|
||||
/// let _ = <Tokenname as PartialEq>::eq(&a, &b);
|
||||
/// ```
|
||||
///
|
||||
/// ```compile_fail
|
||||
/// let a: &TokennameRef = unsafe { std::mem::zeroed() };
|
||||
/// let b: &TokennameRef = unsafe { std::mem::zeroed() };
|
||||
/// let _ = <&TokennameRef as PartialEq>::eq(a, b);
|
||||
/// ```
|
||||
///
|
||||
/// ```compile_fail
|
||||
/// let a: &TokennameRef = unsafe { std::mem::zeroed() };
|
||||
/// let b: &TokennameRef = unsafe { std::mem::zeroed() };
|
||||
/// let _ = <&TokennameRef as PartialEq>::eq(&a, &b);
|
||||
/// ```
|
||||
struct _AssertNoEqImpl;
|
||||
|
||||
impl TokennameRef {
|
||||
fn new(s: &str) -> &Self {
|
||||
unsafe { &*(s as *const str as *const TokennameRef) }
|
||||
}
|
||||
|
||||
pub fn as_str(&self) -> &str {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl std::ops::Deref for Tokenname {
|
||||
type Target = TokennameRef;
|
||||
|
||||
fn deref(&self) -> &TokennameRef {
|
||||
self.borrow()
|
||||
}
|
||||
}
|
||||
|
||||
impl Borrow<TokennameRef> for Tokenname {
|
||||
fn borrow(&self) -> &TokennameRef {
|
||||
TokennameRef::new(self.0.as_str())
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<TokennameRef> for Tokenname {
|
||||
fn as_ref(&self) -> &TokennameRef {
|
||||
self.borrow()
|
||||
}
|
||||
}
|
||||
|
||||
impl ToOwned for TokennameRef {
|
||||
type Owned = Tokenname;
|
||||
|
||||
fn to_owned(&self) -> Self::Owned {
|
||||
Tokenname(self.0.to_owned())
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<String> for Tokenname {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(s: String) -> Result<Self, Error> {
|
||||
if !PROXMOX_TOKEN_NAME_REGEX.is_match(&s) {
|
||||
bail!("invalid token name");
|
||||
}
|
||||
|
||||
Ok(Self(s))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> TryFrom<&'a str> for &'a TokennameRef {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(s: &'a str) -> Result<&'a TokennameRef, Error> {
|
||||
if !PROXMOX_TOKEN_NAME_REGEX.is_match(s) {
|
||||
bail!("invalid token name in user id");
|
||||
}
|
||||
|
||||
Ok(TokennameRef::new(s))
|
||||
}
|
||||
}
|
||||
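// Usage sketch (not part of the diff): both conversions validate against
// PROXMOX_TOKEN_NAME_REGEX, so invalid names are rejected up front.
fn tokenname_example() -> Result<(), Error> {
    let owned = Tokenname::try_from("automation".to_string())?;
    let borrowed = <&TokennameRef>::try_from("automation")?;
    assert_eq!(owned.as_str(), borrowed.as_str());
    Ok(())
}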
|
||||
/// A complete user id consisting of a user name and a realm
|
||||
#[derive(Clone, Debug, Hash)]
|
||||
pub struct Userid {
|
||||
data: String,
|
||||
@ -366,10 +498,18 @@ impl std::str::FromStr for Userid {
|
||||
type Err = Error;
|
||||
|
||||
fn from_str(id: &str) -> Result<Self, Error> {
|
||||
let (name, realm) = match id.as_bytes().iter().rposition(|&b| b == b'@') {
|
||||
Some(pos) => (&id[..pos], &id[(pos + 1)..]),
|
||||
None => bail!("not a valid user id"),
|
||||
};
|
||||
let name_len = id
|
||||
.as_bytes()
|
||||
.iter()
|
||||
.rposition(|&b| b == b'@')
|
||||
.ok_or_else(|| format_err!("not a valid user id"))?;
|
||||
|
||||
let name = &id[..name_len];
|
||||
let realm = &id[(name_len + 1)..];
|
||||
|
||||
if !PROXMOX_USER_NAME_REGEX.is_match(name) {
|
||||
bail!("invalid user name in user id");
|
||||
}
|
||||
|
||||
PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(realm)
|
||||
.map_err(|_| format_err!("invalid realm in user id"))?;
|
||||
@ -388,6 +528,10 @@ impl TryFrom<String> for Userid {
|
||||
.rposition(|&b| b == b'@')
|
||||
.ok_or_else(|| format_err!("not a valid user id"))?;
|
||||
|
||||
if !PROXMOX_USER_NAME_REGEX.is_match(&data[..name_len]) {
|
||||
bail!("invalid user name in user id");
|
||||
}
|
||||
|
||||
PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(&data[(name_len + 1)..])
|
||||
.map_err(|_| format_err!("invalid realm in user id"))?;
|
||||
|
||||
@ -413,5 +557,182 @@ impl PartialEq<String> for Userid {
|
||||
}
|
||||
}
|
||||
|
||||
/// A complete authentication id consisting of a user id and an optional token name.
|
||||
#[derive(Clone, Debug, Hash)]
|
||||
pub struct Authid {
|
||||
user: Userid,
|
||||
tokenname: Option<Tokenname>
|
||||
}
|
||||
|
||||
impl Authid {
|
||||
pub const API_SCHEMA: Schema = StringSchema::new("Authentication ID")
|
||||
.format(&PROXMOX_AUTH_ID_FORMAT)
|
||||
.min_length(3)
|
||||
.max_length(64)
|
||||
.schema();
|
||||
|
||||
const fn new(user: Userid, tokenname: Option<Tokenname>) -> Self {
|
||||
Self { user, tokenname }
|
||||
}
|
||||
|
||||
pub fn user(&self) -> &Userid {
|
||||
&self.user
|
||||
}
|
||||
|
||||
pub fn is_token(&self) -> bool {
|
||||
self.tokenname.is_some()
|
||||
}
|
||||
|
||||
pub fn tokenname(&self) -> Option<&TokennameRef> {
|
||||
match &self.tokenname {
|
||||
Some(name) => Some(&name),
|
||||
None => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the "backup@pam" auth id.
|
||||
pub fn backup_auth_id() -> &'static Self {
|
||||
&*BACKUP_AUTHID
|
||||
}
|
||||
|
||||
/// Get the "root@pam" auth id.
|
||||
pub fn root_auth_id() -> &'static Self {
|
||||
&*ROOT_AUTHID
|
||||
}
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
pub static ref BACKUP_AUTHID: Authid = Authid::from(Userid::new("backup@pam".to_string(), 6));
|
||||
pub static ref ROOT_AUTHID: Authid = Authid::from(Userid::new("root@pam".to_string(), 4));
|
||||
}
|
||||
|
||||
impl Eq for Authid {}
|
||||
|
||||
impl PartialEq for Authid {
|
||||
fn eq(&self, rhs: &Self) -> bool {
|
||||
self.user == rhs.user && match (&self.tokenname, &rhs.tokenname) {
|
||||
(Some(ours), Some(theirs)) => ours.as_str() == theirs.as_str(),
|
||||
(None, None) => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Userid> for Authid {
|
||||
fn from(parts: Userid) -> Self {
|
||||
Self::new(parts, None)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<(Userid, Option<Tokenname>)> for Authid {
|
||||
fn from(parts: (Userid, Option<Tokenname>)) -> Self {
|
||||
Self::new(parts.0, parts.1)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Authid {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match &self.tokenname {
|
||||
Some(token) => write!(f, "{}!{}", self.user, token.as_str()),
|
||||
None => self.user.fmt(f),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::str::FromStr for Authid {
|
||||
type Err = Error;
|
||||
|
||||
fn from_str(id: &str) -> Result<Self, Error> {
|
||||
let name_len = id
|
||||
.as_bytes()
|
||||
.iter()
|
||||
.rposition(|&b| b == b'@')
|
||||
.ok_or_else(|| format_err!("not a valid user id"))?;
|
||||
|
||||
let realm_end = id
|
||||
.as_bytes()
|
||||
.iter()
|
||||
.rposition(|&b| b == b'!')
|
||||
.map(|pos| if pos < name_len { id.len() } else { pos })
|
||||
.unwrap_or(id.len());
|
||||
|
||||
if realm_end == id.len() - 1 {
|
||||
bail!("empty token name in userid");
|
||||
}
|
||||
|
||||
let user = Userid::from_str(&id[..realm_end])?;
|
||||
|
||||
if id.len() > realm_end {
|
||||
let token = Tokenname::try_from(id[(realm_end + 1)..].to_string())?;
|
||||
Ok(Self::new(user, Some(token)))
|
||||
} else {
|
||||
Ok(Self::new(user, None))
|
||||
}
|
||||
}
|
||||
}
|
||||
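// Worked example (illustration): the rposition calls above mean that a '!'
// only starts the token name when it appears after the last '@', so '!'
// remains a legal character inside the user name part:
//   "test@pam"     -> user "test@pam",  no token
//   "test@pam!bar" -> user "test@pam",  token name "bar"
//   "te!st@pam"    -> user "te!st@pam", no token ('!' comes before the '@')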
|
||||
impl TryFrom<String> for Authid {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(mut data: String) -> Result<Self, Error> {
|
||||
let name_len = data
|
||||
.as_bytes()
|
||||
.iter()
|
||||
.rposition(|&b| b == b'@')
|
||||
.ok_or_else(|| format_err!("not a valid user id"))?;
|
||||
|
||||
let realm_end = data
|
||||
.as_bytes()
|
||||
.iter()
|
||||
.rposition(|&b| b == b'!')
|
||||
.map(|pos| if pos < name_len { data.len() } else { pos })
|
||||
.unwrap_or(data.len());
|
||||
|
||||
if realm_end == data.len() - 1 {
|
||||
bail!("empty token name in userid");
|
||||
}
|
||||
|
||||
let tokenname = if data.len() > realm_end {
|
||||
Some(Tokenname::try_from(data[(realm_end + 1)..].to_string())?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
data.truncate(realm_end);
|
||||
|
||||
let user: Userid = data.parse()?;
|
||||
|
||||
Ok(Self { user, tokenname })
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_token_id() {
|
||||
let userid: Userid = "test@pam".parse().expect("parsing Userid failed");
|
||||
assert_eq!(userid.name().as_str(), "test");
|
||||
assert_eq!(userid.realm(), "pam");
|
||||
assert_eq!(userid, "test@pam");
|
||||
|
||||
let auth_id: Authid = "test@pam".parse().expect("parsing user Authid failed");
|
||||
assert_eq!(auth_id.to_string(), "test@pam".to_string());
|
||||
assert!(!auth_id.is_token());
|
||||
|
||||
assert_eq!(auth_id.user(), &userid);
|
||||
|
||||
let user_auth_id = Authid::from(userid.clone());
|
||||
assert_eq!(user_auth_id, auth_id);
|
||||
assert!(!user_auth_id.is_token());
|
||||
|
||||
let auth_id: Authid = "test@pam!bar".parse().expect("parsing token Authid failed");
|
||||
let token_userid = auth_id.user();
|
||||
assert_eq!(&userid, token_userid);
|
||||
assert!(auth_id.is_token());
|
||||
assert_eq!(auth_id.tokenname().expect("Token has tokenname").as_str(), TokennameRef::new("bar").as_str());
|
||||
assert_eq!(auth_id.to_string(), "test@pam!bar".to_string());
|
||||
}
|
||||
|
||||
proxmox::forward_deserialize_to_from_str!(Userid);
|
||||
proxmox::forward_serialize_to_display!(Userid);
|
||||
|
||||
proxmox::forward_deserialize_to_from_str!(Authid);
|
||||
proxmox::forward_serialize_to_display!(Authid);
|
||||
|
199
src/backup.rs
@ -1,107 +1,146 @@
//! This module implements the proxmox backup data storage
//! This module implements the data storage and access layer.
//!
//! Proxmox backup splits large files into chunks, and stores them
//! deduplicated using a content addressable storage format.
//! # Data formats
//!
//! A chunk is simply defined as a binary blob, which is stored inside a
//! `ChunkStore`, addressed by the SHA256 digest of the binary blob.
//! PBS splits large files into chunks, and stores them deduplicated using
//! a content addressable storage format.
//!
//! Index files are used to reconstruct the original file. They
//! basically contain a list of SHA256 checksums. The `DynamicIndex*`
//! format is able to deal with dynamic chunk sizes, whereas the
//! `FixedIndex*` format is an optimization to store a list of equal
//! sized chunks.
//! Backup snapshots are stored as folders containing a manifest file and
//! potentially one or more index or blob files.
//!
//! # ChunkStore Locking
//! The manifest contains hashes of all other files and can be signed by
//! the client.
//!
//! We need to be able to restart the proxmox-backup service daemons,
//! so that we can update the software without rebooting the host. But
//! such restarts must not abort running backup jobs, so we need to
//! keep the old service running until those jobs are finished. This
//! implies that we need some kind of locking for the
//! ChunkStore. Please note that it is perfectly valid to have
//! multiple parallel ChunkStore writers, even when they write the
//! same chunk (because the chunk would have the same name and the
//! same data). The only real problem is garbage collection, because
//! we need to avoid deleting chunks which are still referenced.
//! Blob files contain data directly. They are used for config files and
//! the like.
//!
//! * Read Index Files:
//! Index files are used to reconstruct an original file. They contain a
//! list of SHA256 checksums. The `DynamicIndex*` format is able to deal
//! with dynamic chunk sizes (CT and host backups), whereas the
//! `FixedIndex*` format is an optimization to store a list of equal sized
//! chunks (VMs, whole block devices).
//!
//!   Acquire shared lock for .idx files.
//!
//!
//! * Delete Index Files:
//!
//!   Acquire exclusive lock for .idx files. This makes sure that we do
//!   not delete index files while they are still in use.
//!
//!
//! * Create Index Files:
//!
//!   Acquire shared lock for ChunkStore (process wide).
//!
//!   Note: When creating .idx files, we create a temporary (.tmp) file,
//!   then do an atomic rename ...
//!
//!
//! * Garbage Collect:
//!
//!   Acquire exclusive lock for ChunkStore (process wide). If we have
//!   already a shared lock for the ChunkStore, try to upgrade that
//!   lock.
//!
//!
//! * Server Restart
//!
//!   Try to abort the running garbage collection to release exclusive
//!   ChunkStore locks ASAP. Start the new service with the existing listening
//!   socket.
//! A chunk is defined as a binary blob, which is stored inside a
//! [ChunkStore](struct.ChunkStore.html) instead of the backup directory
//! directly, and can be addressed by its SHA256 digest.
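As an illustration of the content addressing described above (a sketch only; the digest-prefix directory layout is an assumption for this example, not taken from the diff):

use openssl::sha::sha256;
use std::path::{Path, PathBuf};

/// Map a chunk's content to its store path via the SHA256 digest.
fn chunk_path(store_base: &Path, chunk_data: &[u8]) -> PathBuf {
    let digest = sha256(chunk_data); // [u8; 32]
    let hex: String = digest.iter().map(|b| format!("{:02x}", b)).collect();
    // group chunks by a digest prefix so single directories stay small
    store_base.join(".chunks").join(&hex[0..4]).join(&hex)
}

Two chunks with identical content map to the same path, which is exactly what makes the store deduplicating.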
//!
//!
//! # Garbage Collection (GC)
//!
//! Deleting backups is as easy as deleting the corresponding .idx
//! files. Unfortunately, this does not free up any storage, because
//! those files just contain references to chunks.
//! Deleting backups is as easy as deleting the corresponding .idx files.
//! However, this does not free up any storage, because those files just
//! contain references to chunks.
//!
//! To free up some storage, we run a garbage collection process at
//! regular intervals. The collector uses a mark and sweep
//! approach. In the first phase, it scans all .idx files to mark used
//! chunks. The second phase then removes all unmarked chunks from the
//! store.
//! regular intervals. The collector uses a mark and sweep approach. In
//! the first phase, it scans all .idx files to mark used chunks. The
//! second phase then removes all unmarked chunks from the store.
//!
//! The above locking mechanism makes sure that we are the only
//! process running GC. But we still want to be able to create backups
//! during GC, so there may be multiple backup threads/tasks
//! running. Either started before GC started, or started while GC is
//! running.
//! The locking mechanisms mentioned below make sure that we are the only
//! process running GC. We still want to be able to create backups during
//! GC, so there may be multiple backup threads/tasks running, either
//! started before GC, or while GC is running.
//!
//! ## `atime` based GC
//!
//! The idea here is to mark chunks by updating the `atime` (access
//! timestamp) on the chunk file. This is quite simple and does not
//! need additional RAM.
//! timestamp) on the chunk file. This is quite simple and does not need
//! additional RAM.
//!
//! One minor problem is that recent Linux versions use the `relatime`
//! mount flag by default for performance reasons (yes, we want
//! that). When enabled, `atime` data is written to the disk only if
//! the file has been modified since the `atime` data was last updated
//! (`mtime`), or if the file was last accessed more than a certain
//! amount of time ago (by default 24h). So we may only delete chunks
//! with `atime` older than 24 hours.
//!
//! Another problem arises from running backups. The mark phase does
//! not find any chunks from those backups, because there is no .idx
//! file for them (created after the backup). Chunks created or
//! touched by those backups may have an `atime` as old as the start
//! time of those backups. Please note that the backup start time may
//! predate the GC start time. So we may only delete chunks older than
//! the start time of those running backup jobs.
//! mount flag by default for performance reasons (and we want that). When
//! enabled, `atime` data is written to the disk only if the file has been
//! modified since the `atime` data was last updated (`mtime`), or if the
//! file was last accessed more than a certain amount of time ago (by
//! default 24h). So we may only delete chunks with `atime` older than 24
//! hours.
//!
//! Another problem arises from running backups. The mark phase does not
//! find any chunks from those backups, because there is no .idx file for
//! them (created after the backup). Chunks created or touched by those
//! backups may have an `atime` as old as the start time of those backups.
//! Please note that the backup start time may predate the GC start time.
//! So we may only delete chunks older than the start time of those
//! running backup jobs, which might be more than 24h back (this is the
//! reason why ProcessLocker exclusive locks only have to be exclusive
//! between processes, since within one we can determine the age of the
//! oldest shared lock).
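The resulting sweep rule can be stated as a single cutoff computation (a minimal sketch under the constraints above; the function and parameter names are illustrative):

/// A chunk may only be removed if its atime is older than both limits:
/// the 24h relatime window and the start of the oldest running backup.
fn sweep_cutoff(now_epoch: i64, oldest_writer_start: i64) -> i64 {
    let relatime_limit = now_epoch - 24 * 3600;
    std::cmp::min(relatime_limit, oldest_writer_start)
}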
//!
//! ## Store `marks` in RAM using a HASH
//!
//! Not sure if this is better. TODO
//! Might be better. Under investigation.
//!
//!
//! # Locking
//!
//! Since PBS allows multiple potentially interfering operations at the
//! same time (e.g. garbage collect, prune, multiple backup creations
//! (only in separate groups), forget, ...), these need to lock against
//! each other in certain scenarios. There is no overarching global lock
//! though, instead always the finest grained lock possible is used,
//! because running these operations concurrently is treated as a feature
//! on its own.
//!
//! ## Inter-process Locking
//!
//! We need to be able to restart the proxmox-backup service daemons, so
//! that we can update the software without rebooting the host. But such
//! restarts must not abort running backup jobs, so we need to keep the
//! old service running until those jobs are finished. This implies that
//! we need some kind of locking for modifying chunks and indices in the
//! ChunkStore.
//!
//! Please note that it is perfectly valid to have multiple
//! parallel ChunkStore writers, even when they write the same chunk
//! (because the chunk would have the same name and the same data, and
//! writes are completed atomically via a rename). The only problem is
//! garbage collection, because we need to avoid deleting chunks which are
//! still referenced.
//!
//! To do this we use the
//! [ProcessLocker](../tools/struct.ProcessLocker.html).
//!
//! ### ChunkStore-wide
//!
//! * Create Index Files:
//!
//!   Acquire shared lock for ChunkStore.
//!
//!   Note: When creating .idx files, we create a temporary .tmp file,
//!   then do an atomic rename.
//!
//! * Garbage Collect:
//!
//!   Acquire exclusive lock for ChunkStore. If we have
//!   already a shared lock for the ChunkStore, try to upgrade that
//!   lock.
//!
//! Exclusive locks only work _between processes_. It is valid to have an
//! exclusive and one or more shared locks held within one process. Writing
//! chunks within one process is synchronized using the gc_mutex.
//!
//! On server restart, we stop any running GC in the old process to avoid
//! having the exclusive lock held for too long.
//!
//! ## Locking table
//!
//! The table below shows all operations that play a role in locking, and which
//! mechanisms are used to make their concurrent usage safe.
//!
//! | starting ><br>v during | read index file | create index file | GC mark | GC sweep | update manifest | forget | prune | create backup | verify | reader api |
//! |-|-|-|-|-|-|-|-|-|-|-|
//! | **read index file** | / | / | / | / | / | mmap stays valid, oldest_shared_lock prevents GC | see forget column | / | / | / |
//! | **create index file** | / | / | / | / | / | / | / | /, happens at the end, after all chunks are touched | /, only happens without a manifest | / |
//! | **GC mark** | / | Datastore process-lock shared | gc_mutex, exclusive ProcessLocker | gc_mutex | /, GC only cares about index files, not manifests | tells GC about removed chunks | see forget column | /, index files don’t exist yet | / | / |
//! | **GC sweep** | / | Datastore process-lock shared | gc_mutex, exclusive ProcessLocker | gc_mutex | / | /, chunks already marked | see forget column | chunks get touched; chunk_store.mutex; oldest PL lock | / | / |
//! | **update manifest** | / | / | / | / | update_manifest lock | update_manifest lock, remove dir under lock | see forget column | /, “write manifest” happens at the end | /, can call “write manifest”, see that column | / |
//! | **forget** | / | / | removed_during_gc mutex is held during unlink | marking done, doesn’t matter if forgotten now | update_manifest lock, forget waits for lock | /, unlink is atomic | causes forget to fail, but that’s OK | running backup has snapshot flock | /, potentially detects missing folder | shared snap flock |
//! | **prune** | / | / | see forget row | see forget row | see forget row | causes warn in prune, but no error | see forget column | running and last non-running can’t be pruned | see forget row | shared snap flock |
//! | **create backup** | / | only time this happens, thus has snapshot flock | / | chunks get touched; chunk_store.mutex; oldest PL lock | no lock, but cannot exist beforehand | snapshot flock, can’t be forgotten | running and last non-running can’t be pruned | snapshot group flock, only one running per group | /, won’t be verified since manifest missing | / |
//! | **verify** | / | / | / | / | see “update manifest” row | /, potentially detects missing folder | see forget column | / | /, but useless (“update manifest” protects itself) | / |
//! | **reader api** | / | / | / | /, open snap can’t be forgotten, so ref must exist | / | prevented by shared snap flock | prevented by shared snap flock | / | / | /, lock is shared |
//!
//! * / = no interaction
//! * shared/exclusive from POV of 'starting' process
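A minimal sketch of the inter-process half of this scheme using flock(2) via the nix crate (the real implementation is tools::ProcessLocker, whose in-process bookkeeping is not shown here):

use std::os::unix::io::AsRawFd;
use nix::fcntl::{flock, FlockArg};

/// Writers take a shared lock; GC needs the store exclusively.
fn lock_chunk_store(lock_file: &std::fs::File, exclusive: bool) -> nix::Result<()> {
    let arg = if exclusive {
        FlockArg::LockExclusiveNonblock
    } else {
        FlockArg::LockSharedNonblock
    };
    flock(lock_file.as_raw_fd(), arg)
}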
|
||||
use anyhow::{bail, Error};
|
||||
|
||||
|
@ -15,6 +15,17 @@ use super::IndexFile;
|
||||
use super::read_chunk::AsyncReadChunk;
|
||||
use super::index::ChunkReadInfo;
|
||||
|
||||
// FIXME: This enum may not be required?
|
||||
// - Put the `WaitForData` case directly into a `read_future: Option<>`
|
||||
// - make the read loop as follows:
|
||||
// * if read_buffer is not empty:
|
||||
// use it
|
||||
// * else if read_future is there:
|
||||
// poll it
|
||||
// if read: move data to read_buffer
|
||||
// * else
|
||||
// create read future
|
||||
#[allow(clippy::enum_variant_names)]
|
||||
enum AsyncIndexReaderState<S> {
|
||||
NoData,
|
||||
WaitForData(Pin<Box<dyn Future<Output = Result<(S, Vec<u8>), Error>> + Send + 'static>>),
|
||||
@ -118,9 +129,8 @@ where
|
||||
}
|
||||
AsyncIndexReaderState::WaitForData(ref mut future) => {
|
||||
match ready!(future.as_mut().poll(cx)) {
|
||||
Ok((store, mut chunk_data)) => {
|
||||
this.read_buffer.clear();
|
||||
this.read_buffer.append(&mut chunk_data);
|
||||
Ok((store, chunk_data)) => {
|
||||
this.read_buffer = chunk_data;
|
||||
this.state = AsyncIndexReaderState::HaveData;
|
||||
this.store = Some(store);
|
||||
}
|
||||
|
@ -78,7 +78,7 @@ pub struct DirEntry {
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub enum DirEntryAttribute {
|
||||
Directory { start: u64 },
|
||||
File { size: u64, mtime: u64 },
|
||||
File { size: u64, mtime: i64 },
|
||||
Symlink,
|
||||
Hardlink,
|
||||
BlockDevice,
|
||||
@ -89,7 +89,7 @@ pub enum DirEntryAttribute {
|
||||
|
||||
impl DirEntry {
|
||||
|
||||
fn new(etype: CatalogEntryType, name: Vec<u8>, start: u64, size: u64, mtime:u64) -> Self {
|
||||
fn new(etype: CatalogEntryType, name: Vec<u8>, start: u64, size: u64, mtime: i64) -> Self {
|
||||
match etype {
|
||||
CatalogEntryType::Directory => {
|
||||
DirEntry { name, attr: DirEntryAttribute::Directory { start } }
|
||||
@ -184,7 +184,7 @@ impl DirInfo {
|
||||
catalog_encode_u64(writer, name.len() as u64)?;
|
||||
writer.write_all(name)?;
|
||||
catalog_encode_u64(writer, *size)?;
|
||||
catalog_encode_u64(writer, *mtime)?;
|
||||
catalog_encode_i64(writer, *mtime)?;
|
||||
}
|
||||
DirEntry { name, attr: DirEntryAttribute::Symlink } => {
|
||||
writer.write_all(&[CatalogEntryType::Symlink as u8])?;
|
||||
@ -234,7 +234,7 @@ impl DirInfo {
|
||||
Ok((self.name, data))
|
||||
}
|
||||
|
||||
fn parse<C: FnMut(CatalogEntryType, &[u8], u64, u64, u64) -> Result<bool, Error>>(
|
||||
fn parse<C: FnMut(CatalogEntryType, &[u8], u64, u64, i64) -> Result<bool, Error>>(
|
||||
data: &[u8],
|
||||
mut callback: C,
|
||||
) -> Result<(), Error> {
|
||||
@ -265,7 +265,7 @@ impl DirInfo {
|
||||
}
|
||||
CatalogEntryType::File => {
|
||||
let size = catalog_decode_u64(&mut cursor)?;
|
||||
let mtime = catalog_decode_u64(&mut cursor)?;
|
||||
let mtime = catalog_decode_i64(&mut cursor)?;
|
||||
callback(etype, name, 0, size, mtime)?
|
||||
}
|
||||
_ => {
|
||||
@ -362,7 +362,7 @@ impl <W: Write> BackupCatalogWriter for CatalogWriter<W> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn add_file(&mut self, name: &CStr, size: u64, mtime: u64) -> Result<(), Error> {
|
||||
fn add_file(&mut self, name: &CStr, size: u64, mtime: i64) -> Result<(), Error> {
|
||||
let dir = self.dirstack.last_mut().ok_or_else(|| format_err!("outside root"))?;
|
||||
let name = name.to_bytes().to_vec();
|
||||
dir.entries.push(DirEntry { name, attr: DirEntryAttribute::File { size, mtime } });
|
||||
@ -587,14 +587,77 @@ impl <R: Read + Seek> CatalogReader<R> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Serialize i64 as short, variable length byte sequence
|
||||
///
|
||||
/// Stores 7 bits per byte, Bit 8 indicates the end of the sequence (when not set).
|
||||
/// If the value is negative, we end with a zero byte (0x00).
|
||||
pub fn catalog_encode_i64<W: Write>(writer: &mut W, v: i64) -> Result<(), Error> {
|
||||
let mut enc = Vec::new();
|
||||
|
||||
let mut d = if v < 0 {
|
||||
(-1 * (v + 1)) as u64 + 1 // also handles i64::MIN
|
||||
} else {
|
||||
v as u64
|
||||
};
|
||||
|
||||
loop {
|
||||
if d < 128 {
|
||||
if v < 0 {
|
||||
enc.push(128 | d as u8);
|
||||
enc.push(0u8);
|
||||
} else {
|
||||
enc.push(d as u8);
|
||||
}
|
||||
break;
|
||||
}
|
||||
enc.push((128 | (d & 127)) as u8);
|
||||
d = d >> 7;
|
||||
}
|
||||
writer.write_all(&enc)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Deserialize i64 from variable length byte sequence
|
||||
///
|
||||
/// We currently read at most 11 bytes, which give a maximum of 70 bits + sign.
|
||||
/// This method is compatible with catalog_encode_u64 iff the
|
||||
/// value encoded is <= 2^63 (values > 2^63 cannot be represented in an i64)
|
||||
pub fn catalog_decode_i64<R: Read>(reader: &mut R) -> Result<i64, Error> {
|
||||
|
||||
let mut v: u64 = 0;
|
||||
let mut buf = [0u8];
|
||||
|
||||
for i in 0..11 { // only allow 11 bytes (70 bits + sign marker)
|
||||
if buf.is_empty() {
|
||||
bail!("decode_i64 failed - unexpected EOB");
|
||||
}
|
||||
reader.read_exact(&mut buf)?;
|
||||
|
||||
let t = buf[0];
|
||||
|
||||
if t == 0 {
|
||||
if v == 0 {
|
||||
return Ok(0);
|
||||
}
|
||||
return Ok(((v - 1) as i64 * -1) - 1); // also handles i64::MIN
|
||||
} else if t < 128 {
|
||||
v |= (t as u64) << (i*7);
|
||||
return Ok(v as i64);
|
||||
} else {
|
||||
v |= ((t & 127) as u64) << (i*7);
|
||||
}
|
||||
}
|
||||
|
||||
bail!("decode_i64 failed - missing end marker");
|
||||
}
|
||||
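// Worked example (illustration) of the variable-length format above:
//    300 -> magnitude 300 = 0b10_0101100
//        -> bytes [0xAC, 0x02]        (7 data bits per byte, bit 8 = "more follows")
//   -300 -> mapped magnitude (-(v + 1)) as u64 + 1 = 300
//        -> bytes [0xAC, 0x82, 0x00]  (the trailing 0x00 marks the value as negative)
// Decoding [0xAC, 0x82, 0x00]: v = 44 + (2 << 7) = 300, then ((300 - 1) * -1) - 1 = -300.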
|
||||
/// Serialize u64 as short, variable length byte sequence
|
||||
///
|
||||
/// Stores 7 bits per byte, Bit 8 indicates the end of the sequence (when not set).
|
||||
/// We limit values to a maximum of 2^63.
|
||||
pub fn catalog_encode_u64<W: Write>(writer: &mut W, v: u64) -> Result<(), Error> {
|
||||
let mut enc = Vec::new();
|
||||
|
||||
if (v & (1<<63)) != 0 { bail!("catalog_encode_u64 failed - value >= 2^63"); }
|
||||
let mut d = v;
|
||||
loop {
|
||||
if d < 128 {
|
||||
@ -611,13 +674,14 @@ pub fn catalog_encode_u64<W: Write>(writer: &mut W, v: u64) -> Result<(), Error>
|
||||
|
||||
/// Deserialize u64 from variable length byte sequence
|
||||
///
|
||||
/// We currently read at most 9 bytes, which give a maximum of 63 bits.
|
||||
/// We currently read at most 10 bytes, which give a maximum of 70 bits,
|
||||
/// but we currently only encode up to 64 bits
|
||||
pub fn catalog_decode_u64<R: Read>(reader: &mut R) -> Result<u64, Error> {
|
||||
|
||||
let mut v: u64 = 0;
|
||||
let mut buf = [0u8];
|
||||
|
||||
for i in 0..9 { // only allow 9 bytes (63 bits)
|
||||
for i in 0..10 { // only allow 10 bytes (70 bits)
|
||||
if buf.is_empty() {
|
||||
bail!("decode_u64 failed - unexpected EOB");
|
||||
}
|
||||
@ -652,9 +716,58 @@ fn test_catalog_u64_encoder() {
|
||||
assert!(decoded == value);
|
||||
}
|
||||
|
||||
test_encode_decode(u64::MIN);
|
||||
test_encode_decode(126);
|
||||
test_encode_decode((1<<12)-1);
|
||||
test_encode_decode((1<<20)-1);
|
||||
test_encode_decode((1<<50)-1);
|
||||
test_encode_decode((1<<63)-1);
|
||||
test_encode_decode(u64::MAX);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_catalog_i64_encoder() {
|
||||
|
||||
fn test_encode_decode(value: i64) {
|
||||
|
||||
let mut data = Vec::new();
|
||||
catalog_encode_i64(&mut data, value).unwrap();
|
||||
|
||||
let slice = &mut &data[..];
|
||||
let decoded = catalog_decode_i64(slice).unwrap();
|
||||
|
||||
assert!(decoded == value);
|
||||
}
|
||||
|
||||
test_encode_decode(0);
|
||||
test_encode_decode(-0);
|
||||
test_encode_decode(126);
|
||||
test_encode_decode(-126);
|
||||
test_encode_decode((1<<12)-1);
|
||||
test_encode_decode(-(1<<12)-1);
|
||||
test_encode_decode((1<<20)-1);
|
||||
test_encode_decode(-(1<<20)-1);
|
||||
test_encode_decode(i64::MIN);
|
||||
test_encode_decode(i64::MAX);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_catalog_i64_compatibility() {
|
||||
|
||||
fn test_encode_decode(value: u64) {
|
||||
|
||||
let mut data = Vec::new();
|
||||
catalog_encode_u64(&mut data, value).unwrap();
|
||||
|
||||
let slice = &mut &data[..];
|
||||
let decoded = catalog_decode_i64(slice).unwrap() as u64;
|
||||
|
||||
assert!(decoded == value);
|
||||
}
|
||||
|
||||
test_encode_decode(u64::MIN);
|
||||
test_encode_decode(126);
|
||||
test_encode_decode((1<<12)-1);
|
||||
test_encode_decode((1<<20)-1);
|
||||
test_encode_decode((1<<50)-1);
|
||||
test_encode_decode(u64::MAX);
|
||||
}
|
||||
|
@ -354,9 +354,11 @@ impl ChunkStore {
|
||||
},
|
||||
Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => {
|
||||
// chunk hasn't been rewritten yet, keep .bad file
|
||||
status.still_bad += 1;
|
||||
},
|
||||
Err(err) => {
|
||||
// some other error, warn user and keep .bad file around too
|
||||
status.still_bad += 1;
|
||||
crate::task_warn!(
|
||||
worker,
|
||||
"error during stat on '{:?}' - {}",
|
||||
@ -378,14 +380,12 @@ impl ChunkStore {
|
||||
}
|
||||
status.removed_chunks += 1;
|
||||
status.removed_bytes += stat.st_size as u64;
|
||||
} else if stat.st_atime < oldest_writer {
|
||||
status.pending_chunks += 1;
|
||||
status.pending_bytes += stat.st_size as u64;
|
||||
} else {
|
||||
if stat.st_atime < oldest_writer {
|
||||
status.pending_chunks += 1;
|
||||
status.pending_bytes += stat.st_size as u64;
|
||||
} else {
|
||||
status.disk_chunks += 1;
|
||||
status.disk_bytes += stat.st_size as u64;
|
||||
}
|
||||
status.disk_chunks += 1;
|
||||
status.disk_bytes += stat.st_size as u64;
|
||||
}
|
||||
}
|
||||
drop(lock);
|
||||
|
@ -3,26 +3,27 @@ use std::io::{self, Write};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::convert::TryFrom;
|
||||
use std::time::Duration;
|
||||
use std::fs::File;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use lazy_static::lazy_static;
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox::tools::fs::{replace_file, CreateOptions};
|
||||
use proxmox::tools::fs::{replace_file, file_read_optional_string, CreateOptions, open_file_locked};
|
||||
|
||||
use super::backup_info::{BackupGroup, BackupDir};
|
||||
use super::chunk_store::ChunkStore;
|
||||
use super::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
|
||||
use super::fixed_index::{FixedIndexReader, FixedIndexWriter};
|
||||
use super::manifest::{MANIFEST_BLOB_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
|
||||
use super::manifest::{MANIFEST_BLOB_NAME, MANIFEST_LOCK_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
|
||||
use super::index::*;
|
||||
use super::{DataBlob, ArchiveType, archive_type};
|
||||
use crate::config::datastore;
|
||||
use crate::config::datastore::{self, DataStoreConfig};
|
||||
use crate::task::TaskState;
|
||||
use crate::tools;
|
||||
use crate::tools::format::HumanByte;
|
||||
use crate::tools::fs::{lock_dir_noblock, DirLockGuard};
|
||||
use crate::api2::types::{GarbageCollectionStatus, Userid};
|
||||
use crate::api2::types::{Authid, GarbageCollectionStatus};
|
||||
use crate::server::UPID;
|
||||
|
||||
lazy_static! {
|
||||
@ -37,6 +38,7 @@ pub struct DataStore {
|
||||
chunk_store: Arc<ChunkStore>,
|
||||
gc_mutex: Mutex<bool>,
|
||||
last_gc_status: Mutex<GarbageCollectionStatus>,
|
||||
verify_new: bool,
|
||||
}
|
||||
|
||||
impl DataStore {
|
||||
@ -45,17 +47,20 @@ impl DataStore {
|
||||
|
||||
let (config, _digest) = datastore::config()?;
|
||||
let config: datastore::DataStoreConfig = config.lookup("datastore", name)?;
|
||||
let path = PathBuf::from(&config.path);
|
||||
|
||||
let mut map = DATASTORE_MAP.lock().unwrap();
|
||||
|
||||
if let Some(datastore) = map.get(name) {
|
||||
// Compare Config - if changed, create new Datastore object!
|
||||
if datastore.chunk_store.base == PathBuf::from(&config.path) {
|
||||
if datastore.chunk_store.base == path &&
|
||||
datastore.verify_new == config.verify_new.unwrap_or(false)
|
||||
{
|
||||
return Ok(datastore.clone());
|
||||
}
|
||||
}
|
||||
|
||||
let datastore = DataStore::open(name)?;
|
||||
let datastore = DataStore::open_with_path(name, &path, config)?;
|
||||
|
||||
let datastore = Arc::new(datastore);
|
||||
map.insert(name.to_string(), datastore.clone());
|
||||
@ -63,26 +68,29 @@ impl DataStore {
|
||||
Ok(datastore)
|
||||
}
|
||||
|
||||
pub fn open(store_name: &str) -> Result<Self, Error> {
|
||||
|
||||
let (config, _digest) = datastore::config()?;
|
||||
let (_, store_config) = config.sections.get(store_name)
|
||||
.ok_or(format_err!("no such datastore '{}'", store_name))?;
|
||||
|
||||
let path = store_config["path"].as_str().unwrap();
|
||||
|
||||
Self::open_with_path(store_name, Path::new(path))
|
||||
}
|
||||
|
||||
pub fn open_with_path(store_name: &str, path: &Path) -> Result<Self, Error> {
|
||||
fn open_with_path(store_name: &str, path: &Path, config: DataStoreConfig) -> Result<Self, Error> {
|
||||
let chunk_store = ChunkStore::open(store_name, path)?;
|
||||
|
||||
let gc_status = GarbageCollectionStatus::default();
|
||||
let mut gc_status_path = chunk_store.base_path();
|
||||
gc_status_path.push(".gc-status");
|
||||
|
||||
let gc_status = if let Some(state) = file_read_optional_string(gc_status_path)? {
|
||||
match serde_json::from_str(&state) {
|
||||
Ok(state) => state,
|
||||
Err(err) => {
|
||||
eprintln!("error reading gc-status: {}", err);
|
||||
GarbageCollectionStatus::default()
|
||||
}
|
||||
}
|
||||
} else {
|
||||
GarbageCollectionStatus::default()
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
chunk_store: Arc::new(chunk_store),
|
||||
gc_mutex: Mutex::new(false),
|
||||
last_gc_status: Mutex::new(gc_status),
|
||||
verify_new: config.verify_new.unwrap_or(false),
|
||||
})
|
||||
}
|
||||
|
||||
@ -208,10 +216,17 @@ impl DataStore {
|
||||
let _guard = tools::fs::lock_dir_noblock(&full_path, "backup group", "possible running backup")?;
|
||||
|
||||
log::info!("removing backup group {:?}", full_path);
|
||||
|
||||
// remove all individual backup dirs first to ensure nothing is using them
|
||||
for snap in backup_group.list_backups(&self.base_path())? {
|
||||
self.remove_backup_dir(&snap.backup_dir, false)?;
|
||||
}
|
||||
|
||||
// no snapshots left, we can now safely remove the empty folder
|
||||
std::fs::remove_dir_all(&full_path)
|
||||
.map_err(|err| {
|
||||
format_err!(
|
||||
"removing backup group {:?} failed - {}",
|
||||
"removing backup group directory {:?} failed - {}",
|
||||
full_path,
|
||||
err,
|
||||
)
|
||||
@ -225,9 +240,10 @@ impl DataStore {
|
||||
|
||||
let full_path = self.snapshot_path(backup_dir);
|
||||
|
||||
let _guard;
|
||||
let (_guard, _manifest_guard);
|
||||
if !force {
|
||||
_guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or used as base")?;
|
||||
_guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
|
||||
_manifest_guard = self.lock_manifest(backup_dir);
|
||||
}
|
||||
|
||||
log::info!("removing backup snapshot {:?}", full_path);
|
||||
@ -260,8 +276,8 @@ impl DataStore {
|
||||
|
||||
/// Returns the backup owner.
|
||||
///
|
||||
/// The backup owner is the user who first created the backup group.
|
||||
pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Userid, Error> {
|
||||
/// The backup owner is the entity who first created the backup group.
|
||||
pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Authid, Error> {
|
||||
let mut full_path = self.base_path();
|
||||
full_path.push(backup_group.group_path());
|
||||
full_path.push("owner");
|
||||
@ -273,7 +289,7 @@ impl DataStore {
|
||||
pub fn set_owner(
|
||||
&self,
|
||||
backup_group: &BackupGroup,
|
||||
userid: &Userid,
|
||||
auth_id: &Authid,
|
||||
force: bool,
|
||||
) -> Result<(), Error> {
|
||||
let mut path = self.base_path();
|
||||
@ -293,7 +309,7 @@ impl DataStore {
|
||||
let mut file = open_options.open(&path)
|
||||
.map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?;
|
||||
|
||||
write!(file, "{}\n", userid)
|
||||
writeln!(file, "{}", auth_id)
|
||||
.map_err(|err| format_err!("unable to write owner file {:?} - {}", path, err))?;
|
||||
|
||||
Ok(())
|
||||
@ -308,8 +324,8 @@ impl DataStore {
|
||||
pub fn create_locked_backup_group(
|
||||
&self,
|
||||
backup_group: &BackupGroup,
|
||||
userid: &Userid,
|
||||
) -> Result<(Userid, DirLockGuard), Error> {
|
||||
auth_id: &Authid,
|
||||
) -> Result<(Authid, DirLockGuard), Error> {
|
||||
// create intermediate path first:
|
||||
let base_path = self.base_path();
|
||||
|
||||
@ -323,7 +339,7 @@ impl DataStore {
|
||||
match std::fs::create_dir(&full_path) {
|
||||
Ok(_) => {
|
||||
let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
|
||||
self.set_owner(backup_group, userid, false)?;
|
||||
self.set_owner(backup_group, auth_id, false)?;
|
||||
let owner = self.get_owner(backup_group)?; // just to be sure
|
||||
Ok((owner, guard))
|
||||
}
|
||||
@ -454,13 +470,25 @@ impl DataStore {
|
||||
worker.check_abort()?;
|
||||
tools::fail_on_shutdown()?;
|
||||
|
||||
if let Ok(archive_type) = archive_type(&path) {
|
||||
if archive_type == ArchiveType::FixedIndex {
|
||||
let index = self.open_fixed_reader(&path)?;
|
||||
self.index_mark_used_chunks(index, &path, status, worker)?;
|
||||
} else if archive_type == ArchiveType::DynamicIndex {
|
||||
let index = self.open_dynamic_reader(&path)?;
|
||||
self.index_mark_used_chunks(index, &path, status, worker)?;
|
||||
let full_path = self.chunk_store.relative_path(&path);
|
||||
match std::fs::File::open(&full_path) {
|
||||
Ok(file) => {
|
||||
if let Ok(archive_type) = archive_type(&path) {
|
||||
if archive_type == ArchiveType::FixedIndex {
|
||||
let index = FixedIndexReader::new(file)?;
|
||||
self.index_mark_used_chunks(index, &path, status, worker)?;
|
||||
} else if archive_type == ArchiveType::DynamicIndex {
|
||||
let index = DynamicIndexReader::new(file)?;
|
||||
self.index_mark_used_chunks(index, &path, status, worker)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
if err.kind() == std::io::ErrorKind::NotFound {
|
||||
// simply ignore vanished files
|
||||
} else {
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
done += 1;
|
||||
@ -531,7 +559,11 @@ impl DataStore {
|
||||
);
|
||||
}
|
||||
if gc_status.removed_bad > 0 {
|
||||
crate::task_log!(worker, "Removed bad files: {}", gc_status.removed_bad);
|
||||
crate::task_log!(worker, "Removed bad chunks: {}", gc_status.removed_bad);
|
||||
}
|
||||
|
||||
if gc_status.still_bad > 0 {
|
||||
crate::task_log!(worker, "Leftover bad chunks: {}", gc_status.still_bad);
|
||||
}
|
||||
|
||||
crate::task_log!(
|
||||
@ -552,11 +584,36 @@ impl DataStore {
|
||||
|
||||
crate::task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);
|
||||
|
||||
let deduplication_factor = if gc_status.disk_bytes > 0 {
|
||||
(gc_status.index_data_bytes as f64)/(gc_status.disk_bytes as f64)
|
||||
} else {
|
||||
1.0
|
||||
};
|
||||
|
||||
crate::task_log!(worker, "Deduplication factor: {:.2}", deduplication_factor);
|
||||
|
||||
if gc_status.disk_chunks > 0 {
|
||||
let avg_chunk = gc_status.disk_bytes/(gc_status.disk_chunks as u64);
|
||||
crate::task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
|
||||
}
|
||||
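// Example with made-up numbers (illustration): 100 GiB of logical index
// data referencing 40 GiB of unique chunks on disk gives a deduplication
// factor of 2.50; with 20480 disk chunks the average chunk size is 2 MiB.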
|
||||
if let Ok(serialized) = serde_json::to_string(&gc_status) {
|
||||
let mut path = self.base_path();
|
||||
path.push(".gc-status");
|
||||
|
||||
let backup_user = crate::backup::backup_user()?;
|
||||
let mode = nix::sys::stat::Mode::from_bits_truncate(0o0644);
|
||||
// set the correct owner/group/permissions while saving file
|
||||
// owner(rw) = backup, group(r)= backup
|
||||
let options = CreateOptions::new()
|
||||
.perm(mode)
|
||||
.owner(backup_user.uid)
|
||||
.group(backup_user.gid);
|
||||
|
||||
// ignore errors
|
||||
let _ = replace_file(path, serialized.as_bytes(), options);
|
||||
}
|
||||
|
||||
*self.last_gc_status.lock().unwrap() = gc_status;
|
||||
|
||||
} else {
|
||||
@ -611,8 +668,27 @@ impl DataStore {
|
||||
digest_str,
|
||||
err,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
fn lock_manifest(
|
||||
&self,
|
||||
backup_dir: &BackupDir,
|
||||
) -> Result<File, Error> {
|
||||
let mut path = self.base_path();
|
||||
path.push(backup_dir.relative_path());
|
||||
path.push(&MANIFEST_LOCK_NAME);
|
||||
|
||||
// update_manifest should never take a long time, so if someone else has
|
||||
// the lock we can simply block a bit and should get it soon
|
||||
open_file_locked(&path, Duration::from_secs(5), true)
|
||||
.map_err(|err| {
|
||||
format_err!(
|
||||
"unable to acquire manifest lock {:?} - {}", &path, err
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
/// Load the manifest without a lock. Must not be written back.
|
||||
pub fn load_manifest(
|
||||
&self,
|
||||
backup_dir: &BackupDir,
|
||||
@ -623,22 +699,20 @@ impl DataStore {
|
||||
Ok((manifest, raw_size))
|
||||
}
|
||||
|
||||
pub fn load_manifest_json(
|
||||
/// Update the manifest of the specified snapshot. Never write a manifest directly,
|
||||
/// only use this method - anything else may break locking guarantees.
|
||||
pub fn update_manifest(
|
||||
&self,
|
||||
backup_dir: &BackupDir,
|
||||
) -> Result<Value, Error> {
|
||||
let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
|
||||
// no expected digest available
|
||||
let manifest_data = blob.decode(None, None)?;
|
||||
let manifest: Value = serde_json::from_slice(&manifest_data[..])?;
|
||||
Ok(manifest)
|
||||
}
|
||||
|
||||
pub fn store_manifest(
|
||||
&self,
|
||||
backup_dir: &BackupDir,
|
||||
manifest: Value,
|
||||
update_fn: impl FnOnce(&mut BackupManifest),
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let _guard = self.lock_manifest(backup_dir)?;
|
||||
let (mut manifest, _) = self.load_manifest(&backup_dir)?;
|
||||
|
||||
update_fn(&mut manifest);
|
||||
|
||||
let manifest = serde_json::to_value(manifest)?;
|
||||
let manifest = serde_json::to_string_pretty(&manifest)?;
|
||||
let blob = DataBlob::encode(manifest.as_bytes(), None, true)?;
|
||||
let raw_data = blob.raw_data();
|
||||
@ -647,8 +721,13 @@ impl DataStore {
|
||||
path.push(backup_dir.relative_path());
|
||||
path.push(MANIFEST_BLOB_NAME);
|
||||
|
||||
// atomic replace invalidates flock - no other writes past this point!
|
||||
replace_file(&path, raw_data, CreateOptions::new())?;
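// Usage sketch (illustration): callers mutate the manifest only through the
// closure passed to update_manifest, mirroring the verify code below, e.g.
//     datastore.update_manifest(&backup_dir, |manifest| {
//         manifest.unprotected["verify_state"] = verify_state;
//     })?;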
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn verify_new(&self) -> bool {
|
||||
self.verify_new
|
||||
}
|
||||
}
|
||||
|
@ -8,6 +8,7 @@ use ::serde::{Deserialize, Serialize};
|
||||
use crate::backup::{BackupDir, CryptMode, CryptConfig};
|
||||
|
||||
pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
|
||||
pub const MANIFEST_LOCK_NAME: &str = ".index.json.lck";
|
||||
pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";
|
||||
|
||||
mod hex_csum {
|
||||
|
@ -2,6 +2,7 @@ use std::collections::HashSet;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::sync::atomic::{Ordering, AtomicUsize};
|
||||
use std::time::Instant;
|
||||
use nix::dir::Dir;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
|
||||
@ -13,6 +14,7 @@ use crate::{
|
||||
BackupGroup,
|
||||
BackupDir,
|
||||
BackupInfo,
|
||||
BackupManifest,
|
||||
IndexFile,
|
||||
CryptMode,
|
||||
FileInfo,
|
||||
@ -23,6 +25,7 @@ use crate::{
|
||||
task::TaskState,
|
||||
task_log,
|
||||
tools::ParallelHandler,
|
||||
tools::fs::lock_dir_noblock_shared,
|
||||
};
|
||||
|
||||
fn verify_blob(datastore: Arc<DataStore>, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
|
||||
@ -282,9 +285,48 @@ pub fn verify_backup_dir(
|
||||
corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
worker: Arc<dyn TaskState + Send + Sync>,
|
||||
upid: UPID,
|
||||
filter: Option<&dyn Fn(&BackupManifest) -> bool>,
|
||||
) -> Result<bool, Error> {
|
||||
let snap_lock = lock_dir_noblock_shared(
|
||||
&datastore.snapshot_path(&backup_dir),
|
||||
"snapshot",
|
||||
"locked by another operation");
|
||||
match snap_lock {
|
||||
Ok(snap_lock) => verify_backup_dir_with_lock(
|
||||
datastore,
|
||||
backup_dir,
|
||||
verified_chunks,
|
||||
corrupt_chunks,
|
||||
worker,
|
||||
upid,
|
||||
filter,
|
||||
snap_lock
|
||||
),
|
||||
Err(err) => {
|
||||
task_log!(
|
||||
worker,
|
||||
"SKIPPED: verify {}:{} - could not acquire snapshot lock: {}",
|
||||
datastore.name(),
|
||||
backup_dir,
|
||||
err,
|
||||
);
|
||||
Ok(true)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut manifest = match datastore.load_manifest(&backup_dir) {
|
||||
/// See verify_backup_dir
|
||||
pub fn verify_backup_dir_with_lock(
|
||||
datastore: Arc<DataStore>,
|
||||
backup_dir: &BackupDir,
|
||||
verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
worker: Arc<dyn TaskState + Send + Sync>,
|
||||
upid: UPID,
|
||||
filter: Option<&dyn Fn(&BackupManifest) -> bool>,
|
||||
_snap_lock: Dir,
|
||||
) -> Result<bool, Error> {
|
||||
let manifest = match datastore.load_manifest(&backup_dir) {
|
||||
Ok((manifest, _)) => manifest,
|
||||
Err(err) => {
|
||||
task_log!(
|
||||
@ -298,6 +340,18 @@ pub fn verify_backup_dir(
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(filter) = filter {
|
||||
if !filter(&manifest) {
|
||||
task_log!(
|
||||
worker,
|
||||
"SKIPPED: verify {}:{} (recently verified)",
|
||||
datastore.name(),
|
||||
backup_dir,
|
||||
);
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
|
||||
task_log!(worker, "verify {}:{}", datastore.name(), backup_dir);
|
||||
|
||||
let mut error_count = 0;
|
||||
@ -351,9 +405,10 @@ pub fn verify_backup_dir(
|
||||
state: verify_result,
|
||||
upid,
|
||||
};
|
||||
manifest.unprotected["verify_state"] = serde_json::to_value(verify_state)?;
|
||||
datastore.store_manifest(&backup_dir, serde_json::to_value(manifest)?)
|
||||
.map_err(|err| format_err!("unable to store manifest blob - {}", err))?;
|
||||
let verify_state = serde_json::to_value(verify_state)?;
|
||||
datastore.update_manifest(&backup_dir, |manifest| {
|
||||
manifest.unprotected["verify_state"] = verify_state;
|
||||
}).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
|
||||
|
||||
Ok(error_count == 0)
|
||||
}
|
||||
@ -373,6 +428,7 @@ pub fn verify_backup_group(
|
||||
progress: Option<(usize, usize)>, // (done, snapshot_count)
|
||||
worker: Arc<dyn TaskState + Send + Sync>,
|
||||
upid: &UPID,
|
||||
filter: Option<&dyn Fn(&BackupManifest) -> bool>,
|
||||
) -> Result<(usize, Vec<String>), Error> {
|
||||
|
||||
let mut errors = Vec::new();
|
||||
@ -398,6 +454,7 @@ pub fn verify_backup_group(
|
||||
BackupInfo::sort_list(&mut list, false); // newest first
|
||||
for info in list {
|
||||
count += 1;
|
||||
|
||||
if !verify_backup_dir(
|
||||
datastore.clone(),
|
||||
&info.backup_dir,
|
||||
@ -405,6 +462,7 @@ pub fn verify_backup_group(
|
||||
corrupt_chunks.clone(),
|
||||
worker.clone(),
|
||||
upid.clone(),
|
||||
filter,
|
||||
)? {
|
||||
errors.push(info.backup_dir.to_string());
|
||||
}
|
||||
@ -435,6 +493,7 @@ pub fn verify_all_backups(
|
||||
datastore: Arc<DataStore>,
|
||||
worker: Arc<dyn TaskState + Send + Sync>,
|
||||
upid: &UPID,
|
||||
filter: Option<&dyn Fn(&BackupManifest) -> bool>,
|
||||
) -> Result<Vec<String>, Error> {
|
||||
let mut errors = Vec::new();
|
||||
|
||||
@ -479,6 +538,7 @@ pub fn verify_all_backups(
|
||||
Some((done, snapshot_count)),
|
||||
worker.clone(),
|
||||
upid,
|
||||
filter,
|
||||
)?;
|
||||
errors.append(&mut group_errors);
|
||||
|
||||
|
@ -37,7 +37,7 @@ async fn run() -> Result<(), Error> {
|
||||
config::update_self_signed_cert(false)?;
|
||||
|
||||
proxmox_backup::rrd::create_rrdb_dir()?;
|
||||
proxmox_backup::config::jobstate::create_jobstate_dir()?;
|
||||
proxmox_backup::server::jobstate::create_jobstate_dir()?;
|
||||
|
||||
if let Err(err) = generate_auth_key() {
|
||||
bail!("unable to generate auth key - {}", err);
|
||||
@ -49,9 +49,11 @@ async fn run() -> Result<(), Error> {
|
||||
}
|
||||
let _ = csrf_secret(); // load with lazy_static
|
||||
|
||||
let config = server::ApiConfig::new(
|
||||
let mut config = server::ApiConfig::new(
|
||||
buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PRIVILEGED)?;
|
||||
|
||||
config.enable_file_log(buildcfg::API_ACCESS_LOG_FN)?;
|
||||
|
||||
let rest_server = RestServer::new(config);
|
||||
|
||||
// http server future:
|
||||
|
@@ -36,7 +36,7 @@ use proxmox_backup::api2::types::*;
 use proxmox_backup::api2::version;
 use proxmox_backup::client::*;
 use proxmox_backup::pxar::catalog::*;
-use proxmox_backup::config::user::complete_user_name;
+use proxmox_backup::config::user::complete_userid;
 use proxmox_backup::backup::{
     archive_type,
     decrypt_key,
@@ -193,7 +193,7 @@ pub fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<
     result
 }

-fn connect(server: &str, port: u16, userid: &Userid) -> Result<HttpClient, Error> {
+fn connect(server: &str, port: u16, auth_id: &Authid) -> Result<HttpClient, Error> {

     let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();

@@ -212,7 +212,7 @@ fn connect(server: &str, port: u16, userid: &Userid) -> Result<HttpClient, Error
         .fingerprint_cache(true)
         .ticket_cache(true);

-    HttpClient::new(server, port, userid, options)
+    HttpClient::new(server, port, auth_id, options)
 }

 async fn view_task_result(
@@ -366,7 +366,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {

     let repo = extract_repository_from_value(&param)?;

-    let client = connect(repo.host(), repo.port(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.auth_id())?;

     let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

@@ -425,7 +425,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
                 description: "Backup group.",
             },
             "new-owner": {
-                type: Userid,
+                type: Authid,
             },
         }
     }
@@ -435,7 +435,7 @@ async fn change_backup_owner(group: String, mut param: Value) -> Result<(), Erro

     let repo = extract_repository_from_value(&param)?;

-    let mut client = connect(repo.host(), repo.port(), repo.user())?;
+    let mut client = connect(repo.host(), repo.port(), repo.auth_id())?;

     param.as_object_mut().unwrap().remove("repository");

@@ -478,7 +478,7 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {

     let output_format = get_output_format(&param);

-    let client = connect(repo.host(), repo.port(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.auth_id())?;

     let group: Option<BackupGroup> = if let Some(path) = param["group"].as_str() {
         Some(path.parse()?)
@@ -510,7 +510,7 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
         .sortby("backup-id", false)
         .sortby("backup-time", false)
         .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
-        .column(ColumnConfig::new("size"))
+        .column(ColumnConfig::new("size").renderer(tools::format::render_bytes_human_readable))
         .column(ColumnConfig::new("files").renderer(render_files))
         ;

@@ -543,7 +543,7 @@ async fn forget_snapshots(param: Value) -> Result<Value, Error> {
     let path = tools::required_string_param(&param, "snapshot")?;
     let snapshot: BackupDir = path.parse()?;

-    let mut client = connect(repo.host(), repo.port(), repo.user())?;
+    let mut client = connect(repo.host(), repo.port(), repo.auth_id())?;

     let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

@@ -573,7 +573,7 @@ async fn api_login(param: Value) -> Result<Value, Error> {

     let repo = extract_repository_from_value(&param)?;

-    let client = connect(repo.host(), repo.port(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.auth_id())?;
     client.login().await?;

     record_repository(&repo);
@@ -630,7 +630,7 @@ async fn api_version(param: Value) -> Result<(), Error> {

     let repo = extract_repository_from_value(&param);
     if let Ok(repo) = repo {
-        let client = connect(repo.host(), repo.port(), repo.user())?;
+        let client = connect(repo.host(), repo.port(), repo.auth_id())?;

         match client.get("api2/json/version", None).await {
             Ok(mut result) => version_info["server"] = result["data"].take(),
@@ -680,7 +680,7 @@ async fn list_snapshot_files(param: Value) -> Result<Value, Error> {

     let output_format = get_output_format(&param);

-    let client = connect(repo.host(), repo.port(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.auth_id())?;

     let path = format!("api2/json/admin/datastore/{}/files", repo.store());

@@ -724,7 +724,7 @@ async fn start_garbage_collection(param: Value) -> Result<Value, Error> {

     let output_format = get_output_format(&param);

-    let mut client = connect(repo.host(), repo.port(), repo.user())?;
+    let mut client = connect(repo.host(), repo.port(), repo.auth_id())?;

     let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

@@ -1036,7 +1036,7 @@ async fn create_backup(

     let backup_time = backup_time_opt.unwrap_or_else(|| epoch_i64());

-    let client = connect(repo.host(), repo.port(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.auth_id())?;
     record_repository(&repo);

     println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time)?);
@@ -1339,7 +1339,7 @@ async fn restore(param: Value) -> Result<Value, Error> {

     let archive_name = tools::required_string_param(&param, "archive-name")?;

-    let client = connect(repo.host(), repo.port(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.auth_id())?;

     record_repository(&repo);

@@ -1512,7 +1512,7 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
     let snapshot = tools::required_string_param(&param, "snapshot")?;
     let snapshot: BackupDir = snapshot.parse()?;

-    let mut client = connect(repo.host(), repo.port(), repo.user())?;
+    let mut client = connect(repo.host(), repo.port(), repo.auth_id())?;

     let (keydata, crypt_mode) = keyfile_parameters(&param)?;

@@ -1583,7 +1583,7 @@ fn prune<'a>(
 async fn prune_async(mut param: Value) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;

-    let mut client = connect(repo.host(), repo.port(), repo.user())?;
+    let mut client = connect(repo.host(), repo.port(), repo.auth_id())?;

     let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

@@ -1657,7 +1657,10 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {
             optional: true,
         },
     }
-    }
+    },
+    returns: {
+        type: StorageStatus,
+    },
 )]
 /// Get repository status.
 async fn status(param: Value) -> Result<Value, Error> {
@@ -1666,7 +1669,7 @@ async fn status(param: Value) -> Result<Value, Error> {

     let output_format = get_output_format(&param);

-    let client = connect(repo.host(), repo.port(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.auth_id())?;

     let path = format!("api2/json/admin/datastore/{}/status", repo.store());

@@ -1690,7 +1693,7 @@ async fn status(param: Value) -> Result<Value, Error> {
         .column(ColumnConfig::new("used").renderer(render_total_percentage))
         .column(ColumnConfig::new("avail").renderer(render_total_percentage));

-    let schema = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_STATUS;
+    let schema = &API_RETURN_SCHEMA_STATUS;

     format_and_print_result_full(&mut data, schema, &output_format, &options);

@@ -1711,7 +1714,7 @@ async fn try_get(repo: &BackupRepository, url: &str) -> Value {
         .fingerprint_cache(true)
         .ticket_cache(true);

-    let client = match HttpClient::new(repo.host(), repo.port(), repo.user(), options) {
+    let client = match HttpClient::new(repo.host(), repo.port(), repo.auth_id(), options) {
         Ok(v) => v,
         _ => return Value::Null,
     };
@@ -2010,7 +2013,7 @@ fn main() {
     let change_owner_cmd_def = CliCommand::new(&API_METHOD_CHANGE_BACKUP_OWNER)
         .arg_param(&["group", "new-owner"])
         .completion_cb("group", complete_backup_group)
-        .completion_cb("new-owner", complete_user_name)
+        .completion_cb("new-owner", complete_userid)
         .completion_cb("repository", complete_repository);

     let cmd_def = CliCommandMap::new()
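The bulk of the client changes swap `repo.user()` for `repo.auth_id()`: callers now authenticate as an authentication id, which is either a plain user id or a user id plus an API token name. A simplified model of the distinction (the real `Authid` lives in `api2::types`; this sketch only assumes the `user@realm!tokenname` notation):

    // Simplified model: an Authid is a user id plus an optional token name,
    // printed as "user@realm" or "user@realm!tokenname".
    #[derive(Clone, PartialEq)]
    struct Authid {
        user: String,               // e.g. "backup@pam"
        token_name: Option<String>, // e.g. Some("sync") for an API token
    }

    impl Authid {
        fn is_token(&self) -> bool {
            self.token_name.is_some()
        }
    }

    impl std::fmt::Display for Authid {
        fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
            match &self.token_name {
                Some(name) => write!(f, "{}!{}", self.user, name),
                None => write!(f, "{}", self.user),
            }
        }
    }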
@@ -62,10 +62,10 @@ fn connect() -> Result<HttpClient, Error> {
         let ticket = Ticket::new("PBS", Userid::root_userid())?
             .sign(private_auth_key(), None)?;
         options = options.password(Some(ticket));
-        HttpClient::new("localhost", 8007, Userid::root_userid(), options)?
+        HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)?
     } else {
         options = options.ticket_cache(true).interactive(true);
-        HttpClient::new("localhost", 8007, Userid::root_userid(), options)?
+        HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)?
     };

     Ok(client)
@@ -388,7 +388,7 @@ fn main() {


     let mut rpcenv = CliEnvironment::new();
-    rpcenv.set_user(Some(String::from("root@pam")));
+    rpcenv.set_auth_id(Some(String::from("root@pam")));

     proxmox_backup::tools::runtime::main(run_async_cli_command(cmd_def, rpcenv));
 }
@@ -1,5 +1,6 @@
 use std::sync::{Arc};
 use std::path::{Path, PathBuf};
+use std::os::unix::io::AsRawFd;

 use anyhow::{bail, format_err, Error};
 use futures::*;
@@ -9,16 +10,45 @@ use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
 use proxmox::try_block;
 use proxmox::api::RpcEnvironmentType;

-use proxmox_backup::api2::types::Userid;
+use proxmox_backup::{
+    backup::DataStore,
+    server::{
+        UPID,
+        WorkerTask,
+        ApiConfig,
+        rest::*,
+        jobstate::{
+            self,
+            Job,
+        },
+        rotate_task_log_archive,
+    },
+    tools::systemd::time::{
+        parse_calendar_event,
+        compute_next_event,
+    },
+};
+
+use proxmox_backup::api2::types::{Authid, Userid};
 use proxmox_backup::configdir;
 use proxmox_backup::buildcfg;
 use proxmox_backup::server;
-use proxmox_backup::tools::daemon;
-use proxmox_backup::server::{ApiConfig, rest::*};
 use proxmox_backup::auth_helpers::*;
-use proxmox_backup::tools::disks::{ DiskManage, zfs_pool_stats };
+use proxmox_backup::tools::{
+    daemon,
+    disks::{
+        DiskManage,
+        zfs_pool_stats,
+    },
+    socket::{
+        set_tcp_keepalive,
+        PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
+    },
+};

 use proxmox_backup::api2::pull::do_sync_job;
+use proxmox_backup::server::do_verification_job;

 fn main() -> Result<(), Error> {
     proxmox_backup::tools::setup_safe_path_env();
@@ -63,6 +93,8 @@ async fn run() -> Result<(), Error> {
     config.register_template("index", &indexpath)?;
     config.register_template("console", "/usr/share/pve-xtermjs/index.html.hbs")?;

+    config.enable_file_log(buildcfg::API_ACCESS_LOG_FN)?;
+
     let rest_server = RestServer::new(config);

     //openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
@@ -87,6 +119,9 @@ async fn run() -> Result<(), Error> {
             let acceptor = Arc::clone(&acceptor);
             async move {
                 sock.set_nodelay(true).unwrap();
+
+                let _ = set_tcp_keepalive(sock.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
+
                 Ok(tokio_openssl::accept(&acceptor, sock)
                     .await
                     .ok() // handshake errors aren't fatal, so return None to filter
@@ -196,8 +231,8 @@ async fn schedule_tasks() -> Result<(), Error> {

     schedule_datastore_garbage_collection().await;
     schedule_datastore_prune().await;
-    schedule_datastore_verification().await;
     schedule_datastore_sync_jobs().await;
+    schedule_datastore_verify_jobs().await;
     schedule_task_log_rotate().await;

     Ok(())
@@ -205,14 +240,14 @@ async fn schedule_tasks() -> Result<(), Error> {

 async fn schedule_datastore_garbage_collection() {

-    use proxmox_backup::backup::DataStore;
-    use proxmox_backup::server::{UPID, WorkerTask};
     use proxmox_backup::config::{
-        jobstate::{self, Job},
-        datastore::{self, DataStoreConfig}
+        datastore::{
+            self,
+            DataStoreConfig,
+        },
     };
-    use proxmox_backup::tools::systemd::time::{
-        parse_calendar_event, compute_next_event};
+
+    let email = server::lookup_user_email(Userid::root_userid());

     let config = match datastore::config() {
         Err(err) => {
@@ -294,11 +329,12 @@ async fn schedule_datastore_garbage_collection() {
     };

     let store2 = store.clone();
+    let email2 = email.clone();

     if let Err(err) = WorkerTask::new_thread(
         worker_type,
         Some(store.clone()),
-        Userid::backup_userid().clone(),
+        Authid::backup_auth_id().clone(),
         false,
         move |worker| {
             job.start(&worker.upid().to_string())?;
@@ -314,6 +350,13 @@ async fn schedule_datastore_garbage_collection() {
                 eprintln!("could not finish job state for {}: {}", worker_type, err);
             }

+            if let Some(email2) = email2 {
+                let gc_status = datastore.last_gc_status();
+                if let Err(err) = crate::server::send_gc_status(&email2, datastore.name(), &gc_status, &result) {
+                    eprintln!("send gc notification failed: {}", err);
+                }
+            }
+
             result
         }
     ) {
@@ -324,15 +367,17 @@ async fn schedule_datastore_garbage_collection() {

 async fn schedule_datastore_prune() {

-    use proxmox_backup::backup::{
-        PruneOptions, DataStore, BackupGroup, compute_prune_info};
-    use proxmox_backup::server::{WorkerTask};
-    use proxmox_backup::config::{
-        jobstate::{self, Job},
-        datastore::{self, DataStoreConfig}
+    use proxmox_backup::{
+        backup::{
+            PruneOptions,
+            BackupGroup,
+            compute_prune_info,
+        },
+        config::datastore::{
+            self,
+            DataStoreConfig,
+        },
     };
-    use proxmox_backup::tools::systemd::time::{
-        parse_calendar_event, compute_next_event};

     let config = match datastore::config() {
         Err(err) => {
@@ -418,7 +463,7 @@ async fn schedule_datastore_prune() {
     if let Err(err) = WorkerTask::new_thread(
         worker_type,
         Some(store.clone()),
-        Userid::backup_userid().clone(),
+        Authid::backup_auth_id().clone(),
         false,
         move |worker| {

@@ -469,126 +514,11 @@ async fn schedule_datastore_prune() {
         }
     }
 }

-async fn schedule_datastore_verification() {
-    use proxmox_backup::backup::{DataStore, verify_all_backups};
-    use proxmox_backup::server::{WorkerTask};
-    use proxmox_backup::config::{
-        jobstate::{self, Job},
-        datastore::{self, DataStoreConfig}
-    };
-    use proxmox_backup::tools::systemd::time::{
-        parse_calendar_event, compute_next_event};
-
-    let config = match datastore::config() {
-        Err(err) => {
-            eprintln!("unable to read datastore config - {}", err);
-            return;
-        }
-        Ok((config, _digest)) => config,
-    };
-
-    for (store, (_, store_config)) in config.sections {
-        let datastore = match DataStore::lookup_datastore(&store) {
-            Ok(datastore) => datastore,
-            Err(err) => {
-                eprintln!("lookup_datastore failed - {}", err);
-                continue;
-            }
-        };
-
-        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
-            Ok(c) => c,
-            Err(err) => {
-                eprintln!("datastore config from_value failed - {}", err);
-                continue;
-            }
-        };
-
-        let event_str = match store_config.verify_schedule {
-            Some(event_str) => event_str,
-            None => continue,
-        };
-
-        let event = match parse_calendar_event(&event_str) {
-            Ok(event) => event,
-            Err(err) => {
-                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
-                continue;
-            }
-        };
-
-        let worker_type = "verify";
-
-        let last = match jobstate::last_run_time(worker_type, &store) {
-            Ok(time) => time,
-            Err(err) => {
-                eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
-                continue;
-            }
-        };
-
-        let next = match compute_next_event(&event, last, false) {
-            Ok(Some(next)) => next,
-            Ok(None) => continue,
-            Err(err) => {
-                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
-                continue;
-            }
-        };
-
-        let now = proxmox::tools::time::epoch_i64();
-
-        if next > now { continue; }
-
-        let mut job = match Job::new(worker_type, &store) {
-            Ok(job) => job,
-            Err(_) => continue, // could not get lock
-        };
-
-        let worker_id = store.clone();
-        let store2 = store.clone();
-        if let Err(err) = WorkerTask::new_thread(
-            worker_type,
-            Some(worker_id),
-            Userid::backup_userid().clone(),
-            false,
-            move |worker| {
-                job.start(&worker.upid().to_string())?;
-                worker.log(format!("starting verification on store {}", store2));
-                worker.log(format!("task triggered by schedule '{}'", event_str));
-                let result = try_block!({
-                    let failed_dirs =
-                        verify_all_backups(datastore, worker.clone(), worker.upid())?;
-                    if failed_dirs.len() > 0 {
-                        worker.log("Failed to verify following snapshots:");
-                        for dir in failed_dirs {
-                            worker.log(format!("\t{}", dir));
-                        }
-                        Err(format_err!("verification failed - please check the log for details"))
-                    } else {
-                        Ok(())
-                    }
-                });
-
-                let status = worker.create_state(&result);
-
-                if let Err(err) = job.finish(status) {
-                    eprintln!("could not finish job state for {}: {}", worker_type, err);
-                }
-
-                result
-            },
-        ) {
-            eprintln!("unable to start verification on store {} - {}", store, err);
-        }
-    }
-}
-
 async fn schedule_datastore_sync_jobs() {

-    use proxmox_backup::{
-        config::{ sync::{self, SyncJobConfig}, jobstate::{self, Job} },
-        tools::systemd::time::{ parse_calendar_event, compute_next_event },
+    use proxmox_backup::config::sync::{
+        self,
+        SyncJobConfig,
     };

     let config = match sync::config() {
@@ -649,25 +579,80 @@ async fn schedule_datastore_sync_jobs() {
             Err(_) => continue, // could not get lock
         };

-        let userid = Userid::backup_userid().clone();
+        let auth_id = Authid::backup_auth_id();

-        if let Err(err) = do_sync_job(job, job_config, &userid, Some(event_str)) {
+        if let Err(err) = do_sync_job(job, job_config, &auth_id, Some(event_str)) {
             eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
         }
     }
 }

-async fn schedule_task_log_rotate() {
-    use proxmox_backup::{
-        config::jobstate::{self, Job},
-        server::rotate_task_log_archive,
+async fn schedule_datastore_verify_jobs() {
+
+    use proxmox_backup::config::verify::{
+        self,
+        VerificationJobConfig,
     };
-    use proxmox_backup::server::WorkerTask;
-    use proxmox_backup::tools::systemd::time::{
-        parse_calendar_event, compute_next_event};
+
+    let config = match verify::config() {
+        Err(err) => {
+            eprintln!("unable to read verification job config - {}", err);
+            return;
+        }
+        Ok((config, _digest)) => config,
+    };
+    for (job_id, (_, job_config)) in config.sections {
+        let job_config: VerificationJobConfig = match serde_json::from_value(job_config) {
+            Ok(c) => c,
+            Err(err) => {
+                eprintln!("verification job config from_value failed - {}", err);
+                continue;
+            }
+        };
+        let event_str = match job_config.schedule {
+            Some(ref event_str) => event_str.clone(),
+            None => continue,
+        };
+        let event = match parse_calendar_event(&event_str) {
+            Ok(event) => event,
+            Err(err) => {
+                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
+                continue;
+            }
+        };
+        let worker_type = "verificationjob";
+        let last = match jobstate::last_run_time(worker_type, &job_id) {
+            Ok(time) => time,
+            Err(err) => {
+                eprintln!("could not get last run time of {} {}: {}", worker_type, job_id, err);
+                continue;
+            }
+        };
+        let next = match compute_next_event(&event, last, false) {
+            Ok(Some(next)) => next,
+            Ok(None) => continue,
+            Err(err) => {
+                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
+                continue;
+            }
+        };
+        let now = proxmox::tools::time::epoch_i64();
+        if next > now { continue; }
+        let job = match Job::new(worker_type, &job_id) {
+            Ok(job) => job,
+            Err(_) => continue, // could not get lock
+        };
+        let auth_id = Authid::backup_auth_id();
+        if let Err(err) = do_verification_job(job, job_config, &auth_id, Some(event_str)) {
+            eprintln!("unable to start datastore verification job {} - {}", &job_id, err);
+        }
+    }
+}
+
+async fn schedule_task_log_rotate() {

     let worker_type = "logrotate";
-    let job_id = "task-archive";
+    let job_id = "task_archive";

     let last = match jobstate::last_run_time(worker_type, job_id) {
         Ok(time) => time,
@@ -719,15 +704,16 @@ async fn schedule_task_log_rotate() {
     if let Err(err) = WorkerTask::new_thread(
         worker_type,
         Some(job_id.to_string()),
-        Userid::backup_userid().clone(),
+        Authid::backup_auth_id().clone(),
         false,
         move |worker| {
             job.start(&worker.upid().to_string())?;
             worker.log(format!("starting task log rotation"));
-            // one entry has normally about ~100-150 bytes
-            let max_size = 500000; // at least 5000 entries
-            let max_files = 20; // at least 100000 entries

             let result = try_block!({
+                // rotate task log archive
+                let max_size = 500000; // a normal entry has about 100b, so ~ 5000 entries/file
+                let max_files = 20; // times twenty files gives at least 100000 task entries
                 let has_rotated = rotate_task_log_archive(max_size, true, Some(max_files))?;
                 if has_rotated {
                     worker.log(format!("task log archive was rotated"));
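Every schedule_* function above gates on the same sequence: parse the calendar event, read the job's last run time, compute the next occurrence, and skip the job while that time is still in the future. A self-contained sketch of just that gate, with a closure standing in for the parse_calendar_event plus compute_next_event pair:

    // Minimal model of the scheduling gate used by every schedule_* function above.
    // `next_event_after` stands in for parse_calendar_event + compute_next_event.
    fn job_is_due(next_event_after: impl Fn(i64) -> Option<i64>, last_run: i64, now: i64) -> bool {
        match next_event_after(last_run) {
            Some(next) => next <= now, // due once the next occurrence has passed
            None => false,             // schedule never fires again
        }
    }

    fn main() {
        // hourly schedule, last ran at t=0; at t=4000s the job is due, at t=1800s it is not
        let hourly = |last: i64| Some(last + 3600);
        assert!(job_is_due(hourly, 0, 4000));
        assert!(!job_is_due(hourly, 0, 1800));
    }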
@@ -225,7 +225,7 @@ async fn test_upload_speed(

     let backup_time = proxmox::tools::time::epoch_i64();

-    let client = connect(repo.host(), repo.port(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.auth_id())?;
     record_repository(&repo);

     if verbose { eprintln!("Connecting to backup server"); }
@@ -79,7 +79,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
         }
     };

-    let client = connect(repo.host(), repo.port(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.auth_id())?;

     let client = BackupReader::start(
         client,
@@ -153,7 +153,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
 /// Shell to interactively inspect and restore snapshots.
 async fn catalog_shell(param: Value) -> Result<(), Error> {
     let repo = extract_repository_from_value(&param)?;
-    let client = connect(repo.host(), repo.port(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.auth_id())?;
     let path = tools::required_string_param(&param, "snapshot")?;
     let archive_name = tools::required_string_param(&param, "archive-name")?;

@@ -414,13 +414,13 @@ fn paperkey_html(data: &str, subject: Option<String>) -> Result<(), Error> {
     println!("</p>");

     let data = data.join("\n");
-    let qr_code = generate_qr_code("png", data.as_bytes())?;
+    let qr_code = generate_qr_code("svg", data.as_bytes())?;
     let qr_code = base64::encode_config(&qr_code, base64::STANDARD_NO_PAD);

     println!("<center>");
     println!("<img");
     println!("width=\"{}pt\" height=\"{}pt\"", img_size_pt, img_size_pt);
-    println!("src=\"data:image/png;base64,{}\"/>", qr_code);
+    println!("src=\"data:image/svg+xml;base64,{}\"/>", qr_code);
     println!("</center>");
     println!("</div>");
 }
@@ -447,13 +447,13 @@ fn paperkey_html(data: &str, subject: Option<String>) -> Result<(), Error> {

     println!("</p>");

-    let qr_code = generate_qr_code("png", key_text.as_bytes())?;
+    let qr_code = generate_qr_code("svg", key_text.as_bytes())?;
     let qr_code = base64::encode_config(&qr_code, base64::STANDARD_NO_PAD);

     println!("<center>");
     println!("<img");
     println!("width=\"{}pt\" height=\"{}pt\"", img_size_pt, img_size_pt);
-    println!("src=\"data:image/png;base64,{}\"/>", qr_code);
+    println!("src=\"data:image/svg+xml;base64,{}\"/>", qr_code);
     println!("</center>");

     println!("</div>");
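Switching the paper-key QR code from `png` to `svg` only swaps the payload and the MIME type inside the data URI; the base64 wrapping is unchanged. A small sketch of the embedding (same `base64` API as the hunks above; the SVG content is a placeholder):

    // Build an inline <img> source from raw SVG bytes, as the hunks above do.
    fn svg_data_uri(svg: &[u8]) -> String {
        let b64 = base64::encode_config(svg, base64::STANDARD_NO_PAD);
        format!("data:image/svg+xml;base64,{}", b64)
    }

    fn main() {
        let svg = br#"<svg xmlns="http://www.w3.org/2000/svg"/>"#;
        println!("<img src=\"{}\"/>", svg_data_uri(svg));
    }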
@@ -144,7 +144,7 @@ fn mount(
     // Process should be daemonized.
     // Make sure to fork before the async runtime is instantiated to avoid troubles.
     let pipe = pipe()?;
-    match fork() {
+    match unsafe { fork() } {
         Ok(ForkResult::Parent { .. }) => {
             nix::unistd::close(pipe.1).unwrap();
             // Blocks the parent process until we are ready to go in the child
@@ -163,7 +163,7 @@ fn mount(
 async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;
     let archive_name = tools::required_string_param(&param, "archive-name")?;
-    let client = connect(repo.host(), repo.port(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.auth_id())?;

     let target = param["target"].as_str();

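The `fork()` to `unsafe { fork() }` change follows the nix crate, whose newer releases mark `fork` as an unsafe fn because forking a process that may already have spawned threads (for example, an async runtime) is hazardous. A minimal sketch of the daemonize-before-runtime pattern the hunk preserves, assuming a recent nix:

    use nix::unistd::{fork, ForkResult};

    fn daemonize() -> Result<(), nix::Error> {
        // Fork before any async runtime exists; only the child continues.
        match unsafe { fork() }? {
            ForkResult::Parent { .. } => std::process::exit(0),
            ForkResult::Child => Ok(()), // child keeps running and may now start a runtime
        }
    }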
@@ -48,7 +48,7 @@ async fn task_list(param: Value) -> Result<Value, Error> {
     let output_format = get_output_format(&param);

     let repo = extract_repository_from_value(&param)?;
-    let client = connect(repo.host(), repo.port(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.auth_id())?;

     let limit = param["limit"].as_u64().unwrap_or(50) as usize;
     let running = !param["all"].as_bool().unwrap_or(false);
@@ -57,7 +57,7 @@ async fn task_list(param: Value) -> Result<Value, Error> {
         "running": running,
         "start": 0,
         "limit": limit,
-        "userfilter": repo.user(),
+        "userfilter": repo.auth_id(),
         "store": repo.store(),
     });

@@ -96,7 +96,7 @@ async fn task_log(param: Value) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;
     let upid = tools::required_string_param(&param, "upid")?;

-    let client = connect(repo.host(), repo.port(), repo.user())?;
+    let client = connect(repo.host(), repo.port(), repo.auth_id())?;

     display_task_log(client, upid, true).await?;

@@ -122,7 +122,7 @@ async fn task_stop(param: Value) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;
     let upid_str = tools::required_string_param(&param, "upid")?;

-    let mut client = connect(repo.host(), repo.port(), repo.user())?;
+    let mut client = connect(repo.host(), repo.port(), repo.auth_id())?;

     let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
     let _ = client.delete(&path, None).await?;
@@ -60,7 +60,7 @@ pub fn acl_commands() -> CommandLineInterface {
             "update",
             CliCommand::new(&api2::access::acl::API_METHOD_UPDATE_ACL)
                 .arg_param(&["path", "role"])
-                .completion_cb("userid", config::user::complete_user_name)
+                .completion_cb("userid", config::user::complete_userid)
                 .completion_cb("path", config::datastore::complete_acl_path)

         );
@@ -1,11 +1,14 @@
 use anyhow::Error;
 use serde_json::Value;

+use std::collections::HashMap;
+
 use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};

 use proxmox_backup::config;
 use proxmox_backup::tools;
 use proxmox_backup::api2;
+use proxmox_backup::api2::types::{ACL_PATH_SCHEMA, Authid, Userid};

 #[api(
     input: {
@@ -48,6 +51,106 @@ fn list_users(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Er
     Ok(Value::Null)
 }

+#[api(
+    input: {
+        properties: {
+            "output-format": {
+                schema: OUTPUT_FORMAT,
+                optional: true,
+            },
+            userid: {
+                type: Userid,
+            }
+        }
+    }
+)]
+/// List tokens associated with user.
+fn list_tokens(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
+
+    let output_format = get_output_format(&param);
+
+    let info = &api2::access::user::API_METHOD_LIST_TOKENS;
+    let mut data = match info.handler {
+        ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
+        _ => unreachable!(),
+    };
+
+    let options = default_table_format_options()
+        .column(ColumnConfig::new("tokenid"))
+        .column(
+            ColumnConfig::new("enable")
+                .renderer(tools::format::render_bool_with_default_true)
+        )
+        .column(
+            ColumnConfig::new("expire")
+                .renderer(tools::format::render_epoch)
+        )
+        .column(ColumnConfig::new("comment"));
+
+    format_and_print_result_full(&mut data, info.returns, &output_format, &options);
+
+    Ok(Value::Null)
+}
+
+
+#[api(
+    input: {
+        properties: {
+            "output-format": {
+                schema: OUTPUT_FORMAT,
+                optional: true,
+            },
+            auth_id: {
+                type: Authid,
+            },
+            path: {
+                schema: ACL_PATH_SCHEMA,
+                optional: true,
+            },
+        }
+    }
+)]
+/// List permissions of user/token.
+fn list_permissions(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
+
+    let output_format = get_output_format(&param);
+
+    let info = &api2::access::API_METHOD_LIST_PERMISSIONS;
+    let mut data = match info.handler {
+        ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
+        _ => unreachable!(),
+    };
+
+    if output_format == "text" {
+        println!("Privileges with (*) have the propagate flag set\n");
+        let data: HashMap<String, HashMap<String, bool>> = serde_json::from_value(data)?;
+        let mut paths: Vec<String> = data.keys().cloned().collect();
+        paths.sort_unstable();
+        for path in paths {
+            println!("Path: {}", path);
+            let priv_map = data.get(&path).unwrap();
+            let mut privs: Vec<String> = priv_map.keys().cloned().collect();
+            if privs.is_empty() {
+                println!("- NoAccess");
+            } else {
+                privs.sort_unstable();
+                for privilege in privs {
+                    if *priv_map.get(&privilege).unwrap() {
+                        println!("- {} (*)", privilege);
+                    } else {
+                        println!("- {}", privilege);
+                    }
+                }
+            }
+        }
+    } else {
+        format_and_print_result(&mut data, &output_format);
+    }
+
+    Ok(Value::Null)
+}
+
+
 pub fn user_commands() -> CommandLineInterface {

     let cmd_def = CliCommandMap::new()
@@ -62,13 +165,39 @@ pub fn user_commands() -> CommandLineInterface {
             "update",
             CliCommand::new(&api2::access::user::API_METHOD_UPDATE_USER)
                 .arg_param(&["userid"])
-                .completion_cb("userid", config::user::complete_user_name)
+                .completion_cb("userid", config::user::complete_userid)
         )
         .insert(
             "remove",
             CliCommand::new(&api2::access::user::API_METHOD_DELETE_USER)
                 .arg_param(&["userid"])
-                .completion_cb("userid", config::user::complete_user_name)
+                .completion_cb("userid", config::user::complete_userid)
+        )
+        .insert(
+            "list-tokens",
+            CliCommand::new(&&API_METHOD_LIST_TOKENS)
+                .arg_param(&["userid"])
+                .completion_cb("userid", config::user::complete_userid)
+        )
+        .insert(
+            "generate-token",
+            CliCommand::new(&api2::access::user::API_METHOD_GENERATE_TOKEN)
+                .arg_param(&["userid", "tokenname"])
+                .completion_cb("userid", config::user::complete_userid)
+        )
+        .insert(
+            "delete-token",
+            CliCommand::new(&api2::access::user::API_METHOD_DELETE_TOKEN)
+                .arg_param(&["userid", "tokenname"])
+                .completion_cb("userid", config::user::complete_userid)
+                .completion_cb("tokenname", config::user::complete_token_name)
+        )
+        .insert(
+            "permissions",
+            CliCommand::new(&&API_METHOD_LIST_PERMISSIONS)
+                .arg_param(&["auth_id"])
+                .completion_cb("auth_id", config::user::complete_authid)
+                .completion_cb("path", config::datastore::complete_acl_path)
         );

     cmd_def.into()
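With these subcommands registered, token handling becomes scriptable. Illustrative invocations, assuming the commands are mounted under the manager binary's `user` group as the `CliCommandMap` above suggests (argument order follows the `arg_param` lists; names are examples):

    proxmox-backup-manager user list-tokens root@pam
    proxmox-backup-manager user generate-token root@pam mytoken
    proxmox-backup-manager user permissions 'root@pam!mytoken' --path /datastore/store1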
@@ -4,6 +4,8 @@
 pub const CONFIGDIR: &str = "/etc/proxmox-backup";
 pub const JS_DIR: &str = "/usr/share/javascript/proxmox-backup";

+pub const API_ACCESS_LOG_FN: &str = "/var/log/proxmox-backup/api/access.log";
+
 /// Prepend configuration directory to a file name
 ///
 /// This is a simple way to get the full path for configuration files.
@@ -16,7 +16,7 @@ pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_RE
 #[derive(Debug)]
 pub struct BackupRepository {
     /// The user name used for Authentication
-    user: Option<Userid>,
+    auth_id: Option<Authid>,
     /// The host name or IP address
     host: Option<String>,
     /// The port
@@ -27,20 +27,29 @@ pub struct BackupRepository {

 impl BackupRepository {

-    pub fn new(user: Option<Userid>, host: Option<String>, port: Option<u16>, store: String) -> Self {
+    pub fn new(auth_id: Option<Authid>, host: Option<String>, port: Option<u16>, store: String) -> Self {
         let host = match host {
             Some(host) if (IP_V6_REGEX.regex_obj)().is_match(&host) => {
                 Some(format!("[{}]", host))
             },
             other => other,
         };
-        Self { user, host, port, store }
+        Self { auth_id, host, port, store }
     }

+    pub fn auth_id(&self) -> &Authid {
+        if let Some(ref auth_id) = self.auth_id {
+            return auth_id;
+        }
+
+        &Authid::root_auth_id()
+    }
+
     pub fn user(&self) -> &Userid {
-        if let Some(ref user) = self.user {
-            return &user;
+        if let Some(auth_id) = &self.auth_id {
+            return auth_id.user();
         }

         Userid::root_userid()
     }

@@ -65,8 +74,8 @@ impl BackupRepository {

 impl fmt::Display for BackupRepository {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match (&self.user, &self.host, self.port) {
-            (Some(user), _, _) => write!(f, "{}@{}:{}:{}", user, self.host(), self.port(), self.store),
+        match (&self.auth_id, &self.host, self.port) {
+            (Some(auth_id), _, _) => write!(f, "{}@{}:{}:{}", auth_id, self.host(), self.port(), self.store),
             (None, Some(host), None) => write!(f, "{}:{}", host, self.store),
             (None, _, Some(port)) => write!(f, "{}:{}:{}", self.host(), port, self.store),
             (None, None, None) => write!(f, "{}", self.store),
@@ -88,7 +97,7 @@ impl std::str::FromStr for BackupRepository {
             .ok_or_else(|| format_err!("unable to parse repository url '{}'", url))?;

         Ok(Self {
-            user: cap.get(1).map(|m| Userid::try_from(m.as_str().to_owned())).transpose()?,
+            auth_id: cap.get(1).map(|m| Authid::try_from(m.as_str().to_owned())).transpose()?,
             host: cap.get(2).map(|m| m.as_str().to_owned()),
             port: cap.get(3).map(|m| m.as_str().parse::<u16>()).transpose()?,
             store: cap[4].to_owned(),
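Because the leading field is now an `Authid`, a repository spec can name either a user or an API token. Judging from the `Display` arms above, forms like the following (hosts and store names are illustrative) all remain representable:

    store1                                       // store only: defaults apply
    pbs.example.com:store1                       // host + store
    backup@pam@pbs.example.com:store1            // userid + host + store
    backup@pam!sync@pbs.example.com:8007:store1  // API token + host + port + store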
@@ -38,6 +38,9 @@ pub struct BackupStats {
     pub csum: [u8; 32],
 }

+type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>;
+type UploadResultReceiver = oneshot::Receiver<Result<(), Error>>;
+
 impl BackupWriter {

     fn new(h2: H2Client, abort: AbortHandle, crypt_config: Option<Arc<CryptConfig>>, verbose: bool) -> Arc<Self> {
@@ -262,7 +265,7 @@ impl BackupWriter {
         let archive = if self.verbose {
             archive_name.to_string()
         } else {
-            crate::tools::format::strip_server_file_extension(archive_name.clone())
+            crate::tools::format::strip_server_file_extension(archive_name)
         };
         if archive_name != CATALOG_NAME {
             let speed: HumanByte = ((uploaded * 1_000_000) / (duration.as_micros() as usize)).into();
@@ -335,15 +338,15 @@ impl BackupWriter {
         (verify_queue_tx, verify_result_rx)
     }

-    fn append_chunk_queue(h2: H2Client, wid: u64, path: String, verbose: bool) -> (
-        mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>,
-        oneshot::Receiver<Result<(), Error>>,
-    ) {
+    fn append_chunk_queue(
+        h2: H2Client,
+        wid: u64,
+        path: String,
+        verbose: bool,
+    ) -> (UploadQueueSender, UploadResultReceiver) {
         let (verify_queue_tx, verify_queue_rx) = mpsc::channel(64);
         let (verify_result_tx, verify_result_rx) = oneshot::channel();

-        let h2_2 = h2.clone();
-
         // FIXME: async-block-ify this code!
         tokio::spawn(
             verify_queue_rx
@@ -381,7 +384,7 @@ impl BackupWriter {
                         let request = H2Client::request_builder("localhost", "PUT", &path, None, Some("application/json")).unwrap();
                         let param_data = bytes::Bytes::from(param.to_string().into_bytes());
                         let upload_data = Some(param_data);
-                        h2_2.send_request(request, upload_data)
+                        h2.send_request(request, upload_data)
                             .and_then(move |response| {
                                 response
                                     .map_err(Error::from)
@@ -489,6 +492,10 @@ impl BackupWriter {
         Ok(manifest)
     }

+    // We have no `self` here for `h2` and `verbose`, the only other arg "common" with 1 other
+    // function in the same path is `wid`, so those 3 could be in a struct, but there's no real use
+    // since this is a private method.
+    #[allow(clippy::too_many_arguments)]
     fn upload_chunk_info_stream(
         h2: H2Client,
         wid: u64,
@@ -515,7 +522,7 @@ impl BackupWriter {
         let is_fixed_chunk_size = prefix == "fixed";

         let (upload_queue, upload_result) =
-            Self::append_chunk_queue(h2.clone(), wid, append_chunk_path.to_owned(), verbose);
+            Self::append_chunk_queue(h2.clone(), wid, append_chunk_path, verbose);

         let start_time = std::time::Instant::now();

@@ -574,10 +581,12 @@ impl BackupWriter {
                 let digest = chunk_info.digest;
                 let digest_str = digest_to_hex(&digest);

-                if false && verbose { // TO verbose, needs finer verbosity setting granularity
+                /* too verbose, needs finer verbosity setting granularity
+                if verbose {
                     println!("upload new chunk {} ({} bytes, offset {})", digest_str,
                         chunk_info.chunk_len, offset);
                 }
+                */

                 let chunk_data = chunk_info.chunk.into_inner();
                 let param = json!({
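The two type aliases introduced at the top of this hunk exist only to keep `append_chunk_queue`'s signature readable; the underlying pattern is an mpsc work queue paired with a oneshot completion channel. A generic, self-contained sketch of that pairing (tokio 0.2-era `mpsc`/`oneshot` as used above; must be called from inside a tokio runtime):

    use tokio::sync::{mpsc, oneshot};

    // Alias the verbose channel types once, then reuse them in signatures.
    type WorkSender = mpsc::Sender<u64>;
    type DoneReceiver = oneshot::Receiver<Result<(), String>>;

    fn spawn_worker() -> (WorkSender, DoneReceiver) {
        let (work_tx, mut work_rx) = mpsc::channel(64);
        let (done_tx, done_rx) = oneshot::channel();
        tokio::spawn(async move {
            while let Some(item) = work_rx.recv().await {
                // ... process one queued item, e.g. append a chunk id to an index ...
                let _ = item;
            }
            let _ = done_tx.send(Ok(())); // signal completion once the queue closes
        });
        (work_tx, done_rx)
    }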
@@ -1,5 +1,4 @@
 use std::io::Write;
-use std::task::{Context, Poll};
 use std::sync::{Arc, Mutex, RwLock};
 use std::time::Duration;

@@ -18,19 +17,21 @@ use xdg::BaseDirectories;
 use proxmox::{
     api::error::HttpError,
     sys::linux::tty,
-    tools::{
-        fs::{file_get_json, replace_file, CreateOptions},
-    }
+    tools::fs::{file_get_json, replace_file, CreateOptions},
 };

 use super::pipe_to_stream::PipeToSendStream;
-use crate::api2::types::Userid;
-use crate::tools::async_io::EitherStream;
-use crate::tools::{self, BroadcastFuture, DEFAULT_ENCODE_SET};
+use crate::api2::types::{Authid, Userid};
+use crate::tools::{
+    self,
+    BroadcastFuture,
+    DEFAULT_ENCODE_SET,
+    http::HttpsConnector,
+};

 #[derive(Clone)]
 pub struct AuthInfo {
-    pub userid: Userid,
+    pub auth_id: Authid,
     pub ticket: String,
     pub token: String,
 }
@@ -101,7 +102,7 @@ pub struct HttpClient {
     server: String,
     port: u16,
     fingerprint: Arc<Mutex<Option<String>>>,
-    first_auth: BroadcastFuture<()>,
+    first_auth: Option<BroadcastFuture<()>>,
     auth: Arc<RwLock<AuthInfo>>,
     ticket_abort: futures::future::AbortHandle,
     _options: HttpClientOptions,
@@ -181,10 +182,8 @@ fn load_fingerprint(prefix: &str, server: &str) -> Option<String> {

     for line in raw.split('\n') {
         let items: Vec<String> = line.split_whitespace().map(String::from).collect();
-        if items.len() == 2 {
-            if &items[0] == server {
-                return Some(items[1].clone());
-            }
+        if items.len() == 2 && &items[0] == server {
+            return Some(items[1].clone());
         }
     }

@@ -212,11 +211,11 @@ fn store_ticket_info(prefix: &str, server: &str, username: &str, ticket: &str, t

     let empty = serde_json::map::Map::new();
     for (server, info) in data.as_object().unwrap_or(&empty) {
-        for (_user, uinfo) in info.as_object().unwrap_or(&empty) {
+        for (user, uinfo) in info.as_object().unwrap_or(&empty) {
             if let Some(timestamp) = uinfo["timestamp"].as_i64() {
                 let age = now - timestamp;
                 if age < ticket_lifetime {
-                    new_data[server][username] = uinfo.clone();
+                    new_data[server][user] = uinfo.clone();
                 }
             }
         }
@@ -252,7 +251,7 @@ impl HttpClient {
     pub fn new(
         server: &str,
         port: u16,
-        userid: &Userid,
+        auth_id: &Authid,
         mut options: HttpClientOptions,
     ) -> Result<Self, Error> {

@@ -294,10 +293,11 @@ impl HttpClient {
             ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE);
         }

-        let mut httpc = hyper::client::HttpConnector::new();
+        let mut httpc = HttpConnector::new();
         httpc.set_nodelay(true); // important for h2 download performance!
         httpc.enforce_http(false); // we want https...

+        httpc.set_connect_timeout(Some(std::time::Duration::new(10, 0)));
         let https = HttpsConnector::with_connector(httpc, ssl_connector_builder.build());

         let client = Client::builder()
@@ -311,6 +311,11 @@ impl HttpClient {
         let password = if let Some(password) = password {
             password
         } else {
+            let userid = if auth_id.is_token() {
+                bail!("API token secret must be provided!");
+            } else {
+                auth_id.user()
+            };
             let mut ticket_info = None;
             if use_ticket_cache {
                 ticket_info = load_ticket_info(options.prefix.as_ref().unwrap(), server, userid);
@@ -323,7 +328,7 @@ impl HttpClient {
         };

         let auth = Arc::new(RwLock::new(AuthInfo {
-            userid: userid.clone(),
+            auth_id: auth_id.clone(),
             ticket: password.clone(),
             token: "".to_string(),
         }));
@@ -336,14 +341,14 @@ impl HttpClient {
         let renewal_future = async move {
             loop {
                 tokio::time::delay_for(Duration::new(60*15, 0)).await; // 15 minutes
-                let (userid, ticket) = {
+                let (auth_id, ticket) = {
                     let authinfo = auth2.read().unwrap().clone();
-                    (authinfo.userid, authinfo.ticket)
+                    (authinfo.auth_id, authinfo.ticket)
                 };
-                match Self::credentials(client2.clone(), server2.clone(), port, userid, ticket).await {
+                match Self::credentials(client2.clone(), server2.clone(), port, auth_id.user().clone(), ticket).await {
                     Ok(auth) => {
                         if use_ticket_cache && prefix2.is_some() {
-                            let _ = store_ticket_info(prefix2.as_ref().unwrap(), &server2, &auth.userid.to_string(), &auth.ticket, &auth.token);
+                            let _ = store_ticket_info(prefix2.as_ref().unwrap(), &server2, &auth.auth_id.to_string(), &auth.ticket, &auth.token);
                         }
                         *auth2.write().unwrap() = auth;
                     },
@@ -361,7 +366,7 @@ impl HttpClient {
             client.clone(),
             server.to_owned(),
             port,
-            userid.to_owned(),
+            auth_id.user().clone(),
             password.to_owned(),
         ).map_ok({
             let server = server.to_string();
@@ -370,13 +375,20 @@ impl HttpClient {

             move |auth| {
                 if use_ticket_cache && prefix.is_some() {
-                    let _ = store_ticket_info(prefix.as_ref().unwrap(), &server, &auth.userid.to_string(), &auth.ticket, &auth.token);
+                    let _ = store_ticket_info(prefix.as_ref().unwrap(), &server, &auth.auth_id.to_string(), &auth.ticket, &auth.token);
                 }
                 *authinfo.write().unwrap() = auth;
                 tokio::spawn(renewal_future);
             }
         });

+        let first_auth = if auth_id.is_token() {
+            // TODO check access here?
+            None
+        } else {
+            Some(BroadcastFuture::new(Box::new(login_future)))
+        };
+
         Ok(Self {
             client,
             server: String::from(server),
@@ -384,7 +396,7 @@ impl HttpClient {
             fingerprint: verified_fingerprint,
             auth,
             ticket_abort,
-            first_auth: BroadcastFuture::new(Box::new(login_future)),
+            first_auth,
             _options: options,
         })
     }
@@ -394,7 +406,10 @@ impl HttpClient {
     /// Login is done on demand, so this is only required if you need
     /// access to authentication data in 'AuthInfo'.
     pub async fn login(&self) -> Result<AuthInfo, Error> {
-        self.first_auth.listen().await?;
+        if let Some(future) = &self.first_auth {
+            future.listen().await?;
+        }
+
         let authinfo = self.auth.read().unwrap();
         Ok(authinfo.clone())
     }
@@ -477,10 +492,14 @@ impl HttpClient {
         let client = self.client.clone();

         let auth = self.login().await?;
-
-        let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
-        req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
-        req.headers_mut().insert("CSRFPreventionToken", HeaderValue::from_str(&auth.token).unwrap());
+        if auth.auth_id.is_token() {
+            let enc_api_token = format!("{}:{}", auth.auth_id, percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
+            req.headers_mut().insert("Authorization", HeaderValue::from_str(&enc_api_token).unwrap());
+        } else {
+            let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
+            req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
+            req.headers_mut().insert("CSRFPreventionToken", HeaderValue::from_str(&auth.token).unwrap());
+        }

         Self::api_request(client, req).await
     }
@@ -579,11 +598,18 @@ impl HttpClient {
         protocol_name: String,
     ) -> Result<(H2Client, futures::future::AbortHandle), Error> {

-        let auth = self.login().await?;
         let client = self.client.clone();
+        let auth = self.login().await?;

+        if auth.auth_id.is_token() {
+            let enc_api_token = format!("{}:{}", auth.auth_id, percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
+            req.headers_mut().insert("Authorization", HeaderValue::from_str(&enc_api_token).unwrap());
+        } else {
+            let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
+            req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
+            req.headers_mut().insert("CSRFPreventionToken", HeaderValue::from_str(&auth.token).unwrap());
+        }

-        let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
-        req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
         req.headers_mut().insert("UPGRADE", HeaderValue::from_str(&protocol_name).unwrap());

         let resp = client.request(req).await?;
@@ -609,7 +635,7 @@ impl HttpClient {
             .await?;

         let connection = connection
-            .map_err(|_| panic!("HTTP/2.0 connection failed"));
+            .map_err(|_| eprintln!("HTTP/2.0 connection failed"));

         let (connection, abort) = futures::future::abortable(connection);
         // A cancellable future returns an Option which is None when cancelled and
@@ -636,7 +662,7 @@ impl HttpClient {
         let req = Self::request_builder(&server, port, "POST", "/api2/json/access/ticket", Some(data))?;
         let cred = Self::api_request(client, req).await?;
         let auth = AuthInfo {
-            userid: cred["data"]["username"].as_str().unwrap().parse()?,
+            auth_id: cred["data"]["username"].as_str().unwrap().parse()?,
             ticket: cred["data"]["ticket"].as_str().unwrap().to_owned(),
             token: cred["data"]["CSRFPreventionToken"].as_str().unwrap().to_owned(),
         };
@@ -921,61 +947,3 @@ impl H2Client {
         }
     }
 }
-
-#[derive(Clone)]
-pub struct HttpsConnector {
-    http: HttpConnector,
-    ssl_connector: std::sync::Arc<SslConnector>,
-}
-
-impl HttpsConnector {
-    pub fn with_connector(mut http: HttpConnector, ssl_connector: SslConnector) -> Self {
-        http.enforce_http(false);
-
-        Self {
-            http,
-            ssl_connector: std::sync::Arc::new(ssl_connector),
-        }
-    }
-}
-
-type MaybeTlsStream = EitherStream<
-    tokio::net::TcpStream,
-    tokio_openssl::SslStream<tokio::net::TcpStream>,
->;
-
-impl hyper::service::Service<Uri> for HttpsConnector {
-    type Response = MaybeTlsStream;
-    type Error = Error;
-    type Future = std::pin::Pin<Box<
-        dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static
-    >>;
-
-    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
-        // This connector is always ready, but others might not be.
-        Poll::Ready(Ok(()))
-    }
-
-    fn call(&mut self, dst: Uri) -> Self::Future {
-        let mut this = self.clone();
-        async move {
-            let is_https = dst
-                .scheme()
-                .ok_or_else(|| format_err!("missing URL scheme"))?
-                == "https";
-            let host = dst
-                .host()
-                .ok_or_else(|| format_err!("missing hostname in destination url?"))?
-                .to_string();
-
-            let config = this.ssl_connector.configure();
-            let conn = this.http.call(dst).await?;
-            if is_https {
-                let conn = tokio_openssl::connect(config?, &host, conn).await?;
-                Ok(MaybeTlsStream::Right(conn))
-            } else {
-                Ok(MaybeTlsStream::Left(conn))
-            }
-        }.boxed()
-    }
-}
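After this change both request paths select authentication headers the same way: an API token sends one Authorization header carrying `authid:secret`, while an interactive login keeps the ticket cookie plus the CSRF token. A distilled sketch of that branch, with plain strings in place of the real header types:

    enum Auth {
        Token { auth_id: String, secret: String },
        Ticket { cookie: String, csrf: String },
    }

    // Returns the header (name, value) pairs to attach to a request.
    fn auth_headers(auth: &Auth) -> Vec<(&'static str, String)> {
        match auth {
            Auth::Token { auth_id, secret } => {
                vec![("Authorization", format!("{}:{}", auth_id, secret))]
            }
            Auth::Ticket { cookie, csrf } => vec![
                ("Cookie", format!("PBSAuthCookie={}", cookie)),
                ("CSRFPreventionToken", csrf.clone()),
            ],
        }
    }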
@@ -451,7 +451,7 @@ pub async fn pull_group(
         .password(Some(auth_info.ticket.clone()))
         .fingerprint(fingerprint.clone());

-    let new_client = HttpClient::new(src_repo.host(), src_repo.port(), src_repo.user(), options)?;
+    let new_client = HttpClient::new(src_repo.host(), src_repo.port(), src_repo.auth_id(), options)?;

     let reader = BackupReader::start(
         new_client,
@@ -491,7 +491,7 @@ pub async fn pull_store(
     src_repo: &BackupRepository,
     tgt_store: Arc<DataStore>,
     delete: bool,
-    userid: Userid,
+    auth_id: Authid,
 ) -> Result<(), Error> {

     // explicit create shared lock to prevent GC on newly created chunks
@@ -524,27 +524,29 @@ pub async fn pull_store(
     for (groups_done, item) in list.into_iter().enumerate() {
         let group = BackupGroup::new(&item.backup_type, &item.backup_id);

-        let (owner, _lock_guard) = tgt_store.create_locked_backup_group(&group, &userid)?;
+        let (owner, _lock_guard) = tgt_store.create_locked_backup_group(&group, &auth_id)?;
         // permission check
-        if userid != owner { // only the owner is allowed to create additional snapshots
+        if auth_id != owner { // only the owner is allowed to create additional snapshots
             worker.log(format!("sync group {}/{} failed - owner check failed ({} != {})",
-                item.backup_type, item.backup_id, userid, owner));
+                item.backup_type, item.backup_id, auth_id, owner));
             errors = true; // do not stop here, instead continue
-
-        } else {
-
-            if let Err(err) = pull_group(
-                worker,
-                client,
-                src_repo,
-                tgt_store.clone(),
-                &group,
-                delete,
-                Some((groups_done, group_count)),
-            ).await {
-                worker.log(format!("sync group {}/{} failed - {}", item.backup_type, item.backup_id, err));
-                errors = true; // do not stop here, instead continue
-            }
+        } else if let Err(err) = pull_group(
+            worker,
+            client,
+            src_repo,
+            tgt_store.clone(),
+            &group,
+            delete,
+            Some((groups_done, group_count)),
+        ).await {
+            worker.log(format!(
+                "sync group {}/{} failed - {}",
+                item.backup_type,
+                item.backup_id,
+                err,
+            ));
+            errors = true; // do not stop here, instead continue
         }
     }

@@ -43,8 +43,8 @@ pub async fn display_task_log(
             } else {
                 break;
             }
-        } else {
-            if lines != limit { bail!("got wrong number of lines from server ({} != {})", lines, limit); }
+        } else if lines != limit {
+            bail!("got wrong number of lines from server ({} != {})", lines, limit);
         }
     }

@@ -18,11 +18,12 @@ use crate::buildcfg;
 pub mod acl;
 pub mod cached_user_info;
 pub mod datastore;
-pub mod jobstate;
 pub mod network;
 pub mod remote;
 pub mod sync;
+pub mod token_shadow;
 pub mod user;
+pub mod verify;

 /// Check configuration directory permissions
 ///
@@ -1,5 +1,5 @@
 use std::io::Write;
-use std::collections::{HashMap, HashSet, BTreeMap, BTreeSet};
+use std::collections::{HashMap, BTreeMap, BTreeSet};
 use std::path::{PathBuf, Path};
 use std::sync::{Arc, RwLock};
 use std::str::FromStr;
@@ -15,7 +15,7 @@ use proxmox::tools::{fs::replace_file, fs::CreateOptions};
 use proxmox::constnamedbitmap;
 use proxmox::api::{api, schema::*};

-use crate::api2::types::Userid;
+use crate::api2::types::{Authid, Userid};

 // define Privilege bitfield

@@ -231,7 +231,7 @@ pub struct AclTree {
 }

 pub struct AclTreeNode {
-    pub users: HashMap<Userid, HashMap<String, bool>>,
+    pub users: HashMap<Authid, HashMap<String, bool>>,
     pub groups: HashMap<String, HashMap<String, bool>>,
     pub children: BTreeMap<String, AclTreeNode>,
 }
@@ -246,43 +246,43 @@ impl AclTreeNode {
         }
     }

-    pub fn extract_roles(&self, user: &Userid, all: bool) -> HashSet<String> {
-        let user_roles = self.extract_user_roles(user, all);
-        if !user_roles.is_empty() {
+    pub fn extract_roles(&self, auth_id: &Authid, all: bool) -> HashMap<String, bool> {
+        let user_roles = self.extract_user_roles(auth_id, all);
+        if !user_roles.is_empty() || auth_id.is_token() {
             // user privs always override group privs
             return user_roles
         };

-        self.extract_group_roles(user, all)
+        self.extract_group_roles(auth_id.user(), all)
     }

-    pub fn extract_user_roles(&self, user: &Userid, all: bool) -> HashSet<String> {
+    pub fn extract_user_roles(&self, auth_id: &Authid, all: bool) -> HashMap<String, bool> {

-        let mut set = HashSet::new();
+        let mut map = HashMap::new();

-        let roles = match self.users.get(user) {
+        let roles = match self.users.get(auth_id) {
             Some(m) => m,
-            None => return set,
+            None => return map,
         };

         for (role, propagate) in roles {
             if *propagate || all {
                 if role == ROLE_NAME_NO_ACCESS {
-                    // return a set with a single role 'NoAccess'
-                    let mut set = HashSet::new();
-                    set.insert(role.to_string());
-                    return set;
+                    // return a map with a single role 'NoAccess'
+                    let mut map = HashMap::new();
+                    map.insert(role.to_string(), false);
+                    return map;
                 }
-                set.insert(role.to_string());
+                map.insert(role.to_string(), *propagate);
             }
         }

-        set
+        map
     }

-    pub fn extract_group_roles(&self, _user: &Userid, all: bool) -> HashSet<String> {
+    pub fn extract_group_roles(&self, _user: &Userid, all: bool) -> HashMap<String, bool> {

-        let mut set = HashSet::new();
+        let mut map = HashMap::new();

         for (_group, roles) in &self.groups {
             let is_member = false; // fixme: check if user is member of the group
@@ -291,17 +291,17 @@ impl AclTreeNode {
             for (role, propagate) in roles {
                 if *propagate || all {
                     if role == ROLE_NAME_NO_ACCESS {
-                        // return a set with a single role 'NoAccess'
-                        let mut set = HashSet::new();
-                        set.insert(role.to_string());
-                        return set;
+                        // return a map with a single role 'NoAccess'
+                        let mut map = HashMap::new();
+                        map.insert(role.to_string(), false);
+                        return map;
                     }
-                    set.insert(role.to_string());
+                    map.insert(role.to_string(), *propagate);
                 }
             }
         }

-        set
+        map
     }

     pub fn delete_group_role(&mut self, group: &str, role: &str) {
@@ -312,8 +312,8 @@ impl AclTreeNode {
         roles.remove(role);
     }

-    pub fn delete_user_role(&mut self, userid: &Userid, role: &str) {
-        let roles = match self.users.get_mut(userid) {
+    pub fn delete_user_role(&mut self, auth_id: &Authid, role: &str) {
+        let roles = match self.users.get_mut(auth_id) {
             Some(r) => r,
             None => return,
         };
@@ -331,8 +331,8 @@ impl AclTreeNode {
         }
     }

-    pub fn insert_user_role(&mut self, user: Userid, role: String, propagate: bool) {
-        let map = self.users.entry(user).or_insert_with(|| HashMap::new());
+    pub fn insert_user_role(&mut self, auth_id: Authid, role: String, propagate: bool) {
+        let map = self.users.entry(auth_id).or_insert_with(|| HashMap::new());
         if role == ROLE_NAME_NO_ACCESS {
             map.clear();
             map.insert(role, propagate);
@@ -346,7 +346,9 @@ impl AclTreeNode {
 impl AclTree {

     pub fn new() -> Self {
-        Self { root: AclTreeNode::new() }
+        Self {
+            root: AclTreeNode::new(),
+        }
     }

     pub fn find_node(&mut self, path: &str) -> Option<&mut AclTreeNode> {
@@ -383,13 +385,13 @@ impl AclTree {
         node.delete_group_role(group, role);
     }

-    pub fn delete_user_role(&mut self, path: &str, userid: &Userid, role: &str) {
+    pub fn delete_user_role(&mut self, path: &str, auth_id: &Authid, role: &str) {
         let path = split_acl_path(path);
         let node = match self.get_node(&path) {
             Some(n) => n,
             None => return,
         };
-        node.delete_user_role(userid, role);
+        node.delete_user_role(auth_id, role);
     }

     pub fn insert_group_role(&mut self, path: &str, group: &str, role: &str, propagate: bool) {
@@ -398,10 +400,10 @@ impl AclTree {
         node.insert_group_role(group.to_string(), role.to_string(), propagate);
     }

-    pub fn insert_user_role(&mut self, path: &str, user: &Userid, role: &str, propagate: bool) {
+    pub fn insert_user_role(&mut self, path: &str, auth_id: &Authid, role: &str, propagate: bool) {
         let path = split_acl_path(path);
         let node = self.get_or_insert_node(&path);
-        node.insert_user_role(user.to_owned(), role.to_string(), propagate);
+        node.insert_user_role(auth_id.to_owned(), role.to_string(), propagate);
     }

     fn write_node_config(
@@ -413,18 +415,18 @@ impl AclTree {
         let mut role_ug_map0 = HashMap::new();
         let mut role_ug_map1 = HashMap::new();

-        for (user, roles) in &node.users {
+        for (auth_id, roles) in &node.users {
             // no need to save, because root is always 'Administrator'
-            if user == "root@pam" { continue; }
+            if !auth_id.is_token() && auth_id.user() == "root@pam" { continue; }
             for (role, propagate) in roles {
                 let role = role.as_str();
-                let user = user.to_string();
+                let auth_id = auth_id.to_string();
                 if *propagate {
                     role_ug_map1.entry(role).or_insert_with(|| BTreeSet::new())
-                        .insert(user);
+                        .insert(auth_id);
                 } else {
                     role_ug_map0.entry(role).or_insert_with(|| BTreeSet::new())
-                        .insert(user);
+                        .insert(auth_id);
                 }
             }
         }
@@ -512,7 +514,8 @@ impl AclTree {
             bail!("expected '0' or '1' for propagate flag.");
         };

-        let path = split_acl_path(items[2]);
+        let path_str = items[2];
+        let path = split_acl_path(path_str);
         let node = self.get_or_insert_node(&path);

         let uglist: Vec<&str> = items[3].split(',').map(|v| v.trim()).collect();
@@ -576,25 +579,26 @@ impl AclTree {
         Ok(tree)
     }

-    pub fn roles(&self, userid: &Userid, path: &[&str]) -> HashSet<String> {
+    pub fn roles(&self, auth_id: &Authid, path: &[&str]) -> HashMap<String, bool> {

         let mut node = &self.root;
-        let mut role_set = node.extract_roles(userid, path.is_empty());
+        let mut role_map = node.extract_roles(auth_id, path.is_empty());

         for (pos, comp) in path.iter().enumerate() {
             let last_comp = (pos + 1) == path.len();
             node = match node.children.get(*comp) {
                 Some(n) => n,
None => return role_set, // path not found
|
||||
None => return role_map, // path not found
|
||||
};
|
||||
let new_set = node.extract_roles(userid, last_comp);
|
||||
if !new_set.is_empty() {
|
||||
// overwrite previous settings
|
||||
role_set = new_set;
|
||||
|
||||
let new_map = node.extract_roles(auth_id, last_comp);
|
||||
if !new_map.is_empty() {
|
||||
// overwrite previous maptings
|
||||
role_map = new_map;
|
||||
}
|
||||
}
|
||||
|
||||
role_set
|
||||
role_map
|
||||
}
|
||||
}
|
||||
|
||||
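The switch from HashSet<String> to HashMap<String, bool> lets each role carry its propagate flag along with it. A minimal sketch of how a caller might fold such a role map into privilege bitmasks; the role-to-bits table here is a hypothetical stand-in, not the crate's actual ROLE_NAMES constants:

use std::collections::HashMap;

// Hypothetical role -> privilege-bit lookup; illustrative values only.
fn role_privs(role: &str) -> u64 {
    match role {
        "Admin" => 0b111,
        "DatastoreBackup" => 0b001,
        _ => 0,
    }
}

// Fold a role map (role name -> propagate flag) into two bitmasks:
// privileges effective at this path, and the subset that propagates
// further down the ACL tree.
fn fold_roles(roles: &HashMap<String, bool>) -> (u64, u64) {
    let mut privs = 0;
    let mut propagated = 0;
    for (role, propagate) in roles {
        let bits = role_privs(role);
        privs |= bits;
        if *propagate {
            propagated |= bits;
        }
    }
    (privs, propagated)
}
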
@@ -675,22 +679,22 @@ mod test {
use anyhow::{Error};
use super::AclTree;

use crate::api2::types::Userid;
use crate::api2::types::Authid;

fn check_roles(
tree: &AclTree,
user: &Userid,
auth_id: &Authid,
path: &str,
expected_roles: &str,
) {

let path_vec = super::split_acl_path(path);
let mut roles = tree.roles(user, &path_vec)
.iter().map(|v| v.clone()).collect::<Vec<String>>();
let mut roles = tree.roles(auth_id, &path_vec)
.iter().map(|(v, _)| v.clone()).collect::<Vec<String>>();
roles.sort();
let roles = roles.join(",");

assert_eq!(roles, expected_roles, "\nat check_roles for '{}' on '{}'", user, path);
assert_eq!(roles, expected_roles, "\nat check_roles for '{}' on '{}'", auth_id, path);
}

#[test]
@@ -721,13 +725,13 @@ acl:1:/storage:user1@pbs:Admin
acl:1:/storage/store1:user1@pbs:DatastoreBackup
acl:1:/storage/store2:user2@pbs:DatastoreBackup
"###)?;
let user1: Userid = "user1@pbs".parse()?;
let user1: Authid = "user1@pbs".parse()?;
check_roles(&tree, &user1, "/", "");
check_roles(&tree, &user1, "/storage", "Admin");
check_roles(&tree, &user1, "/storage/store1", "DatastoreBackup");
check_roles(&tree, &user1, "/storage/store2", "Admin");

let user2: Userid = "user2@pbs".parse()?;
let user2: Authid = "user2@pbs".parse()?;
check_roles(&tree, &user2, "/", "");
check_roles(&tree, &user2, "/storage", "");
check_roles(&tree, &user2, "/storage/store1", "");
@@ -744,7 +748,7 @@ acl:1:/:user1@pbs:Admin
acl:1:/storage:user1@pbs:NoAccess
acl:1:/storage/store1:user1@pbs:DatastoreBackup
"###)?;
let user1: Userid = "user1@pbs".parse()?;
let user1: Authid = "user1@pbs".parse()?;
check_roles(&tree, &user1, "/", "Admin");
check_roles(&tree, &user1, "/storage", "NoAccess");
check_roles(&tree, &user1, "/storage/store1", "DatastoreBackup");
@@ -770,7 +774,7 @@ acl:1:/storage/store1:user1@pbs:DatastoreBackup

let mut tree = AclTree::new();

let user1: Userid = "user1@pbs".parse()?;
let user1: Authid = "user1@pbs".parse()?;

tree.insert_user_role("/", &user1, "Admin", true);
tree.insert_user_role("/", &user1, "Audit", true);
@@ -794,7 +798,7 @@ acl:1:/storage/store1:user1@pbs:DatastoreBackup

let mut tree = AclTree::new();

let user1: Userid = "user1@pbs".parse()?;
let user1: Authid = "user1@pbs".parse()?;

tree.insert_user_role("/storage", &user1, "NoAccess", true);

@@ -9,10 +9,10 @@ use lazy_static::lazy_static;
use proxmox::api::UserInformation;

use super::acl::{AclTree, ROLE_NAMES, ROLE_ADMIN};
use super::user::User;
use crate::api2::types::Userid;
use super::user::{ApiToken, User};
use crate::api2::types::{Authid, Userid};

/// Cache User/Group/Acl configuration data for fast permission tests
/// Cache User/Group/Token/Acl configuration data for fast permission tests
pub struct CachedUserInfo {
user_cfg: Arc<SectionConfigData>,
acl_tree: Arc<AclTree>,
@@ -57,37 +57,54 @@ impl CachedUserInfo {
Ok(config)
}

/// Test if a user account is enabled and not expired
pub fn is_active_user(&self, userid: &Userid) -> bool {
/// Test if an authentication id is enabled and not expired
pub fn is_active_auth_id(&self, auth_id: &Authid) -> bool {
let userid = auth_id.user();

if let Ok(info) = self.user_cfg.lookup::<User>("user", userid.as_str()) {
if !info.enable.unwrap_or(true) {
return false;
}
if let Some(expire) = info.expire {
if expire > 0 {
if expire <= now() {
return false;
}
if expire > 0 && expire <= now() {
return false;
}
}
return true;
} else {
return false;
}

if auth_id.is_token() {
if let Ok(info) = self.user_cfg.lookup::<ApiToken>("token", &auth_id.to_string()) {
if !info.enable.unwrap_or(true) {
return false;
}
if let Some(expire) = info.expire {
if expire > 0 && expire <= now() {
return false;
}
}
return true;
} else {
return false;
}
}

return true;
}

pub fn check_privs(
&self,
userid: &Userid,
auth_id: &Authid,
path: &[&str],
required_privs: u64,
partial: bool,
) -> Result<(), Error> {
let user_privs = self.lookup_privs(&userid, path);
let privs = self.lookup_privs(&auth_id, path);
let allowed = if partial {
(user_privs & required_privs) != 0
(privs & required_privs) != 0
} else {
(user_privs & required_privs) == required_privs
(privs & required_privs) == required_privs
};
if !allowed {
// printing the path doesn't leak any information as long as we
@@ -97,29 +114,48 @@ impl CachedUserInfo {
Ok(())
}

pub fn is_superuser(&self, userid: &Userid) -> bool {
userid == "root@pam"
pub fn is_superuser(&self, auth_id: &Authid) -> bool {
!auth_id.is_token() && auth_id.user() == "root@pam"
}

pub fn is_group_member(&self, _userid: &Userid, _group: &str) -> bool {
false
}

pub fn lookup_privs(&self, userid: &Userid, path: &[&str]) -> u64 {
pub fn lookup_privs(&self, auth_id: &Authid, path: &[&str]) -> u64 {
let (privs, _) = self.lookup_privs_details(auth_id, path);
privs
}

if self.is_superuser(userid) {
return ROLE_ADMIN;
pub fn lookup_privs_details(&self, auth_id: &Authid, path: &[&str]) -> (u64, u64) {
if self.is_superuser(auth_id) {
return (ROLE_ADMIN, ROLE_ADMIN);
}

let roles = self.acl_tree.roles(userid, path);
let roles = self.acl_tree.roles(auth_id, path);
let mut privs: u64 = 0;
for role in roles {
let mut propagated_privs: u64 = 0;
for (role, propagate) in roles {
if let Some((role_privs, _)) = ROLE_NAMES.get(role.as_str()) {
if propagate {
propagated_privs |= role_privs;
}
privs |= role_privs;
}
}
privs

if auth_id.is_token() {
// limit privs to those of the owning user
let user_auth_id = Authid::from(auth_id.user().clone());
privs &= self.lookup_privs(&user_auth_id, path);
let (owner_privs, owner_propagated_privs) = self.lookup_privs_details(&user_auth_id, path);
privs &= owner_privs;
propagated_privs &= owner_propagated_privs;
}

(privs, propagated_privs)
}

}

impl UserInformation for CachedUserInfo {
@@ -131,9 +167,9 @@ impl UserInformation for CachedUserInfo {
false
}

fn lookup_privs(&self, userid: &str, path: &[&str]) -> u64 {
match userid.parse::<Userid>() {
Ok(userid) => Self::lookup_privs(self, &userid, path),
fn lookup_privs(&self, auth_id: &str, path: &[&str]) -> u64 {
match auth_id.parse::<Authid>() {
Ok(auth_id) => Self::lookup_privs(self, &auth_id, path),
Err(_) => 0,
}
}

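A hedged usage sketch of the Authid-based permission check above; the ACL path and call site are invented for illustration, and the privilege constant is assumed to be one of the crate's PRIV_* bitmask constants:

// Sketch: deny a request unless the authenticated id (user or API token)
// holds the required privilege bits on the datastore's ACL path.
fn check_backup_access(user_info: &CachedUserInfo, auth_id: &Authid) -> Result<(), Error> {
    user_info.check_privs(
        auth_id,
        &["datastore", "store1"],  // ACL path components (example value)
        PRIV_DATASTORE_BACKUP,     // required privilege bits (assumed constant)
        false,                     // partial = false: all bits are required
    )
}
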
@@ -44,10 +44,6 @@ pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name").schema()
optional: true,
schema: PRUNE_SCHEDULE_SCHEMA,
},
"verify-schedule": {
optional: true,
schema: VERIFY_SCHEDULE_SCHEMA,
},
"keep-last": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_LAST,
@@ -72,6 +68,10 @@ pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name").schema()
optional: true,
schema: PRUNE_SCHEMA_KEEP_YEARLY,
},
"verify-new": {
optional: true,
type: bool,
},
}
)]
#[serde(rename_all="kebab-case")]
@@ -87,8 +87,6 @@ pub struct DataStoreConfig {
#[serde(skip_serializing_if="Option::is_none")]
pub prune_schedule: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub verify_schedule: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub keep_last: Option<u64>,
#[serde(skip_serializing_if="Option::is_none")]
pub keep_hourly: Option<u64>,
@@ -100,6 +98,9 @@ pub struct DataStoreConfig {
pub keep_monthly: Option<u64>,
#[serde(skip_serializing_if="Option::is_none")]
pub keep_yearly: Option<u64>,
/// If enabled, all backups will be verified right after completion.
#[serde(skip_serializing_if="Option::is_none")]
pub verify_new: Option<bool>,
}

fn init() -> SectionConfig {
@@ -157,12 +158,12 @@ pub fn complete_acl_path(_arg: &str, _param: &HashMap<String, String>) -> Vec<St
let mut list = Vec::new();

list.push(String::from("/"));
list.push(String::from("/storage"));
list.push(String::from("/storage/"));
list.push(String::from("/datastore"));
list.push(String::from("/datastore/"));

if let Ok((data, _digest)) = config() {
for id in data.sections.keys() {
list.push(format!("/storage/{}", id));
list.push(format!("/datastore/{}", id));
}
}

@@ -289,8 +289,12 @@ impl Interface {

if let Some(method6) = self.method6 {
let mut skip_v6 = false; // avoid empty inet6 manual entry
if self.method.is_some() && method6 == NetworkConfigMethod::Manual {
if self.comments6.is_none() && self.options6.is_empty() { skip_v6 = true; }
if self.method.is_some()
&& method6 == NetworkConfigMethod::Manual
&& self.comments6.is_none()
&& self.options6.is_empty()
{
skip_v6 = true;
}

if !skip_v6 {
@@ -10,7 +10,7 @@ use regex::Regex;

use proxmox::*; // for IP macros

pub static IPV4_REVERSE_MASK: &[&'static str] = &[
pub static IPV4_REVERSE_MASK: &[&str] = &[
"0.0.0.0",
"128.0.0.0",
"192.0.0.0",
@@ -45,7 +45,7 @@ pub const REMOTE_PASSWORD_SCHEMA: Schema = StringSchema::new("Password or auth t
type: u16,
},
userid: {
type: Userid,
type: Authid,
},
password: {
schema: REMOTE_PASSWORD_SCHEMA,
@@ -65,7 +65,7 @@ pub struct Remote {
pub host: String,
#[serde(skip_serializing_if="Option::is_none")]
pub port: Option<u16>,
pub userid: Userid,
pub userid: Authid,
#[serde(skip_serializing_if="String::is_empty")]
#[serde(with = "proxmox::tools::serde::string_as_base64")]
pub password: String,
@@ -51,7 +51,7 @@ lazy_static! {
}
)]
#[serde(rename_all="kebab-case")]
#[derive(Serialize,Deserialize)]
#[derive(Serialize,Deserialize,Clone)]
/// Sync Job
pub struct SyncJobConfig {
pub id: String,
src/config/token_shadow.rs (new file, 91 lines)
@@ -0,0 +1,91 @@
use std::collections::HashMap;
use std::time::Duration;

use anyhow::{bail, format_err, Error};
use serde::{Serialize, Deserialize};
use serde_json::{from_value, Value};

use proxmox::tools::fs::{open_file_locked, CreateOptions};

use crate::api2::types::Authid;
use crate::auth;

const LOCK_FILE: &str = configdir!("/token.shadow.lock");
const CONF_FILE: &str = configdir!("/token.shadow");
const LOCK_TIMEOUT: Duration = Duration::from_secs(5);

#[serde(rename_all="kebab-case")]
#[derive(Serialize, Deserialize)]
/// ApiToken id / secret pair
pub struct ApiTokenSecret {
pub tokenid: Authid,
pub secret: String,
}

fn read_file() -> Result<HashMap<Authid, String>, Error> {
let json = proxmox::tools::fs::file_get_json(CONF_FILE, Some(Value::Null))?;

if json == Value::Null {
Ok(HashMap::new())
} else {
// swallow serde error which might contain sensitive data
from_value(json).map_err(|_err| format_err!("unable to parse '{}'", CONF_FILE))
}
}

fn write_file(data: HashMap<Authid, String>) -> Result<(), Error> {
let backup_user = crate::backup::backup_user()?;
let options = CreateOptions::new()
.perm(nix::sys::stat::Mode::from_bits_truncate(0o0640))
.owner(backup_user.uid)
.group(backup_user.gid);

let json = serde_json::to_vec(&data)?;
proxmox::tools::fs::replace_file(CONF_FILE, &json, options)
}

/// Verifies that an entry for given tokenid / API token secret exists
pub fn verify_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
if !tokenid.is_token() {
bail!("not an API token ID");
}

let data = read_file()?;
match data.get(tokenid) {
Some(hashed_secret) => {
auth::verify_crypt_pw(secret, &hashed_secret)
},
None => bail!("invalid API token"),
}
}

/// Adds a new entry for the given tokenid / API token secret. The secret is stored as a salted hash.
pub fn set_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
if !tokenid.is_token() {
bail!("not an API token ID");
}

let _guard = open_file_locked(LOCK_FILE, LOCK_TIMEOUT, true)?;

let mut data = read_file()?;
let hashed_secret = auth::encrypt_pw(secret)?;
data.insert(tokenid.clone(), hashed_secret);
write_file(data)?;

Ok(())
}

/// Deletes the entry for the given tokenid.
pub fn delete_secret(tokenid: &Authid) -> Result<(), Error> {
if !tokenid.is_token() {
bail!("not an API token ID");
}

let _guard = open_file_locked(LOCK_FILE, LOCK_TIMEOUT, true)?;

let mut data = read_file()?;
data.remove(tokenid);
write_file(data)?;

Ok(())
}
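Based on the functions above, a sketch of the expected token secret lifecycle; the token id is a made-up example, following the `user@realm!tokenname` Authid syntax used elsewhere in this changeset:

// Store a new secret (only its salted hash is written to token.shadow),
// verify it, then remove it again.
fn token_roundtrip() -> Result<(), Error> {
    let tokenid: Authid = "user1@pbs!mytoken".parse()?;
    crate::config::token_shadow::set_secret(&tokenid, "super-secret")?;
    // succeeds only if the stored hash matches the presented secret
    crate::config::token_shadow::verify_secret(&tokenid, "super-secret")?;
    crate::config::token_shadow::delete_secret(&tokenid)?;
    Ok(())
}
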
@@ -52,6 +52,36 @@ pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
.max_length(64)
.schema();

#[api(
properties: {
tokenid: {
schema: PROXMOX_TOKEN_ID_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
enable: {
optional: true,
schema: ENABLE_USER_SCHEMA,
},
expire: {
optional: true,
schema: EXPIRE_USER_SCHEMA,
},
}
)]
#[derive(Serialize,Deserialize)]
/// ApiToken properties.
pub struct ApiToken {
pub tokenid: Authid,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub enable: Option<bool>,
#[serde(skip_serializing_if="Option::is_none")]
pub expire: Option<i64>,
}

#[api(
properties: {
@@ -103,15 +133,21 @@ pub struct User {
}

fn init() -> SectionConfig {
let obj_schema = match User::API_SCHEMA {
Schema::Object(ref obj_schema) => obj_schema,
let mut config = SectionConfig::new(&Authid::API_SCHEMA);

let user_schema = match User::API_SCHEMA {
Schema::Object(ref user_schema) => user_schema,
_ => unreachable!(),
};
let user_plugin = SectionConfigPlugin::new("user".to_string(), Some("userid".to_string()), user_schema);
config.register_plugin(user_plugin);

let plugin = SectionConfigPlugin::new("user".to_string(), Some("userid".to_string()), obj_schema);
let mut config = SectionConfig::new(&Userid::API_SCHEMA);

config.register_plugin(plugin);
let token_schema = match ApiToken::API_SCHEMA {
Schema::Object(ref token_schema) => token_schema,
_ => unreachable!(),
};
let token_plugin = SectionConfigPlugin::new("token".to_string(), Some("tokenid".to_string()), token_schema);
config.register_plugin(token_plugin);

config
}
@@ -206,9 +242,57 @@ pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
}

// shell completion helper
pub fn complete_user_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
pub fn complete_userid(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
match config() {
Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
Ok((data, _digest)) => {
data.sections.iter()
.filter_map(|(id, (section_type, _))| {
if section_type == "user" {
Some(id.to_string())
} else {
None
}
}).collect()
},
Err(_) => return vec![],
}
}

// shell completion helper
pub fn complete_authid(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
match config() {
Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
Err(_) => vec![],
}
}

// shell completion helper
pub fn complete_token_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
let data = match config() {
Ok((data, _digest)) => data,
Err(_) => return Vec::new(),
};

match param.get("userid") {
Some(userid) => {
let user = data.lookup::<User>("user", userid);
let tokens = data.convert_to_typed_array("token");
match (user, tokens) {
(Ok(_), Ok(tokens)) => {
tokens
.into_iter()
.filter_map(|token: ApiToken| {
let tokenid = token.tokenid;
if tokenid.is_token() && tokenid.user() == userid {
Some(tokenid.tokenname().unwrap().as_str().to_string())
} else {
None
}
}).collect()
},
_ => vec![],
}
},
None => vec![],
}
}

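With both plugins registered, user.cfg can now hold two section types side by side. A hypothetical example of what such a file could look like; the layout is inferred from the SectionConfig plugins above (section type, id key, then indented properties) and is not copied from the source:

user: user1@pbs
	enable true
	email user1@example.com

token: user1@pbs!mytoken
	enable true
	comment automation token
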
src/config/verify.rs (new file, 205 lines)
@@ -0,0 +1,205 @@
use anyhow::{Error};
use lazy_static::lazy_static;
use std::collections::HashMap;
use serde::{Serialize, Deserialize};

use proxmox::api::{
api,
schema::*,
section_config::{
SectionConfig,
SectionConfigData,
SectionConfigPlugin,
}
};

use proxmox::tools::{fs::replace_file, fs::CreateOptions};

use crate::api2::types::*;

lazy_static! {
static ref CONFIG: SectionConfig = init();
}


#[api(
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
store: {
schema: DATASTORE_SCHEMA,
},
"ignore-verified": {
optional: true,
schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
},
"outdated-after": {
optional: true,
schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
schedule: {
optional: true,
schema: VERIFICATION_SCHEDULE_SCHEMA,
},
}
)]
#[serde(rename_all="kebab-case")]
#[derive(Serialize,Deserialize)]
/// Verification Job
pub struct VerificationJobConfig {
/// unique ID to address this job
pub id: String,
/// the datastore ID this verification job affects
pub store: String,
#[serde(skip_serializing_if="Option::is_none")]
/// if not set to false, check the age of the last snapshot verification to filter
/// out recent ones, depending on 'outdated_after' configuration.
pub ignore_verified: Option<bool>,
#[serde(skip_serializing_if="Option::is_none")]
/// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false.
pub outdated_after: Option<i64>,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
/// when to schedule this job in calendar event notation
pub schedule: Option<String>,
}


#[api(
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
store: {
schema: DATASTORE_SCHEMA,
},
"ignore-verified": {
optional: true,
schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
},
"outdated-after": {
optional: true,
schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
schedule: {
optional: true,
schema: VERIFICATION_SCHEDULE_SCHEMA,
},
"next-run": {
description: "Estimated time of the next run (UNIX epoch).",
optional: true,
type: Integer,
},
"last-run-state": {
description: "Result of the last run.",
optional: true,
type: String,
},
"last-run-upid": {
description: "Task UPID of the last run.",
optional: true,
type: String,
},
"last-run-endtime": {
description: "Endtime of the last run.",
optional: true,
type: Integer,
},
}
)]
#[serde(rename_all="kebab-case")]
#[derive(Serialize,Deserialize)]
/// Status of Verification Job
pub struct VerificationJobStatus {
/// unique ID to address this job
pub id: String,
/// the datastore ID this verification job affects
pub store: String,
#[serde(skip_serializing_if="Option::is_none")]
/// if not set to false, check the age of the last snapshot verification to filter
/// out recent ones, depending on 'outdated_after' configuration.
pub ignore_verified: Option<bool>,
#[serde(skip_serializing_if="Option::is_none")]
/// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false.
pub outdated_after: Option<i64>,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
/// when to schedule this job in calendar event notation
pub schedule: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
/// The timestamp when this job runs the next time.
pub next_run: Option<i64>,
#[serde(skip_serializing_if="Option::is_none")]
/// The state of the last scheduled run, if any
pub last_run_state: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
/// The task UPID of the last scheduled run, if any
pub last_run_upid: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
/// When the last run was finished, combined with UPID.starttime one can calculate the duration
pub last_run_endtime: Option<i64>,
}


fn init() -> SectionConfig {
let obj_schema = match VerificationJobConfig::API_SCHEMA {
Schema::Object(ref obj_schema) => obj_schema,
_ => unreachable!(),
};

let plugin = SectionConfigPlugin::new("verification".to_string(), Some(String::from("id")), obj_schema);
let mut config = SectionConfig::new(&JOB_ID_SCHEMA);
config.register_plugin(plugin);

config
}

pub const VERIFICATION_CFG_FILENAME: &str = "/etc/proxmox-backup/verification.cfg";
pub const VERIFICATION_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.verification.lck";

pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {

let content = proxmox::tools::fs::file_read_optional_string(VERIFICATION_CFG_FILENAME)?;
let content = content.unwrap_or_else(String::new);

let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(VERIFICATION_CFG_FILENAME, &content)?;
Ok((data, digest))
}

pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
let raw = CONFIG.write(VERIFICATION_CFG_FILENAME, &config)?;

let backup_user = crate::backup::backup_user()?;
let mode = nix::sys::stat::Mode::from_bits_truncate(0o0640);
// set the correct owner/group/permissions while saving file
// owner(rw) = root, group(r) = backup

let options = CreateOptions::new()
.perm(mode)
.owner(nix::unistd::ROOT)
.group(backup_user.gid);

replace_file(VERIFICATION_CFG_FILENAME, raw.as_bytes(), options)?;

Ok(())
}

// shell completion helper
pub fn complete_verification_job_id(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
match config() {
Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
Err(_) => return vec![],
}
}
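For illustration, a hypothetical verification.cfg stanza matching the schema above; property names follow the kebab-case renaming, and all values are invented:

verification: daily-store1
	store store1
	ignore-verified true
	outdated-after 30
	schedule daily
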
@@ -1,3 +1,8 @@
//! See the different modules for documentation on their usage.
//!
//! The [backup](backup/index.html) module contains some detailed information
//! on the inner workings of the backup server regarding data storage.

pub mod task;

#[macro_use]
@@ -9,7 +9,7 @@ use std::ffi::CStr;
pub trait BackupCatalogWriter {
fn start_directory(&mut self, name: &CStr) -> Result<(), Error>;
fn end_directory(&mut self) -> Result<(), Error>;
fn add_file(&mut self, name: &CStr, size: u64, mtime: u64) -> Result<(), Error>;
fn add_file(&mut self, name: &CStr, size: u64, mtime: i64) -> Result<(), Error>;
fn add_symlink(&mut self, name: &CStr) -> Result<(), Error>;
fn add_hardlink(&mut self, name: &CStr) -> Result<(), Error>;
fn add_block_device(&mut self, name: &CStr) -> Result<(), Error>;
@@ -12,7 +12,7 @@ use nix::errno::Errno;
use nix::fcntl::OFlag;
use nix::sys::stat::{FileStat, Mode};

use pathpatterns::{MatchEntry, MatchList, MatchType, PatternFlag};
use pathpatterns::{MatchEntry, MatchFlag, MatchList, MatchType, PatternFlag};
use pxar::Metadata;
use pxar::encoder::LinkOffset;

@@ -291,59 +291,68 @@ impl<'a, 'b> Archiver<'a, 'b> {
}

fn read_pxar_excludes(&mut self, parent: RawFd) -> Result<(), Error> {
let fd = self.open_file(parent, c_str!(".pxarexclude"), OFlag::O_RDONLY, false)?;
let fd = match self.open_file(parent, c_str!(".pxarexclude"), OFlag::O_RDONLY, false)? {
Some(fd) => fd,
None => return Ok(()),
};

let old_pattern_count = self.patterns.len();

let path_bytes = self.path.as_os_str().as_bytes();

if let Some(fd) = fd {
let file = unsafe { std::fs::File::from_raw_fd(fd.into_raw_fd()) };
let file = unsafe { std::fs::File::from_raw_fd(fd.into_raw_fd()) };

use io::BufRead;
for line in io::BufReader::new(file).split(b'\n') {
let line = match line {
Ok(line) => line,
Err(err) => {
let _ = writeln!(
self.errors,
"ignoring .pxarexclude after read error in {:?}: {}",
self.path,
err,
);
self.patterns.truncate(old_pattern_count);
return Ok(());
}
};

let line = crate::tools::strip_ascii_whitespace(&line);

if line.is_empty() || line[0] == b'#' {
continue;
use io::BufRead;
for line in io::BufReader::new(file).split(b'\n') {
let line = match line {
Ok(line) => line,
Err(err) => {
let _ = writeln!(
self.errors,
"ignoring .pxarexclude after read error in {:?}: {}",
self.path,
err,
);
self.patterns.truncate(old_pattern_count);
return Ok(());
}
};

let mut buf;
let (line, mode) = if line[0] == b'/' {
buf = Vec::with_capacity(path_bytes.len() + 1 + line.len());
buf.extend(path_bytes);
buf.extend(line);
(&buf[..], MatchType::Exclude)
} else if line.starts_with(b"!/") {
// inverted case with absolute path
buf = Vec::with_capacity(path_bytes.len() + line.len());
buf.extend(path_bytes);
buf.extend(&line[1..]); // without the '!'
(&buf[..], MatchType::Include)
} else {
(line, MatchType::Exclude)
};
let line = crate::tools::strip_ascii_whitespace(&line);

match MatchEntry::parse_pattern(line, PatternFlag::PATH_NAME, mode) {
Ok(pattern) => self.patterns.push(pattern),
Err(err) => {
let _ = writeln!(self.errors, "bad pattern in {:?}: {}", self.path, err);
if line.is_empty() || line[0] == b'#' {
continue;
}

let mut buf;
let (line, mode, anchored) = if line[0] == b'/' {
buf = Vec::with_capacity(path_bytes.len() + 1 + line.len());
buf.extend(path_bytes);
buf.extend(line);
(&buf[..], MatchType::Exclude, true)
} else if line.starts_with(b"!/") {
// inverted case with absolute path
buf = Vec::with_capacity(path_bytes.len() + line.len());
buf.extend(path_bytes);
buf.extend(&line[1..]); // without the '!'
(&buf[..], MatchType::Include, true)
} else if line.starts_with(b"!") {
(&line[1..], MatchType::Include, false)
} else {
(line, MatchType::Exclude, false)
};

match MatchEntry::parse_pattern(line, PatternFlag::PATH_NAME, mode) {
Ok(pattern) => {
if anchored {
self.patterns.push(pattern.add_flags(MatchFlag::ANCHORED));
} else {
self.patterns.push(pattern);
}
}
Err(err) => {
let _ = writeln!(self.errors, "bad pattern in {:?}: {}", self.path, err);
}
}
}

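The reworked parser distinguishes anchored patterns (leading '/', matched relative to the directory containing the .pxarexclude file) from unanchored ones, and now also accepts inverted unanchored patterns introduced by a bare '!'. A hypothetical .pxarexclude illustrating the four cases handled above:

# exclude all log files anywhere below this directory (unanchored)
*.log
# but keep this one wherever it appears (inverted, unanchored)
!important.log
# exclude exactly this subdirectory (anchored)
/cache
# re-include one file below it (inverted, anchored)
!/cache/keep.me
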
@@ -526,7 +535,7 @@ impl<'a, 'b> Archiver<'a, 'b> {

let file_size = stat.st_size as u64;
if let Some(ref mut catalog) = self.catalog {
catalog.add_file(c_file_name, file_size, stat.st_mtime as u64)?;
catalog.add_file(c_file_name, file_size, stat.st_mtime)?;
}

let offset: LinkOffset =
@@ -997,7 +1006,7 @@ fn process_acl(
/// Since we are generating an *exclude* list, we need to invert this, so includes get a `'!'`
/// prefix.
fn generate_pxar_excludes_cli(patterns: &[MatchEntry]) -> Vec<u8> {
use pathpatterns::{MatchFlag, MatchPattern};
use pathpatterns::MatchPattern;

let mut content = Vec::new();

@@ -266,10 +266,8 @@ impl SessionImpl {
) {
let final_result = match err.downcast::<io::Error>() {
Ok(err) => {
if err.kind() == io::ErrorKind::Other {
if self.verbose {
eprintln!("an IO error occurred: {}", err);
}
if err.kind() == io::ErrorKind::Other && self.verbose {
eprintln!("an IO error occurred: {}", err);
}

// fail the request
@@ -30,3 +30,10 @@ pub mod formatter;
#[macro_use]
pub mod rest;

pub mod jobstate;

mod verify_job;
pub use verify_job::*;

mod email_notifications;
pub use email_notifications::*;
@@ -2,7 +2,7 @@ use std::collections::HashMap;
use std::path::PathBuf;
use std::time::SystemTime;
use std::fs::metadata;
use std::sync::RwLock;
use std::sync::{Mutex, RwLock};

use anyhow::{bail, Error, format_err};
use hyper::Method;
@@ -10,6 +10,9 @@ use handlebars::Handlebars;
use serde::Serialize;

use proxmox::api::{ApiMethod, Router, RpcEnvironmentType};
use proxmox::tools::fs::{create_path, CreateOptions};

use crate::tools::{FileLogger, FileLogOptions};

pub struct ApiConfig {
basedir: PathBuf,
@@ -18,6 +21,7 @@ pub struct ApiConfig {
env_type: RpcEnvironmentType,
templates: RwLock<Handlebars<'static>>,
template_files: RwLock<HashMap<String, (SystemTime, PathBuf)>>,
request_log: Option<Mutex<FileLogger>>,
}

impl ApiConfig {
@@ -30,6 +34,7 @@ impl ApiConfig {
env_type,
templates: RwLock::new(Handlebars::new()),
template_files: RwLock::new(HashMap::new()),
request_log: None,
})
}

@@ -118,4 +123,30 @@ impl ApiConfig {
templates.render(name, data).map_err(|err| format_err!("{}", err))
}
}

pub fn enable_file_log<P>(&mut self, path: P) -> Result<(), Error>
where
P: Into<PathBuf>
{
let path: PathBuf = path.into();
if let Some(base) = path.parent() {
if !base.exists() {
let backup_user = crate::backup::backup_user()?;
let opts = CreateOptions::new().owner(backup_user.uid).group(backup_user.gid);
create_path(base, None, Some(opts)).map_err(|err| format_err!("{}", err))?;
}
}

let logger_options = FileLogOptions {
append: true,
owned_by_backup: true,
..Default::default()
};
self.request_log = Some(Mutex::new(FileLogger::new(&path, logger_options)?));

Ok(())
}
pub fn get_file_log(&self) -> Option<&Mutex<FileLogger>> {
self.request_log.as_ref()
}
}

src/server/email_notifications.rs (new file, 322 lines)
@@ -0,0 +1,322 @@
use anyhow::Error;
use serde_json::json;

use handlebars::{Handlebars, Helper, Context, RenderError, RenderContext, Output, HelperResult};

use proxmox::tools::email::sendmail;

use crate::{
config::verify::VerificationJobConfig,
config::sync::SyncJobConfig,
api2::types::{
Userid,
GarbageCollectionStatus,
},
tools::format::HumanByte,
};

const GC_OK_TEMPLATE: &str = r###"

Datastore: {{datastore}}
Task ID: {{status.upid}}
Index file count: {{status.index-file-count}}

Removed garbage: {{human-bytes status.removed-bytes}}
Removed chunks: {{status.removed-chunks}}
Remove bad files: {{status.removed-bad}}

Bad files: {{status.still-bad}}
Pending removals: {{human-bytes status.pending-bytes}} (in {{status.pending-chunks}} chunks)

Original Data usage: {{human-bytes status.index-data-bytes}}
On Disk usage: {{human-bytes status.disk-bytes}} ({{relative-percentage status.disk-bytes status.index-data-bytes}})
On Disk chunks: {{status.disk-chunks}}

Deduplication Factor: {{deduplication-factor}}

Garbage collection successful.

"###;


const GC_ERR_TEMPLATE: &str = r###"

Datastore: {{datastore}}

Garbage collection failed: {{error}}

"###;

const VERIFY_OK_TEMPLATE: &str = r###"

Job ID: {{job.id}}
Datastore: {{job.store}}

Verification successful.

"###;

const VERIFY_ERR_TEMPLATE: &str = r###"

Job ID: {{job.id}}
Datastore: {{job.store}}

Verification failed on these snapshots:

{{#each errors}}
{{this}}
{{/each}}

"###;

const SYNC_OK_TEMPLATE: &str = r###"

Job ID: {{job.id}}
Datastore: {{job.store}}
Remote: {{job.remote}}
Remote Store: {{job.remote-store}}

Synchronization successful.

"###;

const SYNC_ERR_TEMPLATE: &str = r###"

Job ID: {{job.id}}
Datastore: {{job.store}}
Remote: {{job.remote}}
Remote Store: {{job.remote-store}}

Synchronization failed: {{error}}

"###;

lazy_static::lazy_static!{

static ref HANDLEBARS: Handlebars<'static> = {
let mut hb = Handlebars::new();

hb.set_strict_mode(true);

hb.register_helper("human-bytes", Box::new(handlebars_humam_bytes_helper));
hb.register_helper("relative-percentage", Box::new(handlebars_relative_percentage_helper));

hb.register_template_string("gc_ok_template", GC_OK_TEMPLATE).unwrap();
hb.register_template_string("gc_err_template", GC_ERR_TEMPLATE).unwrap();

hb.register_template_string("verify_ok_template", VERIFY_OK_TEMPLATE).unwrap();
hb.register_template_string("verify_err_template", VERIFY_ERR_TEMPLATE).unwrap();

hb.register_template_string("sync_ok_template", SYNC_OK_TEMPLATE).unwrap();
hb.register_template_string("sync_err_template", SYNC_ERR_TEMPLATE).unwrap();

hb
};
}

fn send_job_status_mail(
email: &str,
subject: &str,
text: &str,
) -> Result<(), Error> {

// Note: OX has serious problems displaying text mails,
// so we include html as well
let html = format!("<html><body><pre>\n{}\n<pre>", handlebars::html_escape(text));

let nodename = proxmox::tools::nodename();

let author = format!("Proxmox Backup Server - {}", nodename);

sendmail(
&[email],
&subject,
Some(&text),
Some(&html),
None,
Some(&author),
)?;

Ok(())
}

pub fn send_gc_status(
email: &str,
datastore: &str,
status: &GarbageCollectionStatus,
result: &Result<(), Error>,
) -> Result<(), Error> {

let text = match result {
Ok(()) => {
let deduplication_factor = if status.disk_bytes > 0 {
(status.index_data_bytes as f64)/(status.disk_bytes as f64)
} else {
1.0
};

let data = json!({
"status": status,
"datastore": datastore,
"deduplication-factor": format!("{:.2}", deduplication_factor),
});

HANDLEBARS.render("gc_ok_template", &data)?
}
Err(err) => {
let data = json!({
"error": err.to_string(),
"datastore": datastore,
});
HANDLEBARS.render("gc_err_template", &data)?
}
};

let subject = match result {
Ok(()) => format!(
"Garbage Collect Datastore '{}' successful",
datastore,
),
Err(_) => format!(
"Garbage Collect Datastore '{}' failed",
datastore,
),
};

send_job_status_mail(email, &subject, &text)?;

Ok(())
}

pub fn send_verify_status(
email: &str,
job: VerificationJobConfig,
result: &Result<Vec<String>, Error>,
) -> Result<(), Error> {


let text = match result {
Ok(errors) if errors.is_empty() => {
let data = json!({ "job": job });
HANDLEBARS.render("verify_ok_template", &data)?
}
Ok(errors) => {
let data = json!({ "job": job, "errors": errors });
HANDLEBARS.render("verify_err_template", &data)?
}
Err(_) => {
// aborted job - do not send any email
return Ok(());
}
};

let subject = match result {
Ok(errors) if errors.is_empty() => format!(
"Verify Datastore '{}' successful",
job.store,
),
_ => format!(
"Verify Datastore '{}' failed",
job.store,
),
};

send_job_status_mail(email, &subject, &text)?;

Ok(())
}

pub fn send_sync_status(
email: &str,
job: &SyncJobConfig,
result: &Result<(), Error>,
) -> Result<(), Error> {

let text = match result {
Ok(()) => {
let data = json!({ "job": job });
HANDLEBARS.render("sync_ok_template", &data)?
}
Err(err) => {
let data = json!({ "job": job, "error": err.to_string() });
HANDLEBARS.render("sync_err_template", &data)?
}
};

let subject = match result {
Ok(()) => format!(
"Sync remote '{}' datastore '{}' successful",
job.remote,
job.remote_store,
),
Err(_) => format!(
"Sync remote '{}' datastore '{}' failed",
job.remote,
job.remote_store,
),
};

send_job_status_mail(email, &subject, &text)?;

Ok(())
}

/// Lookup a user's email address
///
/// For "backup@pam", this returns the address from "root@pam".
pub fn lookup_user_email(userid: &Userid) -> Option<String> {

use crate::config::user::{self, User};

if userid == Userid::backup_userid() {
return lookup_user_email(Userid::root_userid());
}

if let Ok(user_config) = user::cached_config() {
if let Ok(user) = user_config.lookup::<User>("user", userid.as_str()) {
return user.email.clone();
}
}

None
}

// Handlebars helper functions

fn handlebars_humam_bytes_helper(
h: &Helper,
_: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output
) -> HelperResult {
let param = h.param(0).map(|v| v.value().as_u64())
.flatten()
.ok_or(RenderError::new("human-bytes: param not found"))?;

out.write(&HumanByte::from(param).to_string())?;

Ok(())
}

fn handlebars_relative_percentage_helper(
h: &Helper,
_: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output
) -> HelperResult {
let param0 = h.param(0).map(|v| v.value().as_f64())
.flatten()
.ok_or(RenderError::new("relative-percentage: param0 not found"))?;
let param1 = h.param(1).map(|v| v.value().as_f64())
.flatten()
.ok_or(RenderError::new("relative-percentage: param1 not found"))?;

if param1 == 0.0 {
out.write("-")?;
} else {
out.write(&format!("{:.2}%", (param0*100.0)/param1))?;
}
Ok(())
}
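A short sketch of how the two registered helpers behave when rendered; the inline template and data are invented for this example, and the exact HumanByte output string is an assumption:

// Renders something like "used 2 MiB (50.00%)" for the values below,
// reusing the HANDLEBARS singleton with its helpers registered above.
fn helper_demo() -> Result<String, anyhow::Error> {
    let data = serde_json::json!({ "disk": 2097152u64, "total": 4194304u64 });
    let out = HANDLEBARS.render_template(
        "used {{human-bytes disk}} ({{relative-percentage disk total}})",
        &data,
    )?;
    Ok(out)
}
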
@@ -6,14 +6,16 @@ use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
pub struct RestEnvironment {
env_type: RpcEnvironmentType,
result_attributes: Value,
user: Option<String>,
auth_id: Option<String>,
client_ip: Option<std::net::SocketAddr>,
}

impl RestEnvironment {
pub fn new(env_type: RpcEnvironmentType) -> Self {
Self {
result_attributes: json!({}),
user: None,
auth_id: None,
client_ip: None,
env_type,
}
}
@@ -33,11 +35,19 @@ impl RpcEnvironment for RestEnvironment {
self.env_type
}

fn set_user(&mut self, user: Option<String>) {
self.user = user;
fn set_auth_id(&mut self, auth_id: Option<String>) {
self.auth_id = auth_id;
}

fn get_user(&self) -> Option<String> {
self.user.clone()
fn get_auth_id(&self) -> Option<String> {
self.auth_id.clone()
}

fn set_client_ip(&mut self, client_ip: Option<std::net::SocketAddr>) {
self.client_ip = client_ip;
}

fn get_client_ip(&self) -> Option<std::net::SocketAddr> {
self.client_ip.clone()
}
}
@@ -15,7 +15,7 @@
//! ```no_run
//! # use anyhow::{bail, Error};
//! # use proxmox_backup::server::TaskState;
//! # use proxmox_backup::config::jobstate::*;
//! # use proxmox_backup::server::jobstate::*;
//! # fn some_code() -> TaskState { TaskState::OK { endtime: 0 } }
//! # fn code() -> Result<(), Error> {
//! // locks the correct file under /var/lib
@@ -3,19 +3,22 @@ use std::future::Future;
use std::hash::BuildHasher;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::Arc;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll};

use anyhow::{bail, format_err, Error};
use futures::future::{self, FutureExt, TryFutureExt};
use futures::stream::TryStreamExt;
use hyper::header;
use hyper::header::{self, HeaderMap};
use hyper::body::HttpBody;
use hyper::http::request::Parts;
use hyper::{Body, Request, Response, StatusCode};
use lazy_static::lazy_static;
use serde_json::{json, Value};
use tokio::fs::File;
use tokio::time::Instant;
use url::form_urlencoded;
use regex::Regex;

use proxmox::http_err;
use proxmox::api::{
@@ -39,8 +42,9 @@ use super::formatter::*;
use super::ApiConfig;

use crate::auth_helpers::*;
use crate::api2::types::Userid;
use crate::api2::types::{Authid, Userid};
use crate::tools;
use crate::tools::FileLogger;
use crate::tools::ticket::Ticket;
use crate::config::cached_user_info::CachedUserInfo;

@@ -50,6 +54,8 @@ pub struct RestServer {
pub api_config: Arc<ApiConfig>,
}

const MAX_URI_QUERY_LENGTH: usize = 3072;

impl RestServer {

pub fn new(api_config: ApiConfig) -> Self {
@@ -105,14 +111,20 @@ pub struct ApiService {
}

fn log_response(
logfile: Option<&Mutex<FileLogger>>,
peer: &std::net::SocketAddr,
method: hyper::Method,
path: &str,
path_query: &str,
resp: &Response<Body>,
user_agent: Option<String>,
) {

if resp.extensions().get::<NoLogExtension>().is_some() { return; };

// we also log URL-too-long requests, so avoid messages bigger than PIPE_BUF (4k on Linux)
// to profit from atomicity guarantees for O_APPEND opened logfiles
let path = &path_query[..MAX_URI_QUERY_LENGTH.min(path_query.len())];

let status = resp.status();

if !(status.is_success() || status.is_informational()) {
@@ -125,6 +137,51 @@ fn log_response(

log::error!("{} {}: {} {}: [client {}] {}", method.as_str(), path, status.as_str(), reason, peer, message);
}
if let Some(logfile) = logfile {
let auth_id = match resp.extensions().get::<Authid>() {
Some(auth_id) => auth_id.to_string(),
None => "-".to_string(),
};
let now = proxmox::tools::time::epoch_i64();
// time format which apache/nginx use (by default), copied from pve-http-server
let datetime = proxmox::tools::time::strftime_local("%d/%m/%Y:%H:%M:%S %z", now)
.unwrap_or("-".into());

logfile
.lock()
.unwrap()
.log(format!(
"{} - {} [{}] \"{} {}\" {} {} {}",
peer.ip(),
auth_id,
datetime,
method.as_str(),
path,
status.as_str(),
resp.body().size_hint().lower(),
user_agent.unwrap_or("-".into()),
));
}
}

fn get_proxied_peer(headers: &HeaderMap) -> Option<std::net::SocketAddr> {
lazy_static! {
static ref RE: Regex = Regex::new(r#"for="([^"]+)""#).unwrap();
}
let forwarded = headers.get(header::FORWARDED)?.to_str().ok()?;
let capture = RE.captures(&forwarded)?;
let rhost = capture.get(1)?.as_str();

rhost.parse().ok()
}

fn get_user_agent(headers: &HeaderMap) -> Option<String> {
let agent = headers.get(header::USER_AGENT)?.to_str();
agent.map(|s| {
let mut s = s.to_owned();
s.truncate(128);
s
}).ok()
}

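get_proxied_peer reads the RFC 7239 Forwarded header that proxy_protected_request (further down) sets when it forwards a request to the privileged daemon. A sketch of that round trip, with an invented client address:

// The proxy side inserts:  Forwarded: for="192.0.2.7:55123";
// and get_proxied_peer recovers the original client address from it.
fn forwarded_roundtrip() {
    let mut headers = hyper::header::HeaderMap::new();
    headers.insert(
        hyper::header::FORWARDED,
        r#"for="192.0.2.7:55123";"#.parse().unwrap(),
    );
    let peer = get_proxied_peer(&headers);
    assert_eq!(peer, "192.0.2.7:55123".parse().ok());
}
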
impl tower_service::Service<Request<Body>> for ApiService {
|
||||
@ -137,31 +194,29 @@ impl tower_service::Service<Request<Body>> for ApiService {
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Request<Body>) -> Self::Future {
|
||||
let path = req.uri().path().to_owned();
|
||||
let path = req.uri().path_and_query().unwrap().as_str().to_owned();
|
||||
let method = req.method().clone();
|
||||
let user_agent = get_user_agent(req.headers());
|
||||
|
||||
let config = Arc::clone(&self.api_config);
|
||||
let peer = self.peer;
|
||||
let peer = match get_proxied_peer(req.headers()) {
|
||||
Some(proxied_peer) => proxied_peer,
|
||||
None => self.peer,
|
||||
};
|
||||
async move {
|
||||
match handle_request(config, req).await {
    Ok(res) => {
        log_response(&peer, method, &path, &res);
        Ok::<_, Self::Error>(res)
    }
let response = match handle_request(Arc::clone(&config), req, &peer).await {
    Ok(response) => response,
    Err(err) => {
        if let Some(apierr) = err.downcast_ref::<HttpError>() {
            let mut resp = Response::new(Body::from(apierr.message.clone()));
            *resp.status_mut() = apierr.code;
            log_response(&peer, method, &path, &resp);
            Ok(resp)
        } else {
            let mut resp = Response::new(Body::from(err.to_string()));
            *resp.status_mut() = StatusCode::BAD_REQUEST;
            log_response(&peer, method, &path, &resp);
            Ok(resp)
        }
        let (err, code) = match err.downcast_ref::<HttpError>() {
            Some(apierr) => (apierr.message.clone(), apierr.code),
            _ => (err.to_string(), StatusCode::BAD_REQUEST),
        };
        Response::builder().status(code).body(err.into())?
    }
    }
};
let logger = config.get_file_log();
log_response(logger, &peer, method, &path, &response, user_agent);
Ok(response)
}
.boxed()
}

@ -253,6 +308,7 @@ async fn proxy_protected_request(
    info: &'static ApiMethod,
    mut parts: Parts,
    req_body: Body,
    peer: &std::net::SocketAddr,
) -> Result<Response<Body>, Error> {

    let mut uri_parts = parts.uri.clone().into_parts();

@ -263,7 +319,10 @@ async fn proxy_protected_request(

    parts.uri = new_uri;

    let request = Request::from_parts(parts, req_body);
    let mut request = Request::from_parts(parts, req_body);
    request
        .headers_mut()
        .insert(header::FORWARDED, format!("for=\"{}\";", peer).parse().unwrap());

    let reload_timezone = info.reload_timezone;

@ -375,11 +434,17 @@ fn get_index(
        }
    };

    Response::builder()
    let mut resp = Response::builder()
        .status(StatusCode::OK)
        .header(header::CONTENT_TYPE, ct)
        .body(index.into())
        .unwrap()
        .unwrap();

    if let Some(userid) = userid {
        resp.extensions_mut().insert(Authid::from((userid, None)));
    }

    resp
}

fn extension_to_content_type(filename: &Path) -> (&'static str, bool) {

@ -466,67 +531,116 @@ async fn handle_static_file_download(filename: PathBuf) -> Result<Response<Body
    }
}

fn extract_auth_data(headers: &http::HeaderMap) -> (Option<String>, Option<String>, Option<String>) {

    let mut ticket = None;
    let mut language = None;
fn extract_lang_header(headers: &http::HeaderMap) -> Option<String> {
    if let Some(raw_cookie) = headers.get("COOKIE") {
        if let Ok(cookie) = raw_cookie.to_str() {
            ticket = tools::extract_cookie(cookie, "PBSAuthCookie");
            language = tools::extract_cookie(cookie, "PBSLangCookie");
            return tools::extract_cookie(cookie, "PBSLangCookie");
        }
    }

    let token = match headers.get("CSRFPreventionToken").map(|v| v.to_str()) {
        Some(Ok(v)) => Some(v.to_owned()),
        _ => None,
    };
    None
}

    (ticket, token, language)
struct UserAuthData {
    ticket: String,
    csrf_token: Option<String>,
}

enum AuthData {
    User(UserAuthData),
    ApiToken(String),
}

fn extract_auth_data(headers: &http::HeaderMap) -> Option<AuthData> {
    if let Some(raw_cookie) = headers.get("COOKIE") {
        if let Ok(cookie) = raw_cookie.to_str() {
            if let Some(ticket) = tools::extract_cookie(cookie, "PBSAuthCookie") {
                let csrf_token = match headers.get("CSRFPreventionToken").map(|v| v.to_str()) {
                    Some(Ok(v)) => Some(v.to_owned()),
                    _ => None,
                };
                return Some(AuthData::User(UserAuthData {
                    ticket,
                    csrf_token,
                }));
            }
        }
    }

    match headers.get("AUTHORIZATION").map(|v| v.to_str()) {
        Some(Ok(v)) => Some(AuthData::ApiToken(v.to_owned())),
        _ => None,
    }
}

fn check_auth(
    method: &hyper::Method,
    ticket: &Option<String>,
    token: &Option<String>,
    auth_data: &AuthData,
    user_info: &CachedUserInfo,
) -> Result<Userid, Error> {
    let ticket_lifetime = tools::ticket::TICKET_LIFETIME;
) -> Result<Authid, Error> {
    match auth_data {
        AuthData::User(user_auth_data) => {
            let ticket = user_auth_data.ticket.clone();
            let ticket_lifetime = tools::ticket::TICKET_LIFETIME;

    let ticket = ticket.as_ref().map(String::as_str);
    let userid: Userid = Ticket::parse(&ticket.ok_or_else(|| format_err!("missing ticket"))?)?
        .verify_with_time_frame(public_auth_key(), "PBS", None, -300..ticket_lifetime)?;
            let userid: Userid = Ticket::parse(&ticket)?
                .verify_with_time_frame(public_auth_key(), "PBS", None, -300..ticket_lifetime)?;

    if !user_info.is_active_user(&userid) {
        bail!("user account disabled or expired.");
    }
            let auth_id = Authid::from(userid.clone());
            if !user_info.is_active_auth_id(&auth_id) {
                bail!("user account disabled or expired.");
            }

    if method != hyper::Method::GET {
        if let Some(token) = token {
            verify_csrf_prevention_token(csrf_secret(), &userid, &token, -300, ticket_lifetime)?;
        } else {
            bail!("missing CSRF prevention token");
            if method != hyper::Method::GET {
                if let Some(csrf_token) = &user_auth_data.csrf_token {
                    verify_csrf_prevention_token(csrf_secret(), &userid, &csrf_token, -300, ticket_lifetime)?;
                } else {
                    bail!("missing CSRF prevention token");
                }
            }

            Ok(auth_id)
        },
        AuthData::ApiToken(api_token) => {
            let mut parts = api_token.splitn(2, ':');
            let tokenid = parts.next()
                .ok_or_else(|| format_err!("failed to split API token header"))?;
            let tokenid: Authid = tokenid.parse()?;

            let tokensecret = parts.next()
                .ok_or_else(|| format_err!("failed to split API token header"))?;
            crate::config::token_shadow::verify_secret(&tokenid, &tokensecret)?;

            Ok(tokenid)
        }
    }

    Ok(userid)
}
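The ApiToken arm above is plain string splitting before the secret is checked against the shadow file; a minimal standalone sketch of the splitn logic (the header value and Authid shown are hypothetical examples):

fn split_token(header: &str) -> Option<(&str, &str)> {
    // splitn(2, ':') keeps any further ':' inside the secret intact
    let mut parts = header.splitn(2, ':');
    Some((parts.next()?, parts.next()?))
}

fn main() {
    let (tokenid, secret) = split_token("user@pbs!mytoken:12345678-aaaa").unwrap();
    assert_eq!(tokenid, "user@pbs!mytoken");
    assert_eq!(secret, "12345678-aaaa");
}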
async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<Response<Body>, Error> {
async fn handle_request(
    api: Arc<ApiConfig>,
    req: Request<Body>,
    peer: &std::net::SocketAddr,
) -> Result<Response<Body>, Error> {

    let (parts, body) = req.into_parts();

    let method = parts.method.clone();
    let (path, components) = tools::normalize_uri_path(parts.uri.path())?;

    let comp_len = components.len();

    //println!("REQUEST {} {}", method, path);
    //println!("COMPO {:?}", components);
    let query = parts.uri.query().unwrap_or_default();
    if path.len() + query.len() > MAX_URI_QUERY_LENGTH {
        return Ok(Response::builder()
            .status(StatusCode::URI_TOO_LONG)
            .body("".into())
            .unwrap());
    }

    let env_type = api.env_type();
    let mut rpcenv = RestEnvironment::new(env_type);

    rpcenv.set_client_ip(Some(*peer));

    let user_info = CachedUserInfo::new()?;

    let delay_unauth_time = std::time::Instant::now() + std::time::Duration::from_millis(3000);

@ -555,9 +669,12 @@ async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<Respo
    }

    if auth_required {
        let (ticket, token, _) = extract_auth_data(&parts.headers);
        match check_auth(&method, &ticket, &token, &user_info) {
            Ok(userid) => rpcenv.set_user(Some(userid.to_string())),
        let auth_result = match extract_auth_data(&parts.headers) {
            Some(auth_data) => check_auth(&method, &auth_data, &user_info),
            None => Err(format_err!("no authentication credentials provided.")),
        };
        match auth_result {
            Ok(authid) => rpcenv.set_auth_id(Some(authid.to_string())),
            Err(err) => {
                // always delay unauthorized calls by 3 seconds (from start of request)
                let err = http_err!(UNAUTHORIZED, "authentication failed - {}", err);

@ -573,23 +690,30 @@ async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<Respo
                return Ok((formatter.format_error)(err));
            }
            Some(api_method) => {
                let user = rpcenv.get_user();
                if !check_api_permission(api_method.access.permission, user.as_deref(), &uri_param, user_info.as_ref()) {
                let auth_id = rpcenv.get_auth_id();
                if !check_api_permission(api_method.access.permission, auth_id.as_deref(), &uri_param, user_info.as_ref()) {
                    let err = http_err!(FORBIDDEN, "permission check failed");
                    tokio::time::delay_until(Instant::from_std(access_forbidden_time)).await;
                    return Ok((formatter.format_error)(err));
                }

                let result = if api_method.protected && env_type == RpcEnvironmentType::PUBLIC {
                    proxy_protected_request(api_method, parts, body).await
                    proxy_protected_request(api_method, parts, body, peer).await
                } else {
                    handle_api_request(rpcenv, api_method, formatter, parts, body, uri_param).await
                };

                if let Err(err) = result {
                    return Ok((formatter.format_error)(err));
                let mut response = match result {
                    Ok(resp) => resp,
                    Err(err) => (formatter.format_error)(err),
                };

                if let Some(auth_id) = auth_id {
                    let auth_id: Authid = auth_id.parse()?;
                    response.extensions_mut().insert(auth_id);
                }
                return result;

                return Ok(response);
            }
        }

@ -602,13 +726,14 @@ async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<Respo
    }

    if comp_len == 0 {
        let (ticket, token, language) = extract_auth_data(&parts.headers);
        if ticket != None {
            match check_auth(&method, &ticket, &token, &user_info) {
                Ok(userid) => {
                    let new_token = assemble_csrf_prevention_token(csrf_secret(), &userid);
                    return Ok(get_index(Some(userid), Some(new_token), language, &api, parts));
                }
        let language = extract_lang_header(&parts.headers);
        if let Some(auth_data) = extract_auth_data(&parts.headers) {
            match check_auth(&method, &auth_data, &user_info) {
                Ok(auth_id) if !auth_id.is_token() => {
                    let userid = auth_id.user();
                    let new_csrf_token = assemble_csrf_prevention_token(csrf_secret(), userid);
                    return Ok(get_index(Some(userid.clone()), Some(new_csrf_token), language, &api, parts));
                },
                _ => {
                    tokio::time::delay_until(Instant::from_std(delay_unauth_time)).await;
                    return Ok(get_index(None, None, language, &api, parts));
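The fixed 3-second hold above means an unauthorized response is always released relative to the start of the request, so a fast auth failure leaks no timing information. A minimal sketch of that policy, using the same tokio 0.2-era delay_until call the surrounding code uses:

use std::time::{Duration, Instant};

async fn delay_unauth(request_start: Instant) {
    // release exactly 3 s after the request began, however fast auth failed
    let release_at = request_start + Duration::from_millis(3000);
    tokio::time::delay_until(tokio::time::Instant::from_std(release_at)).await;
}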
@ -6,7 +6,7 @@ use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema};
use proxmox::const_regex;
use proxmox::sys::linux::procfs;

use crate::api2::types::Userid;
use crate::api2::types::Authid;

/// Unique Process/Task Identifier
///
@ -34,8 +34,8 @@ pub struct UPID {
    pub worker_type: String,
    /// Worker ID (arbitrary ASCII string)
    pub worker_id: Option<String>,
    /// The user who started the task
    pub userid: Userid,
    /// The authenticated entity who started the task
    pub auth_id: Authid,
    /// The node name.
    pub node: String,
}
@ -47,7 +47,7 @@ const_regex! {
    pub PROXMOX_UPID_REGEX = concat!(
        r"^UPID:(?P<node>[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?):(?P<pid>[0-9A-Fa-f]{8}):",
        r"(?P<pstart>[0-9A-Fa-f]{8,9}):(?P<task_id>[0-9A-Fa-f]{8,16}):(?P<starttime>[0-9A-Fa-f]{8}):",
        r"(?P<wtype>[^:\s]+):(?P<wid>[^:\s]*):(?P<userid>[^:\s]+):$"
        r"(?P<wtype>[^:\s]+):(?P<wid>[^:\s]*):(?P<authid>[^:\s]+):$"
    );
}

@ -65,7 +65,7 @@ impl UPID {
    pub fn new(
        worker_type: &str,
        worker_id: Option<String>,
        userid: Userid,
        auth_id: Authid,
    ) -> Result<Self, Error> {

        let pid = unsafe { libc::getpid() };

@ -75,11 +75,6 @@ impl UPID {
        if worker_type.contains(bad) {
            bail!("illegal characters in worker type '{}'", worker_type);
        }
        if let Some(ref worker_id) = worker_id {
            if worker_id.contains(bad) {
                bail!("illegal characters in worker id '{}'", worker_id);
            }
        }

        static WORKER_TASK_NEXT_ID: AtomicUsize = AtomicUsize::new(0);

@ -92,7 +87,7 @@ impl UPID {
            task_id,
            worker_type: worker_type.to_owned(),
            worker_id,
            userid,
            auth_id,
            node: proxmox::tools::nodename().to_owned(),
        })
    }
@ -112,14 +107,22 @@ impl std::str::FromStr for UPID {

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if let Some(cap) = PROXMOX_UPID_REGEX.captures(s) {

            let worker_id = if cap["wid"].is_empty() {
                None
            } else {
                let wid = crate::tools::systemd::unescape_unit(&cap["wid"])?;
                Some(wid)
            };

            Ok(UPID {
                pid: i32::from_str_radix(&cap["pid"], 16).unwrap(),
                pstart: u64::from_str_radix(&cap["pstart"], 16).unwrap(),
                starttime: i64::from_str_radix(&cap["starttime"], 16).unwrap(),
                task_id: usize::from_str_radix(&cap["task_id"], 16).unwrap(),
                worker_type: cap["wtype"].to_string(),
                worker_id: if cap["wid"].is_empty() { None } else { Some(cap["wid"].to_string()) },
                userid: cap["userid"].parse()?,
                worker_id,
                auth_id: cap["authid"].parse()?,
                node: cap["node"].to_string(),
            })
        } else {
@ -133,12 +136,16 @@ impl std::fmt::Display for UPID {

    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {

        let wid = if let Some(ref id) = self.worker_id { id } else { "" };
        let wid = if let Some(ref id) = self.worker_id {
            crate::tools::systemd::escape_unit(id, false)
        } else {
            String::new()
        };

        // Note: pstart can be > 32bit if uptime > 497 days, so this can result in
        // more than 8 characters for pstart

        write!(f, "UPID:{}:{:08X}:{:08X}:{:08X}:{:08X}:{}:{}:{}:",
            self.node, self.pid, self.pstart, self.task_id, self.starttime, self.worker_type, wid, self.userid)
            self.node, self.pid, self.pstart, self.task_id, self.starttime, self.worker_type, wid, self.auth_id)
    }
}
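A worked example of the Display format above, with every field value made up; the hex fields are zero-padded to 8 characters by {:08X}:

fn main() {
    // node, pid, pstart, task_id, starttime, worker_type, wid, auth_id
    let upid = format!(
        "UPID:{}:{:08X}:{:08X}:{:08X}:{:08X}:{}:{}:{}:",
        "pbs1", 0x1234, 0x2A, 7, 0x5F8A0000i64, "verificationjob", "store1", "admin@pbs"
    );
    assert_eq!(
        upid,
        "UPID:pbs1:00001234:0000002A:00000007:5F8A0000:verificationjob:store1:admin@pbs:"
    );
}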
96  src/server/verify_job.rs  (new file)
@ -0,0 +1,96 @@
use anyhow::{format_err, Error};

use crate::{
    server::WorkerTask,
    api2::types::*,
    server::jobstate::Job,
    config::verify::VerificationJobConfig,
    backup::{
        DataStore,
        BackupManifest,
        verify_all_backups,
    },
    task_log,
};

/// Runs a verification job.
pub fn do_verification_job(
    mut job: Job,
    verification_job: VerificationJobConfig,
    auth_id: &Authid,
    schedule: Option<String>,
) -> Result<String, Error> {

    let datastore = DataStore::lookup_datastore(&verification_job.store)?;

    let outdated_after = verification_job.outdated_after.clone();
    let ignore_verified_snapshots = verification_job.ignore_verified.unwrap_or(true);

    let filter = move |manifest: &BackupManifest| {
        if !ignore_verified_snapshots {
            return true;
        }

        let raw_verify_state = manifest.unprotected["verify_state"].clone();
        match serde_json::from_value::<SnapshotVerifyState>(raw_verify_state) {
            Err(_) => return true, // no last verification, always include
            Ok(last_verify) => {
                match outdated_after {
                    None => false, // never re-verify if ignored and no max age
                    Some(max_age) => {
                        let now = proxmox::tools::time::epoch_i64();
                        let days_since_last_verify = (now - last_verify.upid.starttime) / 86400;

                        days_since_last_verify > max_age
                    }
                }
            }
        }
    };

    let email = crate::server::lookup_user_email(auth_id.user());

    let job_id = job.jobname().to_string();
    let worker_type = job.jobtype().to_string();
    let upid_str = WorkerTask::new_thread(
        &worker_type,
        Some(job.jobname().to_string()),
        auth_id.clone(),
        false,
        move |worker| {
            job.start(&worker.upid().to_string())?;

            task_log!(worker, "Starting datastore verify job '{}'", job_id);
            if let Some(event_str) = schedule {
                task_log!(worker, "task triggered by schedule '{}'", event_str);
            }

            let result = verify_all_backups(datastore, worker.clone(), worker.upid(), Some(&filter));
            let job_result = match result {
                Ok(ref errors) if errors.is_empty() => Ok(()),
                Ok(_) => Err(format_err!("verification failed - please check the log for details")),
                Err(_) => Err(format_err!("verification failed - job aborted")),
            };

            let status = worker.create_state(&job_result);

            match job.finish(status) {
                Err(err) => eprintln!(
                    "could not finish job state for {}: {}",
                    job.jobtype().to_string(),
                    err
                ),
                Ok(_) => (),
            }

            if let Some(email) = email {
                if let Err(err) = crate::server::send_verify_status(&email, verification_job, &result) {
                    eprintln!("send verify notification failed: {}", err);
                }
            }

            job_result
        },
    )?;
    Ok(upid_str)
}
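The outdated-after cutoff in the filter above is plain integer day arithmetic on epoch timestamps; a standalone sketch with hypothetical timestamps and a 30-day cutoff:

fn needs_reverify(now: i64, last_verify_start: i64, max_age_days: i64) -> bool {
    // 86400 seconds per day, truncating division as in the filter
    (now - last_verify_start) / 86400 > max_age_days
}

fn main() {
    let day = 86_400;
    assert!(needs_reverify(40 * day, 0, 30));  // verified 40 days ago -> re-verify
    assert!(!needs_reverify(10 * day, 0, 30)); // verified 10 days ago -> skip
}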
@ -1,6 +1,5 @@
use std::collections::{HashMap, VecDeque};
use std::fs::File;
use std::path::Path;
use std::io::{Read, Write, BufRead, BufReader};
use std::panic::UnwindSafe;
use std::sync::atomic::{AtomicBool, Ordering};
@ -21,8 +20,8 @@ use proxmox::tools::fs::{create_path, open_file_locked, replace_file, CreateOpti
use super::UPID;

use crate::tools::logrotate::{LogRotate, LogRotateFiles};
use crate::tools::FileLogger;
use crate::api2::types::Userid;
use crate::tools::{FileLogger, FileLogOptions};
use crate::api2::types::Authid;

macro_rules! PROXMOX_BACKUP_VAR_RUN_DIR_M { () => ("/run/proxmox-backup") }
macro_rules! PROXMOX_BACKUP_LOG_DIR_M { () => ("/var/log/proxmox-backup") }
@ -36,8 +35,6 @@ pub const PROXMOX_BACKUP_ACTIVE_TASK_FN: &str = concat!(PROXMOX_BACKUP_TASK_DIR_
pub const PROXMOX_BACKUP_INDEX_TASK_FN: &str = concat!(PROXMOX_BACKUP_TASK_DIR_M!(), "/index");
pub const PROXMOX_BACKUP_ARCHIVE_TASK_FN: &str = concat!(PROXMOX_BACKUP_TASK_DIR_M!(), "/archive");

const MAX_INDEX_TASKS: usize = 1000;

lazy_static! {
    static ref WORKER_TASK_LIST: Mutex<HashMap<usize, Arc<WorkerTask>>> = Mutex::new(HashMap::new());

@ -93,22 +90,27 @@ pub fn create_task_control_socket() -> Result<(), Error> {
        "\0{}/proxmox-task-control-{}.sock", PROXMOX_BACKUP_VAR_RUN_DIR, *MY_PID);

    let control_future = super::create_control_socket(socketname, |param| {
        let param = param.as_object()
        let param = param
            .as_object()
            .ok_or_else(|| format_err!("unable to parse parameters (expected json object)"))?;
        if param.keys().count() != 2 { bail!("wrong number of parameters"); }

        let command = param["command"].as_str()
        let command = param["command"]
            .as_str()
            .ok_or_else(|| format_err!("unable to parse parameters (missing command)"))?;

        // we have only two commands for now
        if !(command == "abort-task" || command == "status") { bail!("got unknown command '{}'", command); }
        if !(command == "abort-task" || command == "status") {
            bail!("got unknown command '{}'", command);
        }

        let upid_str = param["upid"].as_str()
        let upid_str = param["upid"]
            .as_str()
            .ok_or_else(|| format_err!("unable to parse parameters (missing upid)"))?;

        let upid = upid_str.parse::<UPID>()?;

        if !((upid.pid == *MY_PID) && (upid.pstart == *MY_PID_PSTART)) {
        if !(upid.pid == *MY_PID && upid.pstart == *MY_PID_PSTART) {
            bail!("upid does not belong to this process");
        }

@ -344,26 +346,11 @@ fn lock_task_list_files(exclusive: bool) -> Result<std::fs::File, Error> {
/// rotates it if it is
pub fn rotate_task_log_archive(size_threshold: u64, compress: bool, max_files: Option<usize>) -> Result<bool, Error> {
    let _lock = lock_task_list_files(true)?;
    let path = Path::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN);
    let metadata = match path.metadata() {
        Ok(metadata) => metadata,
        Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(false),
        Err(err) => bail!("unable to open task archive - {}", err),
    };

    if metadata.len() > size_threshold {
        let mut logrotate = LogRotate::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN, compress).ok_or_else(|| format_err!("could not get archive file names"))?;
        let backup_user = crate::backup::backup_user()?;
        logrotate.rotate(
            CreateOptions::new()
                .owner(backup_user.uid)
                .group(backup_user.gid),
            max_files,
        )?;
        Ok(true)
    } else {
        Ok(false)
    }
    let mut logrotate = LogRotate::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN, compress)
        .ok_or(format_err!("could not get archive file names"))?;

    logrotate.rotate(size_threshold, None, max_files)
}

// atomically read/update the task list, update status of finished tasks
@ -374,7 +361,10 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<(), Error> {

    let lock = lock_task_list_files(true)?;

    // TODO remove with 1.x
    let mut finish_list: Vec<TaskListInfo> = read_task_file_from_path(PROXMOX_BACKUP_INDEX_TASK_FN)?;
    let had_index_file = !finish_list.is_empty();

    let mut active_list: Vec<TaskListInfo> = read_task_file_from_path(PROXMOX_BACKUP_ACTIVE_TASK_FN)?
        .into_iter()
        .filter_map(|info| {
@ -385,7 +375,7 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<(), Error> {
            }

            if !worker_is_active_local(&info.upid) {
                println!("Detected stopped UPID {}", &info.upid_str);
                println!("Detected stopped task '{}'", &info.upid_str);
                let now = proxmox::tools::time::epoch_i64();
                let status = upid_read_status(&info.upid)
                    .unwrap_or_else(|_| TaskState::Unknown { endtime: now });
@ -423,33 +413,10 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<(), Error> {
        }
    });

    let start = if finish_list.len() > MAX_INDEX_TASKS {
        finish_list.len() - MAX_INDEX_TASKS
    } else {
        0
    };

    let end = (start+MAX_INDEX_TASKS).min(finish_list.len());

    let index_raw = if end > start {
        render_task_list(&finish_list[start..end])
    } else {
        "".to_string()
    };

    replace_file(
        PROXMOX_BACKUP_INDEX_TASK_FN,
        index_raw.as_bytes(),
        CreateOptions::new()
            .owner(backup_user.uid)
            .group(backup_user.gid),
    )?;

    if !finish_list.is_empty() && start > 0 {
    if !finish_list.is_empty() {
        match std::fs::OpenOptions::new().append(true).create(true).open(PROXMOX_BACKUP_ARCHIVE_TASK_FN) {
            Ok(mut writer) => {
                for info in &finish_list[0..start] {
                for info in &finish_list {
                    writer.write_all(render_task_line(&info).as_bytes())?;
                }
            },
@ -459,6 +426,12 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<(), Error> {
        nix::unistd::chown(PROXMOX_BACKUP_ARCHIVE_TASK_FN, Some(backup_user.uid), Some(backup_user.gid))?;
    }

    // TODO Remove with 1.x
    // for compatibility, if we had an INDEX file, we do not need it anymore
    if had_index_file {
        let _ = nix::unistd::unlink(PROXMOX_BACKUP_INDEX_TASK_FN);
    }

    drop(lock);

    Ok(())
@ -522,16 +495,9 @@ where
    read_task_file(file)
}

enum TaskFile {
    Active,
    Index,
    Archive,
    End,
}

pub struct TaskListInfoIterator {
    list: VecDeque<TaskListInfo>,
    file: TaskFile,
    end: bool,
    archive: Option<LogRotateFiles>,
    lock: Option<File>,
}
@ -546,7 +512,10 @@ impl TaskListInfoIterator {
            .iter()
            .any(|info| info.state.is_some() || !worker_is_active_local(&info.upid));

        if needs_update {
        // TODO remove with 1.x
        let index_exists = std::path::Path::new(PROXMOX_BACKUP_INDEX_TASK_FN).is_file();

        if needs_update || index_exists {
            drop(lock);
            update_active_workers(None)?;
            let lock = lock_task_list_files(false)?;
@ -560,16 +529,16 @@ impl TaskListInfoIterator {
        let archive = if active_only {
            None
        } else {
            let logrotate = LogRotate::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN, true).ok_or_else(|| format_err!("could not get archive file names"))?;
            let logrotate = LogRotate::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN, true)
                .ok_or_else(|| format_err!("could not get archive file names"))?;
            Some(logrotate.files())
        };

        let file = if active_only { TaskFile::End } else { TaskFile::Active };
        let lock = if active_only { None } else { Some(read_lock) };

        Ok(Self {
            list: active_list.into(),
            file,
            end: active_only,
            archive,
            lock,
        })
@ -583,35 +552,23 @@ impl Iterator for TaskListInfoIterator {
        loop {
            if let Some(element) = self.list.pop_back() {
                return Some(Ok(element));
            } else if self.end {
                return None;
            } else {
                match self.file {
                    TaskFile::Active => {
                        let index = match read_task_file_from_path(PROXMOX_BACKUP_INDEX_TASK_FN) {
                            Ok(index) => index,
                if let Some(mut archive) = self.archive.take() {
                    if let Some(file) = archive.next() {
                        let list = match read_task_file(file) {
                            Ok(list) => list,
                            Err(err) => return Some(Err(err)),
                        };
                        self.list.append(&mut index.into());
                        self.file = TaskFile::Index;
                    },
                    TaskFile::Index | TaskFile::Archive => {
                        if let Some(mut archive) = self.archive.take() {
                            if let Some(file) = archive.next() {
                                let list = match read_task_file(file) {
                                    Ok(list) => list,
                                    Err(err) => return Some(Err(err)),
                                };
                                self.list.append(&mut list.into());
                                self.archive = Some(archive);
                                self.file = TaskFile::Archive;
                                continue;
                            }
                        }
                        self.file = TaskFile::End;
                        self.lock.take();
                        return None;
                        self.list.append(&mut list.into());
                        self.archive = Some(archive);
                        continue;
                    }
                    TaskFile::End => return None,
                }

                self.end = true;
                self.lock.take();
            }
        }
    }
@ -654,15 +611,15 @@ impl Drop for WorkerTask {

impl WorkerTask {

    pub fn new(worker_type: &str, worker_id: Option<String>, userid: Userid, to_stdout: bool) -> Result<Arc<Self>, Error> {
    pub fn new(worker_type: &str, worker_id: Option<String>, auth_id: Authid, to_stdout: bool) -> Result<Arc<Self>, Error> {
        println!("register worker");

        let upid = UPID::new(worker_type, worker_id, userid)?;
        let upid = UPID::new(worker_type, worker_id, auth_id)?;
        let task_id = upid.task_id;

        let mut path = std::path::PathBuf::from(PROXMOX_BACKUP_TASK_DIR);

        path.push(format!("{:02X}", upid.pstart % 256));
        path.push(format!("{:02X}", upid.pstart & 255));

        let backup_user = crate::backup::backup_user()?;

@ -670,9 +627,14 @@ impl WorkerTask {

        path.push(upid.to_string());

        println!("FILE: {:?}", path);

        let logger = FileLogger::new(&path, to_stdout)?;
        let logger_options = FileLogOptions {
            to_stdout: to_stdout,
            exclusive: true,
            prefix_time: true,
            read: true,
            ..Default::default()
        };
        let logger = FileLogger::new(&path, logger_options)?;
        nix::unistd::chown(&path, Some(backup_user.uid), Some(backup_user.gid))?;

        let worker = Arc::new(Self {
@ -702,14 +664,14 @@ impl WorkerTask {
    pub fn spawn<F, T>(
        worker_type: &str,
        worker_id: Option<String>,
        userid: Userid,
        auth_id: Authid,
        to_stdout: bool,
        f: F,
    ) -> Result<String, Error>
        where F: Send + 'static + FnOnce(Arc<WorkerTask>) -> T,
              T: Send + 'static + Future<Output = Result<(), Error>>,
    {
        let worker = WorkerTask::new(worker_type, worker_id, userid, to_stdout)?;
        let worker = WorkerTask::new(worker_type, worker_id, auth_id, to_stdout)?;
        let upid_str = worker.upid.to_string();
        let f = f(worker.clone());
        tokio::spawn(async move {
@ -724,7 +686,7 @@ impl WorkerTask {
    pub fn new_thread<F>(
        worker_type: &str,
        worker_id: Option<String>,
        userid: Userid,
        auth_id: Authid,
        to_stdout: bool,
        f: F,
    ) -> Result<String, Error>
@ -732,7 +694,7 @@ impl WorkerTask {
    {
        println!("register worker thread");

        let worker = WorkerTask::new(worker_type, worker_id, userid, to_stdout)?;
        let worker = WorkerTask::new(worker_type, worker_id, auth_id, to_stdout)?;
        let upid_str = worker.upid.to_string();

        let _child = std::thread::Builder::new().name(upid_str.clone()).spawn(move || {
16  src/tools.rs
@ -35,6 +35,10 @@ pub mod nom;
pub mod logrotate;
pub mod loopdev;
pub mod fuse_loop;
pub mod socket;
pub mod subscription;
pub mod zip;
pub mod http;

mod parallel_handler;
pub use parallel_handler::*;
@ -42,6 +46,10 @@ pub use parallel_handler::*;
mod wrapped_reader_stream;
pub use wrapped_reader_stream::*;

mod async_channel_writer;
pub use async_channel_writer::*;

mod std_channel_writer;
pub use std_channel_writer::*;

@ -317,10 +325,12 @@ pub fn md5sum(data: &[u8]) -> Result<DigestBytes, Error> {
pub fn get_hardware_address() -> Result<String, Error> {
    static FILENAME: &str = "/etc/ssh/ssh_host_rsa_key.pub";

    let contents = proxmox::tools::fs::file_get_contents(FILENAME)?;
    let digest = md5sum(&contents)?;
    let contents = proxmox::tools::fs::file_get_contents(FILENAME)
        .map_err(|e| format_err!("Error getting host key - {}", e))?;
    let digest = md5sum(&contents)
        .map_err(|e| format_err!("Error digesting host key - {}", e))?;

    Ok(proxmox::tools::bin_to_hex(&digest))
    Ok(proxmox::tools::bin_to_hex(&digest).to_uppercase())
}

pub fn assert_if_modified(digest1: &str, digest2: &str) -> Result<(), Error> {
@ -43,8 +43,8 @@ pub const ACL_NEXT_ENTRY: c_int = 1;

// acl to extended attribute names constants
// from: acl/include/acl_ea.h
pub const ACL_EA_ACCESS: &'static str = "system.posix_acl_access";
pub const ACL_EA_DEFAULT: &'static str = "system.posix_acl_default";
pub const ACL_EA_ACCESS: &str = "system.posix_acl_access";
pub const ACL_EA_DEFAULT: &str = "system.posix_acl_default";
pub const ACL_EA_VERSION: u32 = 0x0002;

#[link(name = "acl")]
106  src/tools/async_channel_writer.rs  (new file)
@ -0,0 +1,106 @@
use std::future::Future;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};

use anyhow::{Error, Result};
use futures::{future::FutureExt, ready};
use tokio::io::AsyncWrite;
use tokio::sync::mpsc::Sender;

use proxmox::io_format_err;
use proxmox::tools::byte_buffer::ByteBuffer;
use proxmox::sys::error::io_err_other;

/// Wrapper around tokio::sync::mpsc::Sender, which implements Write
pub struct AsyncChannelWriter {
    sender: Option<Sender<Result<Vec<u8>, Error>>>,
    buf: ByteBuffer,
    state: WriterState,
}

type SendResult = io::Result<Sender<Result<Vec<u8>>>>;

enum WriterState {
    Ready,
    Sending(Pin<Box<dyn Future<Output = SendResult> + Send + 'static>>),
}

impl AsyncChannelWriter {
    pub fn new(sender: Sender<Result<Vec<u8>, Error>>, buf_size: usize) -> Self {
        Self {
            sender: Some(sender),
            buf: ByteBuffer::with_capacity(buf_size),
            state: WriterState::Ready,
        }
    }

    fn poll_write_impl(
        &mut self,
        cx: &mut Context,
        buf: &[u8],
        flush: bool,
    ) -> Poll<io::Result<usize>> {
        loop {
            match &mut self.state {
                WriterState::Ready => {
                    if flush {
                        if self.buf.is_empty() {
                            return Poll::Ready(Ok(0));
                        }
                    } else {
                        let free_size = self.buf.free_size();
                        if free_size > buf.len() || self.buf.is_empty() {
                            let count = free_size.min(buf.len());
                            self.buf.get_free_mut_slice()[..count].copy_from_slice(&buf[..count]);
                            self.buf.add_size(count);
                            return Poll::Ready(Ok(count));
                        }
                    }

                    let mut sender = match self.sender.take() {
                        Some(sender) => sender,
                        None => return Poll::Ready(Err(io_err_other("no sender"))),
                    };

                    let data = self.buf.remove_data(self.buf.len()).to_vec();
                    let future = async move {
                        sender
                            .send(Ok(data))
                            .await
                            .map(move |_| sender)
                            .map_err(|err| io_format_err!("could not send: {}", err))
                    };

                    self.state = WriterState::Sending(future.boxed());
                }
                WriterState::Sending(ref mut future) => match ready!(future.as_mut().poll(cx)) {
                    Ok(sender) => {
                        self.sender = Some(sender);
                        self.state = WriterState::Ready;
                    }
                    Err(err) => return Poll::Ready(Err(err)),
                },
            }
        }
    }
}

impl AsyncWrite for AsyncChannelWriter {
    fn poll_write(self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll<io::Result<usize>> {
        let this = self.get_mut();
        this.poll_write_impl(cx, buf, false)
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
        let this = self.get_mut();
        match ready!(this.poll_write_impl(cx, &[], true)) {
            Ok(_) => Poll::Ready(Ok(())),
            Err(err) => Poll::Ready(Err(err)),
        }
    }

    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
        self.poll_flush(cx)
    }
}
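A hypothetical usage sketch of the writer above: bytes written through the AsyncWrite interface arrive as Vec<u8> chunks on the channel's receiving end. Buffer and channel sizes are arbitrary choices, and the sketch assumes the tokio 0.2-era API this file targets:

use tokio::io::AsyncWriteExt;

async fn demo() -> Result<(), anyhow::Error> {
    let (tx, mut rx) = tokio::sync::mpsc::channel(16);
    let mut writer = AsyncChannelWriter::new(tx, 64 * 1024);

    let reader = tokio::spawn(async move {
        while let Some(Ok(chunk)) = rx.recv().await {
            println!("received {} bytes", chunk.len());
        }
    });

    writer.write_all(b"hello world").await?;
    writer.flush().await?;
    drop(writer); // close the channel so the reader task finishes
    reader.await?;
    Ok(())
}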
@ -101,10 +101,10 @@ impl Reloader {

        // Start ourselves in the background:
        use nix::unistd::{fork, ForkResult};
        match fork() {
        match unsafe { fork() } {
            Ok(ForkResult::Child) => {
                // Double fork so systemd can supervise us without nagging...
                match fork() {
                match unsafe { fork() } {
                    Ok(ForkResult::Child) => {
                        std::mem::drop(pold);
                        // At this point we call pre-exec helpers. We must be certain that if they fail for
@ -1,7 +1,7 @@
use anyhow::{Error};
use anyhow::Error;
use std::io::Write;

/// Log messages with timestamps into files
/// Log messages with optional automatically added timestamps into files
///
/// Logs messages to file, and optionally to standard output.
///
@ -10,18 +10,44 @@ use std::io::Write;
/// ```
/// #[macro_use] extern crate proxmox_backup;
/// # use anyhow::{bail, format_err, Error};
/// use proxmox_backup::tools::FileLogger;
/// use proxmox_backup::tools::{FileLogger, FileLogOptions};
///
/// # std::fs::remove_file("test.log");
/// let mut log = FileLogger::new("test.log", true).unwrap();
/// let options = FileLogOptions {
///     to_stdout: true,
///     exclusive: true,
///     ..Default::default()
/// };
/// let mut log = FileLogger::new("test.log", options).unwrap();
/// flog!(log, "A simple log: {}", "Hello!");
/// ```

#[derive(Debug, Default)]
/// Options to control the behavior of a ['FileLogger'] instance
pub struct FileLogOptions {
    /// Open underlying log file in append mode, useful when multiple concurrent processes
    /// want to log to the same file (e.g., HTTP access log). Note that it is only atomic
    /// for writes smaller than PIPE_BUF (4k on Linux).
    /// Inside the same process you may still need to use a mutex for shared access.
    pub append: bool,
    /// Open underlying log file as readable
    pub read: bool,
    /// If set, ensure that the file is newly created or error out if already existing.
    pub exclusive: bool,
    /// Duplicate logged messages to STDOUT, like tee
    pub to_stdout: bool,
    /// Prefix messages logged to the file with the current local time as RFC 3339
    pub prefix_time: bool,
    /// If set, the file is chowned to the backup:backup user/group.
    /// Note, this is not designed race free as anybody could set it to another user afterwards
    /// anyway. It must thus be used by all processes which do not run as backup uid/gid.
    pub owned_by_backup: bool,
}

#[derive(Debug)]
pub struct FileLogger {
    file: std::fs::File,
    to_stdout: bool,
    options: FileLogOptions,
}

/// Log messages to [FileLogger](tools/struct.FileLogger.html)
@ -33,24 +59,31 @@ macro_rules! flog {
}

impl FileLogger {

    pub fn new<P: AsRef<std::path::Path>>(file_name: P, to_stdout: bool) -> Result<Self, Error> {

    pub fn new<P: AsRef<std::path::Path>>(
        file_name: P,
        options: FileLogOptions,
    ) -> Result<Self, Error> {
        let file = std::fs::OpenOptions::new()
            .read(true)
            .read(options.read)
            .write(true)
            .create_new(true)
            .open(file_name)?;
            .append(options.append)
            .create_new(options.exclusive)
            .create(!options.exclusive)
            .open(&file_name)?;

        Ok(Self { file , to_stdout })
        if options.owned_by_backup {
            let backup_user = crate::backup::backup_user()?;
            nix::unistd::chown(file_name.as_ref(), Some(backup_user.uid), Some(backup_user.gid))?;
        }

        Ok(Self { file, options })
    }

    pub fn log<S: AsRef<str>>(&mut self, msg: S) {

        let msg = msg.as_ref();

        let mut stdout = std::io::stdout();
        if self.to_stdout {
        if self.options.to_stdout {
            let mut stdout = std::io::stdout();
            stdout.write_all(msg.as_bytes()).unwrap();
            stdout.write_all(b"\n").unwrap();
        }
@ -58,19 +91,27 @@ impl FileLogger {
        let now = proxmox::tools::time::epoch_i64();
        let rfc3339 = proxmox::tools::time::epoch_to_rfc3339(now).unwrap();

        let line = format!("{}: {}\n", rfc3339, msg);
        let line = if self.options.prefix_time {
            format!("{}: {}\n", rfc3339, msg)
        } else {
            format!("{}\n", msg)
        };
        self.file.write_all(line.as_bytes()).unwrap();
    }
}

impl std::io::Write for FileLogger {
    fn write(&mut self, buf: &[u8]) -> Result<usize, std::io::Error> {
        if self.to_stdout { let _ = std::io::stdout().write(buf); }
        if self.options.to_stdout {
            let _ = std::io::stdout().write(buf);
        }
        self.file.write(buf)
    }

    fn flush(&mut self) -> Result<(), std::io::Error> {
        if self.to_stdout { let _ = std::io::stdout().flush(); }
        if self.options.to_stdout {
            let _ = std::io::stdout().flush();
        }
        self.file.flush()
    }
}
|
||||
Ok((if value { "1" } else { "0" }).to_string())
|
||||
}
|
||||
|
||||
pub fn render_bytes_human_readable(value: &Value, _record: &Value) -> Result<String, Error> {
|
||||
if value.is_null() { return Ok(String::new()); }
|
||||
let text = match value.as_u64() {
|
||||
Some(bytes) => {
|
||||
HumanByte::from(bytes).to_string()
|
||||
}
|
||||
None => {
|
||||
value.to_string()
|
||||
}
|
||||
};
|
||||
Ok(text)
|
||||
}
|
||||
|
||||
pub struct HumanByte {
|
||||
b: usize,
|
||||
}
|
||||
|
@ -265,11 +265,31 @@ impl Default for FSXAttr {
    }
}

/// Attempt to acquire a shared flock on the given path, 'what' and
/// 'would_block_message' are used for error formatting.
pub fn lock_dir_noblock_shared(
    path: &std::path::Path,
    what: &str,
    would_block_msg: &str,
) -> Result<DirLockGuard, Error> {
    do_lock_dir_noblock(path, what, would_block_msg, false)
}

/// Attempt to acquire an exclusive flock on the given path, 'what' and
/// 'would_block_message' are used for error formatting.
pub fn lock_dir_noblock(
    path: &std::path::Path,
    what: &str,
    would_block_msg: &str,
) -> Result<DirLockGuard, Error> {
    do_lock_dir_noblock(path, what, would_block_msg, true)
}

fn do_lock_dir_noblock(
    path: &std::path::Path,
    what: &str,
    would_block_msg: &str,
    exclusive: bool,
) -> Result<DirLockGuard, Error> {
    let mut handle = Dir::open(path, OFlag::O_RDONLY, Mode::empty())
        .map_err(|err| {
@ -278,7 +298,7 @@ pub fn lock_dir_noblock(

    // acquire in non-blocking mode, no point in waiting here since other
    // backups could still take a very long time
    proxmox::tools::fs::lock_file(&mut handle, true, Some(std::time::Duration::from_nanos(0)))
    proxmox::tools::fs::lock_file(&mut handle, exclusive, Some(std::time::Duration::from_nanos(0)))
        .map_err(|err| {
            format_err!(
                "unable to acquire lock on {} directory {:?} - {}", what, path,
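A hypothetical usage sketch of the shared variant above, taking a non-blocking shared lock for a read-only operation; the path and messages are placeholders:

fn read_locked(path: &std::path::Path) -> Result<(), anyhow::Error> {
    let _guard = lock_dir_noblock_shared(
        path,
        "snapshot",
        "snapshot is currently in use",
    )?;
    // ... read from the directory while the flock guard is alive ...
    Ok(())
}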
@ -21,7 +21,7 @@ use proxmox_fuse::{*, requests::FuseRequest};
use super::loopdev;
use super::fs;

const RUN_DIR: &'static str = "/run/pbs-loopdev";
const RUN_DIR: &str = "/run/pbs-loopdev";

const_regex! {
    pub LOOPDEV_REGEX = r"^loop\d+$";
130  src/tools/http.rs  (new file)
@ -0,0 +1,130 @@
use anyhow::{Error, format_err, bail};
use lazy_static::lazy_static;
use std::task::{Context, Poll};
use std::os::unix::io::AsRawFd;

use hyper::{Uri, Body};
use hyper::client::{Client, HttpConnector};
use http::{Request, Response};
use openssl::ssl::{SslConnector, SslMethod};
use futures::*;

use crate::tools::{
    async_io::EitherStream,
    socket::{
        set_tcp_keepalive,
        PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
    },
};

lazy_static! {
    static ref HTTP_CLIENT: Client<HttpsConnector, Body> = {
        let connector = SslConnector::builder(SslMethod::tls()).unwrap().build();
        let httpc = HttpConnector::new();
        let https = HttpsConnector::with_connector(httpc, connector);
        Client::builder().build(https)
    };
}

pub async fn get_string(uri: &str) -> Result<String, Error> {
    let res = HTTP_CLIENT.get(uri.parse()?).await?;

    let status = res.status();
    if !status.is_success() {
        bail!("Got bad status '{}' from server", status)
    }

    response_body_string(res).await
}

pub async fn response_body_string(res: Response<Body>) -> Result<String, Error> {
    let buf = hyper::body::to_bytes(res).await?;
    String::from_utf8(buf.to_vec())
        .map_err(|err| format_err!("Error converting HTTP result data: {}", err))
}

pub async fn post(
    uri: &str,
    body: Option<String>,
    content_type: Option<&str>,
) -> Result<Response<Body>, Error> {
    let body = if let Some(body) = body {
        Body::from(body)
    } else {
        Body::empty()
    };
    let content_type = content_type.unwrap_or("application/json");

    let request = Request::builder()
        .method("POST")
        .uri(uri)
        .header("User-Agent", "proxmox-backup-client/1.0")
        .header(hyper::header::CONTENT_TYPE, content_type)
        .body(body)?;

    HTTP_CLIENT.request(request)
        .map_err(Error::from)
        .await
}

#[derive(Clone)]
pub struct HttpsConnector {
    http: HttpConnector,
    ssl_connector: std::sync::Arc<SslConnector>,
}

impl HttpsConnector {
    pub fn with_connector(mut http: HttpConnector, ssl_connector: SslConnector) -> Self {
        http.enforce_http(false);

        Self {
            http,
            ssl_connector: std::sync::Arc::new(ssl_connector),
        }
    }
}

type MaybeTlsStream = EitherStream<
    tokio::net::TcpStream,
    tokio_openssl::SslStream<tokio::net::TcpStream>,
>;

impl hyper::service::Service<Uri> for HttpsConnector {
    type Response = MaybeTlsStream;
    type Error = Error;
    type Future = std::pin::Pin<Box<
        dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static
    >>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // This connector is always ready, but others might not be.
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, dst: Uri) -> Self::Future {
        let mut this = self.clone();
        async move {
            let is_https = dst
                .scheme()
                .ok_or_else(|| format_err!("missing URL scheme"))?
                == "https";
            let host = dst
                .host()
                .ok_or_else(|| format_err!("missing hostname in destination url?"))?
                .to_string();

            let config = this.ssl_connector.configure();
            let conn = this.http.call(dst).await?;

            let _ = set_tcp_keepalive(conn.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);

            if is_https {
                let conn = tokio_openssl::connect(config?, &host, conn).await?;
                Ok(MaybeTlsStream::Right(conn))
            } else {
                Ok(MaybeTlsStream::Left(conn))
            }
        }.boxed()
    }
}
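A hypothetical usage sketch of the GET helper above, run from any async context; the URL is a placeholder:

async fn fetch_status() -> Result<(), anyhow::Error> {
    // get_string bails on non-success status codes, so a plain ? suffices
    let body = get_string("https://example.com/status").await?;
    println!("{}", body);
    Ok(())
}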
@ -6,7 +6,7 @@ use std::io::Read;
use anyhow::{bail, Error};
use nix::unistd;

use proxmox::tools::fs::{CreateOptions, make_tmp_file, replace_file};
use proxmox::tools::fs::{CreateOptions, make_tmp_file};

/// Used for rotating log files and iterating over them
pub struct LogRotate {
@ -46,73 +46,74 @@ impl LogRotate {
        }
    }

    fn compress(source_path: &PathBuf, target_path: &PathBuf, options: &CreateOptions) -> Result<(), Error> {
        let mut source = File::open(source_path)?;
        let (fd, tmp_path) = make_tmp_file(target_path, options.clone())?;
        let target = unsafe { File::from_raw_fd(fd) };
        let mut encoder = match zstd::stream::write::Encoder::new(target, 0) {
            Ok(encoder) => encoder,
            Err(err) => {
                let _ = unistd::unlink(&tmp_path);
                bail!("creating zstd encoder failed - {}", err);
            }
        };

        if let Err(err) = std::io::copy(&mut source, &mut encoder) {
            let _ = unistd::unlink(&tmp_path);
            bail!("zstd encoding failed for file {:?} - {}", target_path, err);
        }

        if let Err(err) = encoder.finish() {
            let _ = unistd::unlink(&tmp_path);
            bail!("zstd finish failed for file {:?} - {}", target_path, err);
        }

        if let Err(err) = rename(&tmp_path, target_path) {
            let _ = unistd::unlink(&tmp_path);
            bail!("rename failed for file {:?} - {}", target_path, err);
        }

        if let Err(err) = unistd::unlink(source_path) {
            bail!("unlink failed for file {:?} - {}", source_path, err);
        }

        Ok(())
    }

    /// Rotates the files up to 'max_files'
    /// if the 'compress' option was given it will compress the newest file
    ///
    /// e.g. rotates
    /// foo.2.zst => foo.3.zst
    /// foo.1.zst => foo.2.zst
    /// foo       => foo.1.zst
    ///           => foo
    pub fn rotate(&mut self, options: CreateOptions, max_files: Option<usize>) -> Result<(), Error> {
    /// foo.1     => foo.2.zst
    /// foo       => foo.1
    pub fn do_rotate(&mut self, options: CreateOptions, max_files: Option<usize>) -> Result<(), Error> {
        let mut filenames: Vec<PathBuf> = self.file_names().collect();
        if filenames.is_empty() {
            return Ok(()); // no file means nothing to rotate
        }

        let mut next_filename = self.base_path.clone().canonicalize()?.into_os_string();

        if self.compress {
            next_filename.push(format!(".{}.zst", filenames.len()));
        } else {
            next_filename.push(format!(".{}", filenames.len()));
        }
        next_filename.push(format!(".{}", filenames.len()));

        filenames.push(PathBuf::from(next_filename));
        let count = filenames.len();

        // rotate all but the first, that we maybe have to compress
        for i in (1..count-1).rev() {
        for i in (0..count-1).rev() {
            rename(&filenames[i], &filenames[i+1])?;
        }

        if self.compress {
            let mut source = File::open(&filenames[0])?;
            let (fd, tmp_path) = make_tmp_file(&filenames[1], options.clone())?;
            let target = unsafe { File::from_raw_fd(fd) };
            let mut encoder = match zstd::stream::write::Encoder::new(target, 0) {
                Ok(encoder) => encoder,
                Err(err) => {
                    let _ = unistd::unlink(&tmp_path);
                    bail!("creating zstd encoder failed - {}", err);
            for i in 2..count {
                if filenames[i].extension().unwrap_or(std::ffi::OsStr::new("")) != "zst" {
                    let mut target = filenames[i].clone().into_os_string();
                    target.push(".zstd");
                    Self::compress(&filenames[i], &target.into(), &options)?;
                }
            };

            if let Err(err) = std::io::copy(&mut source, &mut encoder) {
                let _ = unistd::unlink(&tmp_path);
                bail!("zstd encoding failed for file {:?} - {}", &filenames[1], err);
            }

            if let Err(err) = encoder.finish() {
                let _ = unistd::unlink(&tmp_path);
                bail!("zstd finish failed for file {:?} - {}", &filenames[1], err);
            }

            if let Err(err) = rename(&tmp_path, &filenames[1]) {
                let _ = unistd::unlink(&tmp_path);
                bail!("rename failed for file {:?} - {}", &filenames[1], err);
            }

            unistd::unlink(&filenames[0])?;
        } else {
            rename(&filenames[0], &filenames[1])?;
        }

        // create empty original file
        replace_file(&filenames[0], b"", options)?;

        if let Some(max_files) = max_files {
            // delete all files > max_files
            for file in filenames.iter().skip(max_files) {
                if let Err(err) = unistd::unlink(file) {
                    eprintln!("could not remove {:?}: {}", &file, err);
@ -122,6 +123,35 @@ impl LogRotate {

        Ok(())
    }

    pub fn rotate(
        &mut self,
        max_size: u64,
        options: Option<CreateOptions>,
        max_files: Option<usize>
    ) -> Result<bool, Error> {

        let options = match options {
            Some(options) => options,
            None => {
                let backup_user = crate::backup::backup_user()?;
                CreateOptions::new().owner(backup_user.uid).group(backup_user.gid)
            },
        };

        let metadata = match self.base_path.metadata() {
            Ok(metadata) => metadata,
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(false),
            Err(err) => bail!("unable to open task archive - {}", err),
        };

        if metadata.len() > max_size {
            self.do_rotate(options, max_files)?;
            Ok(true)
        } else {
            Ok(false)
        }
    }
}
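A small sketch of the successor-name scheme the rotation doc comment above describes; "task-archive" is a hypothetical base name:

use std::path::PathBuf;

fn rotated_name(base: &str, index: usize, compressed: bool) -> PathBuf {
    // with compression, rotated generations carry a .zst suffix
    if compressed {
        PathBuf::from(format!("{}.{}.zst", base, index))
    } else {
        PathBuf::from(format!("{}.{}", base, index))
    }
}

fn main() {
    assert_eq!(rotated_name("task-archive", 1, false), PathBuf::from("task-archive.1"));
    assert_eq!(rotated_name("task-archive", 2, true), PathBuf::from("task-archive.2.zst"));
}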
/// Iterator over logrotated file names
23  src/tools/socket.rs  (new file)
@ -0,0 +1,23 @@
use std::os::unix::io::RawFd;

use nix::sys::socket::sockopt::{KeepAlive, TcpKeepIdle};
use nix::sys::socket::setsockopt;

pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120;

/// Set TCP keepalive time on a socket
///
/// See "man 7 tcp" for details.
///
/// The default on Linux is 7200 (2 hours), which is far too long for
/// our backup tools.
pub fn set_tcp_keepalive(
    socket_fd: RawFd,
    tcp_keepalive_time: u32,
) -> nix::Result<()> {

    setsockopt(socket_fd, KeepAlive, &true)?;
    setsockopt(socket_fd, TcpKeepIdle, &tcp_keepalive_time)?;

    Ok(())
}
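A hypothetical usage sketch: enable the 120 s keepalive on a freshly connected std TCP stream; the address is a placeholder:

use std::net::TcpStream;
use std::os::unix::io::AsRawFd;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let stream = TcpStream::connect("127.0.0.1:8007")?;
    // set SO_KEEPALIVE and TCP_KEEPIDLE on the raw fd
    set_tcp_keepalive(stream.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME)?;
    Ok(())
}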