Compare commits
262 Commits
4d86df04a0
2165f0d450
1e7639bfc4
4121628d99
da78b90f9c
1ef6e8b6a7
10351f7075
70a152deb7
5446bfbba8
400885e620
f960fc3b6f
ddfa4d679a
10e8026786
2527c039df
93d8a2044e
d2354a16cd
34ee1f1c76
2de4dc3a81
b90036dadd
4708f4fc21
062cf75cdf
e5950360ca
5b358ff0b1
4c00391d78
9594362e35
3420029b5e
f432a1c927
e8b32f2d87
3e3b505cc8
0bca966ec5
84737fb33f
e21a15ab17
90066d22a0
dbf5dad1c4
c793da1edc
f8735e5988
e9805b2486
eb90405a78
ecf5f468c3
51aee8cac8
7d5049c350
01a99f5651
2914e99ff3
f9b824ac30
9a535ec77b
ffba023c91
e01689978e
68ac8976eb
afb790db73
0732de361a
d455270fa1
1336be16c9
03380db560
927ebc702c
c24cb13382
3a804a8a20
1fde4167ea
75f9f40922
e9c2638f90
338c545f85
e379b4a31c
3d7ca2bdb9
d34019e246
7cb2ebba79
4e8581950e
2a9a3d632e
b6d07fa038
4599e7959c
82ed13c7d7
5aaa81ab89
8a06d1935e
f44254b4bd
07875ce13e
98dc770efa
8848f1d487
5128ae48a0
104ae6093a
e830d63f6a
ce32cd487a
f36c659365
47e5cbdb03
4923a76f22
e01ca6a2dd
5e989333cd
af39c399bc
64591e731e
5658504b90
64e0786aa9
90761f0f62
74f74d1e64
4db4b9706c
00a5072ad3
3d3d698bb3
1b9521bb87
1d781c5b20
8e8836d1ea
a904e3755d
7ba99fef86
7d2be91bc9
578895336a
8c090937f5
4229633d98
3ed7e87538
5b43cc4487
3241392117
c474a66b41
b32cf6a1e0
f32791b4b2
8f33fe8e59
d19010481d
6b11524a8b
e953029e8f
10f788b7eb
9348544e46
126ccbcfa6
440472cb32
4ce7da516d
a7f8efcf35
9fe4c79005
f09f4d5fd5
38b4f9b534
fca1cef29f
45b8a0327f
a723c08715
c381a162fb
b4931192c3
cc269b9ff9
a5e3be4992
137309cc4e
85f4e834d8
065013ccec
56d98ba966
dda1b4fa44
68b102269f
0ecdaa0dc0
13f435caab
ff99780303
fa9507020a
1bff50afea
37ff72720b
2d5d264f99
c9c07445b7
a4388ffc36
ea1458923e
e857f1fae8
3ec42e81b1
be1163acfe
d308dc8af7
60643023ad
875d53ef6c
b41f9e9fec
a1b71c3c7d
013fa2d886
72e311c6b2
2732c47466
0466089316
5e42d38598
82a4bb5e80
94bc7957c1
c9e6b07145
3c06eba17a
8081e4aa7b
d8769d659e
572cd0381b
5e91b40087
936eceda61
61c4087041
7d39e47182
c4e1af3069
3e234af16e
bbbf662d20
25d78b1068
78bf292343
e5ef69ecf7
b7b9a57425
c4a04b7c62
2e41dbe828
56d36ca439
e0ba5553be
8d6fb677c1
a2daecc25d
ee0c5c8e01
ae5b1e188f
49f9aca627
4cba875379
7ab4382476
eaef6c8d00
95f3692545
686173dc2a
39c5db7f0f
603aa09d54
88aa3076f0
5400fe171c
87bf9f569f
8fb24a2c0a
4b5d9b6e64
72bd8293e3
09989d9963
4088d5bc62
d4b84c1dec
426847e1ce
79b902d512
73c607497e
f2f526b61d
cb67ecaddb
5bf9b0b0bb
7a61f89e5a
671c6a96e7
f0d23e5370
d1bee4344d
d724116c0c
888d89e2dd
a6471bc346
6b1da1c166
18210d8958
bc5c1a9aa6
3df77ef5da
e8d9d9adfa
01d152720f
5e58381ea9
0b6d9442bd
134ed9e14f
0796b642de
f912ba6a3e
a576e6685b
b1c793cfa5
c0147e49c4
d52b120905
84c8a580b5
467bd01cdf
7a7fcb4715
cf8e44bc30
279e7eb497
606828cc65
aac424674c
8fd1e10830
12509a6d9e
5e169f387c
8369ade880
73cef112eb
4a0132382a
6ee69fccd3
a862835be2
ddbd63ed5f
6a59fa0e18
1ed9069ad3
a588b67906
37a634f550
951fe0cb7d
4ca3f0c6ae
69e5ba29c4
e045d154e9
6526709d48
603f80d813
398636b61c
eb70464839
75054859ff
8e898895cc
4be6beab6f
a3b4b5b50e
33b8d7e5e8
f2f43e1904
Cargo.toml (14 changes)

@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "1.1.3"
+version = "1.1.10"
 authors = [
     "Dietmar Maurer <dietmar@proxmox.com>",
     "Dominik Csapak <d.csapak@proxmox.com>",
@@ -15,6 +15,7 @@ edition = "2018"
 license = "AGPL-3"
 description = "Proxmox Backup"
 homepage = "https://www.proxmox.com"
+build = "build.rs"

 exclude = [ "build", "debian", "tests/catar_data/test_symlink/symlink1"]

@@ -32,6 +33,7 @@ endian_trait = { version = "0.6", features = ["arrays"] }
 env_logger = "0.7"
 flate2 = "1.0"
 anyhow = "1.0"
+foreign-types = "0.3"
 thiserror = "1.0"
 futures = "0.3"
 h2 = { version = "0.3", features = [ "stream" ] }
@@ -51,10 +53,12 @@ percent-encoding = "2.1"
 pin-utils = "0.1.0"
 pin-project = "1.0"
 pathpatterns = "0.1.2"
-proxmox = { version = "0.11.1", features = [ "sortable-macro", "api-macro", "websocket" ] }
+proxmox = { version = "0.11.5", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
-#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
+#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro" ] }
 proxmox-fuse = "0.1.1"
+proxmox-http = { version = "0.2.1", features = [ "client", "http-helpers", "websocket" ] }
+#proxmox-http = { version = "0.2.0", path = "../proxmox/proxmox-http", features = [ "client", "http-helpers", "websocket" ] }
 pxar = { version = "0.10.1", features = [ "tokio-io" ] }
 #pxar = { path = "../pxar", features = [ "tokio-io" ] }
 regex = "1.2"
@@ -63,7 +67,7 @@ serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 siphasher = "0.3"
 syslog = "4.0"
-tokio = { version = "1.0", features = [ "fs", "io-util", "io-std", "macros", "net", "parking_lot", "process", "rt", "rt-multi-thread", "signal", "time" ] }
+tokio = { version = "1.6", features = [ "fs", "io-util", "io-std", "macros", "net", "parking_lot", "process", "rt", "rt-multi-thread", "signal", "time" ] }
 tokio-openssl = "0.6.1"
 tokio-stream = "0.1.0"
 tokio-util = { version = "0.6", features = [ "codec", "io" ] }
@@ -78,6 +82,8 @@ zstd = { version = "0.4", features = [ "bindgen" ] }
 nom = "5.1"
 crossbeam-channel = "0.5"

+proxmox-acme-rs = "0.2.1"
+
 [features]
 default = []
 #valgrind = ["valgrind_request"]
Makefile (10 changes)

@@ -82,7 +82,13 @@ doc:
 build:
 	rm -rf build
 	rm -f debian/control
-	debcargo package --config debian/debcargo.toml --changelog-ready --no-overlay-write-back --directory build proxmox-backup $(shell dpkg-parsechangelog -l debian/changelog -SVersion | sed -e 's/-.*//')
+	debcargo package \
+	    --config debian/debcargo.toml \
+	    --changelog-ready \
+	    --no-overlay-write-back \
+	    --directory build \
+	    proxmox-backup \
+	    $(shell dpkg-parsechangelog -l debian/changelog -SVersion | sed -e 's/-.*//')
 	sed -e '1,/^$$/ ! d' build/debian/control > build/debian/control.src
 	cat build/debian/control.src build/debian/control.in > build/debian/control
 	rm build/debian/control.in build/debian/control.src

@@ -168,5 +174,5 @@ upload: ${SERVER_DEB} ${CLIENT_DEB} ${RESTORE_DEB} ${DOC_DEB}
 	git diff --exit-code --stat && git diff --exit-code --stat --staged
 	tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB} | \
 	    ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
-	tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg" --dist buster
+	tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg,pbs-client" --dist buster
 	tar cf - ${RESTORE_DEB} ${RESTORE_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist buster
build.rs (new file)

@@ -0,0 +1,24 @@
// build.rs
use std::env;
use std::process::Command;

fn main() {
    let repoid = match env::var("REPOID") {
        Ok(repoid) => repoid,
        Err(_) => {
            match Command::new("git")
                .args(&["rev-parse", "HEAD"])
                .output()
            {
                Ok(output) => {
                    String::from_utf8(output.stdout).unwrap()
                }
                Err(err) => {
                    panic!("git rev-parse failed: {}", err);
                }
            }
        }
    };

    println!("cargo:rustc-env=REPOID={}", repoid);
}
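The build script exports the repository revision through `cargo:rustc-env`, which makes it available to the rest of the crate at compile time via the `env!` macro. A minimal sketch of the consuming side (a hypothetical helper for illustration, not part of this diff; the real crate may surface the value elsewhere):

// Sketch: reading the value build.rs exported above. `env!` resolves at
// compile time; build.rs ensures REPOID is always set for this crate.
fn repo_id() -> &'static str {
    env!("REPOID")
}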
debian/changelog (224 changes)

@@ -1,3 +1,227 @@
rust-proxmox-backup (1.1.10-1) buster; urgency=medium

  * ui: datastore list summary: catch and show errors per datastore

  * ui: dashboard: task summary: add a 'close' tool to the header

  * ensure that backups which are currently being restored or backed up to a
    tape won't get pruned

  * improve error handling when locking a tape drive for a backup job

  * client/pull: log snapshots that are skipped because of creation time being
    older than last sync time

  * ui: datastore options: add remove button to drop a datastore from the
    configuration, without removing any actual data

  * ui: tape: drive selector: do not autoselect the drive

  * ui: tape: backup job: use correct default value for pbsUserSelector

  * fix #3433: disks: port over Proxmox VE's S.M.A.R.T wearout logic

  * backup: add helpers for async last recently used (LRU) caches for chunk
    and index reading of backup snapshots

 -- Proxmox Support Team <support@proxmox.com>  Wed, 16 Jun 2021 09:46:15 +0200

rust-proxmox-backup (1.1.9-1) stable; urgency=medium

  * lto/sg_tape/encryption: remove non lto-4 supported byte

  * ui: improve tape restore

  * ui: panel/UsageChart: change downloadServerUrl

  * ui: css fixes and cleanups

  * api2/tape: add api call to list media sets

  * ui: tape/BackupOverview: expand pools by default

  * api: node/journal: fix parameter extraction of /nodes/node/journal

  * file-restore-daemon: limit concurrent download calls

  * file-restore-daemon: watchdog: add inhibit for long downloads

  * file-restore-daemon: work around tokio DuplexStream bug

  * apt: fix removal of non-existent http-proxy config

  * file-restore-daemon: disk: add RawFs bucket type

  * file-restore-daemon: disk: ignore "invalid fs" error

 -- Proxmox Support Team <support@proxmox.com>  Tue, 01 Jun 2021 08:24:01 +0200

rust-proxmox-backup (1.1.8-1) stable; urgency=medium

  * api-proxy: implement 'reload-certificate' command and hot-reload proxy
    certificate when updating via the API

  * ui: add task descriptions for ACME/Let's Encrypt related tasks

  * correctly set apt proxy configuration

  * ui: configuration: support setting an HTTP proxy for APT and subscription
    checks

  * ui: tape: add 'Force new Media-Set' checkbox to manual backup

  * ui: datastore/Content: add forget (delete) button for whole backup groups

  * ui: tape: backup overview: move restore buttons inline to action-buttons,
    making the UX more similar to the datastore content tree-view

  * ui: tape restore: enable selecting multiple snapshots

  * ui: dashboard statistics: visualize datastores where querying the usage
    failed

 -- Proxmox Support Team <support@proxmox.com>  Fri, 21 May 2021 18:21:28 +0200

rust-proxmox-backup (1.1.7-1) unstable; urgency=medium

  * client: use stderr for all fingerprint confirm msgs

  * fix #3391: improve mismatched fingerprint handling

  * tape: add single snapshot restore

  * docs/api-viewer: improve rendering of array format

  * tape/pool_writer: do not unwrap on channel send

  * ui: window/SyncJobEdit: disable autoSelect for remote datastore

  * ui: tape: rename 'Datastore' to 'Target Datastore'

  * manager: acme plugin: auto-complete available DNS challenge types

  * manager: acme plugin: remove ID completion helper from add command

  * completion: ACME plugin type: comment out http type for now, not useful

  * acme: use proxmox-acme-plugins and load schema from there

  * fix #3296: add http_proxy to node config, and provide a cli

  * fix #3331: improve progress for last snapshot in group

  * file-restore: add debug mode with serial access

  * file-restore: support more drives

  * file-restore: add more RAM for VMs with many drives or debug

  * file-restore: try to kill VM when stale

  * make sure URI paths start with a slash

  * tape: use LOCATE(16) SCSI command

  * call create_run_dir() at daemon startup

  * tape/drive: add 'move_to_file' to TapeDriver trait

  * proxmox_restore_daemon: mount ntfs with 'utf8' option

  * client/http_client: add necessary brackets for ipv6

  * docs: tape: clarify LTO-4/5 support

  * tape/restore: optimize chunk restore behaviour

 -- Proxmox Support Team <support@proxmox.com>  Tue, 11 May 2021 13:22:49 +0200

rust-proxmox-backup (1.1.6-2) unstable; urgency=medium

  * fix permissions set in create_run_dir

 -- Proxmox Support Team <support@proxmox.com>  Tue, 04 May 2021 12:25:00 +0200

rust-proxmox-backup (1.1.6-1) unstable; urgency=medium

  * tape restore: do not verify restored files

  * tape restore: add restore speed to logs

  * tape restore: write datastore in separate thread

  * add ACME support

  * add node config

  * docs: user-management: add note about untrusted certificates for
    webauthn

  * bin: use extract_output_format where necessary

  * add ctime and size function to IndexFile trait

  * ui: tape: handle tapes in changers without barcode

 -- Proxmox Support Team <support@proxmox.com>  Tue, 04 May 2021 12:09:25 +0200

rust-proxmox-backup (1.1.5-3) stable; urgency=medium

  * file-restore: use 'norecovery' for XFS filesystems to allow mounting
    those which were not unmounted during backup

 -- Proxmox Support Team <support@proxmox.com>  Thu, 29 Apr 2021 15:26:13 +0200

rust-proxmox-backup (1.1.5-2) stable; urgency=medium

  * file-restore: strip .img.fidx suffix from drive serials to avoid running
    into the 20 character limit SCSI serial values have

 -- Proxmox Support Team <support@proxmox.com>  Wed, 28 Apr 2021 11:15:08 +0200

rust-proxmox-backup (1.1.5-1) unstable; urgency=medium

  * tools/sgutils2: add size workaround for mode_sense

  * tape: add read_medium_configuration_page() to detect WORM media

  * file-restore: fix package name for kernel/initramfs image

  * tape: remove MediumType struct, which is only valid on IBM drives

 -- Proxmox Support Team <support@proxmox.com>  Tue, 27 Apr 2021 12:20:04 +0200

rust-proxmox-backup (1.1.4-1) unstable; urgency=medium

  * file-restore: add size to image files and components

  * file-restore: exit with code 1 in case streaming fails

  * file-restore: use less memory for VM (now 128 MiB) and reboot on panic

  * ui: tape: improve reload drive-status logic on user actions

  * tape backup: list the snapshots we could back up on failed backup
    notification

  * improve a scheduling issue when updating a calendar event such that it
    would have triggered between the last run and now: use the next future
    event as the actual next trigger instead

  * SCSI mode sense: include the expected and unexpected sizes in the error
    message, to allow easier debugging

 -- Proxmox Support Team <support@proxmox.com>  Tue, 27 Apr 2021 08:27:10 +0200

rust-proxmox-backup (1.1.3-2) unstable; urgency=medium

  * improve check for LTO4 tapes

  * api: node status: return further information about SWAP, IO-wait, CPU info
    and Kernel version

 -- Proxmox Support Team <support@proxmox.com>  Fri, 23 Apr 2021 10:52:08 +0200

rust-proxmox-backup (1.1.3-1) unstable; urgency=medium

  * tape restore: improve datastore locking when GC runs at the same time
debian/control (40 changes)

@@ -17,6 +17,7 @@ Build-Depends: debhelper (>= 11),
 librust-endian-trait-0.6+default-dev,
 librust-env-logger-0.7+default-dev,
 librust-flate2-1+default-dev,
+librust-foreign-types-0.3+default-dev,
 librust-futures-0.3+default-dev,
 librust-h2-0.3+default-dev,
 librust-h2-0.3+stream-dev,
@@ -38,11 +39,15 @@ Build-Depends: debhelper (>= 11),
 librust-percent-encoding-2+default-dev (>= 2.1-~~),
 librust-pin-project-1+default-dev,
 librust-pin-utils-0.1+default-dev,
-librust-proxmox-0.11+api-macro-dev (>= 0.11.1-~~),
-librust-proxmox-0.11+default-dev (>= 0.11.1-~~),
-librust-proxmox-0.11+sortable-macro-dev (>= 0.11.1-~~),
-librust-proxmox-0.11+websocket-dev (>= 0.11.1-~~),
+librust-proxmox-0.11+api-macro-dev (>= 0.11.5-~~),
+librust-proxmox-0.11+default-dev (>= 0.11.5-~~),
+librust-proxmox-0.11+sortable-macro-dev (>= 0.11.5-~~),
+librust-proxmox-acme-rs-0.2+default-dev (>= 0.2.1-~~),
 librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
+librust-proxmox-http-0.2+client-dev (>= 0.2.1-~~),
+librust-proxmox-http-0.2+default-dev (>= 0.2.1-~~),
+librust-proxmox-http-0.2+http-helpers-dev (>= 0.2.1-~~),
+librust-proxmox-http-0.2+websocket-dev (>= 0.2.1-~~),
 librust-pxar-0.10+default-dev (>= 0.10.1-~~),
 librust-pxar-0.10+tokio-io-dev (>= 0.10.1-~~),
 librust-regex-1+default-dev (>= 1.2-~~),
@@ -53,18 +58,18 @@ Build-Depends: debhelper (>= 11),
 librust-siphasher-0.3+default-dev,
 librust-syslog-4+default-dev,
 librust-thiserror-1+default-dev,
-librust-tokio-1+default-dev,
-librust-tokio-1+fs-dev,
-librust-tokio-1+io-std-dev,
-librust-tokio-1+io-util-dev,
-librust-tokio-1+macros-dev,
-librust-tokio-1+net-dev,
-librust-tokio-1+parking-lot-dev,
-librust-tokio-1+process-dev,
-librust-tokio-1+rt-dev,
-librust-tokio-1+rt-multi-thread-dev,
-librust-tokio-1+signal-dev,
-librust-tokio-1+time-dev,
+librust-tokio-1+default-dev (>= 1.6-~~),
+librust-tokio-1+fs-dev (>= 1.6-~~),
+librust-tokio-1+io-std-dev (>= 1.6-~~),
+librust-tokio-1+io-util-dev (>= 1.6-~~),
+librust-tokio-1+macros-dev (>= 1.6-~~),
+librust-tokio-1+net-dev (>= 1.6-~~),
+librust-tokio-1+parking-lot-dev (>= 1.6-~~),
+librust-tokio-1+process-dev (>= 1.6-~~),
+librust-tokio-1+rt-dev (>= 1.6-~~),
+librust-tokio-1+rt-multi-thread-dev (>= 1.6-~~),
+librust-tokio-1+signal-dev (>= 1.6-~~),
+librust-tokio-1+time-dev (>= 1.6-~~),
 librust-tokio-openssl-0.6+default-dev (>= 0.6.1-~~),
 librust-tokio-stream-0.1+default-dev,
 librust-tokio-util-0.6+codec-dev,
@@ -111,6 +116,7 @@ Architecture: any
 Depends: fonts-font-awesome,
          libjs-extjs (>= 6.0.1),
          libjs-qrcodejs (>= 1.20201119),
+         libproxmox-acme-plugins,
          libsgutils2-2,
          libzstd1 (>= 1.3.8),
          lvm2,
@@ -119,7 +125,7 @@ Depends: fonts-font-awesome,
          postfix | mail-transport-agent,
          proxmox-backup-docs,
          proxmox-mini-journalreader,
-         proxmox-widget-toolkit (>= 2.5-1),
+         proxmox-widget-toolkit (>= 2.5-6),
          pve-xtermjs (>= 4.7.0-1),
          sg3-utils,
          smartmontools,
debian/control.in (3 changes)

@@ -3,6 +3,7 @@ Architecture: any
 Depends: fonts-font-awesome,
          libjs-extjs (>= 6.0.1),
          libjs-qrcodejs (>= 1.20201119),
+         libproxmox-acme-plugins,
          libsgutils2-2,
          libzstd1 (>= 1.3.8),
          lvm2,
@@ -11,7 +12,7 @@ Depends: fonts-font-awesome,
          postfix | mail-transport-agent,
          proxmox-backup-docs,
          proxmox-mini-journalreader,
-         proxmox-widget-toolkit (>= 2.5-1),
+         proxmox-widget-toolkit (>= 2.5-6),
          pve-xtermjs (>= 4.7.0-1),
          sg3-utils,
          smartmontools,
|
12
debian/proxmox-backup-file-restore.postinst
vendored
12
debian/proxmox-backup-file-restore.postinst
vendored
@ -6,6 +6,7 @@ update_initramfs() {
|
||||
# regenerate initramfs for single file restore VM
|
||||
INST_PATH="/usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore"
|
||||
CACHE_PATH="/var/cache/proxmox-backup/file-restore-initramfs.img"
|
||||
CACHE_PATH_DBG="/var/cache/proxmox-backup/file-restore-initramfs-debug.img"
|
||||
|
||||
# cleanup first, in case proxmox-file-restore was uninstalled since we do
|
||||
# not want an unuseable image lying around
|
||||
@ -20,7 +21,7 @@ update_initramfs() {
|
||||
|
||||
# avoid leftover temp file
|
||||
cleanup() {
|
||||
rm -f "$CACHE_PATH.tmp"
|
||||
rm -f "$CACHE_PATH.tmp" "$CACHE_PATH_DBG.tmp"
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
@ -34,6 +35,15 @@ update_initramfs() {
|
||||
| cpio -o --format=newc -A -F "$CACHE_PATH.tmp" )
|
||||
mv -f "$CACHE_PATH.tmp" "$CACHE_PATH"
|
||||
|
||||
if [ -f "$INST_PATH/initramfs-debug.img" ]; then
|
||||
echo "Updating file-restore debug initramfs..."
|
||||
cp "$INST_PATH/initramfs-debug.img" "$CACHE_PATH_DBG.tmp"
|
||||
( cd "$INST_PATH"; \
|
||||
printf "./proxmox-restore-daemon" \
|
||||
| cpio -o --format=newc -A -F "$CACHE_PATH_DBG.tmp" )
|
||||
mv -f "$CACHE_PATH_DBG.tmp" "$CACHE_PATH_DBG"
|
||||
fi
|
||||
|
||||
trap - EXIT
|
||||
}
|
||||
|
||||
|
@@ -190,7 +190,7 @@ proxmox-file-restore.1: proxmox-file-restore/man1.rst proxmox-file-restore/desc

 .PHONY: onlinehelpinfo
 onlinehelpinfo:
 	@echo "Generating OnlineHelpInfo.js..."
-	$(SPHINXBUILD) -b proxmox-scanrefs $(ALLSPHINXOPTS) $(BUILDDIR)/scanrefs
+	$(SPHINXBUILD) -b proxmox-scanrefs -Q $(ALLSPHINXOPTS) $(BUILDDIR)/scanrefs
 	@echo "Build finished. OnlineHelpInfo.js is in $(BUILDDIR)/scanrefs."

 api-viewer/apidata.js: ${COMPILEDIR}/docgen
@@ -86,13 +86,9 @@ Ext.onReady(function() {
         return pdef['enum'] ? 'enum' : (pdef.type || 'string');
     };

-    var render_format = function(value, metaData, record) {
-        var pdef = record.data;
-
-        metaData.style = 'white-space:normal;'
-
+    let render_simple_format = function(pdef, type_fallback) {
         if (pdef.typetext)
-            return Ext.htmlEncode(pdef.typetext);
+            return pdef.typetext;

         if (pdef['enum'])
             return pdef['enum'].join(' | ');
@@ -101,9 +97,28 @@ Ext.onReady(function() {
             return pdef.format;

         if (pdef.pattern)
-            return Ext.htmlEncode(pdef.pattern);
+            return pdef.pattern;

-        return '';
+        if (pdef.type === 'boolean')
+            return `<true|false>`;
+
+        if (type_fallback && pdef.type)
+            return `<${pdef.type}>`;
+
+        return;
     };

+    let render_format = function(value, metaData, record) {
+        let pdef = record.data;
+
+        metaData.style = 'white-space:normal;'
+
+        if (pdef.type === 'array' && pdef.items) {
+            let format = render_simple_format(pdef.items, true);
+            return `[${Ext.htmlEncode(format)}, ...]`;
+        }
+
+        return Ext.htmlEncode(render_simple_format(pdef) || '');
+    };
+
     var real_path = function(path) {
@@ -178,3 +178,19 @@ snipped
     :caption: File: ``/etc/apt/sources.list``

     deb http://download.proxmox.com/debian/pbs-client buster main
+
+.. _node_options_http_proxy:
+
+Repository Access Behind HTTP Proxy
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some setups have restricted access to the internet, sometimes only through a
+central proxy. You can set up an HTTP proxy through the Proxmox Backup Server's
+web-interface in the `Configuration -> Authentication` tab.
+
+Once configured, this proxy will be used for apt network requests and for
+checking a Proxmox Backup Server support subscription.
+
+Standard HTTP proxy configurations are accepted: `[http://]<host>[:port]`, where
+the `<host>` part may include an authorization, for example:
+`http://user:pass@proxy.example.org:12345`
@@ -67,8 +67,10 @@ tape compression feature has no advantage.
 Supported Hardware
 ------------------

-Proxmox Backup Server supports `Linear Tape-Open`_ generation 4 (LTO-4)
-or later.
+Proxmox Backup Server supports `Linear Tape-Open`_ generation 5 (LTO-5)
+or later and has best-effort support for generation 4 (LTO-4). While
+many LTO-4 systems are known to work, some might need firmware updates or
+do not implement necessary features to work with Proxmox Backup Server.

 Tape changing is carried out using the SCSI Medium Changer protocol,
 so all modern tape libraries should work.
@@ -360,7 +360,9 @@ WebAuthn

 For WebAuthn to work, you need to have two things:

 * a trusted HTTPS certificate (for example, by using `Let's Encrypt
-  <https://pbs.proxmox.com/wiki/index.php/HTTPS_Certificate_Configuration>`_)
+  <https://pbs.proxmox.com/wiki/index.php/HTTPS_Certificate_Configuration>`_).
+  While it probably works with an untrusted certificate, some browsers may warn
+  or refuse WebAuthn operations if it is not trusted.

 * setup the WebAuthn configuration (see *Configuration -> Authentication* in the
   Proxmox Backup Server web-interface). This can be auto-filled in most setups.
src/acme/client.rs (new file)

@@ -0,0 +1,684 @@
//! HTTP Client for the ACME protocol.

use std::fs::OpenOptions;
use std::io;
use std::os::unix::fs::OpenOptionsExt;

use anyhow::{bail, format_err};
use bytes::Bytes;
use hyper::{Body, Request};
use nix::sys::stat::Mode;
use serde::{Deserialize, Serialize};

use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox_acme_rs::account::AccountCreator;
use proxmox_acme_rs::account::AccountData as AcmeAccountData;
use proxmox_acme_rs::order::{Order, OrderData};
use proxmox_acme_rs::Request as AcmeRequest;
use proxmox_acme_rs::{Account, Authorization, Challenge, Directory, Error, ErrorResponse};
use proxmox_http::client::SimpleHttp;

use crate::api2::types::AcmeAccountName;
use crate::config::acme::account_path;
use crate::tools::pbs_simple_http;

/// Our on-disk format inherited from PVE's proxmox-acme code.
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct AccountData {
    /// The account's location URL.
    location: String,

    /// The account data.
    account: AcmeAccountData,

    /// The private key as PEM formatted string.
    key: String,

    /// ToS URL the user agreed to.
    #[serde(skip_serializing_if = "Option::is_none")]
    tos: Option<String>,

    #[serde(skip_serializing_if = "is_false", default)]
    debug: bool,

    /// The directory's URL.
    directory_url: String,
}

#[inline]
fn is_false(b: &bool) -> bool {
    !*b
}

pub struct AcmeClient {
    directory_url: String,
    debug: bool,
    account_path: Option<String>,
    tos: Option<String>,
    account: Option<Account>,
    directory: Option<Directory>,
    nonce: Option<String>,
    http_client: SimpleHttp,
}

impl AcmeClient {
    /// Create a new ACME client for a given ACME directory URL.
    pub fn new(directory_url: String) -> Self {
        Self {
            directory_url,
            debug: false,
            account_path: None,
            tos: None,
            account: None,
            directory: None,
            nonce: None,
            http_client: pbs_simple_http(None),
        }
    }

    /// Load an existing ACME account by name.
    pub async fn load(account_name: &AcmeAccountName) -> Result<Self, anyhow::Error> {
        let account_path = account_path(account_name.as_ref());
        let data = match tokio::fs::read(&account_path).await {
            Ok(data) => data,
            Err(err) if err.kind() == io::ErrorKind::NotFound => {
                bail!("acme account '{}' does not exist", account_name)
            }
            Err(err) => bail!(
                "failed to load acme account from '{}' - {}",
                account_path,
                err
            ),
        };
        let data: AccountData = serde_json::from_slice(&data).map_err(|err| {
            format_err!(
                "failed to parse acme account from '{}' - {}",
                account_path,
                err
            )
        })?;

        let account = Account::from_parts(data.location, data.key, data.account);

        let mut me = Self::new(data.directory_url);
        me.debug = data.debug;
        me.account_path = Some(account_path);
        me.tos = data.tos;
        me.account = Some(account);

        Ok(me)
    }

    pub async fn new_account<'a>(
        &'a mut self,
        account_name: &AcmeAccountName,
        tos_agreed: bool,
        contact: Vec<String>,
        rsa_bits: Option<u32>,
    ) -> Result<&'a Account, anyhow::Error> {
        self.tos = if tos_agreed {
            self.terms_of_service_url().await?.map(str::to_owned)
        } else {
            None
        };

        let account = Account::creator()
            .set_contacts(contact)
            .agree_to_tos(tos_agreed);

        let account = if let Some(bits) = rsa_bits {
            account.generate_rsa_key(bits)?
        } else {
            account.generate_ec_key()?
        };

        let _ = self.register_account(account).await?;

        crate::config::acme::make_acme_account_dir()?;
        let account_path = account_path(account_name.as_ref());
        let file = OpenOptions::new()
            .write(true)
            .create(true)
            .mode(0o600)
            .open(&account_path)
            .map_err(|err| format_err!("failed to open {:?} for writing: {}", account_path, err))?;
        self.write_to(file).map_err(|err| {
            format_err!(
                "failed to write acme account to {:?}: {}",
                account_path,
                err
            )
        })?;
        self.account_path = Some(account_path);

        // unwrap: Setting `self.account` is literally this function's job, we just can't keep
        // the borrow from `self.register_account()` active due to clashes.
        Ok(self.account.as_ref().unwrap())
    }

    fn save(&self) -> Result<(), anyhow::Error> {
        let mut data = Vec::<u8>::new();
        self.write_to(&mut data)?;
        let account_path = self.account_path.as_ref().ok_or_else(|| {
            format_err!("no account path set, cannot save updated account information")
        })?;
        crate::config::acme::make_acme_account_dir()?;
        replace_file(
            account_path,
            &data,
            CreateOptions::new()
                .perm(Mode::from_bits_truncate(0o600))
                .owner(nix::unistd::ROOT)
                .group(nix::unistd::Gid::from_raw(0)),
        )
    }

    /// Shortcut to `account().ok_or_else(...).key_authorization()`.
    pub fn key_authorization(&self, token: &str) -> Result<String, anyhow::Error> {
        Ok(Self::need_account(&self.account)?.key_authorization(token)?)
    }

    /// Shortcut to `account().ok_or_else(...).dns_01_txt_value()`, returning
    /// the key authorization value.
    pub fn dns_01_txt_value(&self, token: &str) -> Result<String, anyhow::Error> {
        Ok(Self::need_account(&self.account)?.dns_01_txt_value(token)?)
    }

    async fn register_account(
        &mut self,
        account: AccountCreator,
    ) -> Result<&Account, anyhow::Error> {
        let mut retry = retry();
        let mut response = loop {
            retry.tick()?;

            let (directory, nonce) = Self::get_dir_nonce(
                &mut self.http_client,
                &self.directory_url,
                &mut self.directory,
                &mut self.nonce,
            )
            .await?;
            let request = account.request(directory, nonce)?;
            match self.run_request(request).await {
                Ok(response) => break response,
                Err(err) if err.is_bad_nonce() => continue,
                Err(err) => return Err(err.into()),
            }
        };

        let account = account.response(response.location_required()?, &response.body)?;

        self.account = Some(account);
        Ok(self.account.as_ref().unwrap())
    }

    pub async fn update_account<T: Serialize>(
        &mut self,
        data: &T,
    ) -> Result<&Account, anyhow::Error> {
        let account = Self::need_account(&self.account)?;

        let mut retry = retry();
        let response = loop {
            retry.tick()?;

            let (_directory, nonce) = Self::get_dir_nonce(
                &mut self.http_client,
                &self.directory_url,
                &mut self.directory,
                &mut self.nonce,
            )
            .await?;

            let request = account.post_request(&account.location, &nonce, data)?;
            match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
                Ok(response) => break response,
                Err(err) if err.is_bad_nonce() => continue,
                Err(err) => return Err(err.into()),
            }
        };

        // unwrap: we've been keeping an immutable reference to it from the top of the method
        let _ = account;
        self.account.as_mut().unwrap().data = response.json()?;
        self.save()?;
        Ok(self.account.as_ref().unwrap())
    }

    pub async fn new_order<I>(&mut self, domains: I) -> Result<Order, anyhow::Error>
    where
        I: IntoIterator<Item = String>,
    {
        let account = Self::need_account(&self.account)?;

        let order = domains
            .into_iter()
            .fold(OrderData::new(), |order, domain| order.domain(domain));

        let mut retry = retry();
        loop {
            retry.tick()?;

            let (directory, nonce) = Self::get_dir_nonce(
                &mut self.http_client,
                &self.directory_url,
                &mut self.directory,
                &mut self.nonce,
            )
            .await?;

            let mut new_order = account.new_order(&order, directory, nonce)?;
            let mut response = match Self::execute(
                &mut self.http_client,
                new_order.request.take().unwrap(),
                &mut self.nonce,
            )
            .await
            {
                Ok(response) => response,
                Err(err) if err.is_bad_nonce() => continue,
                Err(err) => return Err(err.into()),
            };

            return Ok(
                new_order.response(response.location_required()?, response.bytes().as_ref())?
            );
        }
    }

    /// Low level "POST-as-GET" request.
    async fn post_as_get(&mut self, url: &str) -> Result<AcmeResponse, anyhow::Error> {
        let account = Self::need_account(&self.account)?;

        let mut retry = retry();
        loop {
            retry.tick()?;

            let (_directory, nonce) = Self::get_dir_nonce(
                &mut self.http_client,
                &self.directory_url,
                &mut self.directory,
                &mut self.nonce,
            )
            .await?;

            let request = account.get_request(url, nonce)?;
            match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
                Ok(response) => return Ok(response),
                Err(err) if err.is_bad_nonce() => continue,
                Err(err) => return Err(err.into()),
            }
        }
    }

    /// Low level POST request.
    async fn post<T: Serialize>(
        &mut self,
        url: &str,
        data: &T,
    ) -> Result<AcmeResponse, anyhow::Error> {
        let account = Self::need_account(&self.account)?;

        let mut retry = retry();
        loop {
            retry.tick()?;

            let (_directory, nonce) = Self::get_dir_nonce(
                &mut self.http_client,
                &self.directory_url,
                &mut self.directory,
                &mut self.nonce,
            )
            .await?;

            let request = account.post_request(url, nonce, data)?;
            match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
                Ok(response) => return Ok(response),
                Err(err) if err.is_bad_nonce() => continue,
                Err(err) => return Err(err.into()),
            }
        }
    }

    /// Request challenge validation. Afterwards, the challenge should be polled.
    pub async fn request_challenge_validation(
        &mut self,
        url: &str,
    ) -> Result<Challenge, anyhow::Error> {
        Ok(self
            .post(url, &serde_json::Value::Object(Default::default()))
            .await?
            .json()?)
    }

    /// Assuming the provided URL is an 'Authorization' URL, get and deserialize it.
    pub async fn get_authorization(&mut self, url: &str) -> Result<Authorization, anyhow::Error> {
        Ok(self.post_as_get(url).await?.json()?)
    }

    /// Assuming the provided URL is an 'Order' URL, get and deserialize it.
    pub async fn get_order(&mut self, url: &str) -> Result<OrderData, anyhow::Error> {
        Ok(self.post_as_get(url).await?.json()?)
    }

    /// Finalize an Order via its `finalize` URL property and the DER encoded CSR.
    pub async fn finalize(&mut self, url: &str, csr: &[u8]) -> Result<(), anyhow::Error> {
        let csr = base64::encode_config(csr, base64::URL_SAFE_NO_PAD);
        let data = serde_json::json!({ "csr": csr });
        self.post(url, &data).await?;
        Ok(())
    }

    /// Download a certificate via its 'certificate' URL property.
    ///
    /// The certificate will be a PEM certificate chain.
    pub async fn get_certificate(&mut self, url: &str) -> Result<Bytes, anyhow::Error> {
        Ok(self.post_as_get(url).await?.body)
    }

    /// Revoke an existing certificate (PEM or DER formatted).
    pub async fn revoke_certificate(
        &mut self,
        certificate: &[u8],
        reason: Option<u32>,
    ) -> Result<(), anyhow::Error> {
        // TODO: This can also work without an account.
        let account = Self::need_account(&self.account)?;

        let revocation = account.revoke_certificate(certificate, reason)?;

        let mut retry = retry();
        loop {
            retry.tick()?;

            let (directory, nonce) = Self::get_dir_nonce(
                &mut self.http_client,
                &self.directory_url,
                &mut self.directory,
                &mut self.nonce,
            )
            .await?;

            let request = revocation.request(&directory, nonce)?;
            match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
                Ok(_response) => return Ok(()),
                Err(err) if err.is_bad_nonce() => continue,
                Err(err) => return Err(err.into()),
            }
        }
    }

    fn need_account(account: &Option<Account>) -> Result<&Account, anyhow::Error> {
        account
            .as_ref()
            .ok_or_else(|| format_err!("cannot use client without an account"))
    }

    pub(crate) fn account(&self) -> Result<&Account, anyhow::Error> {
        Self::need_account(&self.account)
    }

    pub fn tos(&self) -> Option<&str> {
        self.tos.as_deref()
    }

    pub fn directory_url(&self) -> &str {
        &self.directory_url
    }

    fn to_account_data(&self) -> Result<AccountData, anyhow::Error> {
        let account = self.account()?;

        Ok(AccountData {
            location: account.location.clone(),
            key: account.private_key.clone(),
            account: AcmeAccountData {
                only_return_existing: false, // don't actually write this out in case it's set
                ..account.data.clone()
            },
            tos: self.tos.clone(),
            debug: self.debug,
            directory_url: self.directory_url.clone(),
        })
    }

    fn write_to<T: io::Write>(&self, out: T) -> Result<(), anyhow::Error> {
        let data = self.to_account_data()?;

        Ok(serde_json::to_writer_pretty(out, &data)?)
    }
}

struct AcmeResponse {
    body: Bytes,
    location: Option<String>,
    got_nonce: bool,
}

impl AcmeResponse {
    /// Convenience helper to assert that a location header was part of the response.
    fn location_required(&mut self) -> Result<String, anyhow::Error> {
        self.location
            .take()
            .ok_or_else(|| format_err!("missing Location header"))
    }

    /// Convenience shortcut to perform json deserialization of the returned body.
    fn json<T: for<'a> Deserialize<'a>>(&self) -> Result<T, Error> {
        Ok(serde_json::from_slice(&self.body)?)
    }

    /// Convenience shortcut to get the body as bytes.
    fn bytes(&self) -> &[u8] {
        &self.body
    }
}

impl AcmeClient {
    /// Non-self-borrowing run_request version for borrow workarounds.
    async fn execute(
        http_client: &mut SimpleHttp,
        request: AcmeRequest,
        nonce: &mut Option<String>,
    ) -> Result<AcmeResponse, Error> {
        let req_builder = Request::builder().method(request.method).uri(&request.url);

        let http_request = if !request.content_type.is_empty() {
            req_builder
                .header("Content-Type", request.content_type)
                .header("Content-Length", request.body.len())
                .body(request.body.into())
        } else {
            req_builder.body(Body::empty())
        }
        .map_err(|err| Error::Custom(format!("failed to create http request: {}", err)))?;

        let response = http_client
            .request(http_request)
            .await
            .map_err(|err| Error::Custom(err.to_string()))?;
        let (parts, body) = response.into_parts();

        let status = parts.status.as_u16();
        let body = hyper::body::to_bytes(body)
            .await
            .map_err(|err| Error::Custom(format!("failed to retrieve response body: {}", err)))?;

        let got_nonce = if let Some(new_nonce) = parts.headers.get(proxmox_acme_rs::REPLAY_NONCE) {
            let new_nonce = new_nonce.to_str().map_err(|err| {
                Error::Client(format!(
                    "received invalid replay-nonce header from ACME server: {}",
                    err
                ))
            })?;
            *nonce = Some(new_nonce.to_owned());
            true
        } else {
            false
        };

        if parts.status.is_success() {
            if status != request.expected {
                return Err(Error::InvalidApi(format!(
                    "ACME server responded with unexpected status code: {:?}",
                    parts.status
                )));
            }

            let location = parts
                .headers
                .get("Location")
                .map(|header| {
                    header.to_str().map(str::to_owned).map_err(|err| {
                        Error::Client(format!(
                            "received invalid location header from ACME server: {}",
                            err
                        ))
                    })
                })
                .transpose()?;

            return Ok(AcmeResponse {
                body,
                location,
                got_nonce,
            });
        }

        let error: ErrorResponse = serde_json::from_slice(&body).map_err(|err| {
            Error::Client(format!(
                "error status with improper error ACME response: {}",
                err
            ))
        })?;

        if error.ty == proxmox_acme_rs::error::BAD_NONCE {
            if !got_nonce {
                return Err(Error::InvalidApi(
                    "badNonce without a new Replay-Nonce header".to_string(),
                ));
            }
            return Err(Error::BadNonce);
        }

        Err(Error::Api(error))
    }

    /// Low-level API to run an API request. This automatically updates the current nonce!
    async fn run_request(&mut self, request: AcmeRequest) -> Result<AcmeResponse, Error> {
        Self::execute(&mut self.http_client, request, &mut self.nonce).await
    }

    async fn directory(&mut self) -> Result<&Directory, Error> {
        Ok(Self::get_directory(
            &mut self.http_client,
            &self.directory_url,
            &mut self.directory,
            &mut self.nonce,
        )
        .await?
        .0)
    }

    async fn get_directory<'a, 'b>(
        http_client: &mut SimpleHttp,
        directory_url: &str,
        directory: &'a mut Option<Directory>,
        nonce: &'b mut Option<String>,
    ) -> Result<(&'a Directory, Option<&'b str>), Error> {
        if let Some(d) = directory {
            return Ok((d, nonce.as_deref()));
        }

        let response = Self::execute(
            http_client,
            AcmeRequest {
                url: directory_url.to_string(),
                method: "GET",
                content_type: "",
                body: String::new(),
                expected: 200,
            },
            nonce,
        )
        .await?;

        *directory = Some(Directory::from_parts(
            directory_url.to_string(),
            response.json()?,
        ));

        Ok((directory.as_ref().unwrap(), nonce.as_deref()))
    }

    /// Like `get_directory`, but if the directory provides no nonce, also performs a `HEAD`
    /// request on the new nonce URL.
    async fn get_dir_nonce<'a, 'b>(
        http_client: &mut SimpleHttp,
        directory_url: &str,
        directory: &'a mut Option<Directory>,
        nonce: &'b mut Option<String>,
    ) -> Result<(&'a Directory, &'b str), Error> {
        // this let construct is a lifetime workaround:
        let _ = Self::get_directory(http_client, directory_url, directory, nonce).await?;
        let dir = directory.as_ref().unwrap(); // the above fails if it couldn't fill this option
        if nonce.is_none() {
            // this is also a lifetime issue...
            let _ = Self::get_nonce(http_client, nonce, dir.new_nonce_url()).await?;
        };
        Ok((dir, nonce.as_deref().unwrap()))
    }

    pub async fn terms_of_service_url(&mut self) -> Result<Option<&str>, Error> {
        Ok(self.directory().await?.terms_of_service_url())
    }

    async fn get_nonce<'a>(
        http_client: &mut SimpleHttp,
        nonce: &'a mut Option<String>,
        new_nonce_url: &str,
    ) -> Result<&'a str, Error> {
        let response = Self::execute(
            http_client,
            AcmeRequest {
                url: new_nonce_url.to_owned(),
                method: "HEAD",
                content_type: "",
                body: String::new(),
                expected: 200,
            },
            nonce,
        )
        .await?;

        if !response.got_nonce {
            return Err(Error::InvalidApi(
                "no new nonce received from new nonce URL".to_string(),
            ));
        }

        nonce
            .as_deref()
            .ok_or_else(|| Error::Client("failed to update nonce".to_string()))
    }
}

/// bad nonce retry count helper
struct Retry(usize);

const fn retry() -> Retry {
    Retry(0)
}

impl Retry {
    fn tick(&mut self) -> Result<(), Error> {
        if self.0 >= 3 {
            Err(Error::Client(format!("kept getting a badNonce error!")))
        } else {
            self.0 += 1;
            Ok(())
        }
    }
}
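For orientation, here is a minimal in-crate usage sketch of the client above. It is an assumption-laden illustration, not part of the diff: it presumes a tokio runtime and that `proxmox_acme_rs::Error` converts into `anyhow::Error`; the helper name is hypothetical.

// Hypothetical helper: load a stored account and print the directory's
// terms-of-service URL, exercising the load()/directory() round-trip.
async fn show_tos(account_name: &crate::api2::types::AcmeAccountName) -> Result<(), anyhow::Error> {
    let mut client = crate::acme::AcmeClient::load(account_name).await?;
    if let Some(tos) = client.terms_of_service_url().await? {
        println!("terms of service: {}", tos);
    }
    Ok(())
}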
src/acme/mod.rs (new file)

@@ -0,0 +1,5 @@
mod client;
pub use client::AcmeClient;

pub(crate) mod plugin;
pub(crate) use plugin::get_acme_plugin;
299
src/acme/plugin.rs
Normal file
299
src/acme/plugin.rs
Normal file
@ -0,0 +1,299 @@
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::process::Stdio;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use hyper::{Body, Request, Response};
|
||||
use tokio::io::{AsyncBufReadExt, AsyncRead, AsyncWriteExt, BufReader};
|
||||
use tokio::process::Command;
|
||||
|
||||
use proxmox_acme_rs::{Authorization, Challenge};
|
||||
|
||||
use crate::acme::AcmeClient;
|
||||
use crate::api2::types::AcmeDomain;
|
||||
use crate::server::WorkerTask;
|
||||
|
||||
use crate::config::acme::plugin::{DnsPlugin, PluginData};
|
||||
|
||||
const PROXMOX_ACME_SH_PATH: &str = "/usr/share/proxmox-acme/proxmox-acme";
|
||||
|
||||
pub(crate) fn get_acme_plugin(
|
||||
plugin_data: &PluginData,
|
||||
name: &str,
|
||||
) -> Result<Option<Box<dyn AcmePlugin + Send + Sync + 'static>>, Error> {
|
||||
let (ty, data) = match plugin_data.get(name) {
|
||||
Some(plugin) => plugin,
|
||||
None => return Ok(None),
|
||||
};
|
||||
|
||||
Ok(Some(match ty.as_str() {
|
||||
"dns" => {
|
||||
let plugin: DnsPlugin = serde_json::from_value(data.clone())?;
|
||||
Box::new(plugin)
|
||||
}
|
||||
"standalone" => {
|
||||
// this one has no config
|
||||
Box::new(StandaloneServer::default())
|
||||
}
|
||||
other => bail!("missing implementation for plugin type '{}'", other),
|
||||
}))
|
||||
}
|
||||
|
||||
pub(crate) trait AcmePlugin {
|
||||
/// Setup everything required to trigger the validation and return the corresponding validation
|
||||
/// URL.
|
||||
fn setup<'fut, 'a: 'fut, 'b: 'fut, 'c: 'fut, 'd: 'fut>(
|
||||
&'a mut self,
|
||||
client: &'b mut AcmeClient,
|
||||
authorization: &'c Authorization,
|
||||
domain: &'d AcmeDomain,
|
||||
task: Arc<WorkerTask>,
|
||||
) -> Pin<Box<dyn Future<Output = Result<&'c str, Error>> + Send + 'fut>>;
|
||||
|
||||
fn teardown<'fut, 'a: 'fut, 'b: 'fut, 'c: 'fut, 'd: 'fut>(
|
||||
&'a mut self,
|
||||
client: &'b mut AcmeClient,
|
||||
authorization: &'c Authorization,
|
||||
domain: &'d AcmeDomain,
|
||||
task: Arc<WorkerTask>,
|
||||
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'fut>>;
|
||||
}
|
||||
|
||||
fn extract_challenge<'a>(
|
||||
authorization: &'a Authorization,
|
||||
ty: &str,
|
||||
) -> Result<&'a Challenge, Error> {
|
||||
authorization
|
||||
.challenges
|
||||
.iter()
|
||||
.find(|ch| ch.ty == ty)
|
||||
.ok_or_else(|| format_err!("no supported challenge type (dns-01) found"))
|
||||
}
|
||||
|
||||
async fn pipe_to_tasklog<T: AsyncRead + Unpin>(
|
||||
pipe: T,
|
||||
task: Arc<WorkerTask>,
|
||||
) -> Result<(), std::io::Error> {
|
||||
let mut pipe = BufReader::new(pipe);
|
||||
let mut line = String::new();
|
||||
loop {
|
||||
line.clear();
|
||||
match pipe.read_line(&mut line).await {
|
||||
Ok(0) => return Ok(()),
|
||||
Ok(_) => task.log(line.as_str()),
|
||||
Err(err) => return Err(err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DnsPlugin {
|
||||
async fn action<'a>(
|
||||
&self,
|
||||
client: &mut AcmeClient,
|
||||
authorization: &'a Authorization,
|
||||
domain: &AcmeDomain,
|
||||
task: Arc<WorkerTask>,
|
||||
action: &str,
|
||||
) -> Result<&'a str, Error> {
|
||||
let challenge = extract_challenge(authorization, "dns-01")?;
|
||||
let mut stdin_data = client
|
||||
.dns_01_txt_value(
|
||||
challenge
|
||||
.token()
|
||||
.ok_or_else(|| format_err!("missing token in challenge"))?,
|
||||
)?
|
||||
.into_bytes();
|
||||
stdin_data.push(b'\n');
|
||||
stdin_data.extend(self.data.as_bytes());
|
||||
if stdin_data.last() != Some(&b'\n') {
|
||||
stdin_data.push(b'\n');
|
||||
}
|
||||
|
||||
let mut command = Command::new("/usr/bin/setpriv");
|
||||
|
||||
#[rustfmt::skip]
|
||||
command.args(&[
|
||||
"--reuid", "nobody",
|
||||
"--regid", "nogroup",
|
||||
"--clear-groups",
|
||||
"--reset-env",
|
||||
"--",
|
||||
"/bin/bash",
|
||||
PROXMOX_ACME_SH_PATH,
|
||||
action,
|
||||
&self.core.api,
|
||||
domain.alias.as_deref().unwrap_or(&domain.domain),
|
||||
]);
|
||||
|
||||
// We could use 1 socketpair, but tokio wraps them all in `File` internally causing `close`
|
||||
// to be called separately on all of them without exception, so we need 3 pipes :-(
|
||||
|
||||
let mut child = command
|
||||
.stdin(Stdio::piped())
|
||||
.stdout(Stdio::piped())
|
||||
.stderr(Stdio::piped())
|
||||
.spawn()?;
|
||||
|
||||
let mut stdin = child.stdin.take().expect("Stdio::piped()");
|
||||
let stdout = child.stdout.take().expect("Stdio::piped() failed?");
|
||||
let stdout = pipe_to_tasklog(stdout, Arc::clone(&task));
|
||||
let stderr = child.stderr.take().expect("Stdio::piped() failed?");
|
||||
let stderr = pipe_to_tasklog(stderr, Arc::clone(&task));
|
||||
let stdin = async move {
|
||||
stdin.write_all(&stdin_data).await?;
|
||||
stdin.flush().await?;
|
||||
Ok::<_, std::io::Error>(())
|
||||
};
|
||||
match futures::try_join!(stdin, stdout, stderr) {
|
||||
Ok(((), (), ())) => (),
|
||||
Err(err) => {
|
||||
if let Err(err) = child.kill().await {
|
||||
task.log(format!(
|
||||
"failed to kill '{} {}' command: {}",
|
||||
PROXMOX_ACME_SH_PATH, action, err
|
||||
));
|
||||
}
|
||||
bail!("'{}' failed: {}", PROXMOX_ACME_SH_PATH, err);
|
||||
}
|
||||
}
|
||||
|
||||
let status = child.wait().await?;
|
||||
if !status.success() {
|
||||
bail!(
|
||||
"'{} {}' exited with error ({})",
|
||||
PROXMOX_ACME_SH_PATH,
|
||||
action,
|
||||
status.code().unwrap_or(-1)
|
||||
);
|
||||
}
|
||||
|
||||
Ok(&challenge.url)
|
||||
}
|
||||
}
|
||||
|
||||
impl AcmePlugin for DnsPlugin {
|
||||
fn setup<'fut, 'a: 'fut, 'b: 'fut, 'c: 'fut, 'd: 'fut>(
|
||||
&'a mut self,
|
||||
client: &'b mut AcmeClient,
|
||||
authorization: &'c Authorization,
|
||||
domain: &'d AcmeDomain,
|
||||
task: Arc<WorkerTask>,
|
||||
) -> Pin<Box<dyn Future<Output = Result<&'c str, Error>> + Send + 'fut>> {
|
||||
Box::pin(self.action(client, authorization, domain, task, "setup"))
|
||||
}
|
||||
|
||||
fn teardown<'fut, 'a: 'fut, 'b: 'fut, 'c: 'fut, 'd: 'fut>(
|
||||
&'a mut self,
|
||||
client: &'b mut AcmeClient,
|
||||
authorization: &'c Authorization,
|
||||
domain: &'d AcmeDomain,
|
||||
task: Arc<WorkerTask>,
|
||||
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'fut>> {
|
||||
Box::pin(async move {
|
||||
self.action(client, authorization, domain, task, "teardown")
|
||||
.await
|
||||
.map(drop)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct StandaloneServer {
|
||||
abort_handle: Option<futures::future::AbortHandle>,
|
||||
}
|
||||
|
||||
// In case the "order_certificates" future gets dropped between setup & teardown, let's also cancel
|
||||
// the HTTP listener on Drop:
|
||||
impl Drop for StandaloneServer {
|
||||
fn drop(&mut self) {
|
||||
self.stop();
|
||||
}
|
||||
}
|
||||
|
||||
impl StandaloneServer {
|
||||
fn stop(&mut self) {
|
||||
if let Some(abort) = self.abort_handle.take() {
|
||||
abort.abort();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn standalone_respond(
|
||||
req: Request<Body>,
|
||||
path: Arc<String>,
|
||||
key_auth: Arc<String>,
|
||||
) -> Result<Response<Body>, hyper::Error> {
|
||||
if req.method() == hyper::Method::GET && req.uri().path() == path.as_str() {
|
||||
Ok(Response::builder()
|
||||
.status(http::StatusCode::OK)
|
||||
.body(key_auth.as_bytes().to_vec().into())
|
||||
.unwrap())
|
||||
} else {
|
||||
Ok(Response::builder()
|
||||
.status(http::StatusCode::NOT_FOUND)
|
||||
.body("Not found.".into())
|
||||
.unwrap())
|
||||
}
|
||||
}

impl AcmePlugin for StandaloneServer {
    fn setup<'fut, 'a: 'fut, 'b: 'fut, 'c: 'fut, 'd: 'fut>(
        &'a mut self,
        client: &'b mut AcmeClient,
        authorization: &'c Authorization,
        _domain: &'d AcmeDomain,
        _task: Arc<WorkerTask>,
    ) -> Pin<Box<dyn Future<Output = Result<&'c str, Error>> + Send + 'fut>> {
        use hyper::server::conn::AddrIncoming;
        use hyper::service::{make_service_fn, service_fn};

        Box::pin(async move {
            self.stop();

            let challenge = extract_challenge(authorization, "http-01")?;
            let token = challenge
                .token()
                .ok_or_else(|| format_err!("missing token in challenge"))?;
            let key_auth = Arc::new(client.key_authorization(&token)?);
            let path = Arc::new(format!("/.well-known/acme-challenge/{}", token));

            let service = make_service_fn(move |_| {
                let path = Arc::clone(&path);
                let key_auth = Arc::clone(&key_auth);
                async move {
                    Ok::<_, hyper::Error>(service_fn(move |request| {
                        standalone_respond(request, Arc::clone(&path), Arc::clone(&key_auth))
                    }))
                }
            });

            // `[::]:80` first, then `*:80`
            let incoming = AddrIncoming::bind(&(([0u16; 8], 80).into()))
                .or_else(|_| AddrIncoming::bind(&(([0u8; 4], 80).into())))?;

            let server = hyper::Server::builder(incoming).serve(service);

            let (future, abort) = futures::future::abortable(server);
            self.abort_handle = Some(abort);
            tokio::spawn(future);

            Ok(challenge.url.as_str())
        })
    }

    fn teardown<'fut, 'a: 'fut, 'b: 'fut, 'c: 'fut, 'd: 'fut>(
        &'a mut self,
        _client: &'b mut AcmeClient,
        _authorization: &'c Authorization,
        _domain: &'d AcmeDomain,
        _task: Arc<WorkerTask>,
    ) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'fut>> {
        Box::pin(async move {
            if let Some(abort) = self.abort_handle.take() {
                abort.abort();
            }
            Ok(())
        })
    }
}
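
A note on the bind addresses in setup(): `([0u16; 8], 80)` converts into the IPv6 unspecified address `[::]:80` and `([0u8; 4], 80)` into `0.0.0.0:80`. The same two addresses, spelled out explicitly:

use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr};

fn bind_addr_example() {
    let v6: SocketAddr = (Ipv6Addr::UNSPECIFIED, 80).into(); // tried first, usually dual-stack
    let v4: SocketAddr = (Ipv4Addr::UNSPECIFIED, 80).into(); // fallback if the IPv6 bind fails
    assert_eq!(v6.to_string(), "[::]:80");
    assert_eq!(v4.to_string(), "0.0.0.0:80");
}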

@@ -18,8 +18,7 @@ use crate::api2::types::*;
    description: "User configuration (without password).",
    properties: {
        realm: {
            description: "Realm ID.",
            type: String,
            schema: REALM_ID_SCHEMA,
        },
        comment: {
            schema: SINGLE_LINE_COMMENT_SCHEMA,
@@ -219,6 +219,48 @@ pub fn list_groups(
    Ok(group_info)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup group including all snapshots.
pub fn delete_group(
    store: String,
    backup_type: String,
    backup_id: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let group = BackupGroup::new(backup_type, backup_id);
    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.remove_backup_group(&group)?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
@@ -1140,7 +1182,7 @@ pub fn download_file_decoded(
    manifest.verify_file(&file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = AsyncIndexReader::new(index, chunk_reader);
    let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
    Body::wrap_stream(AsyncReaderStream::new(reader)
        .map_err(move |err| {
            eprintln!("error during streaming of '{:?}' - {}", path, err);
@@ -1155,7 +1197,7 @@ pub fn download_file_decoded(
    manifest.verify_file(&file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = AsyncIndexReader::new(index, chunk_reader);
    let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
    Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
        .map_err(move |err| {
            eprintln!("error during streaming of '{:?}' - {}", path, err);
@@ -1722,6 +1764,7 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
            .delete(&API_METHOD_DELETE_GROUP)
    ),
    (
        "notes",
@@ -4,6 +4,7 @@ use proxmox::api::router::{Router, SubdirMap};
use proxmox::list_subdirs_api_method;

pub mod access;
pub mod acme;
pub mod datastore;
pub mod remote;
pub mod sync;
@@ -16,6 +17,7 @@ pub mod tape_backup_job;

const SUBDIRS: SubdirMap = &[
    ("access", &access::ROUTER),
    ("acme", &acme::ROUTER),
    ("changer", &changer::ROUTER),
    ("datastore", &datastore::ROUTER),
    ("drive", &drive::ROUTER),
727  src/api2/config/acme.rs  Normal file
@@ -0,0 +1,727 @@
use std::fs;
use std::path::Path;
use std::sync::{Arc, Mutex};
use std::time::SystemTime;

use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};

use proxmox::api::router::SubdirMap;
use proxmox::api::schema::Updatable;
use proxmox::api::{api, Permission, Router, RpcEnvironment};
use proxmox::http_bail;
use proxmox::list_subdirs_api_method;

use proxmox_acme_rs::account::AccountData as AcmeAccountData;
use proxmox_acme_rs::Account;

use crate::acme::AcmeClient;
use crate::api2::types::{AcmeAccountName, AcmeChallengeSchema, Authid, KnownAcmeDirectory};
use crate::config::acl::PRIV_SYS_MODIFY;
use crate::config::acme::plugin::{
    DnsPlugin, DnsPluginCore, DnsPluginCoreUpdater, PLUGIN_ID_SCHEMA,
};
use crate::server::WorkerTask;
use crate::tools::ControlFlow;

pub(crate) const ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SUBDIRS))
    .subdirs(SUBDIRS);

const SUBDIRS: SubdirMap = &[
    (
        "account",
        &Router::new()
            .get(&API_METHOD_LIST_ACCOUNTS)
            .post(&API_METHOD_REGISTER_ACCOUNT)
            .match_all("name", &ACCOUNT_ITEM_ROUTER),
    ),
    (
        "challenge-schema",
        &Router::new().get(&API_METHOD_GET_CHALLENGE_SCHEMA),
    ),
    (
        "directories",
        &Router::new().get(&API_METHOD_GET_DIRECTORIES),
    ),
    (
        "plugins",
        &Router::new()
            .get(&API_METHOD_LIST_PLUGINS)
            .post(&API_METHOD_ADD_PLUGIN)
            .match_all("id", &PLUGIN_ITEM_ROUTER),
    ),
    ("tos", &Router::new().get(&API_METHOD_GET_TOS)),
];

const ACCOUNT_ITEM_ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_ACCOUNT)
    .put(&API_METHOD_UPDATE_ACCOUNT)
    .delete(&API_METHOD_DEACTIVATE_ACCOUNT);

const PLUGIN_ITEM_ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_PLUGIN)
    .put(&API_METHOD_UPDATE_PLUGIN)
    .delete(&API_METHOD_DELETE_PLUGIN);

#[api(
    properties: {
        name: { type: AcmeAccountName },
    },
)]
/// An ACME Account entry.
///
/// Currently only contains a 'name' property.
#[derive(Serialize)]
pub struct AccountEntry {
    name: AcmeAccountName,
}

#[api(
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    returns: {
        type: Array,
        items: { type: AccountEntry },
        description: "List of ACME accounts.",
    },
    protected: true,
)]
/// List ACME accounts.
pub fn list_accounts() -> Result<Vec<AccountEntry>, Error> {
    let mut entries = Vec::new();
    crate::config::acme::foreach_acme_account(|name| {
        entries.push(AccountEntry { name });
        ControlFlow::Continue(())
    })?;
    Ok(entries)
}

#[api(
    properties: {
        account: { type: Object, properties: {}, additional_properties: true },
        tos: {
            type: String,
            optional: true,
        },
    },
)]
/// ACME Account information.
///
/// This is what we return via the API.
#[derive(Serialize)]
pub struct AccountInfo {
    /// Raw account data.
    account: AcmeAccountData,

    /// The ACME directory URL the account was created at.
    directory: String,

    /// The account's own URL within the ACME directory.
    location: String,

    /// The ToS URL, if the user agreed to one.
    #[serde(skip_serializing_if = "Option::is_none")]
    tos: Option<String>,
}

#[api(
    input: {
        properties: {
            name: { type: AcmeAccountName },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    returns: { type: AccountInfo },
    protected: true,
)]
/// Return existing ACME account information.
pub async fn get_account(name: AcmeAccountName) -> Result<AccountInfo, Error> {
    let client = AcmeClient::load(&name).await?;
    let account = client.account()?;
    Ok(AccountInfo {
        location: account.location.clone(),
        tos: client.tos().map(str::to_owned),
        directory: client.directory_url().to_owned(),
        account: AcmeAccountData {
            only_return_existing: false, // don't actually write this out in case it's set
            ..account.data.clone()
        },
    })
}

fn account_contact_from_string(s: &str) -> Vec<String> {
    s.split(&[' ', ';', ',', '\0'][..])
        .map(|s| format!("mailto:{}", s))
        .collect()
}
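
Worth noting about the splitting above: adjacent separators produce empty segments, which would map to a bare "mailto:" entry since nothing is filtered out. A quick sketch of the behavior:

fn contact_split_example() {
    let contact = account_contact_from_string("a@example.com;b@example.com");
    assert_eq!(
        contact,
        vec!["mailto:a@example.com".to_string(), "mailto:b@example.com".to_string()]
    );
    // "a@example.com, b@example.com" (comma plus space) would additionally
    // produce a bare "mailto:" entry for the empty segment in between.
}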

#[api(
    input: {
        properties: {
            name: {
                type: AcmeAccountName,
                optional: true,
            },
            contact: {
                description: "List of email addresses.",
            },
            tos_url: {
                description: "URL of CA TermsOfService - setting this indicates agreement.",
                optional: true,
            },
            directory: {
                type: String,
                description: "The ACME Directory.",
                optional: true,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
)]
/// Register an ACME account.
fn register_account(
    name: Option<AcmeAccountName>,
    // Todo: email & email-list schema
    contact: String,
    tos_url: Option<String>,
    directory: Option<String>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let name = name.unwrap_or_else(|| unsafe {
        AcmeAccountName::from_string_unchecked("default".to_string())
    });

    if Path::new(&crate::config::acme::account_path(&name)).exists() {
        http_bail!(BAD_REQUEST, "account {} already exists", name);
    }

    let directory = directory.unwrap_or_else(|| {
        crate::config::acme::DEFAULT_ACME_DIRECTORY_ENTRY
            .url
            .to_owned()
    });

    WorkerTask::spawn(
        "acme-register",
        Some(name.to_string()),
        auth_id,
        true,
        move |worker| async move {
            let mut client = AcmeClient::new(directory);

            worker.log(format!("Registering ACME account '{}'...", &name));

            let account =
                do_register_account(&mut client, &name, tos_url.is_some(), contact, None).await?;

            worker.log(format!(
                "Registration successful, account URL: {}",
                account.location
            ));

            Ok(())
        },
    )
}

pub async fn do_register_account<'a>(
    client: &'a mut AcmeClient,
    name: &AcmeAccountName,
    agree_to_tos: bool,
    contact: String,
    rsa_bits: Option<u32>,
) -> Result<&'a Account, Error> {
    let contact = account_contact_from_string(&contact);
    Ok(client
        .new_account(name, agree_to_tos, contact, rsa_bits)
        .await?)
}

#[api(
    input: {
        properties: {
            name: { type: AcmeAccountName },
            contact: {
                description: "List of email addresses.",
                optional: true,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
)]
/// Update an ACME account.
pub fn update_account(
    name: AcmeAccountName,
    // Todo: email & email-list schema
    contact: Option<String>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    WorkerTask::spawn(
        "acme-update",
        Some(name.to_string()),
        auth_id,
        true,
        move |_worker| async move {
            let data = match contact {
                Some(data) => json!({
                    "contact": account_contact_from_string(&data),
                }),
                None => json!({}),
            };

            AcmeClient::load(&name).await?.update_account(&data).await?;

            Ok(())
        },
    )
}

#[api(
    input: {
        properties: {
            name: { type: AcmeAccountName },
            force: {
                description:
                    "Delete account data even if the server refuses to deactivate the account.",
                optional: true,
                default: false,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
)]
/// Deactivate an ACME account.
pub fn deactivate_account(
    name: AcmeAccountName,
    force: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    WorkerTask::spawn(
        "acme-deactivate",
        Some(name.to_string()),
        auth_id,
        true,
        move |worker| async move {
            match AcmeClient::load(&name)
                .await?
                .update_account(&json!({"status": "deactivated"}))
                .await
            {
                Ok(_account) => (),
                Err(err) if !force => return Err(err),
                Err(err) => {
                    worker.warn(format!(
                        "error deactivating account {}, proceeding anyway - {}",
                        name, err,
                    ));
                }
            }
            crate::config::acme::mark_account_deactivated(&name)?;
            Ok(())
        },
    )
}

#[api(
    input: {
        properties: {
            directory: {
                type: String,
                description: "The ACME Directory.",
                optional: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
    },
    returns: {
        type: String,
        optional: true,
        description: "The ACME Directory's ToS URL, if any.",
    },
)]
/// Get the Terms of Service URL for an ACME directory.
async fn get_tos(directory: Option<String>) -> Result<Option<String>, Error> {
    let directory = directory.unwrap_or_else(|| {
        crate::config::acme::DEFAULT_ACME_DIRECTORY_ENTRY
            .url
            .to_owned()
    });
    Ok(AcmeClient::new(directory)
        .terms_of_service_url()
        .await?
        .map(str::to_owned))
}

#[api(
    access: {
        permission: &Permission::Anybody,
    },
    returns: {
        description: "List of known ACME directories.",
        type: Array,
        items: { type: KnownAcmeDirectory },
    },
)]
/// Get named known ACME directory endpoints.
fn get_directories() -> Result<&'static [KnownAcmeDirectory], Error> {
    Ok(crate::config::acme::KNOWN_ACME_DIRECTORIES)
}

/// Wrapper for efficient Arc use when returning the ACME challenge-plugin schema for serializing
struct ChallengeSchemaWrapper {
    inner: Arc<Vec<AcmeChallengeSchema>>,
}

impl Serialize for ChallengeSchemaWrapper {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.inner.serialize(serializer)
    }
}

fn get_cached_challenge_schemas() -> Result<ChallengeSchemaWrapper, Error> {
    lazy_static! {
        static ref CACHE: Mutex<Option<(Arc<Vec<AcmeChallengeSchema>>, SystemTime)>> =
            Mutex::new(None);
    }

    // the actual loading code
    let mut last = CACHE.lock().unwrap();

    let actual_mtime = fs::metadata(crate::config::acme::ACME_DNS_SCHEMA_FN)?.modified()?;

    let schema = match &*last {
        Some((schema, cached_mtime)) if *cached_mtime >= actual_mtime => schema.clone(),
        _ => {
            let new_schema = Arc::new(crate::config::acme::load_dns_challenge_schema()?);
            *last = Some((Arc::clone(&new_schema), actual_mtime));
            new_schema
        }
    };

    Ok(ChallengeSchemaWrapper { inner: schema })
}
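
The cache above is invalidated by comparing the schema file's mtime against the cached one; the same pattern fits any rarely-changing config file. A generic sketch (the file path and String payload are placeholders):

use std::fs;
use std::sync::{Arc, Mutex};
use std::time::SystemTime;

fn cached_load(
    path: &str,
    cache: &Mutex<Option<(Arc<String>, SystemTime)>>,
) -> std::io::Result<Arc<String>> {
    let mtime = fs::metadata(path)?.modified()?;
    let mut guard = cache.lock().unwrap();
    Ok(match &*guard {
        // Cache hit: the file has not been modified since the last load.
        Some((data, cached)) if *cached >= mtime => Arc::clone(data),
        _ => {
            let data = Arc::new(fs::read_to_string(path)?);
            *guard = Some((Arc::clone(&data), mtime));
            data
        }
    })
}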

#[api(
    access: {
        permission: &Permission::Anybody,
    },
    returns: {
        description: "ACME Challenge Plugin Schema.",
        type: Array,
        items: { type: AcmeChallengeSchema },
    },
)]
/// Get the ACME challenge-plugin schemas.
fn get_challenge_schema() -> Result<ChallengeSchemaWrapper, Error> {
    get_cached_challenge_schemas()
}

#[api]
#[derive(Default, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
/// The API's format is inherited from PVE/PMG:
pub struct PluginConfig {
    /// Plugin ID.
    plugin: String,

    /// Plugin type.
    #[serde(rename = "type")]
    ty: String,

    /// DNS Api name.
    api: Option<String>,

    /// Plugin configuration data.
    data: Option<String>,

    /// Extra delay in seconds to wait before requesting validation.
    ///
    /// Allows coping with long TTLs of DNS records.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    validation_delay: Option<u32>,

    /// Flag to disable the config.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    disable: Option<bool>,
}

// See PMG/PVE's $modify_cfg_for_api sub
fn modify_cfg_for_api(id: &str, ty: &str, data: &Value) -> PluginConfig {
    let mut entry = data.clone();

    let obj = entry.as_object_mut().unwrap();
    obj.remove("id");
    obj.insert("plugin".to_string(), Value::String(id.to_owned()));
    obj.insert("type".to_string(), Value::String(ty.to_owned()));

    // FIXME: This needs to go once the `Updater` is fixed.
    // None of these should be able to fail unless the user changed the files by hand, in which
    // case we leave the unmodified string in the Value for now. This will be handled with an error
    // later.
    if let Some(Value::String(ref mut data)) = obj.get_mut("data") {
        if let Ok(new) = base64::decode_config(&data, base64::URL_SAFE_NO_PAD) {
            if let Ok(utf8) = String::from_utf8(new) {
                *data = utf8;
            }
        }
    }

    // PVE/PMG do this explicitly for ACME plugins...
    // obj.insert("digest".to_string(), Value::String(digest.clone()));

    serde_json::from_value(entry).unwrap_or_else(|_| PluginConfig {
        plugin: "*Error*".to_string(),
        ty: "*Error*".to_string(),
        ..Default::default()
    })
}
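
Roughly, modify_cfg_for_api() flattens a section-config entry into the flat API shape; a sketch of the transformation (the values below are invented for illustration):

use serde_json::json;

fn modify_cfg_example() {
    // The section config stores ("example", ("dns", { "api": ..., "data": ... })).
    let entry = json!({ "api": "somedns", "data": "aGVsbG8" });
    let cfg = modify_cfg_for_api("example", "dns", &entry);
    // cfg.plugin == "example", cfg.ty == "dns", cfg.api == Some("somedns"),
    // cfg.data == Some("hello")  ("aGVsbG8" is URL-safe base64 for "hello").
}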

#[api(
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
    returns: {
        type: Array,
        description: "List of ACME plugin configurations.",
        items: { type: PluginConfig },
    },
)]
/// List ACME challenge plugins.
pub fn list_plugins(mut rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<PluginConfig>, Error> {
    use crate::config::acme::plugin;

    let (plugins, digest) = plugin::config()?;
    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
    Ok(plugins
        .iter()
        .map(|(id, (ty, data))| modify_cfg_for_api(&id, &ty, data))
        .collect())
}

#[api(
    input: {
        properties: {
            id: { schema: PLUGIN_ID_SCHEMA },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
    returns: { type: PluginConfig },
)]
/// Get a single ACME challenge plugin configuration.
pub fn get_plugin(id: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<PluginConfig, Error> {
    use crate::config::acme::plugin;

    let (plugins, digest) = plugin::config()?;
    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    match plugins.get(&id) {
        Some((ty, data)) => Ok(modify_cfg_for_api(&id, &ty, &data)),
        None => http_bail!(NOT_FOUND, "no such plugin"),
    }
}

// Currently we only have "the" standalone plugin and DNS plugins, so we can just flatten a
// DnsPluginUpdater:
//
// FIXME: The 'id' parameter should not be "optional" in the schema.
#[api(
    input: {
        properties: {
            type: {
                type: String,
                description: "The ACME challenge plugin type.",
            },
            core: {
                type: DnsPluginCoreUpdater,
                flatten: true,
            },
            data: {
                type: String,
                // This is different in the API!
                description: "DNS plugin data (base64 encoded with padding).",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
)]
/// Add ACME plugin configuration.
pub fn add_plugin(r#type: String, core: DnsPluginCoreUpdater, data: String) -> Result<(), Error> {
    use crate::config::acme::plugin;

    // Currently we only support DNS plugins and the standalone plugin is "fixed":
    if r#type != "dns" {
        bail!("invalid ACME plugin type: {:?}", r#type);
    }

    let data = String::from_utf8(base64::decode(&data)?)
        .map_err(|_| format_err!("data must be valid UTF-8"))?;
    //core.api_fixup()?;

    // FIXME: Solve the Updater with non-optional fields thing...
    let id = core
        .id
        .clone()
        .ok_or_else(|| format_err!("missing required 'id' parameter"))?;

    let _lock = plugin::lock()?;

    let (mut plugins, _digest) = plugin::config()?;
    if plugins.contains_key(&id) {
        bail!("ACME plugin ID {:?} already exists", id);
    }

    let plugin = serde_json::to_value(DnsPlugin {
        core: DnsPluginCore::try_build_from(core)?,
        data,
    })?;

    plugins.insert(id, r#type, plugin);

    plugin::save_config(&plugins)?;

    Ok(())
}

#[api(
    input: {
        properties: {
            id: { schema: PLUGIN_ID_SCHEMA },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
)]
/// Delete an ACME plugin configuration.
pub fn delete_plugin(id: String) -> Result<(), Error> {
    use crate::config::acme::plugin;

    let _lock = plugin::lock()?;

    let (mut plugins, _digest) = plugin::config()?;
    if plugins.remove(&id).is_none() {
        http_bail!(NOT_FOUND, "no such plugin");
    }
    plugin::save_config(&plugins)?;

    Ok(())
}

#[api(
    input: {
        properties: {
            core_update: {
                type: DnsPluginCoreUpdater,
                flatten: true,
            },
            data: {
                type: String,
                optional: true,
                // This is different in the API!
                description: "DNS plugin data (base64 encoded with padding).",
            },
            digest: {
                description: "Digest to protect against concurrent updates",
                optional: true,
            },
            delete: {
                description: "Options to remove from the configuration",
                optional: true,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
)]
/// Update an ACME plugin configuration.
pub fn update_plugin(
    core_update: DnsPluginCoreUpdater,
    data: Option<String>,
    delete: Option<String>,
    digest: Option<String>,
) -> Result<(), Error> {
    use crate::config::acme::plugin;

    let data = data
        .as_deref()
        .map(base64::decode)
        .transpose()?
        .map(String::from_utf8)
        .transpose()
        .map_err(|_| format_err!("data must be valid UTF-8"))?;
    //core_update.api_fixup()?;

    // unwrap: the id is matched by this method's API path
    let id = core_update.id.clone().unwrap();

    let delete: Vec<&str> = delete
        .as_deref()
        .unwrap_or("")
        .split(&[' ', ',', ';', '\0'][..])
        .collect();

    let _lock = plugin::lock()?;

    let (mut plugins, expected_digest) = plugin::config()?;

    if let Some(digest) = digest {
        let digest = proxmox::tools::hex_to_digest(&digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    match plugins.get_mut(&id) {
        Some((ty, ref mut entry)) => {
            if ty != "dns" {
                bail!("cannot update plugin of type {:?}", ty);
            }

            let mut plugin: DnsPlugin = serde_json::from_value(entry.clone())?;
            plugin.core.update_from(core_update, &delete)?;
            if let Some(data) = data {
                plugin.data = data;
            }
            *entry = serde_json::to_value(plugin)?;
        }
        None => http_bail!(NOT_FOUND, "no such plugin"),
    }

    plugin::save_config(&plugins)?;

    Ok(())
}
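
The `delete` parameter used by update_plugin() is a plain separator-delimited list of option names; the split accepts space, comma, semicolon or NUL interchangeably:

fn delete_list_example() {
    let delete = "validation-delay,disable";
    let parts: Vec<&str> = delete.split(&[' ', ',', ';', '\0'][..]).collect();
    assert_eq!(parts, vec!["validation-delay", "disable"]);
    // Each listed option is then reset by the updater's update_from() call.
}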

@@ -5,15 +5,15 @@ use serde_json::Value;
use ::serde::{Deserialize, Serialize};

use proxmox::api::{api, Router, RpcEnvironment, Permission};
use proxmox::api::section_config::SectionConfigData;
use proxmox::api::schema::parse_property_string;
use proxmox::tools::fs::open_file_locked;

use crate::api2::types::*;
use crate::backup::*;
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::datastore::{self, DataStoreConfig, DIR_NAME_SCHEMA};
use crate::config::acl::{PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY};
use crate::server::jobstate;
use crate::server::{jobstate, WorkerTask};

#[api(
    input: {
@@ -50,6 +50,26 @@ pub fn list_datastores(
    Ok(list.into_iter().filter(filter_by_privs).collect())
}

pub(crate) fn do_create_datastore(
    _lock: std::fs::File,
    mut config: SectionConfigData,
    datastore: DataStoreConfig,
    worker: Option<&dyn crate::task::TaskState>,
) -> Result<(), Error> {
    let path: PathBuf = datastore.path.clone().into();

    let backup_user = crate::backup::backup_user()?;
    let _store = ChunkStore::create(&datastore.name, path, backup_user.uid, backup_user.gid, worker)?;

    config.set_data(&datastore.name, "datastore", &datastore)?;

    datastore::save_config(&config)?;

    jobstate::create_state_file("prune", &datastore.name)?;
    jobstate::create_state_file("garbage_collection", &datastore.name)?;

    Ok(())
}

// fixme: impl. const fn get_object_schema(datastore::DataStoreConfig::API_SCHEMA),
// but this needs support for match inside const fn
@@ -116,31 +136,30 @@ pub fn list_datastores(
    },
)]
/// Create new datastore config.
pub fn create_datastore(param: Value) -> Result<(), Error> {
pub fn create_datastore(
    param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
    let lock = datastore::lock_config()?;

    let datastore: datastore::DataStoreConfig = serde_json::from_value(param)?;

    let (mut config, _digest) = datastore::config()?;
    let (config, _digest) = datastore::config()?;

    if config.sections.get(&datastore.name).is_some() {
        bail!("datastore '{}' already exists.", datastore.name);
    }

    let path: PathBuf = datastore.path.clone().into();
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_user = crate::backup::backup_user()?;
    let _store = ChunkStore::create(&datastore.name, path, backup_user.uid, backup_user.gid)?;

    config.set_data(&datastore.name, "datastore", &datastore)?;

    datastore::save_config(&config)?;

    jobstate::create_state_file("prune", &datastore.name)?;
    jobstate::create_state_file("garbage_collection", &datastore.name)?;

    Ok(())
    WorkerTask::new_thread(
        "create-datastore",
        Some(datastore.name.to_string()),
        auth_id,
        false,
        move |worker| do_create_datastore(lock, config, datastore, Some(&worker)),
    )
}

#[api(
@@ -296,7 +315,7 @@ pub fn update_datastore(
    digest: Option<String>,
) -> Result<(), Error> {

    let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
    let _lock = datastore::lock_config()?;

    // pass/compare digest
    let (mut config, expected_digest) = datastore::config()?;
@@ -375,11 +394,11 @@ pub fn update_datastore(
    // we want to reset the statefiles, to avoid an immediate action in some cases
    // (e.g. going from monthly to weekly in the second week of the month)
    if gc_schedule_changed {
        jobstate::create_state_file("garbage_collection", &name)?;
        jobstate::update_job_last_run_time("garbage_collection", &name)?;
    }

    if prune_schedule_changed {
        jobstate::create_state_file("prune", &name)?;
        jobstate::update_job_last_run_time("prune", &name)?;
    }

    Ok(())
@@ -403,9 +422,9 @@ pub fn update_datastore(
    },
)]
/// Remove a datastore configuration.
pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> {
pub async fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> {

    let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
    let _lock = datastore::lock_config()?;

    let (mut config, expected_digest) = datastore::config()?;

@@ -425,6 +444,8 @@ pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> {
    let _ = jobstate::remove_state_file("prune", &name);
    let _ = jobstate::remove_state_file("garbage_collection", &name);

    crate::server::notify_datastore_removed().await?;

    Ok(())
}
@@ -333,6 +333,7 @@ pub fn update_sync_job(
    if let Some(remote_store) = remote_store { data.remote_store = remote_store; }
    if let Some(owner) = owner { data.owner = Some(owner); }

    let schedule_changed = data.schedule != schedule;
    if schedule.is_some() { data.schedule = schedule; }
    if remove_vanished.is_some() { data.remove_vanished = remove_vanished; }

@@ -344,6 +345,10 @@ pub fn update_sync_job(

    sync::save_config(&config)?;

    if schedule_changed {
        crate::server::jobstate::update_job_last_run_time("syncjob", &id)?;
    }

    Ok(())
}

@@ -266,6 +266,7 @@ pub fn update_tape_backup_job(
    if latest_only.is_some() { data.setup.latest_only = latest_only; }
    if notify_user.is_some() { data.setup.notify_user = notify_user; }

    let schedule_changed = data.schedule != schedule;
    if schedule.is_some() { data.schedule = schedule; }

    if let Some(comment) = comment {
@@ -281,6 +282,10 @@ pub fn update_tape_backup_job(

    config::tape_job::save_config(&config)?;

    if schedule_changed {
        crate::server::jobstate::update_job_last_run_time("tape-backup-job", &id)?;
    }

    Ok(())
}
@@ -274,12 +274,17 @@ pub fn update_verification_job(

    if ignore_verified.is_some() { data.ignore_verified = ignore_verified; }
    if outdated_after.is_some() { data.outdated_after = outdated_after; }
    let schedule_changed = data.schedule != schedule;
    if schedule.is_some() { data.schedule = schedule; }

    config.set_data(&id, "verification", &data)?;

    verify::save_config(&config)?;

    if schedule_changed {
        crate::server::jobstate::update_job_last_run_time("verificationjob", &id)?;
    }

    Ok(())
}
@@ -17,7 +17,7 @@ use proxmox::api::{
    api, schema::*, ApiHandler, ApiMethod, ApiResponseFuture, Permission, RpcEnvironment,
};
use proxmox::list_subdirs_api_method;
use proxmox::tools::websocket::WebSocket;
use proxmox_http::websocket::WebSocket;
use proxmox::{identity, sortable};

use crate::api2::types::*;
@@ -27,6 +27,8 @@ use crate::tools;
use crate::tools::ticket::{self, Empty, Ticket};

pub mod apt;
pub mod certificates;
pub mod config;
pub mod disks;
pub mod dns;
pub mod network;
@@ -314,6 +316,8 @@ fn upgrade_to_websocket(

pub const SUBDIRS: SubdirMap = &[
    ("apt", &apt::ROUTER),
    ("certificates", &certificates::ROUTER),
    ("config", &config::ROUTER),
    ("disks", &disks::ROUTER),
    ("dns", &dns::ROUTER),
    ("journal", &journal::ROUTER),
@@ -5,10 +5,17 @@ use std::collections::HashMap;
use proxmox::list_subdirs_api_method;
use proxmox::api::{api, RpcEnvironment, RpcEnvironmentType, Permission};
use proxmox::api::router::{Router, SubdirMap};
use proxmox::tools::fs::{replace_file, CreateOptions};

use proxmox_http::ProxyConfig;

use crate::config::node;
use crate::server::WorkerTask;
use crate::tools::{apt, http::SimpleHttp, subscription};

use crate::tools::{
    apt,
    pbs_simple_http,
    subscription,
};
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::api2::types::{Authid, APTUpdateInfo, NODE_SCHEMA, UPID_SCHEMA};

@@ -46,10 +53,38 @@ fn apt_update_available(_param: Value) -> Result<Value, Error> {
    Ok(json!(cache.package_status))
}

pub fn update_apt_proxy_config(proxy_config: Option<&ProxyConfig>) -> Result<(), Error> {

    const PROXY_CFG_FN: &str = "/etc/apt/apt.conf.d/76pveproxy"; // use same file as PVE

    if let Some(proxy_config) = proxy_config {
        let proxy = proxy_config.to_proxy_string()?;
        let data = format!("Acquire::http::Proxy \"{}\";\n", proxy);
        replace_file(PROXY_CFG_FN, data.as_bytes(), CreateOptions::new())
    } else {
        match std::fs::remove_file(PROXY_CFG_FN) {
            Ok(()) => Ok(()),
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(()),
            Err(err) => bail!("failed to remove proxy config '{}' - {}", PROXY_CFG_FN, err),
        }
    }
}

fn read_and_update_proxy_config() -> Result<Option<ProxyConfig>, Error> {
    let proxy_config = if let Ok((node_config, _digest)) = node::config() {
        node_config.http_proxy()
    } else {
        None
    };
    update_apt_proxy_config(proxy_config.as_ref())?;

    Ok(proxy_config)
}

fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
    if !quiet { worker.log("starting apt-get update") }

    // TODO: set proxy /etc/apt/apt.conf.d/76pbsproxy like PVE
    read_and_update_proxy_config()?;

    let mut command = std::process::Command::new("apt-get");
    command.arg("update");
@@ -152,6 +187,7 @@ pub fn apt_update_database(
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
@@ -194,7 +230,8 @@ fn apt_get_changelog(
        bail!("Package '{}' not found", name);
    }

    let mut client = SimpleHttp::new(None); // TODO: pass proxy_config
    let proxy_config = read_and_update_proxy_config()?;
    let mut client = pbs_simple_http(proxy_config);

    let changelog_url = &pkg_info[0].change_log_url;
    // FIXME: use 'apt-get changelog' for proxmox packages as well, once repo supports it
579  src/api2/node/certificates.rs  Normal file
@@ -0,0 +1,579 @@
use std::convert::TryFrom;
use std::sync::Arc;
use std::time::Duration;

use anyhow::{bail, format_err, Error};
use openssl::pkey::PKey;
use openssl::x509::X509;
use serde::{Deserialize, Serialize};

use proxmox::api::router::SubdirMap;
use proxmox::api::{api, Permission, Router, RpcEnvironment};
use proxmox::list_subdirs_api_method;

use crate::acme::AcmeClient;
use crate::api2::types::Authid;
use crate::api2::types::NODE_SCHEMA;
use crate::api2::types::AcmeDomain;
use crate::config::acl::PRIV_SYS_MODIFY;
use crate::config::node::NodeConfig;
use crate::server::WorkerTask;
use crate::tools::cert;

pub const ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SUBDIRS))
    .subdirs(SUBDIRS);

const SUBDIRS: SubdirMap = &[
    ("acme", &ACME_ROUTER),
    (
        "custom",
        &Router::new()
            .post(&API_METHOD_UPLOAD_CUSTOM_CERTIFICATE)
            .delete(&API_METHOD_DELETE_CUSTOM_CERTIFICATE),
    ),
    ("info", &Router::new().get(&API_METHOD_GET_INFO)),
];

const ACME_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(ACME_SUBDIRS))
    .subdirs(ACME_SUBDIRS);

const ACME_SUBDIRS: SubdirMap = &[(
    "certificate",
    &Router::new()
        .post(&API_METHOD_NEW_ACME_CERT)
        .put(&API_METHOD_RENEW_ACME_CERT),
)];

#[api(
    properties: {
        san: {
            type: Array,
            items: {
                description: "A SubjectAlternateName entry.",
                type: String,
            },
        },
    },
)]
/// Certificate information.
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct CertificateInfo {
    /// Certificate file name.
    #[serde(skip_serializing_if = "Option::is_none")]
    filename: Option<String>,

    /// Certificate subject name.
    subject: String,

    /// List of certificate's SubjectAlternativeName entries.
    san: Vec<String>,

    /// Certificate issuer name.
    issuer: String,

    /// Certificate's notBefore timestamp (UNIX epoch).
    #[serde(skip_serializing_if = "Option::is_none")]
    notbefore: Option<i64>,

    /// Certificate's notAfter timestamp (UNIX epoch).
    #[serde(skip_serializing_if = "Option::is_none")]
    notafter: Option<i64>,

    /// Certificate in PEM format.
    #[serde(skip_serializing_if = "Option::is_none")]
    pem: Option<String>,

    /// Certificate's public key algorithm.
    public_key_type: String,

    /// Certificate's public key size if available.
    #[serde(skip_serializing_if = "Option::is_none")]
    public_key_bits: Option<u32>,

    /// The SSL Fingerprint.
    fingerprint: Option<String>,
}

impl TryFrom<&cert::CertInfo> for CertificateInfo {
    type Error = Error;

    fn try_from(info: &cert::CertInfo) -> Result<Self, Self::Error> {
        let pubkey = info.public_key()?;

        Ok(Self {
            filename: None,
            subject: info.subject_name()?,
            san: info
                .subject_alt_names()
                .map(|san| {
                    san.into_iter()
                        // FIXME: Support `.ipaddress()`?
                        .filter_map(|name| name.dnsname().map(str::to_owned))
                        .collect()
                })
                .unwrap_or_default(),
            issuer: info.issuer_name()?,
            notbefore: info.not_before_unix().ok(),
            notafter: info.not_after_unix().ok(),
            pem: None,
            public_key_type: openssl::nid::Nid::from_raw(pubkey.id().as_raw())
                .long_name()
                .unwrap_or("<unsupported key type>")
                .to_owned(),
            public_key_bits: Some(pubkey.bits()),
            fingerprint: Some(info.fingerprint()?),
        })
    }
}

fn get_certificate_pem() -> Result<String, Error> {
    let cert_path = configdir!("/proxy.pem");
    let cert_pem = proxmox::tools::fs::file_get_contents(&cert_path)?;
    String::from_utf8(cert_pem)
        .map_err(|_| format_err!("certificate in {:?} is not a valid PEM file", cert_path))
}

// to deduplicate error messages
fn pem_to_cert_info(pem: &[u8]) -> Result<cert::CertInfo, Error> {
    cert::CertInfo::from_pem(pem)
        .map_err(|err| format_err!("error loading proxy certificate: {}", err))
}

#[api(
    input: {
        properties: {
            node: { schema: NODE_SCHEMA },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    returns: {
        type: Array,
        items: { type: CertificateInfo },
        description: "List of certificate infos.",
    },
)]
/// Get certificate info.
pub fn get_info() -> Result<Vec<CertificateInfo>, Error> {
    let cert_pem = get_certificate_pem()?;
    let cert = pem_to_cert_info(cert_pem.as_bytes())?;

    Ok(vec![CertificateInfo {
        filename: Some("proxy.pem".to_string()), // we only have the one
        pem: Some(cert_pem),
        ..CertificateInfo::try_from(&cert)?
    }])
}

#[api(
    input: {
        properties: {
            node: { schema: NODE_SCHEMA },
            certificates: { description: "PEM encoded certificate (chain)." },
            key: { description: "PEM encoded private key." },
            // FIXME: widget-toolkit should have an option to disable using these 2 parameters...
            restart: {
                description: "UI compatibility parameter, ignored",
                type: Boolean,
                optional: true,
                default: false,
            },
            force: {
                description: "Force replacement of existing files.",
                type: Boolean,
                optional: true,
                default: false,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    returns: {
        type: Array,
        items: { type: CertificateInfo },
        description: "List of certificate infos.",
    },
    protected: true,
)]
/// Upload a custom certificate.
pub async fn upload_custom_certificate(
    certificates: String,
    key: String,
) -> Result<Vec<CertificateInfo>, Error> {
    let certificates = X509::stack_from_pem(certificates.as_bytes())
        .map_err(|err| format_err!("failed to decode certificate chain: {}", err))?;
    let key = PKey::private_key_from_pem(key.as_bytes())
        .map_err(|err| format_err!("failed to parse private key: {}", err))?;

    let certificates = certificates
        .into_iter()
        .try_fold(Vec::<u8>::new(), |mut stack, cert| -> Result<_, Error> {
            if !stack.is_empty() {
                stack.push(b'\n');
            }
            stack.extend(cert.to_pem()?);
            Ok(stack)
        })
        .map_err(|err| format_err!("error formatting certificate chain as PEM: {}", err))?;

    let key = key.private_key_to_pem_pkcs8()?;

    crate::config::set_proxy_certificate(&certificates, &key)?;
    crate::server::reload_proxy_certificate().await?;

    get_info()
}

#[api(
    input: {
        properties: {
            node: { schema: NODE_SCHEMA },
            restart: {
                description: "UI compatibility parameter, ignored",
                type: Boolean,
                optional: true,
                default: false,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
)]
/// Delete the current certificate and regenerate a self signed one.
pub async fn delete_custom_certificate() -> Result<(), Error> {
    let cert_path = configdir!("/proxy.pem");
    // Fail hard here: if we cannot remove the certificate, nothing else will work anyway.
    std::fs::remove_file(&cert_path)
        .map_err(|err| format_err!("failed to unlink {:?} - {}", cert_path, err))?;

    let key_path = configdir!("/proxy.key");
    if let Err(err) = std::fs::remove_file(&key_path) {
        // Here we just log since the certificate is already gone and we'd rather try to generate
        // the self-signed certificate even if this fails:
        log::error!(
            "failed to remove certificate private key {:?} - {}",
            key_path,
            err
        );
    }

    crate::config::update_self_signed_cert(true)?;
    crate::server::reload_proxy_certificate().await?;

    Ok(())
}

struct OrderedCertificate {
    certificate: hyper::body::Bytes,
    private_key_pem: Vec<u8>,
}

async fn order_certificate(
    worker: Arc<WorkerTask>,
    node_config: &NodeConfig,
) -> Result<Option<OrderedCertificate>, Error> {
    use proxmox_acme_rs::authorization::Status;
    use proxmox_acme_rs::order::Identifier;

    let domains = node_config.acme_domains().try_fold(
        Vec::<AcmeDomain>::new(),
        |mut acc, domain| -> Result<_, Error> {
            let mut domain = domain?;
            domain.domain.make_ascii_lowercase();
            if let Some(alias) = &mut domain.alias {
                alias.make_ascii_lowercase();
            }
            acc.push(domain);
            Ok(acc)
        },
    )?;

    let get_domain_config = |domain: &str| {
        domains
            .iter()
            .find(|d| d.domain == domain)
            .ok_or_else(|| format_err!("no config for domain '{}'", domain))
    };

    if domains.is_empty() {
        worker.log("No domains configured to be ordered from an ACME server.");
        return Ok(None);
    }

    let (plugins, _) = crate::config::acme::plugin::config()?;

    let mut acme = node_config.acme_client().await?;

    worker.log("Placing ACME order");
    let order = acme
        .new_order(domains.iter().map(|d| d.domain.to_ascii_lowercase()))
        .await?;
    worker.log(format!("Order URL: {}", order.location));

    let identifiers: Vec<String> = order
        .data
        .identifiers
        .iter()
        .map(|identifier| match identifier {
            Identifier::Dns(domain) => domain.clone(),
        })
        .collect();

    for auth_url in &order.data.authorizations {
        worker.log(format!("Getting authorization details from '{}'", auth_url));
        let mut auth = acme.get_authorization(&auth_url).await?;

        let domain = match &mut auth.identifier {
            Identifier::Dns(domain) => domain.to_ascii_lowercase(),
        };

        if auth.status == Status::Valid {
            worker.log(format!("{} is already validated!", domain));
            continue;
        }

        worker.log(format!("The validation for {} is pending", domain));
        let domain_config: &AcmeDomain = get_domain_config(&domain)?;
        let plugin_id = domain_config.plugin.as_deref().unwrap_or("standalone");
        let mut plugin_cfg =
            crate::acme::get_acme_plugin(&plugins, plugin_id)?.ok_or_else(|| {
                format_err!("plugin '{}' for domain '{}' not found!", plugin_id, domain)
            })?;

        worker.log("Setting up validation plugin");
        let validation_url = plugin_cfg
            .setup(&mut acme, &auth, domain_config, Arc::clone(&worker))
            .await?;

        let result = request_validation(&worker, &mut acme, auth_url, validation_url).await;

        if let Err(err) = plugin_cfg
            .teardown(&mut acme, &auth, domain_config, Arc::clone(&worker))
            .await
        {
            worker.warn(format!(
                "Failed to teardown plugin '{}' for domain '{}' - {}",
                plugin_id, domain, err
            ));
        }

        let _: () = result?;
    }

    worker.log("All domains validated");
    worker.log("Creating CSR");

    let csr = proxmox_acme_rs::util::Csr::generate(&identifiers, &Default::default())?;
    let mut finalize_error_cnt = 0u8;
    let order_url = &order.location;
    let mut order;
    loop {
        use proxmox_acme_rs::order::Status;

        order = acme.get_order(order_url).await?;

        match order.status {
            Status::Pending => {
                worker.log("still pending, trying to finalize anyway");
                let finalize = order
                    .finalize
                    .as_deref()
                    .ok_or_else(|| format_err!("missing 'finalize' URL in order"))?;
                if let Err(err) = acme.finalize(finalize, &csr.data).await {
                    if finalize_error_cnt >= 5 {
                        return Err(err.into());
                    }

                    finalize_error_cnt += 1;
                }
                tokio::time::sleep(Duration::from_secs(5)).await;
            }
            Status::Ready => {
                worker.log("order is ready, finalizing");
                let finalize = order
                    .finalize
                    .as_deref()
                    .ok_or_else(|| format_err!("missing 'finalize' URL in order"))?;
                acme.finalize(finalize, &csr.data).await?;
                tokio::time::sleep(Duration::from_secs(5)).await;
            }
            Status::Processing => {
                worker.log("still processing, trying again in 30 seconds");
                tokio::time::sleep(Duration::from_secs(30)).await;
            }
            Status::Valid => {
                worker.log("valid");
                break;
            }
            other => bail!("order status: {:?}", other),
        }
    }

    worker.log("Downloading certificate");
    let certificate = acme
        .get_certificate(
            order
                .certificate
                .as_deref()
                .ok_or_else(|| format_err!("missing certificate url in finalized order"))?,
        )
        .await?;

    Ok(Some(OrderedCertificate {
        certificate,
        private_key_pem: csr.private_key_pem,
    }))
}

async fn request_validation(
    worker: &WorkerTask,
    acme: &mut AcmeClient,
    auth_url: &str,
    validation_url: &str,
) -> Result<(), Error> {
    worker.log("Triggering validation");
    acme.request_challenge_validation(&validation_url).await?;

    worker.log("Sleeping for 5 seconds");
    tokio::time::sleep(Duration::from_secs(5)).await;

    loop {
        use proxmox_acme_rs::authorization::Status;

        let auth = acme.get_authorization(&auth_url).await?;
        match auth.status {
            Status::Pending => {
                worker.log("Status is still 'pending', trying again in 10 seconds");
                tokio::time::sleep(Duration::from_secs(10)).await;
            }
            Status::Valid => return Ok(()),
            other => bail!(
                "validating challenge '{}' failed - status: {:?}",
                validation_url,
                other
            ),
        }
    }
}
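
The loop above polls indefinitely at a fixed 10-second interval, relying on the surrounding worker task for cancellation. A bounded variant of the same shape, should a retry cap ever be wanted (a sketch; `check` stands in for the authorization fetch):

use std::future::Future;
use std::time::Duration;

async fn poll_until_valid<F, Fut>(mut check: F, max_tries: u32) -> Result<(), anyhow::Error>
where
    F: FnMut() -> Fut,
    Fut: Future<Output = Result<bool, anyhow::Error>>,
{
    for _ in 0..max_tries {
        if check().await? {
            return Ok(()); // validation reported as valid
        }
        tokio::time::sleep(Duration::from_secs(10)).await;
    }
    anyhow::bail!("validation still pending after {} attempts", max_tries)
}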

#[api(
    input: {
        properties: {
            node: { schema: NODE_SCHEMA },
            force: {
                description: "Force replacement of existing files.",
                type: Boolean,
                optional: true,
                default: false,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
)]
/// Order a new ACME certificate.
pub fn new_acme_cert(force: bool, rpcenv: &mut dyn RpcEnvironment) -> Result<String, Error> {
    spawn_certificate_worker("acme-new-cert", force, rpcenv)
}

#[api(
    input: {
        properties: {
            node: { schema: NODE_SCHEMA },
            force: {
                description: "Force replacement of existing files.",
                type: Boolean,
                optional: true,
                default: false,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
)]
/// Renew the current ACME certificate if it expires within 30 days (or always if the `force`
/// parameter is set).
pub fn renew_acme_cert(force: bool, rpcenv: &mut dyn RpcEnvironment) -> Result<String, Error> {
    if !cert_expires_soon()? && !force {
        bail!("Certificate does not expire within the next 30 days and 'force' is not set.")
    }

    spawn_certificate_worker("acme-renew-cert", force, rpcenv)
}

/// Check whether the current certificate expires within the next 30 days.
pub fn cert_expires_soon() -> Result<bool, Error> {
    let cert = pem_to_cert_info(get_certificate_pem()?.as_bytes())?;
    cert.is_expired_after_epoch(proxmox::tools::time::epoch_i64() + 30 * 24 * 60 * 60)
        .map_err(|err| format_err!("Failed to check certificate expiration date: {}", err))
}
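
The expiry check boils down to comparing the certificate's notAfter against now plus 30 days, in epoch seconds:

fn expiry_threshold_example() {
    // 30 * 24 * 60 * 60 = 2_592_000 seconds
    let now = 1_700_000_000i64; // example epoch timestamp
    let threshold = now + 30 * 24 * 60 * 60;
    assert_eq!(threshold, 1_702_592_000);
    // cert_expires_soon() is then, roughly, notAfter falling before that threshold.
}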
|
||||
|
||||
fn spawn_certificate_worker(
|
||||
name: &'static str,
|
||||
force: bool,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<String, Error> {
|
||||
// We only have 1 certificate path in PBS which makes figuring out whether or not it is a
|
||||
// custom one too hard... We keep the parameter because the widget-toolkit may be using it...
|
||||
let _ = force;
|
||||
|
||||
let (node_config, _digest) = crate::config::node::config()?;
|
||||
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
|
||||
WorkerTask::spawn(name, None, auth_id, true, move |worker| async move {
|
||||
if let Some(cert) = order_certificate(worker, &node_config).await? {
|
||||
crate::config::set_proxy_certificate(&cert.certificate, &cert.private_key_pem)?;
|
||||
crate::server::reload_proxy_certificate().await?;
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
node: { schema: NODE_SCHEMA },
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
|
||||
},
|
||||
protected: true,
|
||||
)]
/// Revoke the current ACME certificate and regenerate a self-signed one.
pub fn revoke_acme_cert(rpcenv: &mut dyn RpcEnvironment) -> Result<String, Error> {
    let (node_config, _digest) = crate::config::node::config()?;

    let cert_pem = get_certificate_pem()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    WorkerTask::spawn(
        "acme-revoke-cert",
        None,
        auth_id,
        true,
        move |worker| async move {
            worker.log("Loading ACME account");
            let mut acme = node_config.acme_client().await?;
            worker.log("Revoking old certificate");
            acme.revoke_certificate(cert_pem.as_bytes(), None).await?;
            worker.log("Deleting certificate and regenerating a self-signed one");
            delete_custom_certificate().await?;
            Ok(())
        },
    )
}
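The expiry test in `cert_expires_soon` is simply "would the certificate already be expired at now + 30 days?". A rough standalone equivalent, as a sketch using a recent openssl crate directly instead of the crate's `pem_to_cert_info` helper:

use anyhow::Error;
use openssl::asn1::Asn1Time;
use openssl::x509::X509;

fn expires_within_30_days(pem: &[u8]) -> Result<bool, Error> {
    let cert = X509::from_pem(pem)?;
    // same threshold as above: now + 30 * 24 * 60 * 60 seconds
    let threshold = Asn1Time::days_from_now(30)?;
    // notAfter earlier than the threshold means renewal is due
    Ok(cert.not_after() < &*threshold)
}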
src/api2/node/config.rs (new file, 87 lines)
@@ -0,0 +1,87 @@
use anyhow::Error;

use proxmox::api::schema::Updatable;
use proxmox::api::{api, Permission, Router, RpcEnvironment};

use crate::api2::types::NODE_SCHEMA;
use crate::api2::node::apt::update_apt_proxy_config;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::config::node::{NodeConfig, NodeConfigUpdater};

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_NODE_CONFIG)
    .put(&API_METHOD_UPDATE_NODE_CONFIG);

#[api(
    input: {
        properties: {
            node: { schema: NODE_SCHEMA },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system"], PRIV_SYS_AUDIT, false),
    },
    returns: {
        type: NodeConfig,
    },
)]
/// Get the node configuration
pub fn get_node_config(mut rpcenv: &mut dyn RpcEnvironment) -> Result<NodeConfig, Error> {
    let (config, digest) = crate::config::node::config()?;
    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
    Ok(config)
}

#[api(
    input: {
        properties: {
            node: { schema: NODE_SCHEMA },
            digest: {
                description: "Digest to protect against concurrent updates",
                optional: true,
            },
            updater: {
                type: NodeConfigUpdater,
                flatten: true,
            },
            delete: {
                description: "Options to remove from the configuration",
                optional: true,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
)]
/// Update the node configuration
pub fn update_node_config(
    updater: NodeConfigUpdater,
    delete: Option<String>,
    digest: Option<String>,
) -> Result<(), Error> {
    let _lock = crate::config::node::lock()?;
    let (mut config, expected_digest) = crate::config::node::config()?;
    if let Some(digest) = digest {
        // FIXME: GUI doesn't handle our non-inlined digest part here properly...
        if !digest.is_empty() {
            let digest = proxmox::tools::hex_to_digest(&digest)?;
            crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
        }
    }

    let delete: Vec<&str> = delete
        .as_deref()
        .unwrap_or("")
        .split(&[' ', ',', ';', '\0'][..])
        .collect();

    config.update_from(updater, &delete)?;

    crate::config::node::save_config(&config)?;

    update_apt_proxy_config(config.http_proxy().as_ref())?;

    Ok(())
}
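One detail worth noting in `update_node_config`: the `delete` string is tokenized on any of space, comma, semicolon, or NUL. A quick sketch of that separator behavior (the option names here are made up for illustration; unlike the handler above, this also drops the empty tokens that consecutive separators produce):

fn main() {
    let delete = "http-proxy, acme;email-from";
    let keys: Vec<&str> = delete
        .split(&[' ', ',', ';', '\0'][..])
        .filter(|s| !s.is_empty()) // consecutive separators yield empty tokens
        .collect();
    assert_eq!(keys, ["http-proxy", "acme", "email-from"]);
}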
@@ -5,6 +5,7 @@ use ::serde::{Deserialize, Serialize};
 use proxmox::api::{api, Permission, RpcEnvironment, RpcEnvironmentType};
 use proxmox::api::section_config::SectionConfigData;
 use proxmox::api::router::Router;
+use proxmox::tools::fs::open_file_locked;

 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
 use crate::tools::disks::{
@@ -16,7 +17,7 @@ use crate::tools::systemd::{self, types::*};
 use crate::server::WorkerTask;

 use crate::api2::types::*;
-use crate::config::datastore::DataStoreConfig;
+use crate::config::datastore::{self, DataStoreConfig};

 #[api(
     properties: {
@@ -179,7 +180,17 @@ pub fn create_datastore_disk(
     systemd::start_unit(&mount_unit_name)?;

     if add_datastore {
-        crate::api2::config::datastore::create_datastore(json!({ "name": name, "path": mount_point }))?
+        let lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
+        let datastore: DataStoreConfig =
+            serde_json::from_value(json!({ "name": name, "path": mount_point }))?;
+
+        let (config, _digest) = datastore::config()?;
+
+        if config.sections.get(&datastore.name).is_some() {
+            bail!("datastore '{}' already exists.", datastore.name);
+        }
+
+        crate::api2::config::datastore::do_create_datastore(lock, config, datastore, Some(&worker))?;
     }

     Ok(())
@@ -20,6 +20,7 @@ use crate::tools::disks::{
     zpool_list, zpool_status, parse_zpool_status_config_tree, vdev_list_to_tree,
     DiskUsageType,
 };
+use crate::config::datastore::{self, DataStoreConfig};

 use crate::server::WorkerTask;

@@ -372,7 +373,17 @@ pub fn create_zpool(
     }

     if add_datastore {
-        crate::api2::config::datastore::create_datastore(json!({ "name": name, "path": mount_point }))?
+        let lock = datastore::lock_config()?;
+        let datastore: DataStoreConfig =
+            serde_json::from_value(json!({ "name": name, "path": mount_point }))?;
+
+        let (config, _digest) = datastore::config()?;
+
+        if config.sections.get(&datastore.name).is_some() {
+            bail!("datastore '{}' already exists.", datastore.name);
+        }
+
+        crate::api2::config::datastore::do_create_datastore(lock, config, datastore, Some(&worker))?;
     }

     Ok(())
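Both call sites follow the same shape: take the datastore config lock first, then check for an existing entry, then create. A minimal sketch of why the ordering matters, using plain std types rather than the PBS config API:

use std::collections::HashMap;
use std::sync::Mutex;

// Analogous to lock_config() + config() + do_create_datastore(): holding the
// lock across check and insert means two workers cannot both pass the
// "already exists" check and create the same entry.
fn create_entry(
    table: &Mutex<HashMap<String, String>>,
    name: &str,
    path: &str,
) -> Result<(), String> {
    let mut guard = table.lock().unwrap();
    if guard.contains_key(name) {
        return Err(format!("datastore '{}' already exists.", name));
    }
    guard.insert(name.to_string(), path.to_string());
    Ok(()) // lock released here, after check and insert happened atomically
}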
@@ -60,36 +60,41 @@ use crate::config::acl::PRIV_SYS_AUDIT;
 )]
 /// Read syslog entries.
 fn get_journal(
-    param: Value,
+    since: Option<i64>,
+    until: Option<i64>,
+    lastentries: Option<u64>,
+    startcursor: Option<String>,
+    endcursor: Option<String>,
+    _param: Value,
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

     let mut args = vec![];

-    if let Some(lastentries) = param["lastentries"].as_u64() {
+    if let Some(lastentries) = lastentries {
         args.push(String::from("-n"));
         args.push(format!("{}", lastentries));
     }

-    if let Some(since) = param["since"].as_str() {
+    if let Some(since) = since {
         args.push(String::from("-b"));
-        args.push(since.to_owned());
+        args.push(since.to_string());
     }

-    if let Some(until) = param["until"].as_str() {
+    if let Some(until) = until {
         args.push(String::from("-e"));
-        args.push(until.to_owned());
+        args.push(until.to_string());
     }

-    if let Some(startcursor) = param["startcursor"].as_str() {
+    if let Some(startcursor) = startcursor {
         args.push(String::from("-f"));
-        args.push(startcursor.to_owned());
+        args.push(startcursor);
     }

-    if let Some(endcursor) = param["endcursor"].as_str() {
+    if let Some(endcursor) = endcursor {
         args.push(String::from("-t"));
-        args.push(endcursor.to_owned());
+        args.push(endcursor);
     }

     let mut lines: Vec<String> = vec![];
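The gain here is that the API macro now hands the handler typed options instead of a raw `serde_json::Value`, so a numeric `since` can no longer arrive as a string unnoticed. A reduced sketch of the new argument building (a hypothetical free function mirroring the `-n`/`-b` cases above):

fn journal_args(lastentries: Option<u64>, since: Option<i64>) -> Vec<String> {
    let mut args = Vec::new();
    if let Some(n) = lastentries {
        args.push("-n".to_string());
        args.push(n.to_string());
    }
    if let Some(since) = since {
        args.push("-b".to_string());
        args.push(since.to_string());
    }
    args
}

fn main() {
    assert_eq!(journal_args(Some(100), None), vec!["-n", "100"]);
}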
@@ -2,7 +2,7 @@ use std::process::Command;
 use std::path::Path;

 use anyhow::{Error, format_err, bail};
-use serde_json::{json, Value};
+use serde_json::Value;

 use proxmox::sys::linux::procfs;

@@ -12,6 +12,16 @@ use crate::api2::types::*;
 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT};
 use crate::tools::cert::CertInfo;

+impl std::convert::From<procfs::ProcFsCPUInfo> for NodeCpuInformation {
+    fn from(info: procfs::ProcFsCPUInfo) -> Self {
+        Self {
+            model: info.model,
+            sockets: info.sockets,
+            cpus: info.cpus,
+        }
+    }
+}
+
 #[api(
     input: {
         properties: {
@@ -21,43 +31,7 @@ use crate::tools::cert::CertInfo;
         },
     },
     returns: {
-        type: Object,
-        description: "Returns node memory, CPU and (root) disk usage",
-        properties: {
-            memory: {
-                type: Object,
-                description: "node memory usage counters",
-                properties: {
-                    total: {
-                        description: "total memory",
-                        type: Integer,
-                    },
-                    used: {
-                        description: "total memory",
-                        type: Integer,
-                    },
-                    free: {
-                        description: "free memory",
-                        type: Integer,
-                    },
-                },
-            },
-            cpu: {
-                type: Number,
-                description: "Total CPU usage since last query.",
-                optional: true,
-            },
-            info: {
-                type: Object,
-                description: "contains node information",
-                properties: {
-                    fingerprint: {
-                        description: "The SSL Fingerprint",
-                        type: String,
-                    },
-                },
-            },
-        },
+        type: NodeStatus,
     },
     access: {
         permission: &Permission::Privilege(&["system", "status"], PRIV_SYS_AUDIT, false),
@@ -68,32 +42,52 @@ fn get_status(
     _param: Value,
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
+) -> Result<NodeStatus, Error> {
     let meminfo: procfs::ProcFsMemInfo = procfs::read_meminfo()?;
+    let memory = NodeMemoryCounters {
+        total: meminfo.memtotal,
+        used: meminfo.memused,
+        free: meminfo.memfree,
+    };
+
+    let swap = NodeSwapCounters {
+        total: meminfo.swaptotal,
+        used: meminfo.swapused,
+        free: meminfo.swapfree,
+    };
+
     let kstat: procfs::ProcFsStat = procfs::read_proc_stat()?;
-    let disk_usage = crate::tools::disks::disk_usage(Path::new("/"))?;
+    let cpu = kstat.cpu;
+    let wait = kstat.iowait_percent;

-    // get fingerprint
-    let cert = CertInfo::new()?;
-    let fp = cert.fingerprint()?;
+    let loadavg = procfs::Loadavg::read()?;
+    let loadavg = [loadavg.one(), loadavg.five(), loadavg.fifteen()];

-    Ok(json!({
-        "memory": {
-            "total": meminfo.memtotal,
-            "used": meminfo.memused,
-            "free": meminfo.memfree,
-        },
-        "cpu": kstat.cpu,
-        "root": {
-            "total": disk_usage.total,
-            "used": disk_usage.used,
-            "free": disk_usage.avail,
-        },
-        "info": {
-            "fingerprint": fp,
-        },
-    }))
+    let cpuinfo = procfs::read_cpuinfo()?;
+    let cpuinfo = cpuinfo.into();
+
+    let uname = nix::sys::utsname::uname();
+    let kversion = format!(
+        "{} {} {}",
+        uname.sysname(),
+        uname.release(),
+        uname.version()
+    );
+
+    Ok(NodeStatus {
+        memory,
+        swap,
+        root: crate::tools::disks::disk_usage(Path::new("/"))?,
+        uptime: procfs::read_proc_uptime()?.0 as u64,
+        loadavg,
+        kversion,
+        cpuinfo,
+        cpu,
+        wait,
+        info: NodeInformation {
+            fingerprint: CertInfo::new()?.fingerprint()?,
+        },
+    })
 }

 #[api(
@@ -256,7 +256,7 @@ fn extract_upid(param: &Value) -> Result<UPID, Error> {
         },
     },
     access: {
-        description: "Users can access there own tasks, or need Sys.Audit on /system/tasks.",
+        description: "Users can access their own tasks, or need Sys.Audit on /system/tasks.",
         permission: &Permission::Anybody,
     },
 )]
@@ -326,7 +326,7 @@ async fn read_task_log(
         },
     },
     access: {
-        description: "Users can stop there own tasks, or need Sys.Modify on /system/tasks.",
+        description: "Users can stop their own tasks, or need Sys.Modify on /system/tasks.",
         permission: &Permission::Anybody,
     },
 )]
@@ -420,7 +420,7 @@ fn stop_task(
         items: { type: TaskListItem },
     },
     access: {
-        description: "Users can only see there own tasks, unless the have Sys.Audit on /system/tasks.",
+        description: "Users can only see their own tasks, unless they have Sys.Audit on /system/tasks.",
         permission: &Permission::Anybody,
     },
 )]
@@ -21,7 +21,7 @@ use crate::api2::types::{
     Authid,
 };

-use crate::backup::{DataStore};
+use crate::backup::DataStore;
 use crate::config::datastore;
 use crate::tools::statistics::{linear_regression};
 use crate::config::cached_user_info::CachedUserInfo;
@@ -55,6 +55,7 @@ use crate::config::acl::{
         },
         history: {
             type: Array,
+            optional: true,
             description: "A list of usages of the past (last Month).",
             items: {
                 type: Number,
@@ -69,6 +70,11 @@ use crate::config::acl::{
             of RRD data of the last Month. Missing if there are not enough data points yet.\
             If the estimate lies in the past, the usage is decreasing.",
         },
+        "error": {
+            type: String,
+            optional: true,
+            description: "An error description, for example, when the datastore could not be looked up.",
+        },
     },
 },
@@ -97,7 +103,19 @@ pub fn datastore_status(
             continue;
         }

-        let datastore = DataStore::lookup_datastore(&store)?;
+        let datastore = match DataStore::lookup_datastore(&store) {
+            Ok(datastore) => datastore,
+            Err(err) => {
+                list.push(json!({
+                    "store": store,
+                    "total": -1,
+                    "used": -1,
+                    "avail": -1,
+                    "error": err.to_string()
+                }));
+                continue;
+            }
+        };
         let status = crate::tools::disks::disk_usage(&datastore.base_path())?;

         let mut entry = json!({
@@ -110,24 +128,17 @@ pub fn datastore_status(

         let rrd_dir = format!("datastore/{}", store);
         let now = proxmox::tools::time::epoch_f64();
-        let rrd_resolution = RRDTimeFrameResolution::Month;
-        let rrd_mode = RRDMode::Average;

-        let total_res = crate::rrd::extract_cached_data(
+        let get_rrd = |what: &str| crate::rrd::extract_cached_data(
             &rrd_dir,
-            "total",
+            what,
             now,
-            rrd_resolution,
-            rrd_mode,
+            RRDTimeFrameResolution::Month,
+            RRDMode::Average,
         );

-        let used_res = crate::rrd::extract_cached_data(
-            &rrd_dir,
-            "used",
-            now,
-            rrd_resolution,
-            rrd_mode,
-        );
+        let total_res = get_rrd("total");
+        let used_res = get_rrd("used");

         if let (Some((start, reso, total_list)), Some((_, _, used_list))) = (total_res, used_res) {
             let mut usage_list: Vec<f64> = Vec::new();
@@ -160,13 +171,10 @@ pub fn datastore_status(

             // we skip the calculation for datastores with not enough data
             if usage_list.len() >= 7 {
-                entry["estimated-full-date"] = Value::from(0);
-                if let Some((a,b)) = linear_regression(&time_list, &usage_list) {
-                    if b != 0.0 {
-                        let estimate = (1.0 - a) / b;
-                        entry["estimated-full-date"] = Value::from(estimate.floor() as u64);
-                    }
-                }
+                entry["estimated-full-date"] = match linear_regression(&time_list, &usage_list) {
+                    Some((a, b)) if b != 0.0 => Value::from(((1.0 - a) / b).floor() as u64),
+                    _ => Value::from(0),
+                };
             }
         }
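The "estimated-full-date" arithmetic deserves a worked example: with usage normalized so that 1.0 means full, a least-squares fit usage(t) ≈ a + b·t crosses capacity at t = (1 − a)/b. A self-contained sketch with made-up sample data and a hand-rolled fit instead of the crate's `linear_regression`:

// Fit usage(t) = a + b*t by least squares, then solve usage = 1.0 for t.
fn linear_fit(t: &[f64], y: &[f64]) -> Option<(f64, f64)> {
    if t.len() != y.len() || t.len() < 2 { return None; }
    let n = t.len() as f64;
    let (st, sy): (f64, f64) = (t.iter().sum(), y.iter().sum());
    let stt: f64 = t.iter().map(|x| x * x).sum();
    let sty: f64 = t.iter().zip(y).map(|(x, y)| x * y).sum();
    let denom = n * stt - st * st;
    if denom == 0.0 { return None; }
    let b = (n * sty - st * sy) / denom;
    let a = (sy - b * st) / n;
    Some((a, b))
}

fn main() {
    // usage fraction sampled once a day (epoch seconds on the x axis)
    let t = [0.0, 86_400.0, 172_800.0, 259_200.0];
    let y = [0.50, 0.52, 0.54, 0.56];
    if let Some((a, b)) = linear_fit(&t, &y) {
        if b != 0.0 {
            // same formula as the API: full when a + b*t == 1.0
            println!("estimated full at t = {}", ((1.0 - a) / b).floor());
        }
    }
}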
@@ -65,6 +65,7 @@ use crate::{
     drive::{
         media_changer,
         lock_tape_device,
+        TapeLockError,
         set_tape_device_state,
     },
     changer::update_changer_online_status,
@@ -197,17 +198,21 @@ pub fn do_tape_backup_job(
         job.start(&worker.upid().to_string())?;
         let mut drive_lock = drive_lock;

-        let (job_result, summary) = match try_block!({
+        let mut summary = Default::default();
+        let job_result = try_block!({
             if schedule.is_some() {
                 // for scheduled tape backup jobs, we wait indefinitely for the lock
                 task_log!(worker, "waiting for drive lock...");
                 loop {
-                    if let Ok(lock) = lock_tape_device(&drive_config, &setup.drive) {
+                    worker.check_abort()?;
+                    match lock_tape_device(&drive_config, &setup.drive) {
+                        Ok(lock) => {
                             drive_lock = Some(lock);
                             break;
-                    } // ignore errors
-
-                    worker.check_abort()?;
+                        }
+                        Err(TapeLockError::TimeOut) => continue,
+                        Err(TapeLockError::Other(err)) => return Err(err),
+                    }
                 }
             }
             set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
@@ -217,17 +222,17 @@ pub fn do_tape_backup_job(
                 task_log!(worker,"task triggered by schedule '{}'", event_str);
             }

-
             backup_worker(
                 &worker,
                 datastore,
                 &pool_config,
                 &setup,
                 email.clone(),
+                &mut summary,
+                false,
             )
-        }) {
-            Ok(summary) => (Ok(()), summary),
-            Err(err) => (Err(err), Default::default()),
-        };
+        });

         let status = worker.create_state(&job_result);

@@ -312,6 +317,12 @@ pub fn run_tape_backup_job(
             type: TapeBackupJobSetup,
             flatten: true,
         },
+        "force-media-set": {
+            description: "Ignore the allocation policy and start a new media-set.",
+            optional: true,
+            type: bool,
+            default: false,
+        },
     },
 },
 returns: {
@@ -327,6 +338,7 @@ pub fn run_tape_backup_job(
 /// Backup datastore to tape media pool
 pub fn backup(
     setup: TapeBackupJobSetup,
+    force_media_set: bool,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

@@ -365,16 +377,16 @@ pub fn backup(
         let _drive_lock = drive_lock; // keep lock guard
         set_tape_device_state(&setup.drive, &worker.upid().to_string())?;

-        let (job_result, summary) = match backup_worker(
+        let mut summary = Default::default();
+        let job_result = backup_worker(
             &worker,
             datastore,
             &pool_config,
             &setup,
             email.clone(),
-        ) {
-            Ok(summary) => (Ok(()), summary),
-            Err(err) => (Err(err), Default::default()),
-        };
+            &mut summary,
+            force_media_set,
+        );

         if let Some(email) = email {
             if let Err(err) = crate::server::send_tape_backup_status(
@@ -403,18 +415,25 @@ fn backup_worker(
     pool_config: &MediaPoolConfig,
     setup: &TapeBackupJobSetup,
     email: Option<String>,
-) -> Result<TapeBackupJobSummary, Error> {
+    summary: &mut TapeBackupJobSummary,
+    force_media_set: bool,
+) -> Result<(), Error> {

     let status_path = Path::new(TAPE_STATUS_DIR);
     let start = std::time::Instant::now();
-    let mut summary: TapeBackupJobSummary = Default::default();

     task_log!(worker, "update media online status");
     let changer_name = update_media_online_status(&setup.drive)?;

     let pool = MediaPool::with_config(status_path, &pool_config, changer_name, false)?;

-    let mut pool_writer = PoolWriter::new(pool, &setup.drive, worker, email)?;
+    let mut pool_writer = PoolWriter::new(
+        pool,
+        &setup.drive,
+        worker,
+        email,
+        force_media_set
+    )?;

     let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;

@@ -531,7 +550,7 @@ fn backup_worker(

     summary.duration = start.elapsed();

-    Ok(summary)
+    Ok(())
 }

 // Try to update the media online status
@@ -1336,7 +1336,7 @@ pub fn catalog_media(
             drive.read_label()?; // skip over labels - we already read them above

             let mut checked_chunks = HashMap::new();
-            restore_media(&worker, &mut drive, &media_id, None, &mut checked_chunks, verbose)?;
+            restore_media(worker, &mut drive, &media_id, None, &mut checked_chunks, verbose)?;

             Ok(())
         },
@@ -1453,7 +1453,7 @@ pub const SUBDIRS: SubdirMap = &sorted!([
     (
         "load-slot",
        &Router::new()
-            .put(&API_METHOD_LOAD_SLOT)
+            .post(&API_METHOD_LOAD_SLOT)
     ),
     (
         "cartridge-memory",
@@ -1,4 +1,5 @@
 use std::path::Path;
+use std::collections::HashSet;

 use anyhow::{bail, format_err, Error};
 use serde::{Serialize, Deserialize};
@@ -28,6 +29,7 @@ use crate::{
     CHANGER_NAME_SCHEMA,
     MediaPoolConfig,
     MediaListEntry,
+    MediaSetListEntry,
     MediaStatus,
     MediaContentEntry,
     VAULT_NAME_SCHEMA,
@@ -44,6 +46,74 @@ use crate::{
     },
 };

+#[api(
+    returns: {
+        description: "List of media sets.",
+        type: Array,
+        items: {
+            type: MediaSetListEntry,
+        },
+    },
+    access: {
+        description: "List of media sets filtered by Tape.Audit privileges on pool",
+        permission: &Permission::Anybody,
+    },
+)]
+/// List Media sets
+pub async fn list_media_sets(
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<MediaSetListEntry>, Error> {
+    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;
+
+    let (config, _digest) = config::media_pool::config()?;
+
+    let status_path = Path::new(TAPE_STATUS_DIR);
+
+    let mut media_sets: HashSet<Uuid> = HashSet::new();
+    let mut list = Vec::new();
+
+    for (_section_type, data) in config.sections.values() {
+        let pool_name = match data["name"].as_str() {
+            None => continue,
+            Some(name) => name,
+        };
+
+        let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", pool_name]);
+        if (privs & PRIV_TAPE_AUDIT) == 0 {
+            continue;
+        }
+
+        let config: MediaPoolConfig = config.lookup("pool", pool_name)?;
+
+        let changer_name = None; // assume standalone drive
+        let pool = MediaPool::with_config(status_path, &config, changer_name, true)?;
+
+        for media in pool.list_media() {
+            if let Some(label) = media.media_set_label() {
+                if media_sets.contains(&label.uuid) {
+                    continue;
+                }
+
+                let media_set_uuid = label.uuid.clone();
+                let media_set_ctime = label.ctime;
+                let media_set_name = pool
+                    .generate_media_set_name(&media_set_uuid, config.template.clone())
+                    .unwrap_or_else(|_| media_set_uuid.to_string());
+
+                media_sets.insert(media_set_uuid.clone());
+                list.push(MediaSetListEntry {
+                    media_set_name,
+                    media_set_uuid,
+                    media_set_ctime,
+                    pool: pool_name.to_string(),
+                });
+            }
+        }
+    }
+
+    Ok(list)
+}
+
 #[api(
     input: {
         properties: {
@@ -129,7 +199,7 @@ pub async fn list_media(
     // Call start_write_session, so that we show the same status a
     // backup job would see.
     pool.force_media_availability();
-    pool.start_write_session(current_time)?;
+    pool.start_write_session(current_time, false)?;

     for media in pool.list_media() {
         let expired = pool.media_is_expired(&media, current_time);
@@ -546,6 +616,11 @@ const SUBDIRS: SubdirMap = &[
             .get(&API_METHOD_DESTROY_MEDIA)
     ),
     ( "list", &MEDIA_LIST_ROUTER ),
+    (
+        "media-sets",
+        &Router::new()
+            .get(&API_METHOD_LIST_MEDIA_SETS)
+    ),
     (
         "move",
         &Router::new()
(File diff suppressed because it is too large)

src/api2/types/acme.rs (new file, 100 lines)
@@ -0,0 +1,100 @@
use serde::{Deserialize, Serialize};
use serde_json::Value;

use proxmox::api::{api, schema::{Schema, StringSchema, ApiStringFormat}};

use crate::api2::types::{
    DNS_ALIAS_FORMAT, DNS_NAME_FORMAT, PROXMOX_SAFE_ID_FORMAT,
};

#[api(
    properties: {
        "domain": { format: &DNS_NAME_FORMAT },
        "alias": {
            optional: true,
            format: &DNS_ALIAS_FORMAT,
        },
        "plugin": {
            optional: true,
            format: &PROXMOX_SAFE_ID_FORMAT,
        },
    },
    default_key: "domain",
)]
#[derive(Deserialize, Serialize)]
/// A domain entry for an ACME certificate.
pub struct AcmeDomain {
    /// The domain to certify for.
    pub domain: String,

    /// The domain to use for challenges instead of the default acme challenge domain.
    ///
    /// This is useful if you use CNAME entries to redirect `_acme-challenge.*` domains to a
    /// different DNS server.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub alias: Option<String>,

    /// The plugin to use to validate this domain.
    ///
    /// Empty means standalone HTTP validation is used.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub plugin: Option<String>,
}

pub const ACME_DOMAIN_PROPERTY_SCHEMA: Schema = StringSchema::new(
    "ACME domain configuration string")
    .format(&ApiStringFormat::PropertyString(&AcmeDomain::API_SCHEMA))
    .schema();

#[api(
    properties: {
        name: { type: String },
        url: { type: String },
    },
)]
/// An ACME directory endpoint with a name and URL.
#[derive(Serialize)]
pub struct KnownAcmeDirectory {
    /// The ACME directory's name.
    pub name: &'static str,

    /// The ACME directory's endpoint URL.
    pub url: &'static str,
}

proxmox::api_string_type! {
    #[api(format: &PROXMOX_SAFE_ID_FORMAT)]
    /// ACME account name.
    #[derive(Clone, Eq, PartialEq, Hash, Deserialize, Serialize)]
    #[serde(transparent)]
    pub struct AcmeAccountName(String);
}

#[api(
    properties: {
        schema: {
            type: Object,
            additional_properties: true,
            properties: {},
        },
        type: {
            type: String,
        },
    },
)]
#[derive(Serialize)]
/// Schema for an ACME challenge plugin.
pub struct AcmeChallengeSchema {
    /// Plugin ID.
    pub id: String,

    /// Human readable name, falls back to id.
    pub name: String,

    /// Plugin Type.
    #[serde(rename = "type")]
    pub ty: &'static str,

    /// The plugin's parameter schema.
    pub schema: Value,
}
@@ -11,7 +11,6 @@ use crate::{
     backup::{
         CryptMode,
         Fingerprint,
-        BACKUP_ID_REGEX,
         DirEntryAttribute,
         CatalogEntryType,
     },
@@ -37,6 +36,9 @@ pub use tape::*;
 mod file_restore;
 pub use file_restore::*;

+mod acme;
+pub use acme::*;
+
 // File names: may not contain slashes, may not start with "."
 pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
     if name.starts_with('.') {
@@ -48,9 +50,25 @@ pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
     Ok(())
 });

+macro_rules! BACKUP_ID_RE { () => (r"[A-Za-z0-9_][A-Za-z0-9._\-]*") }
+macro_rules! BACKUP_TYPE_RE { () => (r"(?:host|vm|ct)") }
+macro_rules! BACKUP_TIME_RE {
+    () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z")
+}
+macro_rules! SNAPSHOT_PATH_REGEX_STR {
+    () => (
+        concat!(r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")")
+    );
+}
+
 macro_rules! DNS_LABEL { () => (r"(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
 macro_rules! DNS_NAME { () => (concat!(r"(?:(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!(), ")")) }

+macro_rules! DNS_ALIAS_LABEL { () => (r"(?:[a-zA-Z0-9_](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
+macro_rules! DNS_ALIAS_NAME {
+    () => (concat!(r"(?:(?:", DNS_ALIAS_LABEL!() , r"\.)*", DNS_ALIAS_LABEL!(), ")"))
+}
+
 macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) }
 macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) }

@@ -87,6 +105,8 @@ const_regex!{

     pub DNS_NAME_REGEX = concat!(r"^", DNS_NAME!(), r"$");

+    pub DNS_ALIAS_REGEX = concat!(r"^", DNS_ALIAS_NAME!(), r"$");
+
     pub DNS_NAME_OR_IP_REGEX = concat!(r"^(?:", DNS_NAME!(), "|", IPRE!(), r")$");

     pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), "|", APITOKEN_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE_BRACKET!() ,"):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");
@@ -103,7 +123,21 @@ const_regex!{

     pub UUID_REGEX = r"^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}$";

+    pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");
+
+    pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$");
+
+    pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$");
+
+    pub GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$");
+
+    pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$");
+
+    pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";
+
     pub DATASTORE_MAP_REGEX = concat!(r"(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!());
+
+    pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":", SNAPSHOT_PATH_REGEX_STR!(), r"$");
 }

 pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
@@ -142,6 +176,9 @@ pub const HOSTNAME_FORMAT: ApiStringFormat =
 pub const DNS_NAME_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&DNS_NAME_REGEX);

+pub const DNS_ALIAS_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&DNS_ALIAS_REGEX);
+
 pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX);

@@ -172,6 +209,9 @@ pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat =
 pub const DATASTORE_MAP_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&DATASTORE_MAP_REGEX);

+pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&TAPE_RESTORE_SNAPSHOT_REGEX);
+
 pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.")
     .format(&PASSWORD_FORMAT)
     .min_length(1)
@@ -383,6 +423,12 @@ pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new(
     .format(&ApiStringFormat::PropertyString(&DATASTORE_MAP_ARRAY_SCHEMA))
     .schema();

+pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema = StringSchema::new(
+    "A snapshot in the format: 'store:type/id/time'")
+    .format(&TAPE_RESTORE_SNAPSHOT_FORMAT)
+    .type_text("store:type/id/time")
+    .schema();
+
 pub const MEDIA_SET_UUID_SCHEMA: Schema =
     StringSchema::new("MediaSet Uuid (We use the all-zero Uuid to reserve an empty media for a specific pool).")
     .format(&UUID_FORMAT)
@@ -468,6 +514,12 @@ pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name
     .max_length(64)
     .schema();

+pub const REALM_ID_SCHEMA: Schema = StringSchema::new("Realm name.")
+    .format(&PROXMOX_SAFE_ID_FORMAT)
+    .min_length(2)
+    .max_length(32)
+    .schema();
+
 // Complex type definitions

 #[api(
@@ -751,9 +803,8 @@ impl Default for GarbageCollectionStatus {
     }
 }

-
 #[api()]
-#[derive(Serialize, Deserialize)]
+#[derive(Default, Serialize, Deserialize)]
 /// Storage space usage information.
 pub struct StorageStatus {
     /// Total space (bytes).
@@ -1355,6 +1406,18 @@ pub struct ArchiveEntry {

 impl ArchiveEntry {
     pub fn new(filepath: &[u8], entry_type: Option<&DirEntryAttribute>) -> Self {
+        let size = match entry_type {
+            Some(DirEntryAttribute::File { size, .. }) => Some(*size),
+            _ => None,
+        };
+        Self::new_with_size(filepath, entry_type, size)
+    }
+
+    pub fn new_with_size(
+        filepath: &[u8],
+        entry_type: Option<&DirEntryAttribute>,
+        size: Option<u64>,
+    ) -> Self {
         Self {
             filepath: base64::encode(filepath),
             text: String::from_utf8_lossy(filepath.split(|x| *x == b'/').last().unwrap())
@@ -1364,13 +1427,10 @@ impl ArchiveEntry {
                 None => "v".to_owned(),
             },
             leaf: !matches!(entry_type, None | Some(DirEntryAttribute::Directory { .. })),
-            size: match entry_type {
-                Some(DirEntryAttribute::File { size, .. }) => Some(*size),
-                _ => None
-            },
+            size,
             mtime: match entry_type {
                 Some(DirEntryAttribute::File { mtime, .. }) => Some(*mtime),
-                _ => None
+                _ => None,
             },
         }
     }
@@ -1494,8 +1554,8 @@ impl std::convert::TryFrom<openssl::rsa::Rsa<openssl::pkey::Public>> for RsaPubK
         },
     }
 )]
-#[serde(rename_all="kebab-case")]
 #[derive(Serialize,Deserialize,Default)]
+#[serde(rename_all="kebab-case")]
 /// Job Scheduling Status
 pub struct JobScheduleStatus {
     #[serde(skip_serializing_if="Option::is_none")]
@@ -1507,3 +1567,109 @@ pub struct JobScheduleStatus {
     #[serde(skip_serializing_if="Option::is_none")]
     pub last_run_endtime: Option<i64>,
 }
+
+#[api]
+#[derive(Serialize, Deserialize, Default)]
+#[serde(rename_all = "kebab-case")]
+/// Node memory usage counters
+pub struct NodeMemoryCounters {
+    /// Total memory
+    pub total: u64,
+    /// Used memory
+    pub used: u64,
+    /// Free memory
+    pub free: u64,
+}
+
+#[api]
+#[derive(Serialize, Deserialize, Default)]
+#[serde(rename_all = "kebab-case")]
+/// Node swap usage counters
+pub struct NodeSwapCounters {
+    /// Total swap
+    pub total: u64,
+    /// Used swap
+    pub used: u64,
+    /// Free swap
+    pub free: u64,
+}
+
+#[api]
+#[derive(Serialize,Deserialize,Default)]
+#[serde(rename_all = "kebab-case")]
+/// Contains general node information such as the fingerprint
+pub struct NodeInformation {
+    /// The SSL Fingerprint
+    pub fingerprint: String,
+}
+
+#[api]
+#[derive(Serialize, Deserialize, Default)]
+#[serde(rename_all = "kebab-case")]
+/// Information about the CPU
+pub struct NodeCpuInformation {
+    /// The CPU model
+    pub model: String,
+    /// The number of CPU sockets
+    pub sockets: usize,
+    /// The number of CPU cores (incl. threads)
+    pub cpus: usize,
+}
+
+#[api(
+    properties: {
+        memory: {
+            type: NodeMemoryCounters,
+        },
+        root: {
+            type: StorageStatus,
+        },
+        swap: {
+            type: NodeSwapCounters,
+        },
+        loadavg: {
+            type: Array,
+            items: {
+                type: Number,
+                description: "the load",
+            }
+        },
+        cpuinfo: {
+            type: NodeCpuInformation,
+        },
+        info: {
+            type: NodeInformation,
+        }
+    },
+)]
+#[derive(Serialize, Deserialize, Default)]
+#[serde(rename_all = "kebab-case")]
+/// The Node status
+pub struct NodeStatus {
+    pub memory: NodeMemoryCounters,
+    pub root: StorageStatus,
+    pub swap: NodeSwapCounters,
+    /// The current uptime of the server.
+    pub uptime: u64,
+    /// Load for 1, 5 and 15 minutes.
+    pub loadavg: [f64; 3],
+    /// The current kernel version.
+    pub kversion: String,
+    /// Total CPU usage since last query.
+    pub cpu: f64,
+    /// Total IO wait since last query.
+    pub wait: f64,
+    pub cpuinfo: NodeCpuInformation,
+    pub info: NodeInformation,
+}
+
+pub const HTTP_PROXY_SCHEMA: Schema = StringSchema::new(
+    "HTTP proxy configuration [http://]<host>[:port]")
+    .format(&ApiStringFormat::VerifyFn(|s| {
+        proxmox_http::ProxyConfig::parse_proxy_url(s)?;
+        Ok(())
+    }))
+    .min_length(1)
+    .max_length(128)
+    .type_text("[http://]<host>[:port]")
+    .schema();
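For a concrete feel of what the relocated regexes accept: a snapshot path is "type/id/time", and the tape-restore form prefixes a datastore name and a colon. A small check, written as a sketch against the `regex` crate rather than proxmox's `const_regex!` machinery:

use regex::Regex;

fn main() {
    // same grammar the BACKUP_*_RE macros concatenate into SNAPSHOT_PATH_REGEX
    let snapshot = Regex::new(
        r"^(?:host|vm|ct)/[A-Za-z0-9_][A-Za-z0-9._\-]*/[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$"
    ).unwrap();

    assert!(snapshot.is_match("vm/100/2021-05-03T08:27:13Z"));
    assert!(!snapshot.is_match("vm/100/yesterday")); // time must be a full timestamp
}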
@@ -12,6 +12,26 @@ use crate::api2::types::{
     MediaLocation,
 };

+#[api(
+    properties: {
+        "media-set-uuid": {
+            schema: MEDIA_SET_UUID_SCHEMA,
+        },
+    },
+)]
+#[derive(Serialize,Deserialize)]
+#[serde(rename_all = "kebab-case")]
+/// Media Set list entry
+pub struct MediaSetListEntry {
+    /// Media set name
+    pub media_set_name: String,
+    pub media_set_uuid: Uuid,
+    /// MediaSet creation time stamp
+    pub media_set_ctime: i64,
+    /// Media Pool
+    pub pool: String,
+}
+
 #[api(
     properties: {
         location: {
@@ -13,7 +13,7 @@ pub const PROXMOX_PKG_VERSION: &str =
     env!("CARGO_PKG_VERSION_MINOR"),
 );
 pub const PROXMOX_PKG_RELEASE: &str = env!("CARGO_PKG_VERSION_PATCH");
-pub const PROXMOX_PKG_REPOID: &str = env!("CARGO_PKG_REPOSITORY");
+pub const PROXMOX_PKG_REPOID: &str = env!("REPOID");

 fn get_version(
     _param: Value,
@@ -238,6 +238,7 @@ pub use fixed_index::*;
 mod dynamic_index;
 pub use dynamic_index::*;

+#[macro_use]
 mod backup_info;
 pub use backup_info::*;

@@ -256,5 +257,5 @@ pub use verify::*;
 mod catalog_shell;
 pub use catalog_shell::*;

-mod async_index_reader;
-pub use async_index_reader::*;
+mod cached_chunk_reader;
+pub use cached_chunk_reader::*;
(deleted file, 215 lines: the old AsyncIndexReader, superseded by the cached chunk reader below)
@@ -1,215 +0,0 @@
use std::future::Future;
use std::task::{Poll, Context};
use std::pin::Pin;
use std::io::SeekFrom;

use anyhow::Error;
use futures::future::FutureExt;
use futures::ready;
use tokio::io::{AsyncRead, AsyncSeek, ReadBuf};

use proxmox::sys::error::io_err_other;
use proxmox::io_format_err;

use super::IndexFile;
use super::read_chunk::AsyncReadChunk;
use super::index::ChunkReadInfo;

type ReadFuture<S> = dyn Future<Output = Result<(S, Vec<u8>), Error>> + Send + 'static;

// FIXME: This enum may not be required?
// - Put the `WaitForData` case directly into a `read_future: Option<>`
// - make the read loop as follows:
//   * if read_buffer is not empty:
//        use it
//   * else if read_future is there:
//        poll it
//        if read: move data to read_buffer
//   * else
//        create read future
#[allow(clippy::enum_variant_names)]
enum AsyncIndexReaderState<S> {
    NoData,
    WaitForData(Pin<Box<ReadFuture<S>>>),
    HaveData,
}

pub struct AsyncIndexReader<S, I: IndexFile> {
    store: Option<S>,
    index: I,
    read_buffer: Vec<u8>,
    current_chunk_offset: u64,
    current_chunk_idx: usize,
    current_chunk_info: Option<ChunkReadInfo>,
    position: u64,
    seek_to_pos: i64,
    state: AsyncIndexReaderState<S>,
}

// ok because the only public interfaces operates on &mut Self
unsafe impl<S: Sync, I: IndexFile + Sync> Sync for AsyncIndexReader<S, I> {}

impl<S: AsyncReadChunk, I: IndexFile> AsyncIndexReader<S, I> {
    pub fn new(index: I, store: S) -> Self {
        Self {
            store: Some(store),
            index,
            read_buffer: Vec::with_capacity(1024 * 1024),
            current_chunk_offset: 0,
            current_chunk_idx: 0,
            current_chunk_info: None,
            position: 0,
            seek_to_pos: 0,
            state: AsyncIndexReaderState::NoData,
        }
    }
}

impl<S, I> AsyncRead for AsyncIndexReader<S, I>
where
    S: AsyncReadChunk + Unpin + Sync + 'static,
    I: IndexFile + Unpin,
{
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context,
        buf: &mut ReadBuf,
    ) -> Poll<tokio::io::Result<()>> {
        let this = Pin::get_mut(self);
        loop {
            match &mut this.state {
                AsyncIndexReaderState::NoData => {
                    let (idx, offset) = if this.current_chunk_info.is_some() &&
                        this.position == this.current_chunk_info.as_ref().unwrap().range.end
                    {
                        // optimization for sequential chunk read
                        let next_idx = this.current_chunk_idx + 1;
                        (next_idx, 0)
                    } else {
                        match this.index.chunk_from_offset(this.position) {
                            Some(res) => res,
                            None => return Poll::Ready(Ok(()))
                        }
                    };

                    if idx >= this.index.index_count() {
                        return Poll::Ready(Ok(()));
                    }

                    let info = this
                        .index
                        .chunk_info(idx)
                        .ok_or_else(|| io_format_err!("could not get digest"))?;

                    this.current_chunk_offset = offset;
                    this.current_chunk_idx = idx;
                    let old_info = this.current_chunk_info.replace(info.clone());

                    if let Some(old_info) = old_info {
                        if old_info.digest == info.digest {
                            // hit, chunk is currently in cache
                            this.state = AsyncIndexReaderState::HaveData;
                            continue;
                        }
                    }

                    // miss, need to download new chunk
                    let store = match this.store.take() {
                        Some(store) => store,
                        None => {
                            return Poll::Ready(Err(io_format_err!("could not find store")));
                        }
                    };

                    let future = async move {
                        store.read_chunk(&info.digest)
                            .await
                            .map(move |x| (store, x))
                    };

                    this.state = AsyncIndexReaderState::WaitForData(future.boxed());
                }
                AsyncIndexReaderState::WaitForData(ref mut future) => {
                    match ready!(future.as_mut().poll(cx)) {
                        Ok((store, chunk_data)) => {
                            this.read_buffer = chunk_data;
                            this.state = AsyncIndexReaderState::HaveData;
                            this.store = Some(store);
                        }
                        Err(err) => {
                            return Poll::Ready(Err(io_err_other(err)));
                        }
                    };
                }
                AsyncIndexReaderState::HaveData => {
                    let offset = this.current_chunk_offset as usize;
                    let len = this.read_buffer.len();
                    let n = if len - offset < buf.remaining() {
                        len - offset
                    } else {
                        buf.remaining()
                    };

                    buf.put_slice(&this.read_buffer[offset..(offset + n)]);
                    this.position += n as u64;

                    if offset + n == len {
                        this.state = AsyncIndexReaderState::NoData;
                    } else {
                        this.current_chunk_offset += n as u64;
                        this.state = AsyncIndexReaderState::HaveData;
                    }

                    return Poll::Ready(Ok(()));
                }
            }
        }
    }
}

impl<S, I> AsyncSeek for AsyncIndexReader<S, I>
where
    S: AsyncReadChunk + Unpin + Sync + 'static,
    I: IndexFile + Unpin,
{
    fn start_seek(
        self: Pin<&mut Self>,
        pos: SeekFrom,
    ) -> tokio::io::Result<()> {
        let this = Pin::get_mut(self);
        this.seek_to_pos = match pos {
            SeekFrom::Start(offset) => {
                offset as i64
            },
            SeekFrom::End(offset) => {
                this.index.index_bytes() as i64 + offset
            },
            SeekFrom::Current(offset) => {
                this.position as i64 + offset
            }
        };
        Ok(())
    }

    fn poll_complete(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
    ) -> Poll<tokio::io::Result<u64>> {
        let this = Pin::get_mut(self);

        let index_bytes = this.index.index_bytes();
        if this.seek_to_pos < 0 {
            return Poll::Ready(Err(io_format_err!("cannot seek to negative values")));
        } else if this.seek_to_pos > index_bytes as i64 {
            this.position = index_bytes;
        } else {
            this.position = this.seek_to_pos as u64;
        }

        // even if seeking within one chunk, we need to go to NoData to
        // recalculate the current_chunk_offset (data is cached anyway)
        this.state = AsyncIndexReaderState::NoData;

        Poll::Ready(Ok(this.position))
    }
}
@@ -5,41 +5,17 @@ use std::os::unix::io::RawFd;

 use std::path::{Path, PathBuf};

-use proxmox::const_regex;
+use crate::api2::types::{
+    BACKUP_ID_REGEX,
+    BACKUP_TYPE_REGEX,
+    BACKUP_DATE_REGEX,
+    GROUP_PATH_REGEX,
+    SNAPSHOT_PATH_REGEX,
+    BACKUP_FILE_REGEX,
+};

 use super::manifest::MANIFEST_BLOB_NAME;

-macro_rules! BACKUP_ID_RE {
-    () => {
-        r"[A-Za-z0-9_][A-Za-z0-9._\-]*"
-    };
-}
-macro_rules! BACKUP_TYPE_RE {
-    () => {
-        r"(?:host|vm|ct)"
-    };
-}
-macro_rules! BACKUP_TIME_RE {
-    () => {
-        r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z"
-    };
-}
-
-const_regex! {
-    BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";
-
-    BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");
-
-    pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$");
-
-    BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$");
-
-    GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$");
-
-    SNAPSHOT_PATH_REGEX = concat!(
-        r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")$");
-}
-
 /// BackupGroup is a directory containing a list of BackupDir
 #[derive(Debug, Eq, PartialEq, Hash, Clone)]
 pub struct BackupGroup {
src/backup/cached_chunk_reader.rs (new file, 189 lines)
@@ -0,0 +1,189 @@
//! An async and concurrency safe data reader backed by a local LRU cache.

use anyhow::Error;
use futures::future::Future;
use futures::ready;
use tokio::io::{AsyncRead, AsyncSeek, ReadBuf};

use std::io::SeekFrom;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};

use super::{AsyncReadChunk, IndexFile};
use crate::tools::async_lru_cache::{AsyncCacher, AsyncLruCache};
use proxmox::io_format_err;
use proxmox::sys::error::io_err_other;

struct AsyncChunkCacher<T> {
    reader: Arc<T>,
}

impl<T: AsyncReadChunk + Send + Sync + 'static> AsyncCacher<[u8; 32], Arc<Vec<u8>>>
    for AsyncChunkCacher<T>
{
    fn fetch(
        &self,
        key: [u8; 32],
    ) -> Box<dyn Future<Output = Result<Option<Arc<Vec<u8>>>, Error>> + Send> {
        let reader = Arc::clone(&self.reader);
        Box::new(async move {
            AsyncReadChunk::read_chunk(reader.as_ref(), &key)
                .await
                .map(|x| Some(Arc::new(x)))
        })
    }
}

/// Allows arbitrary data reads from an Index via an AsyncReadChunk implementation, using an LRU
/// cache internally to cache chunks and provide support for multiple concurrent reads (potentially
/// to the same chunk).
pub struct CachedChunkReader<I: IndexFile, R: AsyncReadChunk + Send + Sync + 'static> {
    cache: Arc<AsyncLruCache<[u8; 32], Arc<Vec<u8>>>>,
    cacher: AsyncChunkCacher<R>,
    index: I,
}

impl<I: IndexFile, R: AsyncReadChunk + Send + Sync + 'static> CachedChunkReader<I, R> {
    /// Create a new reader with a local LRU cache containing 'capacity' chunks.
    pub fn new(reader: R, index: I, capacity: usize) -> Self {
        let cache = Arc::new(AsyncLruCache::new(capacity));
        Self::new_with_cache(reader, index, cache)
    }

    /// Create a new reader with a custom LRU cache. Use this to share a cache between multiple
    /// readers.
    pub fn new_with_cache(
        reader: R,
        index: I,
        cache: Arc<AsyncLruCache<[u8; 32], Arc<Vec<u8>>>>,
    ) -> Self {
        Self {
            cache,
            cacher: AsyncChunkCacher {
                reader: Arc::new(reader),
            },
            index,
        }
    }

    /// Read data at a given byte offset into a variable size buffer. Returns the amount of bytes
    /// read, which will always be the size of the buffer except when reaching EOF.
    pub async fn read_at(&self, buf: &mut [u8], offset: u64) -> Result<usize, Error> {
        let size = buf.len();
        let mut read: usize = 0;
        while read < size {
            let cur_offset = offset + read as u64;
            if let Some(chunk) = self.index.chunk_from_offset(cur_offset) {
                // chunk indices retrieved from chunk_from_offset always resolve to Some(_)
                let info = self.index.chunk_info(chunk.0).unwrap();

                // will never be None, see AsyncChunkCacher
                let data = self.cache.access(info.digest, &self.cacher).await?.unwrap();

                let want_bytes = ((info.range.end - cur_offset) as usize).min(size - read);
                let slice = &mut buf[read..(read + want_bytes)];
                let intra_chunk = chunk.1 as usize;
                slice.copy_from_slice(&data[intra_chunk..(intra_chunk + want_bytes)]);
                read += want_bytes;
            } else {
                // EOF
                break;
            }
        }
        Ok(read)
    }
}

impl<I: IndexFile + Send + Sync + 'static, R: AsyncReadChunk + Send + Sync + 'static>
    CachedChunkReader<I, R>
{
    /// Returns a SeekableCachedChunkReader based on this instance, which implements AsyncSeek and
    /// AsyncRead for use in interfaces which require that. Direct use of read_at is preferred
    /// otherwise.
    pub fn seekable(self) -> SeekableCachedChunkReader<I, R> {
        SeekableCachedChunkReader {
            index_bytes: self.index.index_bytes(),
            reader: Arc::new(self),
            position: 0,
            read_future: None,
        }
    }
}

pub struct SeekableCachedChunkReader<
    I: IndexFile + Send + Sync + 'static,
    R: AsyncReadChunk + Send + Sync + 'static,
> {
    reader: Arc<CachedChunkReader<I, R>>,
    index_bytes: u64,
    position: u64,
    read_future: Option<Pin<Box<dyn Future<Output = Result<(Vec<u8>, usize), Error>> + Send>>>,
}

impl<I, R> AsyncSeek for SeekableCachedChunkReader<I, R>
where
    I: IndexFile + Send + Sync + 'static,
    R: AsyncReadChunk + Send + Sync + 'static,
{
    fn start_seek(self: Pin<&mut Self>, pos: SeekFrom) -> tokio::io::Result<()> {
        let this = Pin::get_mut(self);
        let seek_to_pos = match pos {
            SeekFrom::Start(offset) => offset as i64,
            SeekFrom::End(offset) => this.index_bytes as i64 + offset,
            SeekFrom::Current(offset) => this.position as i64 + offset,
        };
        if seek_to_pos < 0 {
            return Err(io_format_err!("cannot seek to negative values"));
        } else if seek_to_pos > this.index_bytes as i64 {
            this.position = this.index_bytes;
        } else {
            this.position = seek_to_pos as u64;
        }
        Ok(())
    }

    fn poll_complete(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<tokio::io::Result<u64>> {
        Poll::Ready(Ok(self.position))
    }
}

impl<I, R> AsyncRead for SeekableCachedChunkReader<I, R>
where
    I: IndexFile + Send + Sync + 'static,
    R: AsyncReadChunk + Send + Sync + 'static,
{
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context,
        buf: &mut ReadBuf,
    ) -> Poll<tokio::io::Result<()>> {
        let this = Pin::get_mut(self);

        let offset = this.position;
        let wanted = buf.capacity();
        let reader = Arc::clone(&this.reader);

        let fut = this.read_future.get_or_insert_with(|| {
            Box::pin(async move {
                let mut read_buf = vec![0u8; wanted];
                let read = reader.read_at(&mut read_buf[..wanted], offset).await?;
                Ok((read_buf, read))
            })
        });

        let ret = match ready!(fut.as_mut().poll(cx)) {
            Ok((read_buf, read)) => {
                buf.put_slice(&read_buf[..read]);
                this.position += read as u64;
                Ok(())
            }
            Err(err) => Err(io_err_other(err)),
        };

        // future completed, drop
        this.read_future = None;

        Poll::Ready(ret)
    }
}
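A hypothetical caller of the new reader could look like the following sketch; `stream_example` and its generic bounds are illustrative, and only `new`, `read_at`, and `seekable` come from the file above:

async fn stream_example(
    index: impl IndexFile + Send + Sync + 'static,
    reader: impl AsyncReadChunk + Send + Sync + 'static,
) -> Result<(), anyhow::Error> {
    use tokio::io::AsyncReadExt;

    // cache up to 32 chunks; read_at is the preferred direct interface
    let cached = CachedChunkReader::new(reader, index, 32);
    let mut buf = vec![0u8; 4096];
    let n = cached.read_at(&mut buf, 0).await?;
    println!("read {} bytes at offset 0", n);

    // or adapt to AsyncRead + AsyncSeek for interfaces that need it
    let mut seekable = cached.seekable();
    let mut first = [0u8; 16];
    seekable.read_exact(&mut first).await?;
    Ok(())
}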
@@ -19,9 +19,10 @@ use proxmox::tools::fs::{create_path, CreateOptions};
 use pxar::{EntryKind, Metadata};

 use crate::backup::catalog::{self, DirEntryAttribute};
-use crate::pxar::Flags;
 use crate::pxar::fuse::{Accessor, FileEntry};
+use crate::pxar::Flags;
 use crate::tools::runtime::block_in_place;
+use crate::tools::ControlFlow;

 type CatalogReader = crate::backup::CatalogReader<std::fs::File>;

@@ -998,11 +999,6 @@ impl Shell {
     }
 }

-enum LoopState {
-    Break,
-    Continue,
-}
-
 struct ExtractorState<'a> {
     path: Vec<u8>,
     path_len: usize,
@@ -1060,8 +1056,8 @@ impl<'a> ExtractorState<'a> {
         let entry = match self.read_dir.next() {
             Some(entry) => entry,
             None => match self.handle_end_of_directory()? {
-                LoopState::Break => break, // done with root directory
-                LoopState::Continue => continue,
+                ControlFlow::Break(()) => break, // done with root directory
+                ControlFlow::Continue(()) => continue,
             },
         };

@@ -1079,11 +1075,11 @@ impl<'a> ExtractorState<'a> {
         Ok(())
     }

-    fn handle_end_of_directory(&mut self) -> Result<LoopState, Error> {
+    fn handle_end_of_directory(&mut self) -> Result<ControlFlow<()>, Error> {
         // go up a directory:
         self.read_dir = match self.read_dir_stack.pop() {
             Some(r) => r,
-            None => return Ok(LoopState::Break), // out of root directory
+            None => return Ok(ControlFlow::Break(())), // out of root directory
         };

         self.matches = self
@@ -1102,7 +1098,7 @@ impl<'a> ExtractorState<'a> {

         self.extractor.leave_directory()?;

-        Ok(LoopState::Continue)
+        Ok(ControlFlow::CONTINUE)
     }

     async fn handle_new_directory(
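`ControlFlow` has since been stabilized in the standard library, so the same pattern can be written against `std::ops::ControlFlow` (an assumption about the caller's toolchain, not the crate-local `tools::ControlFlow` used above):

use std::ops::ControlFlow;

fn step(remaining: &mut Vec<u32>) -> ControlFlow<()> {
    match remaining.pop() {
        Some(_) => ControlFlow::Continue(()),
        None => ControlFlow::Break(()), // out of items, leave the loop
    }
}

fn main() {
    let mut items = vec![1, 2, 3];
    loop {
        match step(&mut items) {
            ControlFlow::Break(()) => break,
            ControlFlow::Continue(()) => continue,
        }
    }
}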
@@ -7,6 +7,7 @@ use std::os::unix::io::AsRawFd;

 use proxmox::tools::fs::{CreateOptions, create_path, create_dir};

+use crate::task_log;
 use crate::tools;
 use crate::api2::types::GarbageCollectionStatus;

@@ -61,7 +62,7 @@ impl ChunkStore {
         chunk_dir
     }

-    pub fn create<P>(name: &str, path: P, uid: nix::unistd::Uid, gid: nix::unistd::Gid) -> Result<Self, Error>
+    pub fn create<P>(name: &str, path: P, uid: nix::unistd::Uid, gid: nix::unistd::Gid, worker: Option<&dyn TaskState>) -> Result<Self, Error>
     where
         P: Into<PathBuf>,
     {
@@ -104,7 +105,9 @@ impl ChunkStore {
             }
             let percentage = (i*100)/(64*1024);
             if percentage != last_percentage {
-                // eprintln!("ChunkStore::create {}%", percentage);
+                if let Some(worker) = worker {
+                    task_log!(worker, "Chunkstore create: {}%", percentage)
+                }
                 last_percentage = percentage;
             }
         }
@@ -461,7 +464,7 @@ fn test_chunk_store1() {
     assert!(chunk_store.is_err());

     let user = nix::unistd::User::from_uid(nix::unistd::Uid::current()).unwrap().unwrap();
-    let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid).unwrap();
+    let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid, None).unwrap();

     let (chunk, digest) = super::DataChunkBuilder::new(&[0u8, 1u8]).build().unwrap();

@@ -472,7 +475,7 @@ fn test_chunk_store1() {
     assert!(exists);


-    let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid);
+    let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid, None);
     assert!(chunk_store.is_err());

     if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }
@@ -69,6 +69,18 @@ impl DataStore {
         Ok(datastore)
     }

+    /// removes all datastores that are not configured anymore
+    pub fn remove_unused_datastores() -> Result<(), Error>{
+        let (config, _digest) = datastore::config()?;
+
+        let mut map = DATASTORE_MAP.lock().unwrap();
+        // removes all elements that are not in the config
+        map.retain(|key, _| {
+            config.sections.contains_key(key)
+        });
+        Ok(())
+    }
+
     fn open_with_path(store_name: &str, path: &Path, config: DataStoreConfig) -> Result<Self, Error> {
         let chunk_store = ChunkStore::open(store_name, path)?;
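The cleanup above leans entirely on `HashMap::retain`. A minimal sketch of the same idea with plain std types (a hypothetical `prune_cache` helper, not PBS code):

use std::collections::{HashMap, HashSet};

// Drop cached entries whose key no longer appears in the configuration.
fn prune_cache(cache: &mut HashMap<String, u32>, configured: &HashSet<String>) {
    cache.retain(|key, _| configured.contains(key));
}

fn main() {
    let mut cache = HashMap::from([("a".to_string(), 1), ("b".to_string(), 2)]);
    let configured = HashSet::from(["a".to_string()]);
    prune_cache(&mut cache, &configured);
    assert!(cache.contains_key("a") && !cache.contains_key("b"));
}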
@@ -233,6 +233,14 @@ impl IndexFile for DynamicIndexReader {
         })
     }

+    fn index_ctime(&self) -> i64 {
+        self.ctime
+    }
+
+    fn index_size(&self) -> usize {
+        self.size as usize
+    }
+
     fn chunk_from_offset(&self, offset: u64) -> Option<(usize, u64)> {
         let end_idx = self.index.len() - 1;
         let end = self.chunk_end(end_idx);
@ -193,6 +193,14 @@ impl IndexFile for FixedIndexReader {
|
||||
})
|
||||
}
|
||||
|
||||
fn index_ctime(&self) -> i64 {
|
||||
self.ctime
|
||||
}
|
||||
|
||||
fn index_size(&self) -> usize {
|
||||
self.size as usize
|
||||
}
|
||||
|
||||
fn compute_csum(&self) -> ([u8; 32], u64) {
|
||||
let mut csum = openssl::sha::Sha256::new();
|
||||
let mut chunk_end = 0;
|
||||
|
@ -22,6 +22,8 @@ pub trait IndexFile {
|
||||
fn index_digest(&self, pos: usize) -> Option<&[u8; 32]>;
|
||||
fn index_bytes(&self) -> u64;
|
||||
fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo>;
|
||||
fn index_ctime(&self) -> i64;
|
||||
fn index_size(&self) -> usize;
|
||||
|
||||
/// Get the chunk index and the relative offset within it for a byte offset
|
||||
fn chunk_from_offset(&self, offset: u64) -> Option<(usize, u64)>;
|
||||
|
@@ -33,10 +33,16 @@ impl StoreProgress {

impl std::fmt::Display for StoreProgress {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+let current_group = if self.done_groups < self.total_groups {
+self.done_groups + 1
+} else {
+self.done_groups
+};
+
if self.group_snapshots == 0 {
write!(
f,
-"{:.2}% ({} of {} groups)",
+"{:.2}% ({}/{} groups)",
self.percentage() * 100.0,
self.done_groups,
self.total_groups,
@@ -44,20 +50,29 @@ impl std::fmt::Display for StoreProgress {
} else if self.total_groups == 1 {
write!(
f,
-"{:.2}% ({} of {} snapshots)",
+"{:.2}% ({}/{} snapshots)",
self.percentage() * 100.0,
self.done_snapshots,
self.group_snapshots,
)
+} else if self.done_snapshots == self.group_snapshots {
+write!(
+f,
+"{:.2}% ({}/{} groups)",
+self.percentage() * 100.0,
+current_group,
+self.total_groups,
+)
} else {
write!(
f,
-"{:.2}% ({} of {} groups, {} of {} group snapshots)",
+"{:.2}% ({}/{} groups, {}/{} snapshots in group #{})",
self.percentage() * 100.0,
self.done_groups,
self.total_groups,
self.done_snapshots,
self.group_snapshots,
+current_group,
)
}
}
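Note: while any group is still unfinished, the Display impl above reports the group currently being worked on as done_groups + 1, so "2/5 groups" means group #2 is in progress rather than two groups done. A stripped-down sketch of that formatting decision:

    use std::fmt;

    struct Progress { done_groups: u64, total_groups: u64 }

    impl fmt::Display for Progress {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            // while work remains, show the group we are *in*, not the count done
            let current = if self.done_groups < self.total_groups {
                self.done_groups + 1
            } else {
                self.done_groups
            };
            write!(f, "({}/{} groups)", current, self.total_groups)
        }
    }

    fn main() {
        let p = Progress { done_groups: 1, total_groups: 5 };
        assert_eq!(p.to_string(), "(2/5 groups)"); // group #2 in progress
    }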
@@ -39,6 +39,8 @@ async fn run() -> Result<(), Error> {

config::update_self_signed_cert(false)?;

+proxmox_backup::tools::create_run_dir()?;
+
proxmox_backup::rrd::create_rrdb_dir()?;
proxmox_backup::server::jobstate::create_jobstate_dir()?;
proxmox_backup::tape::create_tape_status_dir()?;
@@ -1266,13 +1266,12 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {
let group = tools::required_string_param(&param, "group")?;
let group: BackupGroup = group.parse()?;

-let output_format = get_output_format(&param);
+let output_format = extract_output_format(&mut param);

let quiet = param["quiet"].as_bool().unwrap_or(false);

param.as_object_mut().unwrap().remove("repository");
param.as_object_mut().unwrap().remove("group");
-param.as_object_mut().unwrap().remove("output-format");
param.as_object_mut().unwrap().remove("quiet");

param["backup-type"] = group.backup_type().into();
@@ -352,9 +352,11 @@ fn main() {
.insert("disk", disk_commands())
.insert("dns", dns_commands())
.insert("network", network_commands())
+.insert("node", node_commands())
.insert("user", user_commands())
.insert("remote", remote_commands())
.insert("garbage-collection", garbage_collection_commands())
+.insert("acme", acme_mgmt_cli())
.insert("cert", cert_mgmt_cli())
.insert("subscription", subscription_commands())
.insert("sync-job", sync_job_commands())
@@ -1,4 +1,4 @@
-use std::sync::Arc;
+use std::sync::{Mutex, Arc};
use std::path::{Path, PathBuf};
use std::os::unix::io::AsRawFd;

@@ -7,9 +7,11 @@ use futures::*;

use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
use tokio_stream::wrappers::ReceiverStream;
+use serde_json::Value;

use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;
+use proxmox::sys::linux::socket::set_tcp_keepalive;

use proxmox_backup::{
backup::DataStore,
@@ -37,6 +39,7 @@ use proxmox_backup::buildcfg;
use proxmox_backup::server;
use proxmox_backup::auth_helpers::*;
use proxmox_backup::tools::{
+PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
daemon,
disks::{
DiskManage,
@@ -44,10 +47,6 @@ use proxmox_backup::tools::{
get_pool_from_dataset,
},
logrotate::LogRotate,
-socket::{
-set_tcp_keepalive,
-PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
-},
};

use proxmox_backup::api2::pull::do_sync_job;
@@ -113,21 +112,44 @@ async fn run() -> Result<(), Error> {
let rest_server = RestServer::new(config);

-//openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
-let key_path = configdir!("/proxy.key");
-let cert_path = configdir!("/proxy.pem");
-
-let mut acceptor = SslAcceptor::mozilla_intermediate_v5(SslMethod::tls()).unwrap();
-acceptor.set_private_key_file(key_path, SslFiletype::PEM)
-.map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
-acceptor.set_certificate_chain_file(cert_path)
-.map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
-acceptor.check_private_key().unwrap();
-
-let acceptor = Arc::new(acceptor.build());
+// we build the initial acceptor here as we cannot start if this fails
+let acceptor = make_tls_acceptor()?;
+let acceptor = Arc::new(Mutex::new(acceptor));
+
+// to renew the acceptor we just add a command-socket handler
+commando_sock.register_command(
+"reload-certificate".to_string(),
+{
+let acceptor = Arc::clone(&acceptor);
+move |_value| -> Result<_, Error> {
+log::info!("reloading certificate");
+match make_tls_acceptor() {
+Err(err) => log::error!("error reloading certificate: {}", err),
+Ok(new_acceptor) => {
+let mut guard = acceptor.lock().unwrap();
+*guard = new_acceptor;
+}
+}
+Ok(Value::Null)
+}
+},
+)?;
+
+// to remove references for not configured datastores
+commando_sock.register_command(
+"datastore-removed".to_string(),
+|_value| {
+if let Err(err) = proxmox_backup::backup::DataStore::remove_unused_datastores() {
+log::error!("could not refresh datastores: {}", err);
+}
+Ok(Value::Null)
+}
+)?;

let server = daemon::create_daemon(
([0,0,0,0,0,0,0,0], 8007).into(),
-|listener, ready| {
+move |listener, ready| {

let connections = accept_connections(listener, acceptor, debug);
let connections = hyper::server::accept::from_stream(ReceiverStream::new(connections));
@@ -170,36 +192,70 @@ async fn run() -> Result<(), Error> {
Ok(())
}

+fn make_tls_acceptor() -> Result<SslAcceptor, Error> {
+let key_path = configdir!("/proxy.key");
+let cert_path = configdir!("/proxy.pem");
+
+let mut acceptor = SslAcceptor::mozilla_intermediate_v5(SslMethod::tls()).unwrap();
+acceptor.set_private_key_file(key_path, SslFiletype::PEM)
+.map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
+acceptor.set_certificate_chain_file(cert_path)
+.map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
+acceptor.check_private_key().unwrap();
+
+Ok(acceptor.build())
+}
+
+type ClientStreamResult =
+Result<std::pin::Pin<Box<tokio_openssl::SslStream<tokio::net::TcpStream>>>, Error>;
+const MAX_PENDING_ACCEPTS: usize = 1024;

fn accept_connections(
listener: tokio::net::TcpListener,
-acceptor: Arc<openssl::ssl::SslAcceptor>,
+acceptor: Arc<Mutex<openssl::ssl::SslAcceptor>>,
debug: bool,
-) -> tokio::sync::mpsc::Receiver<Result<std::pin::Pin<Box<tokio_openssl::SslStream<tokio::net::TcpStream>>>, Error>> {
-
-const MAX_PENDING_ACCEPTS: usize = 1024;
+) -> tokio::sync::mpsc::Receiver<ClientStreamResult> {

let (sender, receiver) = tokio::sync::mpsc::channel(MAX_PENDING_ACCEPTS);

+tokio::spawn(accept_connection(listener, acceptor, debug, sender));
+
+receiver
+}
+
+async fn accept_connection(
+listener: tokio::net::TcpListener,
+acceptor: Arc<Mutex<openssl::ssl::SslAcceptor>>,
+debug: bool,
+sender: tokio::sync::mpsc::Sender<ClientStreamResult>,
+) {
let accept_counter = Arc::new(());

-tokio::spawn(async move {
loop {
-match listener.accept().await {
+let (sock, _addr) = match listener.accept().await {
+Ok(conn) => conn,
Err(err) => {
eprintln!("error accepting tcp connection: {}", err);
continue;
}
-Ok((sock, _addr)) => {
+};

sock.set_nodelay(true).unwrap();
let _ = set_tcp_keepalive(sock.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
let acceptor = Arc::clone(&acceptor);

-let ssl = match openssl::ssl::Ssl::new(acceptor.context()) {
+let ssl = { // limit acceptor_guard scope
+// Acceptor can be reloaded using the command socket "reload-certificate" command
+let acceptor_guard = acceptor.lock().unwrap();
+
+match openssl::ssl::Ssl::new(acceptor_guard.context()) {
Ok(ssl) => ssl,
Err(err) => {
eprintln!("failed to create Ssl object from Acceptor context - {}", err);
continue;
},
}
+};

let stream = match tokio_openssl::SslStream::new(ssl, sock) {
Ok(stream) => stream,
Err(err) => {
@@ -216,7 +272,7 @@ fn accept_connections(
continue;
}

-let accept_counter = accept_counter.clone();
+let accept_counter = Arc::clone(&accept_counter);
tokio::spawn(async move {
let accept_future = tokio::time::timeout(
Duration::new(10, 0), stream.as_mut().accept());
@@ -245,11 +301,6 @@ fn accept_connections(
});
}
}
-}
-});
-
-receiver
-}
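Note: the certificate reload in the two hunks above reduces to sharing the acceptor behind Arc<Mutex<...>>: the accept loop locks it only long enough to build each Ssl object, and the command handler swaps the value in place. A condensed sketch of that hot-swap pattern, with a String standing in for the SslAcceptor:

    use std::sync::{Arc, Mutex};

    fn main() {
        let acceptor = Arc::new(Mutex::new(String::from("cert-v1")));

        // handler side: replace the shared value without restarting anything
        let for_reload = Arc::clone(&acceptor);
        let reload = move |new_cert: String| {
            *for_reload.lock().unwrap() = new_cert;
        };
        reload(String::from("cert-v2"));

        // accept-loop side: every later lock observes the new value
        assert_eq!(&*acceptor.lock().unwrap(), "cert-v2");
    }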
fn start_stat_generator() {
let abort_future = server::shutdown_future();

@@ -593,7 +644,7 @@ async fn schedule_tape_backup_jobs() {
Err(_) => continue, // could not get lock
};
if let Err(err) = do_tape_backup_job(job, job_config.setup, &auth_id, Some(event_str)) {
-eprintln!("unable to start tape bvackup job {} - {}", &job_id, err);
+eprintln!("unable to start tape backup job {} - {}", &job_id, err);
}
};
}
@@ -686,15 +737,11 @@ async fn command_reopen_logfiles() -> Result<(), Error> {
// only care about the most recent daemon instance for each, proxy & api, as other older ones
// should not respond to new requests anyway, but only finish their current one and then exit.
let sock = server::our_ctrl_sock();
-let f1 = server::send_command(sock, serde_json::json!({
-"command": "api-access-log-reopen",
-}));
+let f1 = server::send_command(sock, "{\"command\":\"api-access-log-reopen\"}\n");

let pid = server::read_pid(buildcfg::PROXMOX_BACKUP_API_PID_FN)?;
let sock = server::ctrl_sock_from_pid(pid);
-let f2 = server::send_command(sock, serde_json::json!({
-"command": "api-access-log-reopen",
-}));
+let f2 = server::send_command(sock, "{\"command\":\"api-access-log-reopen\"}\n");

match futures::join!(f1, f2) {
(Err(e1), Err(e2)) => Err(format_err!("reopen commands failed, proxy: {}; api: {}", e1, e2)),
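Note: send_command now receives a pre-serialized, newline-terminated JSON string instead of a serde_json::Value, avoiding re-serializing a constant payload on every call. The two forms produce identical bytes, as this illustrative check (serde_json assumed) shows:

    use serde_json::json;

    fn main() {
        // the old Value form, serialized at runtime ...
        let dynamic = format!("{}\n", json!({ "command": "api-access-log-reopen" }));
        // ... matches the pre-baked string now passed to send_command
        assert_eq!(dynamic, "{\"command\":\"api-access-log-reopen\"}\n");
    }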
@@ -50,13 +50,41 @@ async fn do_update(
};
wait_for_local_worker(upid.as_str().unwrap()).await?;

-// TODO: certificate checks/renewal/... ?
+match check_acme_certificates(rpcenv).await {
+Ok(()) => (),
+Err(err) => {
+eprintln!("error checking certificates: {}", err);
+}
+}

// TODO: cleanup tasks like in PVE?

Ok(Value::Null)
}

+async fn check_acme_certificates(rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
+let (config, _) = proxmox_backup::config::node::config()?;
+
+// do we even have any acme domains configured?
+if config.acme_domains().next().is_none() {
+return Ok(());
+}
+
+if !api2::node::certificates::cert_expires_soon()? {
+println!("Certificate does not expire within the next 30 days, not renewing.");
+return Ok(());
+}
+
+let info = &api2::node::certificates::API_METHOD_RENEW_ACME_CERT;
+let result = match info.handler {
+ApiHandler::Sync(handler) => (handler)(json!({}), info, rpcenv)?,
+_ => unreachable!(),
+};
+wait_for_local_worker(result.as_str().unwrap()).await?;
+
+Ok(())
+}

fn main() {
proxmox_backup::tools::setup_safe_path_env();
@@ -47,7 +47,8 @@ enum ExtractPath {

fn parse_path(path: String, base64: bool) -> Result<ExtractPath, Error> {
let mut bytes = if base64 {
-base64::decode(path)?
+base64::decode(&path)
+.map_err(|err| format_err!("Failed base64-decoding path '{}' - {}", path, err))?
} else {
path.into_bytes()
};
@@ -193,7 +194,7 @@ async fn list(
} else {
None
};
-entries.push(ArchiveEntry::new(path.as_bytes(), attr));
+entries.push(ArchiveEntry::new_with_size(path.as_bytes(), attr, Some(file.size)));
}

Ok(entries)
@@ -34,6 +34,7 @@ use proxmox_backup::{
MEDIA_LABEL_SCHEMA,
MEDIA_POOL_NAME_SCHEMA,
Userid,
+TAPE_RESTORE_SNAPSHOT_SCHEMA,
},
},
config::{
@@ -51,6 +52,7 @@ use proxmox_backup::{
},
complete_media_label_text,
complete_media_set_uuid,
+complete_media_set_snapshots,
file_formats::{
PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
MediaContentHeader,
@@ -119,7 +121,7 @@ pub fn extract_drive_name(
/// Format media
async fn format_media(mut param: Value) -> Result<(), Error> {

-let output_format = get_output_format(&param);
+let output_format = extract_output_format(&mut param);

let (config, _digest) = config::drive::config()?;

@@ -152,7 +154,7 @@ async fn format_media(mut param: Value) -> Result<(), Error> {
/// Rewind tape
async fn rewind(mut param: Value) -> Result<(), Error> {

-let output_format = get_output_format(&param);
+let output_format = extract_output_format(&mut param);

let (config, _digest) = config::drive::config()?;

@@ -185,7 +187,7 @@ async fn rewind(mut param: Value) -> Result<(), Error> {
/// Eject/Unload drive media
async fn eject_media(mut param: Value) -> Result<(), Error> {

-let output_format = get_output_format(&param);
+let output_format = extract_output_format(&mut param);

let (config, _digest) = config::drive::config()?;

@@ -221,7 +223,7 @@ async fn eject_media(mut param: Value) -> Result<(), Error> {
/// Load media with specified label
async fn load_media(mut param: Value) -> Result<(), Error> {

-let output_format = get_output_format(&param);
+let output_format = extract_output_format(&mut param);

let (config, _digest) = config::drive::config()?;

@@ -318,7 +320,7 @@ async fn load_media_from_slot(mut param: Value) -> Result<(), Error> {
/// Unload media via changer
async fn unload_media(mut param: Value) -> Result<(), Error> {

-let output_format = get_output_format(&param);
+let output_format = extract_output_format(&mut param);

let (config, _digest) = config::drive::config()?;

@@ -358,7 +360,7 @@ async fn unload_media(mut param: Value) -> Result<(), Error> {
/// Label media
async fn label_media(mut param: Value) -> Result<(), Error> {

-let output_format = get_output_format(&param);
+let output_format = extract_output_format(&mut param);

let (config, _digest) = config::drive::config()?;

@@ -396,7 +398,7 @@ async fn label_media(mut param: Value) -> Result<(), Error> {
/// Read media label
async fn read_label(mut param: Value) -> Result<(), Error> {

-let output_format = get_output_format(&param);
+let output_format = extract_output_format(&mut param);

let (config, _digest) = config::drive::config()?;

@@ -456,7 +458,7 @@ async fn inventory(
mut param: Value,
) -> Result<(), Error> {

-let output_format = get_output_format(&param);
+let output_format = extract_output_format(&mut param);

let (config, _digest) = config::drive::config()?;
let drive = extract_drive_name(&mut param, &config)?;
@@ -514,7 +516,7 @@ async fn inventory(
/// Label media with barcodes from changer device
async fn barcode_label_media(mut param: Value) -> Result<(), Error> {

-let output_format = get_output_format(&param);
+let output_format = extract_output_format(&mut param);

let (config, _digest) = config::drive::config()?;

@@ -653,7 +655,7 @@ fn debug_scan(mut param: Value) -> Result<(), Error> {
/// Read Cartridge Memory (Medium auxiliary memory attributes)
async fn cartridge_memory(mut param: Value) -> Result<(), Error> {

-let output_format = get_output_format(&param);
+let output_format = extract_output_format(&mut param);

let (config, _digest) = config::drive::config()?;

@@ -694,7 +696,7 @@ async fn cartridge_memory(mut param: Value) -> Result<(), Error> {
/// Read Volume Statistics (SCSI log page 17h)
async fn volume_statistics(mut param: Value) -> Result<(), Error> {

-let output_format = get_output_format(&param);
+let output_format = extract_output_format(&mut param);

let (config, _digest) = config::drive::config()?;

@@ -732,7 +734,7 @@ async fn volume_statistics(mut param: Value) -> Result<(), Error> {
/// Get drive/media status
async fn status(mut param: Value) -> Result<(), Error> {

-let output_format = get_output_format(&param);
+let output_format = extract_output_format(&mut param);

let (config, _digest) = config::drive::config()?;

@@ -792,7 +794,7 @@ async fn status(mut param: Value) -> Result<(), Error> {
/// Clean drive
async fn clean_drive(mut param: Value) -> Result<(), Error> {

-let output_format = get_output_format(&param);
+let output_format = extract_output_format(&mut param);

let (config, _digest) = config::drive::config()?;

@@ -853,7 +855,7 @@ async fn clean_drive(mut param: Value) -> Result<(), Error> {
/// Backup datastore to tape media pool
async fn backup(mut param: Value) -> Result<(), Error> {

-let output_format = get_output_format(&param);
+let output_format = extract_output_format(&mut param);

let (config, _digest) = config::drive::config()?;

@@ -886,6 +888,14 @@ async fn backup(mut param: Value) -> Result<(), Error> {
type: Userid,
optional: true,
},
+"snapshots": {
+description: "List of snapshots.",
+type: Array,
+optional: true,
+items: {
+schema: TAPE_RESTORE_SNAPSHOT_SCHEMA,
+},
+},
owner: {
type: Authid,
optional: true,
@@ -900,7 +910,7 @@ async fn backup(mut param: Value) -> Result<(), Error> {
/// Restore data from media-set
async fn restore(mut param: Value) -> Result<(), Error> {

-let output_format = get_output_format(&param);
+let output_format = extract_output_format(&mut param);

let (config, _digest) = config::drive::config()?;

@@ -947,7 +957,7 @@ async fn restore(mut param: Value) -> Result<(), Error> {
/// Scan media and record content
async fn catalog_media(mut param: Value) -> Result<(), Error> {

-let output_format = get_output_format(&param);
+let output_format = extract_output_format(&mut param);

let (config, _digest) = config::drive::config()?;

@@ -977,9 +987,10 @@ fn main() {
.insert(
"restore",
CliCommand::new(&API_METHOD_RESTORE)
-.arg_param(&["media-set", "store"])
+.arg_param(&["media-set", "store", "snapshots"])
.completion_cb("store", complete_datastore_name)
.completion_cb("media-set", complete_media_set_uuid)
+.completion_cb("snapshots", complete_media_set_snapshots)
)
.insert(
"barcode-label",
@@ -25,7 +25,7 @@ use proxmox_backup::backup::{
BackupDir,
BackupGroup,
BufferedDynamicReader,
-AsyncIndexReader,
+CachedChunkReader,
};

use proxmox_backup::client::*;
@@ -281,7 +281,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
let index = client.download_fixed_index(&manifest, &server_archive_name).await?;
let size = index.index_bytes();
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), HashMap::new());
-let reader = AsyncIndexReader::new(index, chunk_reader);
+let reader = CachedChunkReader::new(chunk_reader, index, 8).seekable();

let name = &format!("{}:{}/{}", repo.to_string(), path, archive_name);
let name_escaped = tools::systemd::escape_unit(name, false);
src/bin/proxmox_backup_manager/acme.rs (new file, 416 lines)
@@ -0,0 +1,416 @@
use std::io::Write;

use anyhow::{bail, Error};
use serde_json::Value;

use proxmox::api::{api, cli::*, ApiHandler, RpcEnvironment};
use proxmox::tools::fs::file_get_contents;

use proxmox_backup::acme::AcmeClient;
use proxmox_backup::api2;
use proxmox_backup::api2::types::AcmeAccountName;
use proxmox_backup::config::acme::plugin::DnsPluginCoreUpdater;
use proxmox_backup::config::acme::KNOWN_ACME_DIRECTORIES;

pub fn acme_mgmt_cli() -> CommandLineInterface {
let cmd_def = CliCommandMap::new()
.insert("account", account_cli())
.insert("cert", cert_cli())
.insert("plugin", plugin_cli());

cmd_def.into()
}

#[api(
input: {
properties: {
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// List acme accounts.
fn list_accounts(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
let output_format = get_output_format(&param);

let info = &api2::config::acme::API_METHOD_LIST_ACCOUNTS;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};

let options = default_table_format_options();
format_and_print_result_full(&mut data, &info.returns, &output_format, &options);

Ok(())
}

#[api(
input: {
properties: {
name: { type: AcmeAccountName },
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// Show acme account information.
async fn get_account(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
let output_format = get_output_format(&param);

let info = &api2::config::acme::API_METHOD_GET_ACCOUNT;
let mut data = match info.handler {
ApiHandler::Async(handler) => (handler)(param, info, rpcenv).await?,
_ => unreachable!(),
};

let options = default_table_format_options()
.column(
ColumnConfig::new("account")
.renderer(|value, _record| Ok(serde_json::to_string_pretty(value)?)),
)
.column(ColumnConfig::new("directory"))
.column(ColumnConfig::new("location"))
.column(ColumnConfig::new("tos"));
format_and_print_result_full(&mut data, &info.returns, &output_format, &options);

Ok(())
}

#[api(
input: {
properties: {
name: { type: AcmeAccountName },
contact: {
description: "List of email addresses.",
},
directory: {
type: String,
description: "The ACME Directory.",
optional: true,
},
}
}
)]
/// Register an ACME account.
async fn register_account(
name: AcmeAccountName,
contact: String,
directory: Option<String>,
) -> Result<(), Error> {
let directory = match directory {
Some(directory) => directory,
None => {
println!("Directory endpoints:");
for (i, dir) in KNOWN_ACME_DIRECTORIES.iter().enumerate() {
println!("{}) {}", i, dir.url);
}

println!("{}) Custom", KNOWN_ACME_DIRECTORIES.len());
let mut attempt = 0;
loop {
print!("Enter selection: ");
std::io::stdout().flush()?;

let mut input = String::new();
std::io::stdin().read_line(&mut input)?;

match input.trim().parse::<usize>() {
Ok(n) if n < KNOWN_ACME_DIRECTORIES.len() => {
break KNOWN_ACME_DIRECTORIES[n].url.to_owned();
}
Ok(n) if n == KNOWN_ACME_DIRECTORIES.len() => {
input.clear();
std::io::stdin().read_line(&mut input)?;
break input.trim().to_owned();
}
_ => eprintln!("Invalid selection."),
}

attempt += 1;
if attempt >= 3 {
bail!("Aborting.");
}
}
}
};

println!("Attempting to fetch Terms of Service from {:?}", directory);
let mut client = AcmeClient::new(directory.clone());
let tos_agreed = if let Some(tos_url) = client.terms_of_service_url().await? {
println!("Terms of Service: {}", tos_url);
print!("Do you agree to the above terms? [y|N]: ");
std::io::stdout().flush()?;
let mut input = String::new();
std::io::stdin().read_line(&mut input)?;
if input.trim().eq_ignore_ascii_case("y") {
true
} else {
false
}
} else {
false
};

println!("Attempting to register account with {:?}...", directory);

let account =
api2::config::acme::do_register_account(&mut client, &name, tos_agreed, contact, None)
.await?;

println!("Registration successful, account URL: {}", account.location);

Ok(())
}

#[api(
input: {
properties: {
name: { type: AcmeAccountName },
contact: {
description: "List of email addresses.",
type: String,
optional: true,
},
}
}
)]
/// Update an ACME account.
async fn update_account(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
let info = &api2::config::acme::API_METHOD_UPDATE_ACCOUNT;
let result = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};

crate::wait_for_local_worker(result.as_str().unwrap()).await?;

Ok(())
}

#[api(
input: {
properties: {
name: { type: AcmeAccountName },
force: {
description:
"Delete account data even if the server refuses to deactivate the account.",
type: Boolean,
optional: true,
default: false,
},
}
}
)]
/// Deactivate an ACME account.
async fn deactivate_account(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
let info = &api2::config::acme::API_METHOD_DEACTIVATE_ACCOUNT;
let result = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};

crate::wait_for_local_worker(result.as_str().unwrap()).await?;

Ok(())
}

pub fn account_cli() -> CommandLineInterface {
let cmd_def = CliCommandMap::new()
.insert("list", CliCommand::new(&API_METHOD_LIST_ACCOUNTS))
.insert(
"register",
CliCommand::new(&API_METHOD_REGISTER_ACCOUNT).arg_param(&["name", "contact"]),
)
.insert(
"deactivate",
CliCommand::new(&API_METHOD_DEACTIVATE_ACCOUNT)
.arg_param(&["name"])
.completion_cb("name", crate::config::acme::complete_acme_account),
)
.insert(
"info",
CliCommand::new(&API_METHOD_GET_ACCOUNT)
.arg_param(&["name"])
.completion_cb("name", crate::config::acme::complete_acme_account),
)
.insert(
"update",
CliCommand::new(&API_METHOD_UPDATE_ACCOUNT)
.arg_param(&["name"])
.completion_cb("name", crate::config::acme::complete_acme_account),
);

cmd_def.into()
}

#[api(
input: {
properties: {
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// List acme plugins.
fn list_plugins(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
let output_format = get_output_format(&param);

let info = &api2::config::acme::API_METHOD_LIST_PLUGINS;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};

let options = default_table_format_options();
format_and_print_result_full(&mut data, &info.returns, &output_format, &options);

Ok(())
}

#[api(
input: {
properties: {
id: {
type: String,
description: "Plugin ID",
},
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// Show acme plugin configuration.
fn get_plugin(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
let output_format = get_output_format(&param);

let info = &api2::config::acme::API_METHOD_GET_PLUGIN;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};

let options = default_table_format_options();
format_and_print_result_full(&mut data, &info.returns, &output_format, &options);

Ok(())
}

#[api(
input: {
properties: {
type: {
type: String,
description: "The ACME challenge plugin type.",
},
core: {
type: DnsPluginCoreUpdater,
flatten: true,
},
data: {
type: String,
description: "File containing the plugin data.",
},
}
}
)]
/// Add an acme plugin.
fn add_plugin(r#type: String, core: DnsPluginCoreUpdater, data: String) -> Result<(), Error> {
let data = base64::encode(&file_get_contents(&data)?);
api2::config::acme::add_plugin(r#type, core, data)?;
Ok(())
}

pub fn plugin_cli() -> CommandLineInterface {
use proxmox_backup::api2::config::acme;
let cmd_def = CliCommandMap::new()
.insert("list", CliCommand::new(&API_METHOD_LIST_PLUGINS))
.insert(
"config", // name comes from pve/pmg
CliCommand::new(&API_METHOD_GET_PLUGIN)
.arg_param(&["id"])
.completion_cb("id", crate::config::acme::complete_acme_plugin),
)
.insert(
"add",
CliCommand::new(&API_METHOD_ADD_PLUGIN)
.arg_param(&["type", "id"])
.completion_cb("api", crate::config::acme::complete_acme_api_challenge_type)
.completion_cb("type", crate::config::acme::complete_acme_plugin_type),
)
.insert(
"remove",
CliCommand::new(&acme::API_METHOD_DELETE_PLUGIN)
.arg_param(&["id"])
.completion_cb("id", crate::config::acme::complete_acme_plugin),
)
.insert(
"set",
CliCommand::new(&acme::API_METHOD_UPDATE_PLUGIN)
.arg_param(&["id"])
.completion_cb("id", crate::config::acme::complete_acme_plugin),
);

cmd_def.into()
}

#[api(
input: {
properties: {
force: {
description: "Force renewal even if the certificate does not expire soon.",
type: Boolean,
optional: true,
default: false,
},
},
},
)]
/// Order a new ACME certificate.
async fn order_acme_cert(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
if !param["force"].as_bool().unwrap_or(false) && !api2::node::certificates::cert_expires_soon()?
{
println!("Certificate does not expire within the next 30 days, not renewing.");
return Ok(());
}

let info = &api2::node::certificates::API_METHOD_RENEW_ACME_CERT;
let result = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};

crate::wait_for_local_worker(result.as_str().unwrap()).await?;

Ok(())
}

#[api]
/// Revoke an existing ACME certificate.
async fn revoke_acme_cert(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
let info = &api2::node::certificates::API_METHOD_REVOKE_ACME_CERT;
let result = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};

crate::wait_for_local_worker(result.as_str().unwrap()).await?;

Ok(())
}

pub fn cert_cli() -> CommandLineInterface {
let cmd_def = CliCommandMap::new()
.insert("order", CliCommand::new(&API_METHOD_ORDER_ACME_CERT))
.insert("revoke", CliCommand::new(&API_METHOD_REVOKE_ACME_CERT));

cmd_def.into()
}
@@ -1,5 +1,7 @@
mod acl;
pub use acl::*;
+mod acme;
+pub use acme::*;
mod cert;
pub use cert::*;
mod datastore;
@@ -20,3 +22,5 @@ mod subscription;
pub use subscription::*;
mod disk;
pub use disk::*;
+mod node;
+pub use node::*;
src/bin/proxmox_backup_manager/node.rs (new file, 47 lines)
@@ -0,0 +1,47 @@
use proxmox::api::{api, cli::*, ApiHandler, RpcEnvironment};
use anyhow::Error;
use serde_json::Value;

use proxmox_backup::api2;

#[api(
input: {
properties: {
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// Show node configuration
fn get_node_config(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

let output_format = get_output_format(&param);

let info = &api2::node::config::API_METHOD_GET_NODE_CONFIG;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};

let options = default_table_format_options();
format_and_print_result_full(&mut data, &info.returns, &output_format, &options);

Ok(Value::Null)
}

pub fn node_commands() -> CommandLineInterface {
let cmd_def = CliCommandMap::new()
.insert(
"show",
CliCommand::new(&API_METHOD_GET_NODE_CONFIG),
)
.insert(
"update",
CliCommand::new(&api2::node::config::API_METHOD_UPDATE_NODE_CONFIG)
.fixed_param("node", String::from("localhost"))
);

cmd_def.into()
}
@@ -98,6 +98,7 @@ async fn cleanup_map(map: &mut HashMap<String, VMState>) -> bool {
"VM '{}' (pid: {}, cid: {}) was not reachable, removing from map",
name, state.pid, state.cid
);
+let _ = super::qemu_helper::try_kill_vm(state.pid);
}
}

@@ -131,6 +132,7 @@ async fn ensure_running(details: &SnapRestoreDetails) -> Result<VsockClient, Err
Err(err) => {
eprintln!("stale VM detected, restarting ({})", err);
// VM is dead, restart
+let _ = super::qemu_helper::try_kill_vm(vm.pid);
let vms = start_vm(vm.cid, details).await?;
new_cid = vms.cid;
state.map.insert(name, vms.clone());
@@ -228,6 +230,7 @@ impl BlockRestoreDriver for QemuBlockDriver {
.await
{
eprintln!("reading file extraction stream failed - {}", err);
+std::process::exit(1);
}
});
@@ -47,16 +47,20 @@ fn create_restore_log_dir() -> Result<String, Error> {
Ok(logpath)
}

-fn validate_img_existance() -> Result<(), Error> {
+fn validate_img_existance(debug: bool) -> Result<(), Error> {
let kernel = PathBuf::from(buildcfg::PROXMOX_BACKUP_KERNEL_FN);
-let initramfs = PathBuf::from(buildcfg::PROXMOX_BACKUP_INITRAMFS_FN);
+let initramfs = PathBuf::from(if debug {
+buildcfg::PROXMOX_BACKUP_INITRAMFS_DBG_FN
+} else {
+buildcfg::PROXMOX_BACKUP_INITRAMFS_FN
+});
if !kernel.exists() || !initramfs.exists() {
-bail!("cannot run file-restore VM: package 'proxmox-file-restore' is not (correctly) installed");
+bail!("cannot run file-restore VM: package 'proxmox-backup-restore-image' is not (correctly) installed");
}
Ok(())
}

-fn try_kill_vm(pid: i32) -> Result<(), Error> {
+pub fn try_kill_vm(pid: i32) -> Result<(), Error> {
let pid = Pid::from_raw(pid);
if let Ok(()) = kill(pid, None) {
// process is running (and we could kill it), check if it is actually ours
@@ -79,7 +83,7 @@ fn try_kill_vm(pid: i32) -> Result<(), Error> {
Ok(())
}

-async fn create_temp_initramfs(ticket: &str) -> Result<(Fd, String), Error> {
+async fn create_temp_initramfs(ticket: &str, debug: bool) -> Result<(Fd, String), Error> {
use std::ffi::CString;
use tokio::fs::File;

@@ -88,8 +92,14 @@ async fn create_temp_initramfs(ticket: &str) -> Result<(Fd, String), Error> {
nix::unistd::unlink(&tmp_path)?;
tools::fd_change_cloexec(tmp_fd.0, false)?;

+let initramfs = if debug {
+buildcfg::PROXMOX_BACKUP_INITRAMFS_DBG_FN
+} else {
+buildcfg::PROXMOX_BACKUP_INITRAMFS_FN
+};
+
let mut f = File::from_std(unsafe { std::fs::File::from_raw_fd(tmp_fd.0) });
-let mut base = File::open(buildcfg::PROXMOX_BACKUP_INITRAMFS_FN).await?;
+let mut base = File::open(initramfs).await?;

tokio::io::copy(&mut base, &mut f).await?;

@@ -122,18 +132,24 @@ pub async fn start_vm(
files: impl Iterator<Item = String>,
ticket: &str,
) -> Result<(i32, i32), Error> {
-validate_img_existance()?;
-
if let Err(_) = std::env::var("PBS_PASSWORD") {
bail!("environment variable PBS_PASSWORD has to be set for QEMU VM restore");
}

+let debug = if let Ok(val) = std::env::var("PBS_QEMU_DEBUG") {
+!val.is_empty()
+} else {
+false
+};
+
+validate_img_existance(debug)?;
+
let pid;
let (pid_fd, pid_path) = make_tmp_file("/tmp/file-restore-qemu.pid.tmp", CreateOptions::new())?;
nix::unistd::unlink(&pid_path)?;
tools::fd_change_cloexec(pid_fd.0, false)?;

-let (_ramfs_pid, ramfs_path) = create_temp_initramfs(ticket).await?;
+let (_ramfs_pid, ramfs_path) = create_temp_initramfs(ticket, debug).await?;

let logpath = create_restore_log_dir()?;
let logfile = &format!("{}/qemu.log", logpath);
@@ -167,14 +183,16 @@ pub async fn start_vm(
"-vnc",
"none",
"-enable-kvm",
-"-m",
-"512",
"-kernel",
buildcfg::PROXMOX_BACKUP_KERNEL_FN,
"-initrd",
&ramfs_path,
"-append",
-"quiet",
+if debug {
+"debug panic=1"
+} else {
+"quiet panic=1"
+},
"-daemonize",
"-pidfile",
&format!("/dev/fd/{}", pid_fd.as_raw_fd()),
@@ -199,17 +217,42 @@ pub async fn start_vm(
"file=pbs:repository={},,snapshot={},,archive={}{},read-only=on,if=none,id=drive{}",
details.repo, details.snapshot, file, keyfile, id
));

+// a PCI bus can only support 32 devices, so add a new one every 32
+let bus = (id / 32) + 2;
+if id % 32 == 0 {
+drives.push("-device".to_owned());
+drives.push(format!("pci-bridge,id=bridge{},chassis_nr={}", bus, bus));
+}
+
drives.push("-device".to_owned());
// drive serial is used by VM to map .fidx files to /dev paths
-drives.push(format!("virtio-blk-pci,drive=drive{},serial={}", id, file));
+let serial = file.strip_suffix(".img.fidx").unwrap_or(&file);
+drives.push(format!(
+"virtio-blk-pci,drive=drive{},serial={},bus=bridge{}",
+id, serial, bus
+));
id += 1;
}

+let ram = if debug {
+1024
+} else {
+// add more RAM if many drives are given
+match id {
+f if f < 10 => 128,
+f if f < 20 => 192,
+_ => 256,
+}
+};
+
// Try starting QEMU in a loop to retry if we fail because of a bad 'cid' value
let mut attempts = 0;
loop {
let mut qemu_cmd = std::process::Command::new("qemu-system-x86_64");
qemu_cmd.args(base_args.iter());
+qemu_cmd.arg("-m");
+qemu_cmd.arg(ram.to_string());
qemu_cmd.args(&drives);
qemu_cmd.arg("-device");
qemu_cmd.arg(format!(
@@ -217,6 +260,19 @@ pub async fn start_vm(
cid
));

+if debug {
+let debug_args = [
+"-chardev",
+&format!(
+"socket,id=debugser,path=/run/proxmox-backup/file-restore-serial-{}.sock,server,nowait",
+cid
+),
+"-serial",
+"chardev:debugser",
+];
+qemu_cmd.args(debug_args.iter());
+}
+
qemu_cmd.stdout(std::process::Stdio::null());
qemu_cmd.stderr(std::process::Stdio::piped());

@@ -259,6 +315,12 @@ pub async fn start_vm(
if let Ok(Ok(_)) =
time::timeout(Duration::from_secs(2), client.get("api2/json/status", None)).await
{
+if debug {
+eprintln!(
+"Connect to '/run/proxmox-backup/file-restore-serial-{}.sock' for shell access",
+cid
+)
+}
return Ok((pid, cid as i32));
}
if kill(pid_t, None).is_err() {
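Note: the bridge placement above is plain integer arithmetic: drive ids 0-31 land on bridge 2, ids 32-63 on bridge 3, and a pci-bridge device is emitted exactly when a bus is first used. A quick illustrative sketch of that mapping:

    // Maps a zero-based drive id to its PCI bridge and reports whether a
    // new bridge device must be added first (each bridge holds 32 slots).
    fn bridge_for(id: usize) -> (usize, bool) {
        ((id / 32) + 2, id % 32 == 0)
    }

    fn main() {
        assert_eq!(bridge_for(0), (2, true));   // first drive opens bridge 2
        assert_eq!(bridge_for(31), (2, false)); // still fits on bridge 2
        assert_eq!(bridge_for(32), (3, true));  // 33rd drive opens bridge 3
    }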
@@ -6,6 +6,7 @@ use hyper::{header, Body, Response, StatusCode};
use log::error;
use pathpatterns::{MatchEntry, MatchPattern, MatchType, Pattern};
use serde_json::Value;
+use tokio::sync::Semaphore;

use std::ffi::OsStr;
use std::fs;
@@ -25,7 +26,7 @@ use proxmox_backup::tools::{self, fs::read_subdir, zip::zip_directory};

use pxar::encoder::aio::TokioWriter;

-use super::{disk::ResolveResult, watchdog_remaining, watchdog_ping};
+use super::{disk::ResolveResult, watchdog_remaining, watchdog_inhibit, watchdog_ping};

// NOTE: All API endpoints must have Permission::Superuser, as the configs for authentication do
// not exist within the restore VM. Safety is guaranteed by checking a ticket via a custom ApiAuth.
@@ -41,6 +42,8 @@ pub const ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SUBDIRS))
.subdirs(SUBDIRS);

+static DOWNLOAD_SEM: Semaphore = Semaphore::const_new(8);
+
fn read_uptime() -> Result<f32, Error> {
let uptime = fs::read_to_string("/proc/uptime")?;
// unwrap the Option, if /proc/uptime is empty we have bigger problems
@@ -200,11 +203,12 @@ fn list(
for c in comps {
let mut c_path = path.clone();
c_path.push(b'/');
-c_path.extend(c.as_bytes());
-res.push(ArchiveEntry::new(
+c_path.extend(c.0.as_bytes());
+res.push(ArchiveEntry::new_with_size(
&c_path[..],
// this marks the beginning of a filesystem, i.e. '/', so this is a Directory
Some(&DirEntryAttribute::Directory { start: 0 }),
+Some(c.1),
));
}
}
@@ -247,8 +251,16 @@ fn extract(
_info: &ApiMethod,
_rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
-watchdog_ping();
+// download can take longer than watchdog timeout, inhibit until done
+let _inhibitor = watchdog_inhibit();
async move {
+let _inhibitor = _inhibitor;
+
+let _permit = match DOWNLOAD_SEM.try_acquire() {
+Ok(permit) => permit,
+Err(_) => bail!("maximum concurrent download limit reached, please wait for another restore to finish before attempting a new one"),
+};
+
let path = tools::required_string_param(&param, "path")?;
let mut path = base64::decode(path)?;
if let Some(b'/') = path.last() {
@@ -278,6 +290,8 @@ fn extract(

if pxar {
tokio::spawn(async move {
+let _inhibitor = _inhibitor;
+let _permit = _permit;
let result = async move {
// pxar always expects a directory as its root, so to accommodate files as
// well we encode the parent dir with a filter only matching the target instead
@@ -335,6 +349,8 @@ fn extract(
});
} else {
tokio::spawn(async move {
+let _inhibitor = _inhibitor;
+let _permit = _permit;
let result = async move {
if vm_path.is_dir() {
zip_directory(&mut writer, &vm_path).await?;
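Note: DOWNLOAD_SEM caps concurrent extractions at eight; each request must win a permit up front and keeps it alive (the _permit binding) until its spawned task finishes. A minimal sketch of the same admission gate, assuming a tokio runtime:

    use std::sync::Arc;
    use tokio::sync::Semaphore;

    #[tokio::main(flavor = "current_thread")]
    async fn main() {
        // at most 2 jobs may hold a permit; extra callers are turned
        // away immediately instead of queueing
        let jobs = Arc::new(Semaphore::new(2));
        let mut held = Vec::new();

        for id in 0..4 {
            match Arc::clone(&jobs).try_acquire_owned() {
                Ok(permit) => {
                    held.push(permit); // keep the permit alive while the job runs
                    println!("job {} admitted", id);
                }
                Err(_) => println!("job {} rejected: limit reached", id),
            }
        }
        // all permits are released when `held` is dropped here
    }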
@ -25,10 +25,14 @@ lazy_static! {
|
||||
m.insert("ext3", "noload");
|
||||
m.insert("ext4", "noload");
|
||||
|
||||
m.insert("xfs", "norecovery");
|
||||
|
||||
// ufs2 is used as default since FreeBSD 5.0 released in 2003, so let's assume that
|
||||
// whatever the user is trying to restore is not using anything older...
|
||||
m.insert("ufs", "ufstype=ufs2");
|
||||
|
||||
m.insert("ntfs", "utf8");
|
||||
|
||||
m
|
||||
};
|
||||
}
|
||||
@ -36,13 +40,14 @@ lazy_static! {
|
||||
pub enum ResolveResult {
|
||||
Path(PathBuf),
|
||||
BucketTypes(Vec<&'static str>),
|
||||
BucketComponents(Vec<String>),
|
||||
BucketComponents(Vec<(String, u64)>),
|
||||
}
|
||||
|
||||
struct PartitionBucketData {
|
||||
dev_node: String,
|
||||
number: i32,
|
||||
mountpoint: Option<PathBuf>,
|
||||
size: u64,
|
||||
}
|
||||
|
||||
/// A "Bucket" represents a mapping found on a disk, e.g. a partition, a zfs dataset or an LV. A
|
||||
@ -57,30 +62,62 @@ struct PartitionBucketData {
|
||||
/// e.g.: "/drive-scsi0/part/0/etc/passwd"
|
||||
enum Bucket {
|
||||
Partition(PartitionBucketData),
|
||||
RawFs(PartitionBucketData),
|
||||
}
|
||||
|
||||
impl Bucket {
|
||||
fn filter_mut<'a, A: AsRef<str>, B: AsRef<str>>(
|
||||
haystack: &'a mut Vec<Bucket>,
|
||||
ty: A,
|
||||
comp: B,
|
||||
comp: &[B],
|
||||
) -> Option<&'a mut Bucket> {
|
||||
let ty = ty.as_ref();
|
||||
let comp = comp.as_ref();
|
||||
haystack.iter_mut().find(|b| match b {
|
||||
Bucket::Partition(data) => ty == "part" && comp.parse::<i32>().unwrap() == data.number,
|
||||
Bucket::Partition(data) => {
|
||||
if let Some(comp) = comp.get(0) {
|
||||
ty == "part" && comp.as_ref().parse::<i32>().unwrap() == data.number
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
Bucket::RawFs(_) => ty == "raw",
|
||||
})
|
||||
}
|
||||
|
||||
fn type_string(&self) -> &'static str {
|
||||
match self {
|
||||
Bucket::Partition(_) => "part",
|
||||
Bucket::RawFs(_) => "raw",
|
||||
}
|
||||
}
|
||||
|
||||
fn component_string(&self) -> String {
|
||||
match self {
|
||||
fn component_string(&self, idx: usize) -> Result<String, Error> {
|
||||
let max_depth = Self::component_depth(self.type_string())?;
|
||||
if idx >= max_depth {
|
||||
bail!(
|
||||
"internal error: component index out of range {}/{} ({})",
|
||||
idx,
|
||||
max_depth,
|
||||
self.type_string()
|
||||
);
|
||||
}
|
||||
Ok(match self {
|
||||
Bucket::Partition(data) => data.number.to_string(),
|
||||
Bucket::RawFs(_) => "raw".to_owned(),
|
||||
})
|
||||
}
|
||||
|
||||
fn component_depth(type_string: &str) -> Result<usize, Error> {
|
||||
Ok(match type_string {
|
||||
"part" => 1,
|
||||
"raw" => 0,
|
||||
_ => bail!("invalid bucket type for component depth: {}", type_string),
|
||||
})
|
||||
}
|
||||
|
||||
fn size(&self) -> u64 {
|
||||
match self {
|
||||
Bucket::Partition(data) | Bucket::RawFs(data) => data.size,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -106,13 +143,15 @@ impl Filesystems {
|
||||
}
|
||||
}
|
||||
|
||||
info!("Supported FS: {}", supported_fs.join(", "));
|
||||
|
||||
Ok(Self { supported_fs })
|
||||
}
|
||||
|
||||
fn ensure_mounted(&self, bucket: &mut Bucket) -> Result<PathBuf, Error> {
|
||||
match bucket {
|
||||
Bucket::Partition(data) => {
|
||||
// regular data partition à la "/dev/vdxN"
|
||||
Bucket::Partition(data) | Bucket::RawFs(data) => {
|
||||
// regular data partition à la "/dev/vdxN" or FS directly on a disk
|
||||
if let Some(mp) = &data.mountpoint {
|
||||
return Ok(mp.clone());
|
||||
}
|
||||
@ -144,6 +183,7 @@ impl Filesystems {
|
||||
info!("mounting '{}' succeeded, fstype: '{}'", source, fs);
|
||||
return Ok(());
|
||||
}
|
||||
Err(nix::Error::Sys(nix::errno::Errno::EINVAL)) => {}
|
||||
Err(err) => {
|
||||
warn!("mount error on '{}' ({}) - {}", source, fs, err);
|
||||
}
|
||||
@ -162,6 +202,8 @@ pub struct DiskState {
|
||||
impl DiskState {
|
||||
/// Scan all disks for supported buckets.
|
||||
pub fn scan() -> Result<Self, Error> {
|
||||
let filesystems = Filesystems::scan()?;
|
||||
|
||||
// create mapping for virtio drives and .fidx files (via serial description)
|
||||
// note: disks::DiskManager relies on udev, which we don't have
|
||||
let mut disk_map = HashMap::new();
|
||||
@ -188,6 +230,25 @@ impl DiskState {
|
||||
}
|
||||
};
|
||||
|
||||
// attempt to mount device directly
|
||||
let dev_node = format!("/dev/{}", name);
|
||||
let size = Self::make_dev_node(&dev_node, &sys_path)?;
|
||||
let mut dfs_bucket = Bucket::RawFs(PartitionBucketData {
|
||||
dev_node: dev_node.clone(),
|
||||
number: 0,
|
||||
mountpoint: None,
|
||||
size,
|
||||
});
|
||||
if let Ok(_) = filesystems.ensure_mounted(&mut dfs_bucket) {
|
||||
// mount succeeded, add bucket and skip any other checks for the disk
|
||||
info!(
|
||||
"drive '{}' ('{}', '{}') contains fs directly ({}B)",
|
||||
name, fidx, dev_node, size
|
||||
);
|
||||
disk_map.insert(fidx, vec![dfs_bucket]);
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut parts = Vec::new();
|
||||
for entry in proxmox_backup::tools::fs::scan_subdir(
|
||||
libc::AT_FDCWD,
|
||||
@ -197,37 +258,36 @@ impl DiskState {
|
||||
.filter_map(Result::ok)
|
||||
{
|
||||
let part_name = unsafe { entry.file_name_utf8_unchecked() };
|
||||
let devnode = format!("/dev/{}", part_name);
|
||||
let dev_node = format!("/dev/{}", part_name);
|
||||
let part_path = format!("/sys/block/{}/{}", name, part_name);
|
||||
|
||||
// create partition device node for further use
|
||||
let dev_num_str = fs::file_read_firstline(&format!("{}/dev", part_path))?;
|
||||
let (major, minor) = dev_num_str.split_at(dev_num_str.find(':').unwrap());
|
||||
Self::mknod_blk(&devnode, major.parse()?, minor[1..].trim_end().parse()?)?;
|
||||
let size = Self::make_dev_node(&dev_node, &part_path)?;
|
||||
|
||||
let number = fs::file_read_firstline(&format!("{}/partition", part_path))?
|
||||
.trim()
|
||||
.parse::<i32>()?;
|
||||
|
||||
info!(
|
||||
"drive '{}' ('{}'): found partition '{}' ({})",
|
||||
name, fidx, devnode, number
|
||||
"drive '{}' ('{}'): found partition '{}' ({}, {}B)",
|
name, fidx, dev_node, number, size
);

let bucket = Bucket::Partition(PartitionBucketData {
- dev_node: devnode,
+ dev_node,
mountpoint: None,
number,
size,
});

parts.push(bucket);
}

- disk_map.insert(fidx.to_owned(), parts);
+ disk_map.insert(fidx, parts);
}

Ok(Self {
- filesystems: Filesystems::scan()?,
+ filesystems,
disk_map,
})
}

@@ -253,7 +313,11 @@ impl DiskState
_ => bail!("no or invalid image in path"),
};

- let buckets = match self.disk_map.get_mut(req_fidx.as_ref()) {
+ let buckets = match self.disk_map.get_mut(
+ req_fidx
+ .strip_suffix(".img.fidx")
+ .unwrap_or_else(|| req_fidx.as_ref()),
+ ) {
Some(x) => x,
None => bail!("given image '{}' not found", req_fidx),
};

@@ -273,27 +337,41 @@ impl DiskState
}
};

+ let mut components = Vec::new();
+ let component_count = Bucket::component_depth(&bucket_type)?;
+
+ while components.len() < component_count {
let component = match cmp.next() {
Some(Component::Normal(x)) => x.to_string_lossy(),
Some(c) => bail!("invalid bucket component in path: {:?}", c),
None => {
- // list bucket components available
+ // list bucket components available at this level
let comps = buckets
.iter()
- .filter(|b| b.type_string() == bucket_type)
- .map(Bucket::component_string)
+ .filter_map(|b| {
+ if b.type_string() != bucket_type {
+ return None;
+ }
+ match b.component_string(components.len()) {
+ Ok(cs) => Some((cs.to_owned(), b.size())),
+ Err(_) => None,
+ }
+ })
.collect();
return Ok(ResolveResult::BucketComponents(comps));
}
};

- let mut bucket = match Bucket::filter_mut(buckets, &bucket_type, &component) {
+ components.push(component);
+ }

+ let mut bucket = match Bucket::filter_mut(buckets, &bucket_type, &components) {
Some(bucket) => bucket,
None => bail!(
- "bucket/component path not found: {}/{}/{}",
+ "bucket/component path not found: {}/{}/{:?}",
req_fidx,
bucket_type,
- component
+ components
),
};

@@ -303,10 +381,10 @@ impl DiskState
.ensure_mounted(&mut bucket)
.map_err(|err| {
format_err!(
- "mounting '{}/{}/{}' failed: {}",
+ "mounting '{}/{}/{:?}' failed: {}",
req_fidx,
bucket_type,
- component,
+ components,
err
)
})?;

@@ -320,6 +398,21 @@ impl DiskState
Ok(ResolveResult::Path(local_path))
}

+ fn make_dev_node(devnode: &str, sys_path: &str) -> Result<u64, Error> {
+ let dev_num_str = fs::file_read_firstline(&format!("{}/dev", sys_path))?;
+ let (major, minor) = dev_num_str.split_at(dev_num_str.find(':').unwrap());
+ Self::mknod_blk(&devnode, major.parse()?, minor[1..].trim_end().parse()?)?;
+
+ // this *always* contains the number of 512-byte sectors, regardless of the true
+ // blocksize of this disk - which should always be 512 here anyway
+ let size = fs::file_read_firstline(&format!("{}/size", sys_path))?
+ .trim()
+ .parse::<u64>()?
+ * 512;
+
+ Ok(size)
+ }
+
fn mknod_blk(path: &str, maj: u64, min: u64) -> Result<(), Error> {
use nix::sys::stat;
let dev = stat::makedev(maj, min);
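The new make_dev_node() helper in the hunk above reads two sysfs attributes. A standalone sketch of just the parsing, with hypothetical helper names and no mknod side effects — sysfs `dev` holds "MAJOR:MINOR" and `size` counts 512-byte sectors regardless of the disk's real block size:

```rust
// sysfs "dev" holds "MAJOR:MINOR"; "size" counts 512-byte sectors
fn parse_dev_num(dev_num_str: &str) -> Option<(u64, u64)> {
    let (major, minor) = dev_num_str.split_at(dev_num_str.find(':')?);
    Some((major.parse().ok()?, minor[1..].trim_end().parse().ok()?))
}

fn size_from_sectors(sectors_str: &str) -> Option<u64> {
    Some(sectors_str.trim().parse::<u64>().ok()? * 512)
}

fn main() {
    assert_eq!(parse_dev_num("8:1\n"), Some((8, 1)));
    assert_eq!(size_from_sectors("2048\n"), Some(2048 * 512));
}
```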
@@ -4,6 +4,9 @@ use proxmox::tools::time::epoch_i64;

const TIMEOUT: i64 = 600; // seconds
static TRIGGERED: AtomicI64 = AtomicI64::new(0);
+ static INHIBITORS: AtomicI64 = AtomicI64::new(0);
+
+ pub struct WatchdogInhibitor {}

fn handle_expired() -> ! {
use nix::sys::reboot;

@@ -37,5 +40,24 @@ pub fn watchdog_ping()

/// Returns the remaining time before watchdog expiry in seconds
pub fn watchdog_remaining() -> i64 {
+ if INHIBITORS.load(Ordering::Acquire) > 0 {
+ TIMEOUT
+ } else {
TIMEOUT - (epoch_i64() - TRIGGERED.load(Ordering::Acquire))
+ }
}

+ /// Returns an object that inhibts watchdog expiry for its lifetime, it will issue a ping on Drop
+ pub fn watchdog_inhibit() -> WatchdogInhibitor {
+ let prev = INHIBITORS.fetch_add(1, Ordering::AcqRel);
+ log::info!("Inhibit added: {}", prev + 1);
+ WatchdogInhibitor {}
+ }
+
+ impl Drop for WatchdogInhibitor {
+ fn drop(&mut self) {
+ watchdog_ping();
+ let prev = INHIBITORS.fetch_sub(1, Ordering::AcqRel);
+ log::info!("Inhibit dropped: {}", prev - 1);
+ }
+ }
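The inhibitor introduced above is a plain RAII guard over an atomic counter: constructing it increments INHIBITORS, and dropping it pings the watchdog and decrements again. A minimal standalone sketch of the pattern (illustrative names, std only, logging left out):

```rust
use std::sync::atomic::{AtomicI64, Ordering};

static INHIBITORS: AtomicI64 = AtomicI64::new(0);

struct Inhibitor;

fn inhibit() -> Inhibitor {
    // a positive count keeps the watchdog from expiring
    INHIBITORS.fetch_add(1, Ordering::AcqRel);
    Inhibitor
}

impl Drop for Inhibitor {
    fn drop(&mut self) {
        // dropping the guard pings and re-arms the watchdog in the real code
        INHIBITORS.fetch_sub(1, Ordering::AcqRel);
    }
}

fn main() {
    {
        let _a = inhibit();
        let _b = inhibit(); // guards nest; the counter tracks both
        assert_eq!(INHIBITORS.load(Ordering::Acquire), 2);
    }
    assert_eq!(INHIBITORS.load(Ordering::Acquire), 0);
}
```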
@@ -43,6 +43,10 @@ pub const PROXMOX_BACKUP_API_PID_FN: &str = concat!(PROXMOX_BACKUP_RUN_DIR_M!(),
pub const PROXMOX_BACKUP_INITRAMFS_FN: &str =
concat!(PROXMOX_BACKUP_CACHE_DIR_M!(), "/file-restore-initramfs.img");

+ /// filename of the cached initramfs to use for debugging single file restore
+ pub const PROXMOX_BACKUP_INITRAMFS_DBG_FN: &str =
+ concat!(PROXMOX_BACKUP_CACHE_DIR_M!(), "/file-restore-initramfs-debug.img");
+
/// filename of the kernel to use for booting single file restore VMs
pub const PROXMOX_BACKUP_KERNEL_FN: &str =
concat!(PROXMOX_BACKUP_FILE_RESTORE_BIN_DIR_M!(), "/bzImage");
@@ -20,13 +20,16 @@ use proxmox::{
tools::fs::{file_get_json, replace_file, CreateOptions},
};

+ use proxmox_http::client::HttpsConnector;
+ use proxmox_http::uri::build_authority;
+
use super::pipe_to_stream::PipeToSendStream;
use crate::api2::types::{Authid, Userid};
use crate::tools::{
self,
BroadcastFuture,
DEFAULT_ENCODE_SET,
- http::HttpsConnector,
PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
};

/// Timeout used for several HTTP operations that are expected to finish quickly but may block in

@@ -273,6 +276,18 @@ fn load_ticket_info(prefix: &str, server: &str, userid: &Userid) -> Option<(Stri
}
}

+ fn build_uri(server: &str, port: u16, path: &str, query: Option<String>) -> Result<Uri, Error> {
+ Uri::builder()
+ .scheme("https")
+ .authority(build_authority(server, port)?)
+ .path_and_query(match query {
+ Some(query) => format!("/{}?{}", path, query),
+ None => format!("/{}", path),
+ })
+ .build()
+ .map_err(|err| format_err!("error building uri - {}", err))
+ }
+
impl HttpClient {
pub fn new(
server: &str,
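The new build_uri() helper composes scheme, authority and an optional query through a URI builder instead of string formatting and re-parsing. A self-contained sketch assuming a plain `http` (0.2) dependency, with proxmox-http's build_authority replaced by a formatted host:port string:

```rust
use http::Uri;

fn build_uri(server: &str, port: u16, path: &str, query: Option<String>) -> Result<Uri, http::Error> {
    // stand-in for proxmox-http's build_authority()
    let authority = format!("{}:{}", server, port);
    let path_and_query = match query {
        Some(query) => format!("/{}?{}", path, query),
        None => format!("/{}", path),
    };
    Uri::builder()
        .scheme("https")
        .authority(authority.as_str())
        .path_and_query(path_and_query.as_str())
        .build()
}

fn main() -> Result<(), http::Error> {
    let uri = build_uri("backup.example.com", 8007, "api2/json/version", None)?;
    assert_eq!(uri.to_string(), "https://backup.example.com:8007/api2/json/version");
    Ok(())
}
```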
@@ -283,13 +298,13 @@ impl HttpClient {

let verified_fingerprint = Arc::new(Mutex::new(None));

- let mut fingerprint = options.fingerprint.take();
+ let mut expected_fingerprint = options.fingerprint.take();

- if fingerprint.is_some() {
+ if expected_fingerprint.is_some() {
// do not store fingerprints passed via options in cache
options.fingerprint_cache = false;
} else if options.fingerprint_cache && options.prefix.is_some() {
- fingerprint = load_fingerprint(options.prefix.as_ref().unwrap(), server);
+ expected_fingerprint = load_fingerprint(options.prefix.as_ref().unwrap(), server);
}

let mut ssl_connector_builder = SslConnector::builder(SslMethod::tls()).unwrap();

@@ -301,9 +316,9 @@ impl HttpClient {
let fingerprint_cache = options.fingerprint_cache;
let prefix = options.prefix.clone();
ssl_connector_builder.set_verify_callback(openssl::ssl::SslVerifyMode::PEER, move |valid, ctx| {
- let (valid, fingerprint) = Self::verify_callback(valid, ctx, fingerprint.clone(), interactive);
- if valid {
- if let Some(fingerprint) = fingerprint {
+ match Self::verify_callback(valid, ctx, expected_fingerprint.as_ref(), interactive) {
+ Ok(None) => true,
+ Ok(Some(fingerprint)) => {
if fingerprint_cache && prefix.is_some() {
if let Err(err) = store_fingerprint(
prefix.as_ref().unwrap(), &server, &fingerprint) {

@@ -311,9 +326,13 @@ impl HttpClient {
}
}
*verified_fingerprint.lock().unwrap() = Some(fingerprint);
+ true
+ },
+ Err(err) => {
+ eprintln!("certificate validation failed - {}", err);
+ false
+ },
+ }
}
- valid
});
} else {
ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE);

@@ -324,7 +343,7 @@ impl HttpClient {
httpc.enforce_http(false); // we want https...

httpc.set_connect_timeout(Some(std::time::Duration::new(10, 0)));
- let https = HttpsConnector::with_connector(httpc, ssl_connector_builder.build());
+ let https = HttpsConnector::with_connector(httpc, ssl_connector_builder.build(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);

let client = Client::builder()
//.http2_initial_stream_window_size( (1 << 31) - 2)
@@ -459,42 +478,47 @@ impl HttpClient {
}

fn verify_callback(
- valid: bool, ctx:
- &mut X509StoreContextRef,
- expected_fingerprint: Option<String>,
+ openssl_valid: bool,
+ ctx: &mut X509StoreContextRef,
+ expected_fingerprint: Option<&String>,
interactive: bool,
- ) -> (bool, Option<String>) {
- if valid { return (true, None); }
+ ) -> Result<Option<String>, Error> {
+
+ if openssl_valid {
+ return Ok(None);
+ }

let cert = match ctx.current_cert() {
Some(cert) => cert,
- None => return (false, None),
+ None => bail!("context lacks current certificate."),
};

let depth = ctx.error_depth();
- if depth != 0 { return (false, None); }
+ if depth != 0 { bail!("context depth != 0") }

let fp = match cert.digest(openssl::hash::MessageDigest::sha256()) {
Ok(fp) => fp,
- Err(_) => return (false, None), // should not happen
+ Err(err) => bail!("failed to calculate certificate FP - {}", err), // should not happen
};
let fp_string = proxmox::tools::digest_to_hex(&fp);
let fp_string = fp_string.as_bytes().chunks(2).map(|v| std::str::from_utf8(v).unwrap())
.collect::<Vec<&str>>().join(":");

if let Some(expected_fingerprint) = expected_fingerprint {
- if expected_fingerprint.to_lowercase() == fp_string {
- return (true, Some(fp_string));
+ let expected_fingerprint = expected_fingerprint.to_lowercase();
+ if expected_fingerprint == fp_string {
+ return Ok(Some(fp_string));
} else {
- return (false, None);
+ eprintln!("WARNING: certificate fingerprint does not match expected fingerprint!");
+ eprintln!("expected: {}", expected_fingerprint);
}
}

// If we're on a TTY, query the user
if interactive && tty::stdin_isatty() {
- println!("fingerprint: {}", fp_string);
+ eprintln!("fingerprint: {}", fp_string);
loop {
- print!("Are you sure you want to continue connecting? (y/n): ");
+ eprint!("Are you sure you want to continue connecting? (y/n): ");
let _ = std::io::stdout().flush();
use std::io::{BufRead, BufReader};
let mut line = String::new();

@@ -502,18 +526,19 @@ impl HttpClient {
Ok(_) => {
let trimmed = line.trim();
if trimmed == "y" || trimmed == "Y" {
- return (true, Some(fp_string));
+ return Ok(Some(fp_string));
} else if trimmed == "n" || trimmed == "N" {
- return (false, None);
+ bail!("Certificate fingerprint was not confirmed.");
} else {
continue;
}
}
- Err(_) => return (false, None),
+ Err(err) => bail!("Certificate fingerprint was not confirmed - {}.", err),
}
}
}
- (false, None)
+
+ bail!("Certificate fingerprint was not confirmed.");
}

pub async fn request(&self, mut req: Request<Body>) -> Result<Value, Error> {
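The reworked verify_callback() renders the SHA-256 digest as lowercase hex split into colon-separated byte pairs. An equivalent std-only sketch of that formatting (the diff goes through digest_to_hex and chunks(2) instead):

```rust
// render a digest as lowercase hex pairs joined by ':'
fn fingerprint_string(digest: &[u8]) -> String {
    digest
        .iter()
        .map(|b| format!("{:02x}", b))
        .collect::<Vec<_>>()
        .join(":")
}

fn main() {
    let digest = [0xdeu8, 0xad, 0xbe, 0xef];
    assert_eq!(fingerprint_string(&digest), "de:ad:be:ef");
}
```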
@@ -614,16 +639,11 @@ impl HttpClient {
data: Option<Value>,
) -> Result<Value, Error> {

let path = path.trim_matches('/');
- let mut url = format!("https://{}:{}/{}", &self.server, self.port, path);
-
- if let Some(data) = data {
- let query = tools::json_object_to_query(data).unwrap();
- url.push('?');
- url.push_str(&query);
- }
-
- let url: Uri = url.parse().unwrap();
+ let query = match data {
+ Some(data) => Some(tools::json_object_to_query(data)?),
+ None => None,
+ };
+ let url = build_uri(&self.server, self.port, path, query)?;

let req = Request::builder()
.method("POST")
@@ -757,31 +777,29 @@ impl HttpClient {
}

pub fn request_builder(server: &str, port: u16, method: &str, path: &str, data: Option<Value>) -> Result<Request<Body>, Error> {
let path = path.trim_matches('/');
- let url: Uri = format!("https://{}:{}/{}", server, port, path).parse()?;

if let Some(data) = data {
if method == "POST" {
+ let url = build_uri(server, port, path, None)?;
let request = Request::builder()
.method(method)
.uri(url)
.header("User-Agent", "proxmox-backup-client/1.0")
.header(hyper::header::CONTENT_TYPE, "application/json")
.body(Body::from(data.to_string()))?;
- return Ok(request);
+ Ok(request)
} else {
let query = tools::json_object_to_query(data)?;
- let url: Uri = format!("https://{}:{}/{}?{}", server, port, path, query).parse()?;
+ let url = build_uri(server, port, path, Some(query))?;
let request = Request::builder()
.method(method)
.uri(url)
.header("User-Agent", "proxmox-backup-client/1.0")
.header(hyper::header::CONTENT_TYPE, "application/x-www-form-urlencoded")
.body(Body::empty())?;
- return Ok(request);
+ Ok(request)
}
}
-
} else {
+ let url = build_uri(server, port, path, None)?;
let request = Request::builder()
.method(method)
.uri(url)

@@ -792,6 +810,7 @@ impl HttpClient {
Ok(request)
}
}
+ }

impl Drop for HttpClient {
fn drop(&mut self) {

@@ -970,29 +989,25 @@ impl H2Client {
let path = path.trim_matches('/');

let content_type = content_type.unwrap_or("application/x-www-form-urlencoded");

- if let Some(param) = param {
+ let query = match param {
+ Some(param) => {
let query = tools::json_object_to_query(param)?;
- // We detected problem with hyper around 6000 characters - seo we try to keep on the safe side
- if query.len() > 4096 { bail!("h2 query data too large ({} bytes) - please encode data inside body", query.len()); }
- let url: Uri = format!("https://{}:8007/{}?{}", server, path, query).parse()?;
- let request = Request::builder()
- .method(method)
- .uri(url)
- .header("User-Agent", "proxmox-backup-client/1.0")
- .header(hyper::header::CONTENT_TYPE, content_type)
- .body(())?;
- Ok(request)
- } else {
- let url: Uri = format!("https://{}:8007/{}", server, path).parse()?;
- let request = Request::builder()
- .method(method)
- .uri(url)
- .header("User-Agent", "proxmox-backup-client/1.0")
- .header(hyper::header::CONTENT_TYPE, content_type)
- .body(())?;
+ // We detected problem with hyper around 6000 characters - so we try to keep on the safe side
+ if query.len() > 4096 {
+ bail!("h2 query data too large ({} bytes) - please encode data inside body", query.len());
+ }
+ Some(query)
+ }
+ None => None,
+ };

+ let url = build_uri(server, 8007, path, query)?;
+ let request = Request::builder()
+ .method(method)
+ .uri(url)
+ .header("User-Agent", "proxmox-backup-client/1.0")
+ .header(hyper::header::CONTENT_TYPE, content_type)
+ .body(())?;
Ok(request)
}
}
}
@@ -14,6 +14,7 @@ use crate::{
backup::*,
client::*,
server::WorkerTask,
+ task_log,
tools::{compute_file_csum, ParallelHandler},
};
use proxmox::api::error::{HttpError, StatusCode};

@@ -443,6 +444,51 @@ pub async fn pull_snapshot_from(
Ok(())
}

+ struct SkipInfo {
+ oldest: i64,
+ newest: i64,
+ count: u64,
+ }
+
+ impl SkipInfo {
+ fn update(&mut self, backup_time: i64) {
+ self.count += 1;
+
+ if backup_time < self.oldest {
+ self.oldest = backup_time;
+ }
+
+ if backup_time > self.newest {
+ self.newest = backup_time;
+ }
+ }
+
+ fn affected(&self) -> Result<String, Error> {
+ match self.count {
+ 0 => Ok(String::new()),
+ 1 => proxmox::tools::time::epoch_to_rfc3339_utc(self.oldest),
+ _ => {
+ Ok(format!(
+ "{} .. {}",
+ proxmox::tools::time::epoch_to_rfc3339_utc(self.oldest)?,
+ proxmox::tools::time::epoch_to_rfc3339_utc(self.newest)?,
+ ))
+ }
+ }
+ }
+ }
+
+ impl std::fmt::Display for SkipInfo {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(
+ f,
+ "skipped: {} snapshot(s) ({}) older than the newest local snapshot",
+ self.count,
+ self.affected().map_err(|_| std::fmt::Error)?
+ )
+ }
+ }
+
pub async fn pull_group(
worker: &WorkerTask,
client: &HttpClient,

@@ -477,6 +523,12 @@ pub async fn pull_group(

progress.group_snapshots = list.len() as u64;

+ let mut skip_info = SkipInfo {
+ oldest: i64::MAX,
+ newest: i64::MIN,
+ count: 0,
+ };
+
for (pos, item) in list.into_iter().enumerate() {
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;

@@ -495,6 +547,7 @@ pub async fn pull_group(

if let Some(last_sync_time) = last_sync {
if last_sync_time > backup_time {
+ skip_info.update(backup_time);
continue;
}
}

@@ -552,6 +605,10 @@ pub async fn pull_group(
}
}

+ if skip_info.count > 0 {
+ task_log!(worker, "{}", skip_info);
+ }
+
Ok(())
}
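Usage-wise, SkipInfo is plain bookkeeping: every skipped snapshot updates the oldest/newest bounds and the count, and one summary line is logged only if anything was skipped. A standalone sketch with raw epochs (the real code renders them as RFC 3339):

```rust
struct SkipInfo {
    oldest: i64,
    newest: i64,
    count: u64,
}

impl SkipInfo {
    fn update(&mut self, backup_time: i64) {
        self.count += 1;
        self.oldest = self.oldest.min(backup_time);
        self.newest = self.newest.max(backup_time);
    }
}

fn main() {
    let mut skip_info = SkipInfo { oldest: i64::MAX, newest: i64::MIN, count: 0 };
    for &backup_time in &[1_600_000_000i64, 1_500_000_000, 1_550_000_000] {
        skip_info.update(backup_time);
    }
    if skip_info.count > 0 {
        println!(
            "skipped: {} snapshot(s) ({} .. {}) older than the newest local snapshot",
            skip_info.count, skip_info.oldest, skip_info.newest
        );
    }
}
```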
@@ -16,9 +16,11 @@ use proxmox::try_block;
use crate::buildcfg;

pub mod acl;
+ pub mod acme;
pub mod cached_user_info;
pub mod datastore;
pub mod network;
+ pub mod node;
pub mod remote;
pub mod sync;
pub mod tfa;

@@ -98,10 +100,6 @@ pub fn create_configdir() -> Result<(), Error> {
/// Update self signed node certificate.
pub fn update_self_signed_cert(force: bool) -> Result<(), Error> {

- let backup_user = crate::backup::backup_user()?;
-
- create_configdir()?;
-
let key_path = PathBuf::from(configdir!("/proxy.key"));
let cert_path = PathBuf::from(configdir!("/proxy.pem"));

@@ -111,15 +109,6 @@ pub fn update_self_signed_cert(force: bool) -> Result<(), Error> {

let priv_pem = rsa.private_key_to_pem()?;

- replace_file(
- &key_path,
- &priv_pem,
- CreateOptions::new()
- .perm(Mode::from_bits_truncate(0o0640))
- .owner(nix::unistd::ROOT)
- .group(backup_user.gid),
- )?;
-
let mut x509 = X509Builder::new()?;

x509.set_version(2)?;

@@ -198,14 +187,25 @@ pub fn update_self_signed_cert(force: bool) -> Result<(), Error> {
let x509 = x509.build();
let cert_pem = x509.to_pem()?;

- replace_file(
- &cert_path,
- &cert_pem,
- CreateOptions::new()
- .perm(Mode::from_bits_truncate(0o0640))
- .owner(nix::unistd::ROOT)
- .group(backup_user.gid),
- )?;
+ set_proxy_certificate(&cert_pem, &priv_pem)?;

Ok(())
}

+ pub(crate) fn set_proxy_certificate(cert_pem: &[u8], key_pem: &[u8]) -> Result<(), Error> {
+ let backup_user = crate::backup::backup_user()?;
+ let options = CreateOptions::new()
+ .perm(Mode::from_bits_truncate(0o0640))
+ .owner(nix::unistd::ROOT)
+ .group(backup_user.gid);
+ let key_path = PathBuf::from(configdir!("/proxy.key"));
+ let cert_path = PathBuf::from(configdir!("/proxy.pem"));
+
+ create_configdir()?;
+ replace_file(&key_path, &key_pem, options.clone())
+ .map_err(|err| format_err!("error writing certificate private key - {}", err))?;
+ replace_file(&cert_path, &cert_pem, options)
+ .map_err(|err| format_err!("error writing certificate file - {}", err))?;
+
+ Ok(())
+ }
@@ -308,7 +308,7 @@ pub fn check_acl_path(path: &str) -> Result<(), Error> {
return Ok(());
}
match components[1] {
- "disks" | "log" | "status" | "tasks" | "time" => {
+ "certificates" | "disks" | "log" | "status" | "tasks" | "time" => {
if components_len == 2 {
return Ok(());
}
src/config/acme/mod.rs (new file, 174 lines)
@@ -0,0 +1,174 @@
use std::collections::HashMap;
use std::path::Path;

use anyhow::{bail, format_err, Error};
use serde_json::Value;

use proxmox::sys::error::SysError;
use proxmox::tools::fs::{CreateOptions, file_read_string};

use crate::api2::types::{
PROXMOX_SAFE_ID_REGEX,
AcmeChallengeSchema,
KnownAcmeDirectory,
AcmeAccountName,
};
use crate::tools::ControlFlow;

pub(crate) const ACME_DIR: &str = configdir!("/acme");
pub(crate) const ACME_ACCOUNT_DIR: &str = configdir!("/acme/accounts");

pub(crate) const ACME_DNS_SCHEMA_FN: &str = "/usr/share/proxmox-acme/dns-challenge-schema.json";

pub mod plugin;

// `const fn`ify this once it is supported in `proxmox`
fn root_only() -> CreateOptions {
CreateOptions::new()
.owner(nix::unistd::ROOT)
.group(nix::unistd::Gid::from_raw(0))
.perm(nix::sys::stat::Mode::from_bits_truncate(0o700))
}

fn create_acme_subdir(dir: &str) -> nix::Result<()> {
match proxmox::tools::fs::create_dir(dir, root_only()) {
Ok(()) => Ok(()),
Err(err) if err.already_exists() => Ok(()),
Err(err) => Err(err),
}
}

pub(crate) fn make_acme_dir() -> nix::Result<()> {
create_acme_subdir(ACME_DIR)
}

pub(crate) fn make_acme_account_dir() -> nix::Result<()> {
make_acme_dir()?;
create_acme_subdir(ACME_ACCOUNT_DIR)
}

pub const KNOWN_ACME_DIRECTORIES: &[KnownAcmeDirectory] = &[
KnownAcmeDirectory {
name: "Let's Encrypt V2",
url: "https://acme-v02.api.letsencrypt.org/directory",
},
KnownAcmeDirectory {
name: "Let's Encrypt V2 Staging",
url: "https://acme-staging-v02.api.letsencrypt.org/directory",
},
];

pub const DEFAULT_ACME_DIRECTORY_ENTRY: &KnownAcmeDirectory = &KNOWN_ACME_DIRECTORIES[0];

pub fn account_path(name: &str) -> String {
format!("{}/{}", ACME_ACCOUNT_DIR, name)
}

pub fn foreach_acme_account<F>(mut func: F) -> Result<(), Error>
where
F: FnMut(AcmeAccountName) -> ControlFlow<Result<(), Error>>,
{
match crate::tools::fs::scan_subdir(-1, ACME_ACCOUNT_DIR, &PROXMOX_SAFE_ID_REGEX) {
Ok(files) => {
for file in files {
let file = file?;
let file_name = unsafe { file.file_name_utf8_unchecked() };

if file_name.starts_with('_') {
continue;
}

let account_name = match AcmeAccountName::from_string(file_name.to_owned()) {
Ok(account_name) => account_name,
Err(_) => continue,
};

if let ControlFlow::Break(result) = func(account_name) {
return result;
}
}
Ok(())
}
Err(err) if err.not_found() => Ok(()),
Err(err) => Err(err.into()),
}
}

pub fn mark_account_deactivated(name: &str) -> Result<(), Error> {
let from = account_path(name);
for i in 0..100 {
let to = account_path(&format!("_deactivated_{}_{}", name, i));
if !Path::new(&to).exists() {
return std::fs::rename(&from, &to).map_err(|err| {
format_err!(
"failed to move account path {:?} to {:?} - {}",
from,
to,
err
)
});
}
}
bail!(
"No free slot to rename deactivated account {:?}, please cleanup {:?}",
from,
ACME_ACCOUNT_DIR
);
}

pub fn load_dns_challenge_schema() -> Result<Vec<AcmeChallengeSchema>, Error> {
let raw = file_read_string(&ACME_DNS_SCHEMA_FN)?;
let schemas: serde_json::Map<String, Value> = serde_json::from_str(&raw)?;

Ok(schemas
.iter()
.map(|(id, schema)| AcmeChallengeSchema {
id: id.to_owned(),
name: schema
.get("name")
.and_then(Value::as_str)
.unwrap_or(id)
.to_owned(),
ty: "dns",
schema: schema.to_owned(),
})
.collect())
}

pub fn complete_acme_account(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
let mut out = Vec::new();
let _ = foreach_acme_account(|name| {
out.push(name.into_string());
ControlFlow::CONTINUE
});
out
}

pub fn complete_acme_plugin(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
match plugin::config() {
Ok((config, _digest)) => config
.iter()
.map(|(id, (_type, _cfg))| id.clone())
.collect(),
Err(_) => Vec::new(),
}
}

pub fn complete_acme_plugin_type(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
vec![
"dns".to_string(),
//"http".to_string(), // makes currently not realyl sense to create or the like
]
}

pub fn complete_acme_api_challenge_type(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
if param.get("type") == Some(&"dns".to_string()) {
match load_dns_challenge_schema() {
Ok(schema) => schema.into_iter().map(|s| s.id).collect(),
Err(_) => Vec::new(),
}
} else {
Vec::new()
}
}
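foreach_acme_account() follows a callback-with-early-exit pattern: the closure returns a ControlFlow that can break out with a result. A standalone sketch using std's ControlFlow over an in-memory account list instead of the crate-local type and the directory scan:

```rust
use std::ops::ControlFlow;

fn foreach_account<F>(accounts: &[&str], mut func: F) -> Result<(), String>
where
    F: FnMut(&str) -> ControlFlow<Result<(), String>>,
{
    for &name in accounts {
        // entries starting with '_' (e.g. deactivated accounts) are skipped
        if name.starts_with('_') {
            continue;
        }
        if let ControlFlow::Break(result) = func(name) {
            return result;
        }
    }
    Ok(())
}

fn main() {
    let mut seen = Vec::new();
    let _ = foreach_account(&["default", "_deactivated_old_0", "staging"], |name| {
        seen.push(name.to_owned());
        ControlFlow::Continue(())
    });
    assert_eq!(seen, ["default", "staging"]);
}
```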
src/config/acme/plugin.rs (new file, 215 lines)
@@ -0,0 +1,215 @@
use anyhow::Error;
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use serde_json::Value;

use proxmox::api::{
api,
schema::*,
section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin},
};

use proxmox::tools::{fs::replace_file, fs::CreateOptions};

use crate::api2::types::PROXMOX_SAFE_ID_FORMAT;

pub const PLUGIN_ID_SCHEMA: Schema = StringSchema::new("ACME Challenge Plugin ID.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(1)
.max_length(32)
.schema();

lazy_static! {
pub static ref CONFIG: SectionConfig = init();
}

#[api(
properties: {
id: { schema: PLUGIN_ID_SCHEMA },
},
)]
#[derive(Deserialize, Serialize)]
/// Standalone ACME Plugin for the http-1 challenge.
pub struct StandalonePlugin {
/// Plugin ID.
id: String,
}

impl Default for StandalonePlugin {
fn default() -> Self {
Self {
id: "standalone".to_string(),
}
}
}

#[api(
properties: {
id: { schema: PLUGIN_ID_SCHEMA },
disable: {
optional: true,
default: false,
},
"validation-delay": {
default: 30,
optional: true,
minimum: 0,
maximum: 2 * 24 * 60 * 60,
},
},
)]
/// DNS ACME Challenge Plugin core data.
#[derive(Deserialize, Serialize, Updater)]
#[serde(rename_all = "kebab-case")]
pub struct DnsPluginCore {
/// Plugin ID.
pub(crate) id: String,

/// DNS API Plugin Id.
pub(crate) api: String,

/// Extra delay in seconds to wait before requesting validation.
///
/// Allows to cope with long TTL of DNS records.
#[serde(skip_serializing_if = "Option::is_none", default)]
validation_delay: Option<u32>,

/// Flag to disable the config.
#[serde(skip_serializing_if = "Option::is_none", default)]
disable: Option<bool>,
}

#[api(
properties: {
core: { type: DnsPluginCore },
},
)]
/// DNS ACME Challenge Plugin.
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct DnsPlugin {
#[serde(flatten)]
pub(crate) core: DnsPluginCore,

// FIXME: The `Updater` should allow:
// * having different descriptions for this and the Updater version
// * having different `#[serde]` attributes for the Updater
// * or, well, leaving fields out completely in teh Updater but this means we may need to
//   separate Updater and Builder deriving.
// We handle this property separately in the API calls.
/// DNS plugin data (base64url encoded without padding).
#[serde(with = "proxmox::tools::serde::string_as_base64url_nopad")]
pub(crate) data: String,
}

impl DnsPlugin {
pub fn decode_data(&self, output: &mut Vec<u8>) -> Result<(), Error> {
Ok(base64::decode_config_buf(
&self.data,
base64::URL_SAFE_NO_PAD,
output,
)?)
}
}

fn init() -> SectionConfig {
let mut config = SectionConfig::new(&PLUGIN_ID_SCHEMA);

let standalone_schema = match &StandalonePlugin::API_SCHEMA {
Schema::Object(schema) => schema,
_ => unreachable!(),
};
let standalone_plugin = SectionConfigPlugin::new(
"standalone".to_string(),
Some("id".to_string()),
standalone_schema,
);
config.register_plugin(standalone_plugin);

let dns_challenge_schema = match DnsPlugin::API_SCHEMA {
Schema::AllOf(ref schema) => schema,
_ => unreachable!(),
};
let dns_challenge_plugin = SectionConfigPlugin::new(
"dns".to_string(),
Some("id".to_string()),
dns_challenge_schema,
);
config.register_plugin(dns_challenge_plugin);

config
}

const ACME_PLUGIN_CFG_FILENAME: &str = configdir!("/acme/plugins.cfg");
const ACME_PLUGIN_CFG_LOCKFILE: &str = configdir!("/acme/.plugins.lck");
const LOCK_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

pub fn lock() -> Result<std::fs::File, Error> {
super::make_acme_dir()?;
proxmox::tools::fs::open_file_locked(ACME_PLUGIN_CFG_LOCKFILE, LOCK_TIMEOUT, true)
}

pub fn config() -> Result<(PluginData, [u8; 32]), Error> {
let content = proxmox::tools::fs::file_read_optional_string(ACME_PLUGIN_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());

let digest = openssl::sha::sha256(content.as_bytes());
let mut data = CONFIG.parse(ACME_PLUGIN_CFG_FILENAME, &content)?;

if data.sections.get("standalone").is_none() {
let standalone = StandalonePlugin::default();
data.set_data("standalone", "standalone", &standalone)
.unwrap();
}

Ok((PluginData { data }, digest))
}

pub fn save_config(config: &PluginData) -> Result<(), Error> {
super::make_acme_dir()?;
let raw = CONFIG.write(ACME_PLUGIN_CFG_FILENAME, &config.data)?;

let backup_user = crate::backup::backup_user()?;
let mode = nix::sys::stat::Mode::from_bits_truncate(0o0640);
// set the correct owner/group/permissions while saving file
// owner(rw) = root, group(r)= backup
let options = CreateOptions::new()
.perm(mode)
.owner(nix::unistd::ROOT)
.group(backup_user.gid);

replace_file(ACME_PLUGIN_CFG_FILENAME, raw.as_bytes(), options)?;

Ok(())
}

pub struct PluginData {
data: SectionConfigData,
}

// And some convenience helpers.
impl PluginData {
pub fn remove(&mut self, name: &str) -> Option<(String, Value)> {
self.data.sections.remove(name)
}

pub fn contains_key(&mut self, name: &str) -> bool {
self.data.sections.contains_key(name)
}

pub fn get(&self, name: &str) -> Option<&(String, Value)> {
self.data.sections.get(name)
}

pub fn get_mut(&mut self, name: &str) -> Option<&mut (String, Value)> {
self.data.sections.get_mut(name)
}

pub fn insert(&mut self, id: String, ty: String, plugin: Value) {
self.data.sections.insert(id, (ty, plugin));
}

pub fn iter(&self) -> impl Iterator<Item = (&String, &(String, Value))> + Send {
self.data.sections.iter()
}
}
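DnsPlugin::decode_data() above decodes the base64url-without-padding plugin data into a caller-provided buffer. A small usage sketch assuming the same `base64` (0.13) API plus `anyhow`:

```rust
use anyhow::Error;

fn decode_plugin_data(data: &str) -> Result<Vec<u8>, Error> {
    let mut output = Vec::new();
    base64::decode_config_buf(data, base64::URL_SAFE_NO_PAD, &mut output)?;
    Ok(output)
}

fn main() -> Result<(), Error> {
    // "aGVsbG8" is "hello" in base64url without padding
    assert_eq!(decode_plugin_data("aGVsbG8")?, b"hello");
    Ok(())
}
```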
@@ -13,7 +13,11 @@ use proxmox::api::{
}
};

- use proxmox::tools::{fs::replace_file, fs::CreateOptions};
+ use proxmox::tools::fs::{
+ open_file_locked,
+ replace_file,
+ CreateOptions,
+ };

use crate::api2::types::*;

@@ -82,8 +86,8 @@ pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name").schema()
},
}
)]
- #[serde(rename_all="kebab-case")]
#[derive(Serialize,Deserialize)]
+ #[serde(rename_all="kebab-case")]
/// Datastore configuration properties.
pub struct DataStoreConfig {
pub name: String,

@@ -133,6 +137,11 @@ fn init() -> SectionConfig {
pub const DATASTORE_CFG_FILENAME: &str = "/etc/proxmox-backup/datastore.cfg";
pub const DATASTORE_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.datastore.lck";

+ /// Get exclusive lock
+ pub fn lock_config() -> Result<std::fs::File, Error> {
+ open_file_locked(DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)
+ }
+
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {

let content = proxmox::tools::fs::file_read_optional_string(DATASTORE_CFG_FILENAME)?
src/config/node.rs (new file, 220 lines)
@@ -0,0 +1,220 @@
use std::collections::HashSet;
use std::fs::File;
use std::time::Duration;

use anyhow::{bail, Error};
use nix::sys::stat::Mode;
use serde::{Deserialize, Serialize};

use proxmox::api::api;
use proxmox::api::schema::{ApiStringFormat, Updater};
use proxmox::tools::fs::{replace_file, CreateOptions};

use proxmox_http::ProxyConfig;

use crate::acme::AcmeClient;
use crate::api2::types::{
AcmeAccountName, AcmeDomain, ACME_DOMAIN_PROPERTY_SCHEMA, HTTP_PROXY_SCHEMA,
};

const CONF_FILE: &str = configdir!("/node.cfg");
const LOCK_FILE: &str = configdir!("/.node.lck");
const LOCK_TIMEOUT: Duration = Duration::from_secs(10);

pub fn lock() -> Result<File, Error> {
proxmox::tools::fs::open_file_locked(LOCK_FILE, LOCK_TIMEOUT, true)
}

/// Read the Node Config.
pub fn config() -> Result<(NodeConfig, [u8; 32]), Error> {
let content =
proxmox::tools::fs::file_read_optional_string(CONF_FILE)?.unwrap_or_else(|| "".to_string());

let digest = openssl::sha::sha256(content.as_bytes());
let data: NodeConfig = crate::tools::config::from_str(&content, &NodeConfig::API_SCHEMA)?;

Ok((data, digest))
}

/// Write the Node Config, requires the write lock to be held.
pub fn save_config(config: &NodeConfig) -> Result<(), Error> {
config.validate()?;

let raw = crate::tools::config::to_bytes(config, &NodeConfig::API_SCHEMA)?;

let backup_user = crate::backup::backup_user()?;
let options = CreateOptions::new()
.perm(Mode::from_bits_truncate(0o0640))
.owner(nix::unistd::ROOT)
.group(backup_user.gid);

replace_file(CONF_FILE, &raw, options)
}

#[api(
properties: {
account: { type: AcmeAccountName },
}
)]
#[derive(Deserialize, Serialize)]
/// The ACME configuration.
///
/// Currently only contains the name of the account use.
pub struct AcmeConfig {
/// Account to use to acquire ACME certificates.
account: AcmeAccountName,
}

#[api(
properties: {
acme: {
optional: true,
type: String,
format: &ApiStringFormat::PropertyString(&AcmeConfig::API_SCHEMA),
},
acmedomain0: {
schema: ACME_DOMAIN_PROPERTY_SCHEMA,
optional: true,
},
acmedomain1: {
schema: ACME_DOMAIN_PROPERTY_SCHEMA,
optional: true,
},
acmedomain2: {
schema: ACME_DOMAIN_PROPERTY_SCHEMA,
optional: true,
},
acmedomain3: {
schema: ACME_DOMAIN_PROPERTY_SCHEMA,
optional: true,
},
acmedomain4: {
schema: ACME_DOMAIN_PROPERTY_SCHEMA,
optional: true,
},
"http-proxy": {
schema: HTTP_PROXY_SCHEMA,
optional: true,
},
},
)]
#[derive(Deserialize, Serialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// Node specific configuration.
pub struct NodeConfig {
/// The acme account to use on this node.
#[serde(skip_serializing_if = "Updater::is_empty")]
acme: Option<String>,

#[serde(skip_serializing_if = "Updater::is_empty")]
acmedomain0: Option<String>,

#[serde(skip_serializing_if = "Updater::is_empty")]
acmedomain1: Option<String>,

#[serde(skip_serializing_if = "Updater::is_empty")]
acmedomain2: Option<String>,

#[serde(skip_serializing_if = "Updater::is_empty")]
acmedomain3: Option<String>,

#[serde(skip_serializing_if = "Updater::is_empty")]
acmedomain4: Option<String>,

#[serde(skip_serializing_if = "Updater::is_empty")]
http_proxy: Option<String>,
}

impl NodeConfig {
pub fn acme_config(&self) -> Option<Result<AcmeConfig, Error>> {
self.acme.as_deref().map(|config| -> Result<_, Error> {
Ok(crate::tools::config::from_property_string(
config,
&AcmeConfig::API_SCHEMA,
)?)
})
}

pub async fn acme_client(&self) -> Result<AcmeClient, Error> {
let account = if let Some(cfg) = self.acme_config().transpose()? {
cfg.account
} else {
AcmeAccountName::from_string("default".to_string())? // should really not happen
};
AcmeClient::load(&account).await
}

pub fn acme_domains(&self) -> AcmeDomainIter {
AcmeDomainIter::new(self)
}

/// Returns the parsed ProxyConfig
pub fn http_proxy(&self) -> Option<ProxyConfig> {
if let Some(http_proxy) = &self.http_proxy {
match ProxyConfig::parse_proxy_url(&http_proxy) {
Ok(proxy) => Some(proxy),
Err(_) => None,
}
} else {
None
}
}

/// Sets the HTTP proxy configuration
pub fn set_http_proxy(&mut self, http_proxy: Option<String>) {
self.http_proxy = http_proxy;
}

/// Validate the configuration.
pub fn validate(&self) -> Result<(), Error> {
let mut domains = HashSet::new();
for domain in self.acme_domains() {
let domain = domain?;
if !domains.insert(domain.domain.to_lowercase()) {
bail!("duplicate domain '{}' in ACME config", domain.domain);
}
}

Ok(())
}
}

pub struct AcmeDomainIter<'a> {
config: &'a NodeConfig,
index: usize,
}

impl<'a> AcmeDomainIter<'a> {
fn new(config: &'a NodeConfig) -> Self {
Self { config, index: 0 }
}
}

impl<'a> Iterator for AcmeDomainIter<'a> {
type Item = Result<AcmeDomain, Error>;

fn next(&mut self) -> Option<Self::Item> {
let domain = loop {
let index = self.index;
self.index += 1;

let domain = match index {
0 => self.config.acmedomain0.as_deref(),
1 => self.config.acmedomain1.as_deref(),
2 => self.config.acmedomain2.as_deref(),
3 => self.config.acmedomain3.as_deref(),
4 => self.config.acmedomain4.as_deref(),
_ => return None,
};

if let Some(domain) = domain {
break domain;
}
};

Some(crate::tools::config::from_property_string(
domain,
&AcmeDomain::API_SCHEMA,
))
}
}
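AcmeDomainIter walks the five fixed acmedomainN slots in order and silently skips unset ones; only set entries are then parsed as property strings. A standalone sketch of just the slot iteration:

```rust
// up to five optional slots are walked in order, skipping unset ones
struct Config {
    acmedomain: [Option<String>; 5],
}

struct DomainIter<'a> {
    config: &'a Config,
    index: usize,
}

impl<'a> Iterator for DomainIter<'a> {
    type Item = &'a str;

    fn next(&mut self) -> Option<Self::Item> {
        while self.index < self.config.acmedomain.len() {
            let slot = self.config.acmedomain[self.index].as_deref();
            self.index += 1;
            if let Some(domain) = slot {
                return Some(domain);
            }
        }
        None
    }
}

fn main() {
    let config = Config {
        acmedomain: [Some("a.example".into()), None, Some("b.example".into()), None, None],
    };
    let domains: Vec<_> = DomainIter { config: &config, index: 0 }.collect();
    assert_eq!(domains, ["a.example", "b.example"]);
}
```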
@@ -53,8 +53,8 @@ lazy_static! {
},
}
)]
- #[serde(rename_all="kebab-case")]
#[derive(Serialize,Deserialize,Clone)]
+ #[serde(rename_all="kebab-case")]
/// Sync Job
pub struct SyncJobConfig {
pub id: String,

@@ -82,8 +82,8 @@ pub struct SyncJobConfig {
},
)]

- #[serde(rename_all="kebab-case")]
#[derive(Serialize,Deserialize)]
+ #[serde(rename_all="kebab-case")]
/// Status of Sync Job
pub struct SyncJobStatus {
#[serde(flatten)]

@@ -62,8 +62,8 @@ lazy_static! {
},
}
)]
- #[serde(rename_all="kebab-case")]
#[derive(Serialize,Deserialize,Clone)]
+ #[serde(rename_all="kebab-case")]
/// Tape Backup Job Setup
pub struct TapeBackupJobSetup {
pub store: String,

@@ -98,8 +98,8 @@ pub struct TapeBackupJobSetup {
},
}
)]
- #[serde(rename_all="kebab-case")]
#[derive(Serialize,Deserialize,Clone)]
+ #[serde(rename_all="kebab-case")]
/// Tape Backup Job
pub struct TapeBackupJobConfig {
pub id: String,

@@ -121,8 +121,8 @@ pub struct TapeBackupJobConfig {
},
},
)]
- #[serde(rename_all="kebab-case")]
#[derive(Serialize,Deserialize)]
+ #[serde(rename_all="kebab-case")]
/// Status of Tape Backup Job
pub struct TapeBackupJobStatus {
#[serde(flatten)]

@@ -14,8 +14,8 @@ const LOCK_FILE: &str = configdir!("/token.shadow.lock");
const CONF_FILE: &str = configdir!("/token.shadow");
const LOCK_TIMEOUT: Duration = Duration::from_secs(5);

- #[serde(rename_all="kebab-case")]
#[derive(Serialize, Deserialize)]
+ #[serde(rename_all="kebab-case")]
/// ApiToken id / secret pair
pub struct ApiTokenSecret {
pub tokenid: Authid,

@@ -48,8 +48,8 @@ lazy_static! {
},
}
)]
- #[serde(rename_all="kebab-case")]
#[derive(Serialize,Deserialize)]
+ #[serde(rename_all="kebab-case")]
/// Verification Job
pub struct VerificationJobConfig {
/// unique ID to address this job

@@ -80,8 +80,8 @@ pub struct VerificationJobConfig {
},
},
)]
- #[serde(rename_all="kebab-case")]
#[derive(Serialize,Deserialize)]
+ #[serde(rename_all="kebab-case")]
/// Status of Verification Job
pub struct VerificationJobStatus {
#[serde(flatten)]
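These hunks all move `#[serde(rename_all=...)]` below the `#[derive(...)]` line; presumably this silences rustc's warning about derive helper attributes being used before the derive that introduces them (the attributes are otherwise equivalent). What the attribute itself does, as a small sketch assuming `serde` with the derive feature and `serde_json`:

```rust
use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
struct SyncJobExample {
    // serialized as "backup-time" because of rename_all above
    backup_time: i64,
}

fn main() {
    let json = serde_json::to_string(&SyncJobExample { backup_time: 0 }).unwrap();
    assert_eq!(json, r#"{"backup-time":0}"#);
}
```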
@@ -32,3 +32,5 @@ pub mod auth;
pub mod rrd;

pub mod tape;
+
+ pub mod acme;
@@ -7,6 +7,7 @@
use anyhow::{format_err, Error};
use lazy_static::lazy_static;
use nix::unistd::Pid;
+ use serde_json::Value;

use proxmox::sys::linux::procfs::PidStat;

@@ -91,3 +92,19 @@ pub use report::*;
pub mod ticket;

pub mod auth;
+
+ pub(crate) async fn reload_proxy_certificate() -> Result<(), Error> {
+ let proxy_pid = crate::server::read_pid(buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?;
+ let sock = crate::server::ctrl_sock_from_pid(proxy_pid);
+ let _: Value = crate::server::send_raw_command(sock, "{\"command\":\"reload-certificate\"}\n")
+ .await?;
+ Ok(())
+ }
+
+ pub(crate) async fn notify_datastore_removed() -> Result<(), Error> {
+ let proxy_pid = crate::server::read_pid(buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?;
+ let sock = crate::server::ctrl_sock_from_pid(proxy_pid);
+ let _: Value = crate::server::send_raw_command(sock, "{\"command\":\"datastore-removed\"}\n")
+ .await?;
+ Ok(())
+ }
@@ -2,11 +2,12 @@ use anyhow::{bail, format_err, Error};

use std::collections::HashMap;
use std::os::unix::io::AsRawFd;
- use std::path::PathBuf;
+ use std::path::{PathBuf, Path};
use std::sync::Arc;

use futures::*;
use tokio::net::UnixListener;
+ use serde::Serialize;
use serde_json::Value;
use nix::sys::socket;

@@ -102,25 +103,31 @@
}


- pub async fn send_command<P>(
- path: P,
- params: Value
- ) -> Result<Value, Error>
- where P: Into<PathBuf>,
+ pub async fn send_command<P, T>(path: P, params: &T) -> Result<Value, Error>
+ where
+ P: AsRef<Path>,
+ T: ?Sized + Serialize,
{
- let path: PathBuf = path.into();
-
- tokio::net::UnixStream::connect(path)
- .map_err(move |err| format_err!("control socket connect failed - {}", err))
- .and_then(move |mut conn| {
-
- let mut command_string = params.to_string();
+ let mut command_string = serde_json::to_string(params)?;
command_string.push('\n');
+ send_raw_command(path.as_ref(), &command_string).await
+ }

- async move {
+ pub async fn send_raw_command<P>(path: P, command_string: &str) -> Result<Value, Error>
+ where
+ P: AsRef<Path>,
+ {
use tokio::io::{AsyncBufReadExt, AsyncWriteExt};

+ let mut conn = tokio::net::UnixStream::connect(path)
+ .map_err(move |err| format_err!("control socket connect failed - {}", err))
+ .await?;
+
conn.write_all(command_string.as_bytes()).await?;
+ if !command_string.as_bytes().ends_with(b"\n") {
+ conn.write_all(b"\n").await?;
+ }

AsyncWriteExt::shutdown(&mut conn).await?;
let mut rx = tokio::io::BufReader::new(conn);
let mut data = String::new();

@@ -138,8 +145,6 @@ pub async fn send_command<P>(
bail!("unable to parse response: {}", data);
}
}
}
- }).await
}

/// A callback for a specific commando socket.
pub type CommandoSocketFn = Box<(dyn Fn(Option<&Value>) -> Result<Value, Error> + Send + Sync + 'static)>;
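After this change, the control socket speaks newline-delimited JSON and the generic send_command() accepts any Serialize value. A sketch of just the framing step (assumes `serde`/`serde_json`; key order in the output depends on serde_json's map type, so the test parses the line back instead of comparing strings):

```rust
use serde::Serialize;

// any Serialize value becomes one '\n'-terminated JSON line on the wire
fn frame_command<T: ?Sized + Serialize>(params: &T) -> Result<String, serde_json::Error> {
    let mut command_string = serde_json::to_string(params)?;
    command_string.push('\n');
    Ok(command_string)
}

fn main() -> Result<(), serde_json::Error> {
    let cmd = serde_json::json!({ "command": "worker-task-status", "args": { "upid": "UPID:example" } });
    let line = frame_command(&cmd)?;
    assert!(line.ends_with('\n'));
    // the receiving side reads one line and parses it back
    let parsed: serde_json::Value = serde_json::from_str(line.trim_end())?;
    assert_eq!(parsed["command"], "worker-task-status");
    Ok(())
}
```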
@@ -176,7 +176,13 @@ Datastore: {{job.store}}
Tape Pool: {{job.pool}}
Tape Drive: {{job.drive}}

+ {{#if snapshot-list ~}}
+ Snapshots included:
+
+ {{#each snapshot-list~}}
+ {{this}}
+ {{/each~}}
+ {{/if}}
Tape Backup failed: {{error}}
@@ -61,16 +61,20 @@ use crate::{
},
};

- #[serde(rename_all = "kebab-case")]
#[derive(Serialize, Deserialize)]
+ #[serde(rename_all = "kebab-case")]
/// Represents the State of a specific Job
pub enum JobState {
/// A job was created at 'time', but never started/finished
Created { time: i64 },
/// The Job was last started in 'upid',
Started { upid: String },
- /// The Job was last started in 'upid', which finished with 'state'
- Finished { upid: String, state: TaskState },
+ /// The Job was last started in 'upid', which finished with 'state', and was last updated at 'updated'
+ Finished {
+ upid: String,
+ state: TaskState,
+ updated: Option<i64>,
+ },
}

/// Represents a Job and holds the correct lock

@@ -147,12 +151,47 @@ pub fn create_state_file(jobtype: &str, jobname: &str) -> Result<(), Error> {
job.write_state()
}

+ /// Tries to update the state file with the current time
+ /// if the job is currently running, does nothing.
+ /// Intended for use when the schedule changes.
+ pub fn update_job_last_run_time(jobtype: &str, jobname: &str) -> Result<(), Error> {
+ let mut job = match Job::new(jobtype, jobname) {
+ Ok(job) => job,
+ Err(_) => return Ok(()), // was locked (running), so do not update
+ };
+ let time = proxmox::tools::time::epoch_i64();
+
+ job.state = match JobState::load(jobtype, jobname)? {
+ JobState::Created { .. } => JobState::Created { time },
+ JobState::Started { .. } => return Ok(()), // currently running (without lock?)
+ JobState::Finished {
+ upid,
+ state,
+ updated: _,
+ } => JobState::Finished {
+ upid,
+ state,
+ updated: Some(time),
+ },
+ };
+ job.write_state()
+ }
+
/// Returns the last run time of a job by reading the statefile
/// Note that this is not locked
pub fn last_run_time(jobtype: &str, jobname: &str) -> Result<i64, Error> {
match JobState::load(jobtype, jobname)? {
JobState::Created { time } => Ok(time),
- JobState::Started { upid } | JobState::Finished { upid, .. } => {
+ JobState::Finished {
+ updated: Some(time),
+ ..
+ } => Ok(time),
+ JobState::Started { upid }
+ | JobState::Finished {
+ upid,
+ state: _,
+ updated: None,
+ } => {
let upid: UPID = upid
.parse()
.map_err(|err| format_err!("could not parse upid from state: {}", err))?;

@@ -180,7 +219,11 @@ impl JobState {
let state = upid_read_status(&parsed)
.map_err(|err| format_err!("error reading upid log status: {}", err))?;

- Ok(JobState::Finished { upid, state })
+ Ok(JobState::Finished {
+ upid,
+ state,
+ updated: None,
+ })
} else {
Ok(JobState::Started { upid })
}

@@ -240,7 +283,11 @@ impl Job {
}
.to_string();

- self.state = JobState::Finished { upid, state };
+ self.state = JobState::Finished {
+ upid,
+ state,
+ updated: None,
+ };

self.write_state()
}

@@ -274,17 +321,25 @@ pub fn compute_schedule_status(
job_state: &JobState,
schedule: Option<&str>,
) -> Result<JobScheduleStatus, Error> {

- let (upid, endtime, state, starttime) = match job_state {
+ let (upid, endtime, state, last) = match job_state {
JobState::Created { time } => (None, None, None, *time),
JobState::Started { upid } => {
let parsed_upid: UPID = upid.parse()?;
(Some(upid), None, None, parsed_upid.starttime)
},
- JobState::Finished { upid, state } => {
- let parsed_upid: UPID = upid.parse()?;
- (Some(upid), Some(state.endtime()), Some(state.to_string()), parsed_upid.starttime)
- },
- }
+ JobState::Finished {
+ upid,
+ state,
+ updated,
+ } => {
+ let last = updated.unwrap_or_else(|| state.endtime());
+ (
+ Some(upid),
+ Some(state.endtime()),
+ Some(state.to_string()),
+ last,
+ )
+ }
};

let mut status = JobScheduleStatus::default();

@@ -292,8 +347,6 @@ pub fn compute_schedule_status(
status.last_run_state = state;
status.last_run_endtime = endtime;

- let last = endtime.unwrap_or(starttime);
-
if let Some(schedule) = schedule {
if let Ok(event) = parse_calendar_event(&schedule) {
// ignore errors
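The new `updated` field stays backward compatible: with serde, an Option field that is missing from an old state file simply deserializes to None. A sketch with TaskState stood in by a plain String (assumes `serde` with derive and `serde_json`):

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
enum JobState {
    Created { time: i64 },
    Started { upid: String },
    // `state` is a plain String here; the real type is TaskState
    Finished { upid: String, state: String, updated: Option<i64> },
}

fn main() {
    // a state file written before the `updated` field existed
    let old = r#"{"finished":{"upid":"UPID:example","state":"OK"}}"#;
    let parsed: JobState = serde_json::from_str(old).unwrap();
    assert_eq!(
        parsed,
        JobState::Finished { upid: "UPID:example".into(), state: "OK".into(), updated: None }
    );
}
```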
@@ -8,6 +8,7 @@ use crate::{
server::jobstate::Job,
server::WorkerTask,
task_log,
+ task_warn,
};

pub fn do_prune_job(

@@ -67,7 +68,14 @@ pub fn do_prune_job(
info.backup_dir.backup_time_string()
);
if !keep {
- datastore.remove_backup_dir(&info.backup_dir, true)?;
+ if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
+ task_warn!(
+ worker,
+ "failed to remove dir {:?}: {}",
+ info.backup_dir.relative_path(),
+ err,
+ );
+ }
}
}
}
@@ -440,7 +440,7 @@ pub async fn handle_api_request<Env: RpcEnvironment, S: 'static + BuildHasher +
);
resp.map(|body| {
Body::wrap_stream(DeflateEncoder::with_quality(
- body.map_err(|err| {
+ TryStreamExt::map_err(body, |err| {
proxmox::io_format_err!("error during compression: {}", err)
}),
Level::Default,
@@ -59,7 +59,7 @@ pub async fn worker_is_active(upid: &UPID) -> Result<bool, Error> {
"upid": upid.to_string(),
},
});
- let status = super::send_command(sock, cmd).await?;
+ let status = super::send_command(sock, &cmd).await?;

if let Some(active) = status.as_bool() {
Ok(active)

@@ -133,7 +133,7 @@ pub async fn abort_worker(upid: UPID) -> Result<(), Error> {
"upid": upid.to_string(),
},
});
- super::send_command(sock, cmd).map_ok(|_| ()).await
+ super::send_command(sock, &cmd).map_ok(|_| ()).await
}

fn parse_worker_status_line(line: &str) -> Result<(String, UPID, Option<TaskState>), Error> {
@@ -215,31 +215,8 @@ impl LtoTapeHandle {
}

/// Position the tape after filemark count. Count 0 means BOT.
- ///
- /// Note: we dont use LOCATE(10), because that needs LTO5
pub fn locate_file(&mut self, position: u64) -> Result<(), Error> {
-
- if position == 0 {
- return self.rewind();
- }
-
- let current_position = self.current_file_number()?;
-
- if current_position == position {
- // make sure we are immediated afer the filemark
- self.sg_tape.space_filemarks(-1)?;
- self.sg_tape.space_filemarks(1)?;
- } else if current_position < position {
- let diff = position - current_position;
- self.sg_tape.space_filemarks(diff.try_into()?)?;
- } else {
- let diff = current_position - position + 1;
- self.sg_tape.space_filemarks(-diff.try_into()?)?;
- // move to EOT side of filemark
- self.sg_tape.space_filemarks(1)?;
- }
-
- Ok(())
+ self.sg_tape.locate_file(position)
}

pub fn erase_media(&mut self, fast: bool) -> Result<(), Error> {

@@ -309,6 +286,10 @@ impl TapeDriver for LtoTapeHandle {
Ok(())
}

+ fn move_to_file(&mut self, file: u64) -> Result<(), Error> {
+ self.locate_file(file)
+ }
+
fn rewind(&mut self) -> Result<(), Error> {
self.sg_tape.rewind()
}
@ -94,6 +94,23 @@ impl DataCompressionModePage {
|
||||
}
|
||||
}
|
||||
|
||||
#[repr(C, packed)]
|
||||
#[derive(Endian)]
|
||||
struct MediumConfigurationModePage {
|
||||
page_code: u8, // 0x1d
|
||||
page_length: u8, // 0x1e
|
||||
flags2: u8,
|
||||
reserved: [u8;29],
|
||||
}
|
||||
|
||||
impl MediumConfigurationModePage {
|
||||
|
||||
pub fn is_worm(&self) -> bool {
|
||||
(self.flags2 & 1) == 1
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct LtoTapeStatus {
|
||||
pub block_length: u32,
|
||||
@ -106,7 +123,6 @@ pub struct LtoTapeStatus {
|
||||
pub struct SgTape {
|
||||
file: File,
|
||||
info: InquiryInfo,
|
||||
density_code: u8, // drive type
|
||||
encryption_key_loaded: bool,
|
||||
}
|
||||
|
||||
@ -125,12 +141,9 @@ impl SgTape {
|
||||
bail!("not a tape device (peripheral_type = {})", info.peripheral_type);
|
||||
}
|
||||
|
||||
let density_code = report_density(&mut file)?;
|
||||
|
||||
Ok(Self {
|
||||
file,
|
||||
info,
|
||||
density_code,
|
||||
encryption_key_loaded: false,
|
||||
})
|
||||
}
|
||||
@ -144,6 +157,13 @@ impl SgTape {
|
||||
&self.info
|
||||
}
|
||||
|
||||
/// Return the maximum supported density code
|
||||
///
|
||||
/// This can be used to detect the drive generation.
|
||||
pub fn max_density_code(&mut self) -> Result<u8, Error> {
|
||||
report_density(&mut self.file)
|
||||
}
|
||||
|
||||
pub fn open<P: AsRef<Path>>(path: P) -> Result<SgTape, Error> {
|
||||
// do not wait for media, use O_NONBLOCK
|
||||
let file = OpenOptions::new()
|
||||
@ -196,26 +216,52 @@ impl SgTape {
|
||||
/// Format media, single partition
|
||||
pub fn format_media(&mut self, fast: bool) -> Result<(), Error> {
|
||||
|
||||
// try to get info about loaded media first
|
||||
let (has_format, is_worm) = match self.read_medium_configuration_page() {
|
||||
Ok((_head, block_descriptor, page)) => {
|
||||
// FORMAT requires LTO5 or newer
|
||||
let has_format = block_descriptor.density_code >= 0x58;
|
||||
let is_worm = page.is_worm();
|
||||
(has_format, is_worm)
|
||||
}
|
||||
Err(_) => {
|
||||
// LTO3 and older do not supprt medium configuration mode page
|
||||
(false, false)
|
||||
}
|
||||
};
|
||||
|
||||
if is_worm {
|
||||
// We cannot FORMAT WORM media! Instead we check if its empty.
|
||||
|
||||
self.move_to_eom(false)?;
|
||||
let pos = self.position()?;
|
||||
if pos.logical_object_number != 0 {
|
||||
bail!("format failed - detected WORM media with data.");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
||||
} else {
|
||||
self.rewind()?;
|
||||
|
||||
let mut sg_raw = SgRaw::new(&mut self.file, 16)?;
|
||||
sg_raw.set_timeout(Self::SCSI_TAPE_DEFAULT_TIMEOUT);
|
||||
let mut cmd = Vec::new();
|
||||
|
||||
if self.density_code >= 0x58 { // FORMAT requires LTO5 or newer)
|
||||
cmd.extend(&[0x04, 0, 0, 0, 0, 0]);
|
||||
if has_format {
|
||||
cmd.extend(&[0x04, 0, 0, 0, 0, 0]); // FORMAT
|
||||
sg_raw.do_command(&cmd)?;
|
||||
if !fast {
|
||||
self.erase_media(false)?; // overwrite everything
|
||||
}
|
||||
} else {
|
||||
// try rewind/erase instead
|
||||
self.rewind()?;
|
||||
self.erase_media(fast)?
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Lock/Unlock drive door
|
||||
pub fn set_medium_removal(&mut self, allow: bool) -> Result<(), ScsiError> {
|
||||
@ -249,6 +295,34 @@ impl SgTape {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn locate_file(&mut self, position: u64) -> Result<(), Error> {
|
||||
if position == 0 {
|
||||
return self.rewind();
|
||||
}
|
||||
|
||||
let position = position -1;
|
||||
|
||||
let mut sg_raw = SgRaw::new(&mut self.file, 16)?;
|
||||
sg_raw.set_timeout(Self::SCSI_TAPE_DEFAULT_TIMEOUT);
|
||||
let mut cmd = Vec::new();
|
||||
// Note: LOCATE(16) works for LTO4 or newer
|
||||
cmd.extend(&[0x92, 0b000_01_000, 0, 0]); // LOCATE(16) filemarks
|
||||
cmd.extend(&position.to_be_bytes());
|
||||
cmd.extend(&[0, 0, 0, 0]);
|
||||
|
||||
sg_raw.do_command(&cmd)
|
||||
.map_err(|err| format_err!("locate file {} failed - {}", position, err))?;
|
||||
|
||||
// move to other side of filemark
|
||||
cmd.truncate(0);
|
||||
cmd.extend(&[0x11, 0x01, 0, 0, 1, 0]); // SPACE(6) one filemarks
|
||||
|
||||
sg_raw.do_command(&cmd)
|
||||
.map_err(|err| format_err!("locate file {} (space) failed - {}", position, err))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn position(&mut self) -> Result<ReadPositionLongPage, Error> {
|
||||
|
||||
let expected_size = std::mem::size_of::<ReadPositionLongPage>();
|
||||
@@ -256,6 +330,9 @@ impl SgTape {
         let mut sg_raw = SgRaw::new(&mut self.file, 32)?;
         sg_raw.set_timeout(30); // use short timeout
         let mut cmd = Vec::new();
+        // READ POSITION LONG FORM works on LTO4 or newer (with recent
+        // firmware), although it is missing in the IBM LTO4 SCSI
+        // reference manual.
         cmd.extend(&[0x34, 0x06, 0, 0, 0, 0, 0, 0, 0, 0]); // READ POSITION LONG FORM
 
         let data = sg_raw.do_command(&cmd)
@@ -620,11 +697,7 @@ impl SgTape {
         }
 
         if let Some(buffer_mode) = buffer_mode {
-            let mut mode = head.flags3 & 0b1_000_1111;
-            if buffer_mode {
-                mode |= 0b0_001_0000;
-            }
-            head.flags3 = mode;
+            head.set_buffer_mode(buffer_mode);
         }
 
         let mut data = Vec::new();
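The open-coded bit fiddling moves behind a set_buffer_mode() accessor. What that accessor presumably does, shown as a standalone helper over the raw flags3 byte (inferred from the removed mask logic; the real implementation may differ):

fn with_buffer_mode(flags3: u8, buffer_mode: bool) -> u8 {
    let mut mode = flags3 & 0b1_000_1111; // clear the three buffer-mode bits (4..6)
    if buffer_mode {
        mode |= 0b0_001_0000;             // buffer mode 1: buffered writes
    }
    mode
}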
@@ -653,6 +726,30 @@ impl SgTape {
         Ok(())
     }
 
+    fn read_medium_configuration_page(
+        &mut self,
+    ) -> Result<(ModeParameterHeader, ModeBlockDescriptor, MediumConfigurationModePage), Error> {
+
+        let (head, block_descriptor, page): (_, _, MediumConfigurationModePage)
+            = scsi_mode_sense(&mut self.file, false, 0x1d, 0)?;
+
+        proxmox::try_block!({
+            if (page.page_code & 0b0011_1111) != 0x1d {
+                bail!("wrong page code {}", page.page_code);
+            }
+            if page.page_length != 0x1e {
+                bail!("wrong page length {}", page.page_length);
+            }
+
+            let block_descriptor = match block_descriptor {
+                Some(block_descriptor) => block_descriptor,
+                None => bail!("missing block descriptor"),
+            };
+
+            Ok((head, block_descriptor, page))
+        }).map_err(|err| format_err!("read_medium_configuration failed - {}", err))
+    }
+
     fn read_compression_page(
         &mut self,
     ) -> Result<(ModeParameterHeader, ModeBlockDescriptor, DataCompressionModePage), Error> {
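proxmox::try_block! wraps the checks in an immediately invoked closure, so every bail!() inside shares a single map_err that adds context. A rough stand-alone equivalent, assuming the anyhow-based Error type used throughout this code:

use anyhow::{bail, format_err, Error};

fn validate_page(page_code: u8) -> Result<(), Error> {
    (|| {
        if (page_code & 0b0011_1111) != 0x1d {
            bail!("wrong page code {}", page_code);
        }
        Ok(())
    })()
    .map_err(|err: Error| format_err!("read_medium_configuration failed - {}", err))
}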
@@ -660,16 +757,21 @@ impl SgTape {
         let (head, block_descriptor, page): (_, _, DataCompressionModePage)
             = scsi_mode_sense(&mut self.file, false, 0x0f, 0)?;
 
-        if !(page.page_code == 0x0f && page.page_length == 0x0e) {
-            bail!("read_compression_page: got strange page code/length");
+        proxmox::try_block!({
+            if (page.page_code & 0b0011_1111) != 0x0f {
+                bail!("wrong page code {}", page.page_code);
             }
+            if page.page_length != 0x0e {
+                bail!("wrong page length {}", page.page_length);
+            }
 
             let block_descriptor = match block_descriptor {
                 Some(block_descriptor) => block_descriptor,
-                None => bail!("read_compression_page failed: missing block descriptor"),
+                None => bail!("missing block descriptor"),
             };
 
             Ok((head, block_descriptor, page))
+        }).map_err(|err| format_err!("read_compression_page failed: {}", err))
     }
 
     /// Read drive options/status
@@ -686,8 +788,8 @@ impl SgTape {
 
         Ok(LtoTapeStatus {
             block_length: block_descriptor.block_length(),
-            write_protect: (head.flags3 & 0b1000_0000) != 0,
-            buffer_mode: (head.flags3 & 0b0111_0000) >> 4,
+            write_protect: head.write_protect(),
+            buffer_mode: head.buffer_mode(),
             compression: page.compression_enabled(),
             density_code: block_descriptor.density_code,
         })
@@ -195,8 +195,7 @@ struct DataEncryptionStatus {
 struct SspDataEncryptionCapabilityPage {
     page_code: u16,
     page_len: u16,
-    extdecc_cfgp_byte: u8,
-    reserved: [u8; 15],
+    reserved: [u8; 16],
 }
 
 #[derive(Endian)]
@@ -222,17 +221,7 @@ fn decode_spin_data_encryption_caps(data: &[u8]) -> Result<u8, Error> {
 
     proxmox::try_block!({
         let mut reader = &data[..];
-        let page: SspDataEncryptionCapabilityPage = unsafe { reader.read_be_value()? };
-
-        let extdecc = (page.extdecc_cfgp_byte & 0b00001100) >> 2;
-        if extdecc != 2 {
-            bail!("not external data encryption control capable");
-        }
-
-        let cfg_p = page.extdecc_cfgp_byte & 0b00000011;
-        if cfg_p != 1 {
-            bail!("not allow to change logical block encryption parameters");
-        }
+        let _page: SspDataEncryptionCapabilityPage = unsafe { reader.read_be_value()? };
 
         let mut aes_cgm_index = None;
 
@@ -257,7 +246,7 @@ fn decode_spin_data_encryption_caps(data: &[u8]) -> Result<u8, Error> {
 
         match aes_cgm_index {
             Some(index) => Ok(index),
-            None => bail!("drive dies not support AES-CGM encryption"),
+            None => bail!("drive does not support AES-CGM encryption"),
         }
     }).map_err(|err: Error| format_err!("decode data encryption caps page failed - {}", err))
@@ -80,6 +80,9 @@ pub trait TapeDriver {
     /// Move to last file
     fn move_to_last_file(&mut self) -> Result<(), Error>;
 
+    /// Move to given file nr
+    fn move_to_file(&mut self, file: u64) -> Result<(), Error>;
+
     /// Current file number
     fn current_file_number(&mut self) -> Result<u64, Error>;
@@ -474,16 +477,34 @@ pub fn request_and_load_media(
     }
 }
 
+#[derive(thiserror::Error, Debug)]
+pub enum TapeLockError {
+    #[error("timeout while trying to lock")]
+    TimeOut,
+    #[error("{0}")]
+    Other(#[from] Error),
+}
+
+impl From<std::io::Error> for TapeLockError {
+    fn from(error: std::io::Error) -> Self {
+        Self::Other(error.into())
+    }
+}
+
 /// Acquires an exclusive lock for the tape device
 ///
 /// Basically calls lock_device_path() using the configured drive path.
 pub fn lock_tape_device(
     config: &SectionConfigData,
     drive: &str,
-) -> Result<DeviceLockGuard, Error> {
+) -> Result<DeviceLockGuard, TapeLockError> {
     let path = tape_device_path(config, drive)?;
-    lock_device_path(&path)
-        .map_err(|err| format_err!("unable to lock drive '{}' - {}", drive, err))
+    lock_device_path(&path).map_err(|err| match err {
+        TapeLockError::Other(err) => {
+            TapeLockError::Other(format_err!("unable to lock drive '{}' - {}", drive, err))
+        }
+        other => other,
+    })
 }
 
 /// Writes the given state for the specified drive
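Why the extra From<std::io::Error> impl matters: with it, the ? operator inside functions returning Result<_, TapeLockError> converts raw io errors into the Other variant automatically. A self-contained sketch assuming the same anyhow/thiserror setup as the code above:

use anyhow::Error;

#[derive(thiserror::Error, Debug)]
pub enum TapeLockError {
    #[error("timeout while trying to lock")]
    TimeOut,
    #[error("{0}")]
    Other(#[from] Error),
}

impl From<std::io::Error> for TapeLockError {
    fn from(error: std::io::Error) -> Self {
        Self::Other(error.into())
    }
}

fn open_lock_file(path: &str) -> Result<std::fs::File, TapeLockError> {
    // the `?` below relies on the manual From<std::io::Error> impl above
    Ok(std::fs::OpenOptions::new().create(true).append(true).open(path)?)
}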
@@ -552,7 +573,7 @@ pub struct DeviceLockGuard(std::fs::File);
 //
 // Uses systemd escape_unit to compute a file name from `device_path`, then
 // tries to lock `/var/lock/<name>`.
-fn lock_device_path(device_path: &str) -> Result<DeviceLockGuard, Error> {
+fn lock_device_path(device_path: &str) -> Result<DeviceLockGuard, TapeLockError> {
 
     let lock_name = crate::tools::systemd::escape_unit(device_path, true);
@@ -561,7 +582,13 @@ fn lock_device_path(device_path: &str) -> Result<DeviceLockGuard, Error> {
 
     let timeout = std::time::Duration::new(10, 0);
     let mut file = std::fs::OpenOptions::new().create(true).append(true).open(path)?;
-    proxmox::tools::fs::lock_file(&mut file, true, Some(timeout))?;
+    if let Err(err) = proxmox::tools::fs::lock_file(&mut file, true, Some(timeout)) {
+        if err.kind() == std::io::ErrorKind::Interrupted {
+            return Err(TapeLockError::TimeOut);
+        } else {
+            return Err(err.into());
+        }
+    }
 
     let backup_user = crate::backup::backup_user()?;
     fchown(file.as_raw_fd(), Some(backup_user.uid), Some(backup_user.gid))?;
@@ -261,6 +261,28 @@ impl TapeDriver for VirtualTapeHandle {
         Ok(())
     }
 
+    fn move_to_file(&mut self, file: u64) -> Result<(), Error> {
+        let mut status = self.load_status()?;
+        match status.current_tape {
+            Some(VirtualTapeStatus { ref name, ref mut pos }) => {
+
+                let index = self.load_tape_index(name)
+                    .map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))?;
+
+                if file as usize > index.files {
+                    bail!("invalid file nr");
+                }
+
+                *pos = file as usize;
+
+                self.store_status(&status)
+                    .map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))?;
+
+                Ok(())
+            }
+            None => bail!("drive is empty (no tape loaded)."),
+        }
+    }
+
     fn read_next_file(&mut self) -> Result<Box<dyn TapeRead>, BlockReadError> {
         let mut status = self.load_status()
@@ -54,6 +54,7 @@ use crate::{
     tape::{
         TAPE_STATUS_DIR,
         MediaSet,
+        MediaCatalog,
        file_formats::{
             MediaLabel,
             MediaSetLabel,

@@ -850,3 +851,38 @@ pub fn complete_media_label_text(
 
     inventory.map.values().map(|entry| entry.id.label.label_text.clone()).collect()
 }
+
+pub fn complete_media_set_snapshots(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+    let media_set_uuid: Uuid = match param.get("media-set").and_then(|s| s.parse().ok()) {
+        Some(uuid) => uuid,
+        None => return Vec::new(),
+    };
+    let status_path = Path::new(TAPE_STATUS_DIR);
+    let inventory = match Inventory::load(&status_path) {
+        Ok(inventory) => inventory,
+        Err(_) => return Vec::new(),
+    };
+
+    let mut res = Vec::new();
+    let media_ids = inventory.list_used_media().into_iter().filter(|media| {
+        match &media.media_set_label {
+            Some(label) => label.uuid == media_set_uuid,
+            None => false,
+        }
+    });
+
+    for media_id in media_ids {
+        let catalog = match MediaCatalog::open(status_path, &media_id, false, false) {
+            Ok(catalog) => catalog,
+            Err(_) => continue,
+        };
+
+        for (store, content) in catalog.content() {
+            for snapshot in content.snapshot_index.keys() {
+                res.push(format!("{}:{}", store, snapshot));
+            }
+        }
+    }
+
+    res
+}
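The completion items produced here have the form "<store>:<snapshot>". A hypothetical helper on the consuming side could split them back apart (illustration only, not part of the patch):

fn split_store_snapshot(item: &str) -> Option<(&str, &str)> {
    let mut parts = item.splitn(2, ':');
    Some((parts.next()?, parts.next()?))
}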
@@ -924,6 +924,16 @@ impl MediaSetCatalog {
         false
     }
 
+    /// Returns the media uuid and snapshot archive file number
+    pub fn lookup_snapshot(&self, store: &str, snapshot: &str) -> Option<(&Uuid, u64)> {
+        for (uuid, catalog) in self.catalog_list.iter() {
+            if let Some(nr) = catalog.lookup_snapshot(store, snapshot) {
+                return Some((uuid, nr));
+            }
+        }
+        None
+    }
+
     /// Test if the catalog already contains a chunk
     pub fn contains_chunk(&self, store: &str, digest: &[u8;32]) -> bool {
         for catalog in self.catalog_list.values() {

@@ -933,6 +943,16 @@ impl MediaSetCatalog {
         }
         false
     }
+
+    /// Returns the media uuid and chunk archive file number
+    pub fn lookup_chunk(&self, store: &str, digest: &[u8;32]) -> Option<(&Uuid, u64)> {
+        for (uuid, catalog) in self.catalog_list.iter() {
+            if let Some(nr) = catalog.lookup_chunk(store, digest) {
+                return Some((uuid, nr));
+            }
+        }
+        None
+    }
 }
 
 // Type definitions for internal binary catalog encoding
@@ -258,7 +258,7 @@ impl MediaPool {
     /// Make sure the current media set is usable for writing
     ///
     /// If not, starts a new media set. Also creates a new
-    /// set if media_set_policy implies it.
+    /// set if media_set_policy implies it, or if 'force' is true.
     ///
     /// Note: We also call this in list_media to compute correct media
     /// status, so this must not change persistent/saved state.

@@ -267,6 +267,7 @@ impl MediaPool {
     pub fn start_write_session(
         &mut self,
         current_time: i64,
+        force: bool,
     ) -> Result<Option<String>, Error> {
 
         let _pool_lock = if self.no_media_set_locking {

@@ -277,11 +278,15 @@ impl MediaPool {
 
         self.inventory.reload()?;
 
-        let mut create_new_set = match self.current_set_usable() {
+        let mut create_new_set = if force {
+            Some(String::from("forced"))
+        } else {
+            match self.current_set_usable() {
                 Err(err) => {
                     Some(err.to_string())
                 }
                 Ok(_) => None,
+            }
         };
 
         if create_new_set.is_none() {
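The force flag simply bypasses current_set_usable(); whatever reason string results is later printed to the task log. The same decision as a pure sketch (names invented for illustration):

fn new_set_reason(force: bool, usable: Result<(), String>) -> Option<String> {
    if force {
        Some(String::from("forced"))
    } else {
        usable.err()
    }
}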
@@ -71,11 +71,12 @@ impl PoolWriter {
         drive_name: &str,
         worker: &WorkerTask,
         notify_email: Option<String>,
+        force_media_set: bool,
     ) -> Result<Self, Error> {
 
         let current_time = proxmox::tools::time::epoch_i64();
 
-        let new_media_set_reason = pool.start_write_session(current_time)?;
+        let new_media_set_reason = pool.start_write_session(current_time, force_media_set)?;
         if let Some(reason) = new_media_set_reason {
             task_log!(
                 worker,
@@ -51,7 +51,7 @@ impl NewChunksIterator {
             loop {
                 let digest = match chunk_iter.next() {
                     None => {
-                        tx.send(Ok(None)).unwrap();
+                        let _ = tx.send(Ok(None)); // ignore send error
                         break;
                     }
                     Some(digest) => digest?,

@@ -67,7 +67,13 @@ impl NewChunksIterator {
 
                 let blob = datastore.load_chunk(&digest)?;
                 //println!("LOAD CHUNK {}", proxmox::tools::digest_to_hex(&digest));
-                tx.send(Ok(Some((digest, blob)))).unwrap();
+                match tx.send(Ok(Some((digest, blob)))) {
+                    Ok(()) => {},
+                    Err(err) => {
+                        eprintln!("could not send chunk to reader thread: {}", err);
+                        break;
+                    }
+                }
 
                 chunk_index.insert(digest);
             }

@@ -75,7 +81,9 @@ impl NewChunksIterator {
             Ok(())
         });
         if let Err(err) = result {
-            tx.send(Err(err)).unwrap();
+            if let Err(err) = tx.send(Err(err)) {
+                eprintln!("error sending result to reader thread: {}", err);
+            }
         }
     });
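The dropped unwrap() calls matter because send() fails exactly when the receiving side has already hung up, which is an expected shutdown path here rather than a bug worth panicking over. A self-contained demonstration with std's mpsc (the actual code presumably uses a similar channel type):

use std::sync::mpsc::sync_channel;

fn main() {
    let (tx, rx) = sync_channel::<u32>(1);
    drop(rx); // the reader goes away first
    match tx.send(1) {
        Ok(()) => {}
        Err(err) => eprintln!("receiver gone: {}", err), // report instead of panicking
    }
}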
@@ -47,7 +47,7 @@ fn test_alloc_writable_media_1() -> Result<(), Error> {
 
     ctime += 10;
 
-    pool.start_write_session(ctime)?;
+    pool.start_write_session(ctime, false)?;
 
     // no media in pool
     assert!(pool.alloc_writable_media(ctime).is_err());

@@ -77,7 +77,7 @@ fn test_alloc_writable_media_2() -> Result<(), Error> {
 
     let ctime = 10;
 
-    pool.start_write_session(ctime)?;
+    pool.start_write_session(ctime, false)?;
 
     // use free media
     assert_eq!(pool.alloc_writable_media(ctime)?, tape1_uuid);

@@ -117,7 +117,7 @@ fn test_alloc_writable_media_3() -> Result<(), Error> {
 
     let mut ctime = 10;
 
-    pool.start_write_session(ctime)?;
+    pool.start_write_session(ctime, false)?;
 
     // use free media
     assert_eq!(pool.alloc_writable_media(ctime)?, tape1_uuid);

@@ -164,7 +164,7 @@ fn test_alloc_writable_media_4() -> Result<(), Error> {
 
     let start_time = 10;
 
-    pool.start_write_session(start_time)?;
+    pool.start_write_session(start_time, false)?;
 
     // use free media
     assert_eq!(pool.alloc_writable_media(start_time)?, tape1_uuid);

@@ -178,7 +178,7 @@ fn test_alloc_writable_media_4() -> Result<(), Error> {
     assert!(pool.alloc_writable_media(start_time + 5).is_err());
 
     // Create new media set, so that previous set can expire
-    pool.start_write_session(start_time + 10)?;
+    pool.start_write_session(start_time + 10, false)?;
 
     assert!(pool.alloc_writable_media(start_time + 10).is_err());
     assert!(pool.alloc_writable_media(start_time + 11).is_err());
Some files were not shown because too many files have changed in this diff.