Compare commits
369 Commits
Author | SHA1 | Date | |
---|---|---|---|
497a7b3f8e | |||
71549afa3f | |||
a294588409 | |||
5a83930667 | |||
c25ea25f0a | |||
f7885eb263 | |||
a48d534d39 | |||
bfa942c0cf | |||
f54634a890 | |||
efb7c5348c | |||
d6fcc1170a | |||
3f742f952a | |||
84af82e8cf | |||
48109c5354 | |||
fd18775ac1 | |||
e678a50ea1 | |||
6523588c8d | |||
6fbf0acc76 | |||
36b7085ec2 | |||
1b1a553741 | |||
98b7d58b94 | |||
7fa9a37c7c | |||
f533d16ef6 | |||
778c7d954b | |||
605fe2e7e7 | |||
1b552c109d | |||
d4d49f7325 | |||
8bca935f08 | |||
fd6d243843 | |||
037f6b6d5e | |||
8eef31724f | |||
2de1b06a06 | |||
a332040a7f | |||
957133077f | |||
36c6e7bb82 | |||
ccc3896ff3 | |||
cef5c72682 | |||
51a2d9e375 | |||
048b43af24 | |||
bfd2b47649 | |||
67a5cf4714 | |||
6227654ad8 | |||
e384f16a19 | |||
89725197c0 | |||
e7d4be9d85 | |||
ba3d7e19fb | |||
b65dfff574 | |||
8cc3760e74 | |||
1cb08a0a05 | |||
6f4228809e | |||
5af3bcf062 | |||
67d00d5c0e | |||
cdc83c4eb2 | |||
ffa403b5fd | |||
5bd77f00e2 | |||
802189f7f5 | |||
a4e5a0fc9f | |||
58bfa3b19c | |||
f9c0a94140 | |||
e3619d4101 | |||
5839c469c1 | |||
bbdda58b35 | |||
ed2080762c | |||
45d5d873ce | |||
f46806414a | |||
ebf34e7edd | |||
aad2d162ab | |||
68149b9045 | |||
1ce8e905ea | |||
ccb3b45e18 | |||
6afdda8832 | |||
2121174827 | |||
df12c9ec4e | |||
4c1b776168 | |||
42dad3abd3 | |||
6c76aa434d | |||
e5f9b7f79e | |||
dd2162f6bd | |||
cabdabba3d | |||
3e593a2459 | |||
7c5287bb95 | |||
7c72ae04f1 | |||
86582454e8 | |||
013b1e8bca | |||
40ff84b138 | |||
b2065dc7d2 | |||
97dfc62f0d | |||
e351ac786d | |||
7b570c177d | |||
6838b75904 | |||
dbda1513c5 | |||
c62a6acb2e | |||
e4a5c072b4 | |||
80f950c05d | |||
4933b853cd | |||
aec1b91eb8 | |||
2e2d64fdba | |||
a37c8d2431 | |||
a8a20e9210 | |||
be5b468975 | |||
9789461363 | |||
9f58e312d7 | |||
cffe0b81e3 | |||
bb14ed8cab | |||
023adb5945 | |||
e5545c9804 | |||
efe96ec039 | |||
1d3ae83359 | |||
4bb3876352 | |||
400e90cfbe | |||
e16c289f50 | |||
140c159b36 | |||
8be69a8453 | |||
9ba4833f3c | |||
0b12a5a698 | |||
2eac359430 | |||
855b55dc14 | |||
5ad40a3dd1 | |||
7116a2d9da | |||
0d5e990a62 | |||
4f57f4ad84 | |||
13e13d836f | |||
3ab2432ab6 | |||
76e8565076 | |||
a5f30a562b | |||
a2ef36d445 | |||
9a1ecae0b7 | |||
42b010174e | |||
68e77657e6 | |||
1b2f851e42 | |||
cc99866ea3 | |||
1ea3f23f7e | |||
3f780ddf73 | |||
9edf96e6b6 | |||
73e1ba65ca | |||
02631056b8 | |||
131d0f10c2 | |||
f9aa980c7d | |||
ad0364c558 | |||
76486eb3d1 | |||
65ab4ca976 | |||
99a73fad15 | |||
16a01c19dd | |||
86b8ba448c | |||
9b8e8012a7 | |||
b29292a87b | |||
c1feb447e8 | |||
62a0e190cb | |||
5890143920 | |||
ef4df211ab | |||
eb5e0ae65a | |||
bbc71e3b02 | |||
ac81ed17b9 | |||
89145cde34 | |||
ef4b2c2470 | |||
7190cbf2ac | |||
f726e1e0ea | |||
6d81e65986 | |||
ba5f5083c3 | |||
314db4072c | |||
baff2324f3 | |||
02eae829f7 | |||
bb77143108 | |||
02cb5b5f80 | |||
a301c362e3 | |||
7526d86419 | |||
a00888e93f | |||
fc5870be53 | |||
3c8c2827cb | |||
6c221244df | |||
38629c3961 | |||
513d019ac3 | |||
3fa1b4b48c | |||
a6eac535e4 | |||
58a3fae773 | |||
0889806a3c | |||
51ec8a3c62 | |||
a12b1be728 | |||
4d04cd9ab9 | |||
a3399f4337 | |||
2b7f8dd5ea | |||
72fbe9ffa5 | |||
0be8bce718 | |||
4805edc4ec | |||
9eb784076c | |||
b9c5cd8291 | |||
9008c0c177 | |||
f027c2146e | |||
afbf2e10f3 | |||
9805207aa5 | |||
8e0b852f24 | |||
0052dc6d28 | |||
61f05679d2 | |||
9751ef4b36 | |||
0a240aaa9a | |||
e0665a64bd | |||
dc46aa9a00 | |||
ced694589d | |||
6c053ffc89 | |||
9f5b57a348 | |||
f1c4b8df34 | |||
269e274bb5 | |||
bfd357c5a1 | |||
9517a5759a | |||
a5d51b0c4f | |||
d9822cd3cb | |||
66501529a2 | |||
2072dede4a | |||
31c94d1645 | |||
9ee4c23833 | |||
a14a1c7b90 | |||
9ef88578af | |||
c4c4b5a3ef | |||
0ed40b19c7 | |||
a0cd0f9cec | |||
49e47c491b | |||
424d2d68d3 | |||
415690a0e7 | |||
2c0abe9234 | |||
2649c89358 | |||
bbd34d70d5 | |||
9779ad0b00 | |||
70fd0652a1 | |||
6b85671dd2 | |||
82bdf6b5e7 | |||
ba2679c9d7 | |||
8866cbccc8 | |||
b3477d286f | |||
68e2ea99ba | |||
d6688884f6 | |||
7d3482f5bf | |||
7a39b41c20 | |||
4672273fe6 | |||
01284de0b2 | |||
b20368ee1b | |||
e584593cb5 | |||
069a6e28a7 | |||
8fab19da73 | |||
991be99c37 | |||
1900d7810c | |||
6b5013edb3 | |||
f313494d48 | |||
353dcf1d13 | |||
3006d70ebe | |||
681e096448 | |||
ac9a9e8002 | |||
ecbc385b7b | |||
5117cf4f17 | |||
da7ec1d2af | |||
934de1d691 | |||
0c27d880b0 | |||
be3a0295b6 | |||
aa2838c27a | |||
ea584a7510 | |||
ba0ccc5991 | |||
75f83c6a81 | |||
0dda5a6695 | |||
289738dc1a | |||
d830804f02 | |||
82cc4b56e5 | |||
923f94a4d7 | |||
bbff317aa7 | |||
20429238e0 | |||
364299740f | |||
b81818b6ad | |||
2f02e431b0 | |||
e64f38cb6b | |||
ae24382634 | |||
82cae19d19 | |||
3f5fbc5620 | |||
000e6cad5c | |||
49f44cedbf | |||
eb1c59cc2a | |||
c7d032fc17 | |||
73b77d4787 | |||
67466ce564 | |||
4e0faf5ef3 | |||
c23192d34e | |||
83771aa037 | |||
95f9d67ce9 | |||
314d360fcd | |||
f8a74456cc | |||
4906bac10f | |||
86c831a5c3 | |||
a5951b4f38 | |||
f75292bd8d | |||
bfff4eaa7f | |||
067dc06dba | |||
18cdf20afc | |||
e57841c442 | |||
751f6b6148 | |||
3c430e9a55 | |||
155f657f6b | |||
86fb38776b | |||
f323e90602 | |||
770a36e53a | |||
d420962fbc | |||
01fd2447b2 | |||
85beb7d875 | |||
af06decd1b | |||
aceae32baa | |||
74a4f9efc9 | |||
fb1e7a86f4 | |||
dc99315cf9 | |||
34bd1109b0 | |||
13a2445744 | |||
c968da789e | |||
3f84541412 | |||
4d8bd03668 | |||
f9bd5e1691 | |||
ecd66ecaf6 | |||
33d7292f29 | |||
f4d371d2d2 | |||
835d0e5dd3 | |||
9a06eb1618 | |||
309e14ebb7 | |||
2d48533378 | |||
fffd6874e6 | |||
0ddd48f0b5 | |||
cb590dbc07 | |||
6c4f762c49 | |||
7a0afee391 | |||
0dda883994 | |||
c2e2078b3f | |||
26a3450f19 | |||
324c069848 | |||
bd4c5607ca | |||
e1d85f1840 | |||
1ce1a5e5cc | |||
6f66a0ca71 | |||
62a5b3907b | |||
85b6c4ead4 | |||
a190979c04 | |||
4a489ae3de | |||
9ac8b73e07 | |||
414be8b675 | |||
fda19dcc6f | |||
cd975e5787 | |||
3b7b1dfb8e | |||
d8a47ec649 | |||
252cd3b781 | |||
0decd11efb | |||
b84d2592fb | |||
0219ba2cc5 | |||
bbff6c4968 | |||
bb88c6a29d | |||
a02466966d | |||
b0fc11804e | |||
d9d81741e3 | |||
9678366102 | |||
a2c73c78dd | |||
c6a0e7d98e | |||
85417b2a88 | |||
d738669066 | |||
442d6da8fb | |||
62f10a01db | |||
5667b76381 | |||
d9b318a444 | |||
86ce56f193 | |||
8d72c2c32e | |||
c48c38ab8c | |||
3d3769830b | |||
4921a411ad | |||
81c767efce | |||
60abf03f05 | |||
dcbf29e71b | |||
037e6c0ca8 | |||
c7024b282a | |||
90ff75f85c |
56
Cargo.toml
56
Cargo.toml
@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "proxmox-backup"
|
name = "proxmox-backup"
|
||||||
version = "1.1.14"
|
version = "2.0.10"
|
||||||
authors = [
|
authors = [
|
||||||
"Dietmar Maurer <dietmar@proxmox.com>",
|
"Dietmar Maurer <dietmar@proxmox.com>",
|
||||||
"Dominik Csapak <d.csapak@proxmox.com>",
|
"Dominik Csapak <d.csapak@proxmox.com>",
|
||||||
@ -15,10 +15,29 @@ edition = "2018"
|
|||||||
license = "AGPL-3"
|
license = "AGPL-3"
|
||||||
description = "Proxmox Backup"
|
description = "Proxmox Backup"
|
||||||
homepage = "https://www.proxmox.com"
|
homepage = "https://www.proxmox.com"
|
||||||
build = "build.rs"
|
|
||||||
|
|
||||||
exclude = [ "build", "debian", "tests/catar_data/test_symlink/symlink1"]
|
exclude = [ "build", "debian", "tests/catar_data/test_symlink/symlink1"]
|
||||||
|
|
||||||
|
[workspace]
|
||||||
|
members = [
|
||||||
|
"pbs-buildcfg",
|
||||||
|
"pbs-client",
|
||||||
|
"pbs-config",
|
||||||
|
"pbs-datastore",
|
||||||
|
"pbs-fuse-loop",
|
||||||
|
"pbs-runtime",
|
||||||
|
"proxmox-rest-server",
|
||||||
|
"proxmox-systemd",
|
||||||
|
"pbs-tape",
|
||||||
|
"pbs-tools",
|
||||||
|
|
||||||
|
"proxmox-backup-banner",
|
||||||
|
"proxmox-backup-client",
|
||||||
|
"proxmox-file-restore",
|
||||||
|
"proxmox-restore-daemon",
|
||||||
|
"pxar-bin",
|
||||||
|
]
|
||||||
|
|
||||||
[lib]
|
[lib]
|
||||||
name = "proxmox_backup"
|
name = "proxmox_backup"
|
||||||
path = "src/lib.rs"
|
path = "src/lib.rs"
|
||||||
@ -33,7 +52,6 @@ endian_trait = { version = "0.6", features = ["arrays"] }
|
|||||||
env_logger = "0.7"
|
env_logger = "0.7"
|
||||||
flate2 = "1.0"
|
flate2 = "1.0"
|
||||||
anyhow = "1.0"
|
anyhow = "1.0"
|
||||||
foreign-types = "0.3"
|
|
||||||
thiserror = "1.0"
|
thiserror = "1.0"
|
||||||
futures = "0.3"
|
futures = "0.3"
|
||||||
h2 = { version = "0.3", features = [ "stream" ] }
|
h2 = { version = "0.3", features = [ "stream" ] }
|
||||||
@ -50,8 +68,6 @@ openssl = "0.10"
|
|||||||
pam = "0.7"
|
pam = "0.7"
|
||||||
pam-sys = "0.5"
|
pam-sys = "0.5"
|
||||||
percent-encoding = "2.1"
|
percent-encoding = "2.1"
|
||||||
pin-utils = "0.1.0"
|
|
||||||
pin-project = "1.0"
|
|
||||||
regex = "1.2"
|
regex = "1.2"
|
||||||
rustyline = "7"
|
rustyline = "7"
|
||||||
serde = { version = "1.0", features = ["derive"] }
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
@ -69,24 +85,38 @@ url = "2.1"
|
|||||||
walkdir = "2"
|
walkdir = "2"
|
||||||
webauthn-rs = "0.2.5"
|
webauthn-rs = "0.2.5"
|
||||||
xdg = "2.2"
|
xdg = "2.2"
|
||||||
zstd = { version = "0.4", features = [ "bindgen" ] }
|
|
||||||
nom = "5.1"
|
nom = "5.1"
|
||||||
crossbeam-channel = "0.5"
|
crossbeam-channel = "0.5"
|
||||||
|
|
||||||
|
# Used only by examples currently:
|
||||||
|
zstd = { version = "0.6", features = [ "bindgen" ] }
|
||||||
|
|
||||||
pathpatterns = "0.1.2"
|
pathpatterns = "0.1.2"
|
||||||
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
|
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
|
||||||
|
|
||||||
proxmox = { version = "0.11.6", features = [ "sortable-macro", "api-macro", "cli", "router", "tfa" ] }
|
proxmox = { version = "0.13.3", features = [ "sortable-macro", "api-macro", "cli", "router", "tfa" ] }
|
||||||
proxmox-acme-rs = "0.3"
|
proxmox-acme-rs = "0.2.1"
|
||||||
proxmox-fuse = "0.1.1"
|
proxmox-apt = "0.7.0"
|
||||||
proxmox-http = { version = "0.2.1", features = [ "client", "http-helpers", "websocket" ] }
|
proxmox-http = { version = "0.4.0", features = [ "client", "http-helpers", "websocket" ] }
|
||||||
|
proxmox-openid = "0.7.0"
|
||||||
|
|
||||||
|
pbs-api-types = { path = "pbs-api-types" }
|
||||||
|
pbs-buildcfg = { path = "pbs-buildcfg" }
|
||||||
|
pbs-client = { path = "pbs-client" }
|
||||||
|
pbs-config = { path = "pbs-config" }
|
||||||
|
pbs-datastore = { path = "pbs-datastore" }
|
||||||
|
pbs-runtime = { path = "pbs-runtime" }
|
||||||
|
proxmox-rest-server = { path = "proxmox-rest-server" }
|
||||||
|
proxmox-systemd = { path = "proxmox-systemd" }
|
||||||
|
pbs-tools = { path = "pbs-tools" }
|
||||||
|
pbs-tape = { path = "pbs-tape" }
|
||||||
|
|
||||||
# Local path overrides
|
# Local path overrides
|
||||||
# NOTE: You must run `cargo update` after changing this for it to take effect!
|
# NOTE: You must run `cargo update` after changing this for it to take effect!
|
||||||
[patch.crates-io]
|
[patch.crates-io]
|
||||||
#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "cli", "router", "tfa" ] }
|
#proxmox = { path = "../proxmox/proxmox" }
|
||||||
#proxmox-http = { path = "../proxmox/proxmox-http", features = [ "client", "http-helpers", "websocket" ] }
|
#proxmox-http = { path = "../proxmox/proxmox-http" }
|
||||||
#pxar = { path = "../pxar", features = [ "tokio-io" ] }
|
#pxar = { path = "../pxar" }
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
default = []
|
default = []
|
||||||
|
115
Makefile
115
Makefile
@ -17,7 +17,8 @@ USR_BIN := \
|
|||||||
|
|
||||||
# Binaries usable by admins
|
# Binaries usable by admins
|
||||||
USR_SBIN := \
|
USR_SBIN := \
|
||||||
proxmox-backup-manager
|
proxmox-backup-manager \
|
||||||
|
proxmox-backup-debug \
|
||||||
|
|
||||||
# Binaries for services:
|
# Binaries for services:
|
||||||
SERVICE_BIN := \
|
SERVICE_BIN := \
|
||||||
@ -30,6 +31,24 @@ SERVICE_BIN := \
|
|||||||
RESTORE_BIN := \
|
RESTORE_BIN := \
|
||||||
proxmox-restore-daemon
|
proxmox-restore-daemon
|
||||||
|
|
||||||
|
SUBCRATES := \
|
||||||
|
pbs-api-types \
|
||||||
|
pbs-buildcfg \
|
||||||
|
pbs-client \
|
||||||
|
pbs-config \
|
||||||
|
pbs-datastore \
|
||||||
|
pbs-fuse-loop \
|
||||||
|
pbs-runtime \
|
||||||
|
proxmox-rest-server \
|
||||||
|
proxmox-systemd \
|
||||||
|
pbs-tape \
|
||||||
|
pbs-tools \
|
||||||
|
proxmox-backup-banner \
|
||||||
|
proxmox-backup-client \
|
||||||
|
proxmox-file-restore \
|
||||||
|
proxmox-restore-daemon \
|
||||||
|
pxar-bin
|
||||||
|
|
||||||
ifeq ($(BUILD_MODE), release)
|
ifeq ($(BUILD_MODE), release)
|
||||||
CARGO_BUILD_ARGS += --release
|
CARGO_BUILD_ARGS += --release
|
||||||
COMPILEDIR := target/release
|
COMPILEDIR := target/release
|
||||||
@ -57,13 +76,15 @@ RESTORE_DBG_DEB=proxmox-backup-file-restore-dbgsym_${DEB_VERSION}_${ARCH}.deb
|
|||||||
DOC_DEB=${PACKAGE}-docs_${DEB_VERSION}_all.deb
|
DOC_DEB=${PACKAGE}-docs_${DEB_VERSION}_all.deb
|
||||||
|
|
||||||
DEBS=${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB} \
|
DEBS=${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB} \
|
||||||
${RESTORE_DEB} ${RESTORE_DBG_DEB}
|
${RESTORE_DEB} ${RESTORE_DBG_DEB} ${DEBUG_DEB} ${DEBUG_DBG_DEB}
|
||||||
|
|
||||||
DSC = rust-${PACKAGE}_${DEB_VERSION}.dsc
|
DSC = rust-${PACKAGE}_${DEB_VERSION}.dsc
|
||||||
|
|
||||||
DESTDIR=
|
DESTDIR=
|
||||||
|
|
||||||
all: cargo-build $(SUBDIRS)
|
tests ?= --workspace
|
||||||
|
|
||||||
|
all: $(SUBDIRS)
|
||||||
|
|
||||||
.PHONY: $(SUBDIRS)
|
.PHONY: $(SUBDIRS)
|
||||||
$(SUBDIRS):
|
$(SUBDIRS):
|
||||||
@ -75,25 +96,23 @@ test:
|
|||||||
$(CARGO) test $(tests) $(CARGO_BUILD_ARGS)
|
$(CARGO) test $(tests) $(CARGO_BUILD_ARGS)
|
||||||
|
|
||||||
doc:
|
doc:
|
||||||
$(CARGO) doc --no-deps $(CARGO_BUILD_ARGS)
|
$(CARGO) doc --workspace --no-deps $(CARGO_BUILD_ARGS)
|
||||||
|
|
||||||
# always re-create this dir
|
# always re-create this dir
|
||||||
.PHONY: build
|
.PHONY: build
|
||||||
build:
|
build:
|
||||||
|
@echo "Setting pkg-buildcfg version to: $(DEB_VERSION_UPSTREAM)"
|
||||||
|
sed -i -e 's/^version =.*$$/version = "$(DEB_VERSION_UPSTREAM)"/' \
|
||||||
|
pbs-buildcfg/Cargo.toml
|
||||||
rm -rf build
|
rm -rf build
|
||||||
rm -f debian/control
|
mkdir build
|
||||||
debcargo package \
|
cp -a debian \
|
||||||
--config debian/debcargo.toml \
|
Cargo.toml src \
|
||||||
--changelog-ready \
|
$(SUBCRATES) \
|
||||||
--no-overlay-write-back \
|
docs etc examples tests www zsh-completions \
|
||||||
--directory build \
|
defines.mk Makefile \
|
||||||
proxmox-backup \
|
./build/
|
||||||
$(shell dpkg-parsechangelog -l debian/changelog -SVersion | sed -e 's/-.*//')
|
rm -f build/Cargo.lock
|
||||||
sed -e '1,/^$$/ ! d' build/debian/control > build/debian/control.src
|
|
||||||
cat build/debian/control.src build/debian/control.in > build/debian/control
|
|
||||||
rm build/debian/control.in build/debian/control.src
|
|
||||||
cp build/debian/control debian/control
|
|
||||||
rm build/Cargo.lock
|
|
||||||
find build/debian -name "*.hint" -delete
|
find build/debian -name "*.hint" -delete
|
||||||
$(foreach i,$(SUBDIRS), \
|
$(foreach i,$(SUBDIRS), \
|
||||||
$(MAKE) -C build/$(i) clean ;)
|
$(MAKE) -C build/$(i) clean ;)
|
||||||
@ -123,27 +142,61 @@ $(DSC): build
|
|||||||
cd build; dpkg-buildpackage -S -us -uc -d -nc
|
cd build; dpkg-buildpackage -S -us -uc -d -nc
|
||||||
lintian $(DSC)
|
lintian $(DSC)
|
||||||
|
|
||||||
|
.PHONY: clean distclean deb clean
|
||||||
distclean: clean
|
distclean: clean
|
||||||
|
clean: clean-deb
|
||||||
clean:
|
|
||||||
$(foreach i,$(SUBDIRS), \
|
$(foreach i,$(SUBDIRS), \
|
||||||
$(MAKE) -C $(i) clean ;)
|
$(MAKE) -C $(i) clean ;)
|
||||||
$(CARGO) clean
|
$(CARGO) clean
|
||||||
rm -rf *.deb *.dsc *.tar.gz *.buildinfo *.changes build
|
rm -f .do-cargo-build
|
||||||
find . -name '*~' -exec rm {} ';'
|
find . -name '*~' -exec rm {} ';'
|
||||||
|
|
||||||
|
# allows one to avoid running cargo clean when one just wants to tidy up after a packgae build
|
||||||
|
clean-deb:
|
||||||
|
rm -rf *.deb *.dsc *.tar.gz *.buildinfo *.changes build/
|
||||||
|
|
||||||
.PHONY: dinstall
|
.PHONY: dinstall
|
||||||
dinstall: ${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB}
|
dinstall: ${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB} \
|
||||||
|
${DEBUG_DEB} ${DEBUG_DBG_DEB}
|
||||||
dpkg -i $^
|
dpkg -i $^
|
||||||
|
|
||||||
# make sure we build binaries before docs
|
# make sure we build binaries before docs
|
||||||
docs: cargo-build
|
docs: $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen
|
||||||
|
|
||||||
.PHONY: cargo-build
|
.PHONY: cargo-build
|
||||||
cargo-build:
|
cargo-build:
|
||||||
$(CARGO) build $(CARGO_BUILD_ARGS)
|
rm -f .do-cargo-build
|
||||||
|
$(MAKE) $(COMPILED_BINS)
|
||||||
|
|
||||||
|
$(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-cargo-build
|
||||||
|
.do-cargo-build:
|
||||||
|
$(CARGO) build $(CARGO_BUILD_ARGS) \
|
||||||
|
--bin proxmox-backup-api \
|
||||||
|
--bin proxmox-backup-proxy \
|
||||||
|
--bin proxmox-backup-manager \
|
||||||
|
--bin docgen \
|
||||||
|
--package proxmox-backup-banner \
|
||||||
|
--bin proxmox-backup-banner \
|
||||||
|
--package proxmox-backup-client \
|
||||||
|
--bin proxmox-backup-client \
|
||||||
|
--bin proxmox-backup-debug \
|
||||||
|
--package proxmox-file-restore \
|
||||||
|
--bin proxmox-file-restore \
|
||||||
|
--package pxar-bin \
|
||||||
|
--bin pxar \
|
||||||
|
--package pbs-tape \
|
||||||
|
--bin pmt \
|
||||||
|
--bin pmtx \
|
||||||
|
--package proxmox-restore-daemon \
|
||||||
|
--bin proxmox-restore-daemon \
|
||||||
|
--package proxmox-backup \
|
||||||
|
--bin dump-catalog-shell-cli \
|
||||||
|
--bin proxmox-daily-update \
|
||||||
|
--bin proxmox-file-restore \
|
||||||
|
--bin proxmox-tape \
|
||||||
|
--bin sg-tape-cmd
|
||||||
|
touch "$@"
|
||||||
|
|
||||||
$(COMPILED_BINS): cargo-build
|
|
||||||
|
|
||||||
.PHONY: lint
|
.PHONY: lint
|
||||||
lint:
|
lint:
|
||||||
@ -169,12 +222,16 @@ install: $(COMPILED_BINS)
|
|||||||
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
|
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
|
||||||
$(MAKE) -C www install
|
$(MAKE) -C www install
|
||||||
$(MAKE) -C docs install
|
$(MAKE) -C docs install
|
||||||
|
ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
|
||||||
|
$(MAKE) test # HACK, only test now to avoid clobbering build files with wrong config
|
||||||
|
endif
|
||||||
|
|
||||||
.PHONY: upload
|
.PHONY: upload
|
||||||
upload: ${SERVER_DEB} ${CLIENT_DEB} ${RESTORE_DEB} ${DOC_DEB}
|
upload: ${SERVER_DEB} ${CLIENT_DEB} ${RESTORE_DEB} ${DOC_DEB} ${DEBUG_DEB}
|
||||||
# check if working directory is clean
|
# check if working directory is clean
|
||||||
git diff --exit-code --stat && git diff --exit-code --stat --staged
|
git diff --exit-code --stat && git diff --exit-code --stat --staged
|
||||||
tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB} | \
|
tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} ${CLIENT_DEB} \
|
||||||
ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
|
${CLIENT_DBG_DEB} ${DEBUG_DEB} ${DEBUG_DBG_DEB} \
|
||||||
tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg,pbs-client" --dist buster
|
| ssh -X repoman@repo.proxmox.com upload --product pbs --dist bullseye
|
||||||
tar cf - ${RESTORE_DEB} ${RESTORE_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist buster
|
tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg,pbs-client" --dist bullseye
|
||||||
|
tar cf - ${RESTORE_DEB} ${RESTORE_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist bullseye
|
||||||
|
23
build.rs
23
build.rs
@ -1,23 +0,0 @@
|
|||||||
// build.rs
|
|
||||||
use std::env;
|
|
||||||
use std::process::Command;
|
|
||||||
|
|
||||||
fn git_command(args: &[&str]) -> String {
|
|
||||||
match Command::new("git").args(args).output() {
|
|
||||||
Ok(output) => String::from_utf8(output.stdout).unwrap().trim_end().to_string(),
|
|
||||||
Err(err) => {
|
|
||||||
panic!("git {:?} failed: {}", args, err);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
let repo_path = git_command(&["rev-parse", "--show-toplevel"]);
|
|
||||||
let repoid = match env::var("REPOID") {
|
|
||||||
Ok(repoid) => repoid,
|
|
||||||
Err(_) => git_command(&["rev-parse", "HEAD"]),
|
|
||||||
};
|
|
||||||
|
|
||||||
println!("cargo:rustc-env=REPOID={}", repoid);
|
|
||||||
println!("cargo:rerun-if-changed={}/.git/HEAD", repo_path);
|
|
||||||
}
|
|
224
debian/changelog
vendored
224
debian/changelog
vendored
@ -1,39 +1,184 @@
|
|||||||
rust-proxmox-backup (1.1.14-1) buster; urgency=medium
|
rust-proxmox-backup (2.0.10-1) UNRELEASED; urgency=medium
|
||||||
|
|
||||||
* drop RawWaker usage to avoid a leaking a refcount
|
* ui: fix order of prune keep reasons
|
||||||
|
|
||||||
* pbs-tools: LruCache: implement Drop to fix a memory leak for the cache
|
* server: add proxmox-backup-debug binary with chunk/file inspection, an API
|
||||||
|
shell with completion support
|
||||||
|
|
||||||
* ui: add notice for nearing PBS 1.1 End-of-Life
|
* restructured code base to reduce linkage and libraray ABI version
|
||||||
|
constraints for all non-server binaries (client, pxar, file-restore)
|
||||||
|
|
||||||
* backport "datastore: lookup: reuse ChunkStore on stale datastore re-open"
|
* zsh: fix passign parameters in auto-completion scripts
|
||||||
|
|
||||||
-- Proxmox Support Team <support@proxmox.com> Thu, 02 Jun 2022 18:07:54 +0200
|
* tape: also add 'force-media-set' to availablea CLI options
|
||||||
|
|
||||||
rust-proxmox-backup (1.1.13-3) buster; urgency=medium
|
* api: nodes: add missing node list (index) api endpoint
|
||||||
|
|
||||||
* fix sending log-rotation command to API daemons
|
* docs: proxmox-backup-debug: add info about the new 'api' subcommand
|
||||||
|
|
||||||
-- Proxmox Support Team <support@proxmox.com> Tue, 19 Oct 2021 10:21:18 +0200
|
* docs/technical-overview: add troubleshooting section
|
||||||
|
|
||||||
rust-proxmox-backup (1.1.13-2) buster; urgency=medium
|
-- Proxmox Support Team <support@proxmox.com> Tue, 21 Sep 2021 14:00:48 +0200
|
||||||
|
|
||||||
* revert "auth: improve thread safety of 'crypt' C-library", not safe for
|
rust-proxmox-backup (2.0.9-2) bullseye; urgency=medium
|
||||||
Debian buster based releases.
|
|
||||||
|
|
||||||
-- Proxmox Support Team <support@proxmox.com> Mon, 26 Jul 2021 16:40:07 +0200
|
* tape backup: mention groups that were empty
|
||||||
|
|
||||||
rust-proxmox-backup (1.1.13-1) buster; urgency=medium
|
* tape: compute next-media-label for each tape backup job
|
||||||
|
|
||||||
|
* tape: lto: increase default timeout to 10 minutes
|
||||||
|
|
||||||
|
* ui: display next-media-label for tape backup jobs
|
||||||
|
|
||||||
|
* cli: proxmox-tape backup-job list: use status api and display next-run
|
||||||
|
and next-media-label
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 24 Aug 2021 14:44:12 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.8-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* use proxmox-apt to 0.6
|
||||||
|
|
||||||
|
* api: apt: adapt to proxmox-apt back-end changes
|
||||||
|
|
||||||
|
* api/ui: allow zstd compression for new zpools
|
||||||
|
|
||||||
|
* tape: media_catalog: add snapshot list cache for catalog
|
||||||
|
|
||||||
|
* api2: tape: media: use MediaCatalog::snapshot_list for content listing
|
||||||
|
|
||||||
|
* tape: lock media_catalog file to to get a consistent view with load_catalog
|
||||||
|
|
||||||
|
* tape: changer: handle libraries that sends wrong amount of data
|
||||||
|
|
||||||
|
* tape: changer: remove unnecesary inquiry parameter
|
||||||
|
|
||||||
|
* api2: tape/restore: commit temporary catalog at the end
|
||||||
|
|
||||||
|
* docs: tape: add instructions on how to restore the catalog
|
||||||
|
|
||||||
|
* ui: tape/ChangerStatus: improve layout for large libraries
|
||||||
|
|
||||||
|
* tape: changer: handle invalid descriptor data from library in status page
|
||||||
|
|
||||||
|
* datastore config: cleanup code (use flatten attribute)
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Mon, 02 Aug 2021 10:34:55 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.7-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* tape changer: better cope with models that are not following spec
|
||||||
|
proposals when returning the status page
|
||||||
|
|
||||||
|
* tape changer: make DVCID information optional, not all devices return it
|
||||||
|
|
||||||
|
* restore daemon: setup the 'backup' system user and group in the minimal
|
||||||
|
restore environment, as we like to ensure that all state files are ownend
|
||||||
|
by them.
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Fri, 23 Jul 2021 08:43:51 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.6-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* increase maximum drives per changer to 255
|
||||||
|
|
||||||
|
* allow one to pass a secret not only directly through the environment value,
|
||||||
|
but also indirectly through a file path, an open file descriptor or a
|
||||||
|
command that can write the secret to standard out.
|
||||||
|
|
||||||
|
* pull in new proxmox library version to improve the file system
|
||||||
|
comaptibility on creation of atomic files, e.g., lock files.
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 22 Jul 2021 10:22:19 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.5-2) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* ui: tape: backup overview: increase timeout for media-set content
|
||||||
|
|
||||||
|
* tape: changer: always retry until timeout
|
||||||
|
|
||||||
|
* file-restore: increase lock timeout on QEMU map
|
||||||
|
|
||||||
|
* fix #3515: file-restore-daemon: allow LVs/PVs with dash in name
|
||||||
|
|
||||||
|
* fix #3526: correctly filter tasks with 'since' and 'until'
|
||||||
|
|
||||||
|
* tape: changer: make scsi request for DVCID a separate one, as some
|
||||||
|
libraries cannot handle requesting that combined with volume tags in one
|
||||||
|
go
|
||||||
|
|
||||||
|
* api, ui: datastore: add new 'prune-datastore' api call and expose it with
|
||||||
|
a 'Prune All' button
|
||||||
|
|
||||||
|
* make creating log files more robust so that theys are always owned by the
|
||||||
|
less privileged `backup` user
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 21 Jul 2021 09:12:39 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.4-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* change tape drive lock path to avoid issues with sticky bit on tmpfs
|
||||||
|
mountpoint
|
||||||
|
|
||||||
|
* tape: changer: query transport-element types separately
|
||||||
|
|
||||||
* auth: improve thread safety of 'crypt' C-library
|
* auth: improve thread safety of 'crypt' C-library
|
||||||
|
|
||||||
* file-restore: increase lock timeout on QEMU map
|
-- Proxmox Support Team <support@proxmox.com> Mon, 12 Jul 2021 18:51:21 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.3-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* api: apt: add repositories info and update calls
|
||||||
|
|
||||||
|
* ui: administration: add APT repositories status and update panel
|
||||||
|
|
||||||
|
* api: access domains: add get/create/update/delete endpoints for realms
|
||||||
|
|
||||||
|
* ui: access control: add 'Realm' tab for adding and editing OpenID Connect
|
||||||
|
identity provider
|
||||||
|
|
||||||
|
* fix #3447: ui: Dashboard: disallow selection of datastore statistics row
|
||||||
|
|
||||||
|
* ui: tapeRestore: make window non-resizable
|
||||||
|
|
||||||
|
* ui: dashboard: rework resource-load panel to a more detailed status panel,
|
||||||
|
showing, among other things, uptime, Kernel version, CPU info and
|
||||||
|
repository status.
|
||||||
|
|
||||||
|
* ui: adminsitration/dashboard: auto-scale columns count and add
|
||||||
|
browser-local setting to override that to a fixed value of columns.
|
||||||
|
|
||||||
|
* fix #3212: api, ui: add support for notes on backup groups
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Mon, 12 Jul 2021 08:07:41 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.2-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* ui: use task list component from widget toolkit
|
||||||
|
|
||||||
|
* api: add keep-job-configs flag to datastore remove endpoint
|
||||||
|
|
||||||
|
* api: config: delete datastore: also remove tape backup jobs
|
||||||
|
|
||||||
|
* ui: tape restore: mark datastore selector as 'not a form field' to fix
|
||||||
|
compatibility with ExtJS 7.0
|
||||||
|
|
||||||
|
* ui: datastore removal: only navigate away when the user actually confirmed
|
||||||
|
the removal of that datastore
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 08 Jul 2021 14:44:12 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.1-2) bullseye; urgency=medium
|
||||||
|
|
||||||
* file restore daemon: log basic startup steps
|
* file restore daemon: log basic startup steps
|
||||||
|
|
||||||
* REST-API: set error message extension for bad-request response log to
|
* REST-API: set error message extension for bad-request response log to
|
||||||
ensure the actual error is logged in any (access) log, making debugging
|
ensure the actual error is logged in any (access) log, making debugging
|
||||||
such issues easier.
|
such issues easier
|
||||||
|
|
||||||
|
* restore daemon: create /run/proxmox-backup on startup as there's now some
|
||||||
|
runtime state saved there, which failed all API requests to the restore
|
||||||
|
daemon otherwise
|
||||||
|
|
||||||
* restore daemon: use millisecond log resolution
|
* restore daemon: use millisecond log resolution
|
||||||
|
|
||||||
@ -41,33 +186,19 @@ rust-proxmox-backup (1.1.13-1) buster; urgency=medium
|
|||||||
ensuring DNS propagation of that record. This makes it catch up with the
|
ensuring DNS propagation of that record. This makes it catch up with the
|
||||||
docs/web-interface, where the option was already available.
|
docs/web-interface, where the option was already available.
|
||||||
|
|
||||||
-- Proxmox Support Team <support@proxmox.com> Fri, 23 Jul 2021 12:34:29 +0200
|
* docs: initial update to repositories for bullseye
|
||||||
|
|
||||||
rust-proxmox-backup (1.1.12-1) buster; urgency=medium
|
-- Proxmox Support Team <support@proxmox.com> Sat, 03 Jul 2021 23:14:49 +0200
|
||||||
|
|
||||||
* subscription: set higher-level error to message instead of bailing out, to
|
rust-proxmox-backup (2.0.0-2) bullseye; urgency=medium
|
||||||
ensure a force-check gets through
|
|
||||||
|
|
||||||
* ui: dashboard: datastore stats: fix closing <i> tag
|
* file-restore-daemon/disk: add LVM (thin) support
|
||||||
|
|
||||||
* ui: datastore: option view: only navigate up when we actually removed the
|
-- Proxmox Support Team <support@proxmox.com> Sat, 03 Jul 2021 02:15:16 +0200
|
||||||
datastore
|
|
||||||
|
|
||||||
-- Proxmox Support Team <support@proxmox.com> Fri, 09 Jul 2021 12:56:35 +0200
|
rust-proxmox-backup (2.0.0-1) bullseye; urgency=medium
|
||||||
|
|
||||||
rust-proxmox-backup (1.1.11-1) buster; urgency=medium
|
* initial bump for Debian 11 Bullseye / Proxmox Backup Server 2.0
|
||||||
|
|
||||||
* tape/drive: fix logging when requesting media
|
|
||||||
|
|
||||||
* tape: fix LTO locate_file for HP drives
|
|
||||||
|
|
||||||
* fix #3393 (again): pxar/create: try to read xattrs/fcaps/acls by default
|
|
||||||
|
|
||||||
* proxmox-backup-manager: show task log on datastore create
|
|
||||||
|
|
||||||
-- Proxmox Support Team <support@proxmox.com> Wed, 30 Jun 2021 11:24:20 +0200
|
|
||||||
|
|
||||||
rust-proxmox-backup (1.1.10-1) buster; urgency=medium
|
|
||||||
|
|
||||||
* ui: datastore list summary: catch and show errors per datastore
|
* ui: datastore list summary: catch and show errors per datastore
|
||||||
|
|
||||||
@ -84,7 +215,7 @@ rust-proxmox-backup (1.1.10-1) buster; urgency=medium
|
|||||||
* ui: datastore options: add remove button to drop a datastore from the
|
* ui: datastore options: add remove button to drop a datastore from the
|
||||||
configuration, without removing any actual data
|
configuration, without removing any actual data
|
||||||
|
|
||||||
* ui: tape: drive selector: do not autoselect the drive
|
* ui: tape: drive selector: do not auto select the drive
|
||||||
|
|
||||||
* ui: tape: backup job: use correct default value for pbsUserSelector
|
* ui: tape: backup job: use correct default value for pbsUserSelector
|
||||||
|
|
||||||
@ -93,7 +224,22 @@ rust-proxmox-backup (1.1.10-1) buster; urgency=medium
|
|||||||
* backup: add helpers for async last recently used (LRU) caches for chunk
|
* backup: add helpers for async last recently used (LRU) caches for chunk
|
||||||
and index reading of backup snapshot
|
and index reading of backup snapshot
|
||||||
|
|
||||||
-- Proxmox Support Team <support@proxmox.com> Wed, 16 Jun 2021 09:46:15 +0200
|
* fix #3459: manager: add --ignore-verified and --outdated-after parameters
|
||||||
|
|
||||||
|
* proxmox-backup-manager: show task log on datastore create
|
||||||
|
|
||||||
|
* tape: snapshot reader: read chunks sorted by inode (per index) to improve
|
||||||
|
sequential reads when backing up data from slow spinning disks to tape.
|
||||||
|
|
||||||
|
* file-restore: support ZFS pools
|
||||||
|
|
||||||
|
* improve fix for #3393: pxar create: try to read xattrs/fcaps/acls by default
|
||||||
|
|
||||||
|
* fix compatibility with ExtJS 7.0
|
||||||
|
|
||||||
|
* docs: build api-viewer from widget-toolkit-dev
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Mon, 28 Jun 2021 19:35:40 +0200
|
||||||
|
|
||||||
rust-proxmox-backup (1.1.9-1) stable; urgency=medium
|
rust-proxmox-backup (1.1.9-1) stable; urgency=medium
|
||||||
|
|
||||||
|
47
debian/control
vendored
47
debian/control
vendored
@ -1,8 +1,8 @@
|
|||||||
Source: rust-proxmox-backup
|
Source: rust-proxmox-backup
|
||||||
Section: admin
|
Section: admin
|
||||||
Priority: optional
|
Priority: optional
|
||||||
Build-Depends: debhelper (>= 11),
|
Build-Depends: debhelper (>= 12),
|
||||||
dh-cargo (>= 18),
|
dh-cargo (>= 24),
|
||||||
cargo:native,
|
cargo:native,
|
||||||
rustc:native,
|
rustc:native,
|
||||||
libstd-rust-dev,
|
libstd-rust-dev,
|
||||||
@ -37,20 +37,21 @@ Build-Depends: debhelper (>= 11),
|
|||||||
librust-pam-sys-0.5+default-dev,
|
librust-pam-sys-0.5+default-dev,
|
||||||
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
|
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
|
||||||
librust-percent-encoding-2+default-dev (>= 2.1-~~),
|
librust-percent-encoding-2+default-dev (>= 2.1-~~),
|
||||||
librust-pin-project-1+default-dev,
|
librust-pin-project-lite-0.2+default-dev,
|
||||||
librust-pin-utils-0.1+default-dev,
|
librust-proxmox-0.13+api-macro-dev,
|
||||||
librust-proxmox-0.11+api-macro-dev (>= 0.11.6-~~),
|
librust-proxmox-0.13+cli-dev,
|
||||||
librust-proxmox-0.11+cli-dev (>= 0.11.6-~~),
|
librust-proxmox-0.13+default-dev,
|
||||||
librust-proxmox-0.11+default-dev (>= 0.11.6-~~),
|
librust-proxmox-0.13+router-dev,
|
||||||
librust-proxmox-0.11+router-dev (>= 0.11.6-~~),
|
librust-proxmox-0.13+sortable-macro-dev,
|
||||||
librust-proxmox-0.11+sortable-macro-dev (>= 0.11.6-~~),
|
librust-proxmox-0.13+tfa-dev,
|
||||||
librust-proxmox-0.11+tfa-dev (>= 0.11.6-~~),
|
librust-proxmox-acme-rs-0.2+default-dev (>= 0.2.1-~~),
|
||||||
librust-proxmox-acme-rs-0.3+default-dev,
|
librust-proxmox-apt-0.7+default-dev,
|
||||||
librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
|
librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
|
||||||
librust-proxmox-http-0.2+client-dev (>= 0.2.1-~~),
|
librust-proxmox-http-0.4+client-dev,
|
||||||
librust-proxmox-http-0.2+default-dev (>= 0.2.1-~~),
|
librust-proxmox-http-0.4+default-dev ,
|
||||||
librust-proxmox-http-0.2+http-helpers-dev (>= 0.2.1-~~),
|
librust-proxmox-http-0.4+http-helpers-dev,
|
||||||
librust-proxmox-http-0.2+websocket-dev (>= 0.2.1-~~),
|
librust-proxmox-http-0.4+websocket-dev,
|
||||||
|
librust-proxmox-openid-0.7+default-dev,
|
||||||
librust-pxar-0.10+default-dev (>= 0.10.1-~~),
|
librust-pxar-0.10+default-dev (>= 0.10.1-~~),
|
||||||
librust-pxar-0.10+tokio-io-dev (>= 0.10.1-~~),
|
librust-pxar-0.10+tokio-io-dev (>= 0.10.1-~~),
|
||||||
librust-regex-1+default-dev (>= 1.2-~~),
|
librust-regex-1+default-dev (>= 1.2-~~),
|
||||||
@ -84,8 +85,8 @@ Build-Depends: debhelper (>= 11),
|
|||||||
librust-walkdir-2+default-dev,
|
librust-walkdir-2+default-dev,
|
||||||
librust-webauthn-rs-0.2+default-dev (>= 0.2.5-~~),
|
librust-webauthn-rs-0.2+default-dev (>= 0.2.5-~~),
|
||||||
librust-xdg-2+default-dev (>= 2.2-~~),
|
librust-xdg-2+default-dev (>= 2.2-~~),
|
||||||
librust-zstd-0.4+bindgen-dev,
|
librust-zstd-0.6+bindgen-dev,
|
||||||
librust-zstd-0.4+default-dev,
|
librust-zstd-0.6+default-dev,
|
||||||
libacl1-dev,
|
libacl1-dev,
|
||||||
libfuse3-dev,
|
libfuse3-dev,
|
||||||
libsystemd-dev,
|
libsystemd-dev,
|
||||||
@ -99,6 +100,7 @@ Build-Depends: debhelper (>= 11),
|
|||||||
graphviz <!nodoc>,
|
graphviz <!nodoc>,
|
||||||
latexmk <!nodoc>,
|
latexmk <!nodoc>,
|
||||||
patchelf,
|
patchelf,
|
||||||
|
proxmox-widget-toolkit-dev <!nodoc>,
|
||||||
pve-eslint (>= 7.18.0-1),
|
pve-eslint (>= 7.18.0-1),
|
||||||
python3-docutils,
|
python3-docutils,
|
||||||
python3-pygments,
|
python3-pygments,
|
||||||
@ -109,15 +111,16 @@ Build-Depends: debhelper (>= 11),
|
|||||||
texlive-xetex <!nodoc>,
|
texlive-xetex <!nodoc>,
|
||||||
xindy <!nodoc>
|
xindy <!nodoc>
|
||||||
Maintainer: Proxmox Support Team <support@proxmox.com>
|
Maintainer: Proxmox Support Team <support@proxmox.com>
|
||||||
Standards-Version: 4.4.1
|
Standards-Version: 4.5.1
|
||||||
Vcs-Git: git://git.proxmox.com/git/proxmox-backup.git
|
Vcs-Git: git://git.proxmox.com/git/proxmox-backup.git
|
||||||
Vcs-Browser: https://git.proxmox.com/?p=proxmox-backup.git;a=summary
|
Vcs-Browser: https://git.proxmox.com/?p=proxmox-backup.git;a=summary
|
||||||
Homepage: https://www.proxmox.com
|
Homepage: https://www.proxmox.com
|
||||||
|
Rules-Requires-Root: binary-targets
|
||||||
|
|
||||||
Package: proxmox-backup-server
|
Package: proxmox-backup-server
|
||||||
Architecture: any
|
Architecture: any
|
||||||
Depends: fonts-font-awesome,
|
Depends: fonts-font-awesome,
|
||||||
libjs-extjs (>= 6.0.1),
|
libjs-extjs (>= 7~),
|
||||||
libjs-qrcodejs (>= 1.20201119),
|
libjs-qrcodejs (>= 1.20201119),
|
||||||
libproxmox-acme-plugins,
|
libproxmox-acme-plugins,
|
||||||
libsgutils2-2,
|
libsgutils2-2,
|
||||||
@ -128,7 +131,7 @@ Depends: fonts-font-awesome,
|
|||||||
postfix | mail-transport-agent,
|
postfix | mail-transport-agent,
|
||||||
proxmox-backup-docs,
|
proxmox-backup-docs,
|
||||||
proxmox-mini-journalreader,
|
proxmox-mini-journalreader,
|
||||||
proxmox-widget-toolkit (>= 2.6-2),
|
proxmox-widget-toolkit (>= 3.3-2),
|
||||||
pve-xtermjs (>= 4.7.0-1),
|
pve-xtermjs (>= 4.7.0-1),
|
||||||
sg3-utils,
|
sg3-utils,
|
||||||
smartmontools,
|
smartmontools,
|
||||||
@ -152,7 +155,8 @@ Description: Proxmox Backup Client tools
|
|||||||
Package: proxmox-backup-docs
|
Package: proxmox-backup-docs
|
||||||
Build-Profiles: <!nodoc>
|
Build-Profiles: <!nodoc>
|
||||||
Section: doc
|
Section: doc
|
||||||
Depends: libjs-extjs,
|
Depends: fonts-font-awesome,
|
||||||
|
libjs-extjs,
|
||||||
libjs-mathjax,
|
libjs-mathjax,
|
||||||
${misc:Depends},
|
${misc:Depends},
|
||||||
Architecture: all
|
Architecture: all
|
||||||
@ -165,6 +169,7 @@ Depends: ${misc:Depends},
|
|||||||
${shlibs:Depends},
|
${shlibs:Depends},
|
||||||
Recommends: pve-qemu-kvm (>= 5.0.0-9),
|
Recommends: pve-qemu-kvm (>= 5.0.0-9),
|
||||||
proxmox-backup-restore-image,
|
proxmox-backup-restore-image,
|
||||||
|
Breaks: proxmox-backup-restore-image (<< 0.3.1)
|
||||||
Description: Proxmox Backup single file restore tools for pxar and block device backups
|
Description: Proxmox Backup single file restore tools for pxar and block device backups
|
||||||
This package contains the Proxmox Backup single file restore client for
|
This package contains the Proxmox Backup single file restore client for
|
||||||
restoring individual files and folders from both host/container and VM/block
|
restoring individual files and folders from both host/container and VM/block
|
||||||
|
55
debian/control.in
vendored
55
debian/control.in
vendored
@ -1,55 +0,0 @@
|
|||||||
Package: proxmox-backup-server
|
|
||||||
Architecture: any
|
|
||||||
Depends: fonts-font-awesome,
|
|
||||||
libjs-extjs (>= 6.0.1),
|
|
||||||
libjs-qrcodejs (>= 1.20201119),
|
|
||||||
libproxmox-acme-plugins,
|
|
||||||
libsgutils2-2,
|
|
||||||
libzstd1 (>= 1.3.8),
|
|
||||||
lvm2,
|
|
||||||
openssh-server,
|
|
||||||
pbs-i18n,
|
|
||||||
postfix | mail-transport-agent,
|
|
||||||
proxmox-backup-docs,
|
|
||||||
proxmox-mini-journalreader,
|
|
||||||
proxmox-widget-toolkit (>= 2.6-2),
|
|
||||||
pve-xtermjs (>= 4.7.0-1),
|
|
||||||
sg3-utils,
|
|
||||||
smartmontools,
|
|
||||||
${misc:Depends},
|
|
||||||
${shlibs:Depends},
|
|
||||||
Recommends: zfsutils-linux,
|
|
||||||
ifupdown2,
|
|
||||||
Description: Proxmox Backup Server daemon with tools and GUI
|
|
||||||
This package contains the Proxmox Backup Server daemons and related
|
|
||||||
tools. This includes a web-based graphical user interface.
|
|
||||||
|
|
||||||
Package: proxmox-backup-client
|
|
||||||
Architecture: any
|
|
||||||
Depends: qrencode,
|
|
||||||
${misc:Depends},
|
|
||||||
${shlibs:Depends},
|
|
||||||
Description: Proxmox Backup Client tools
|
|
||||||
This package contains the Proxmox Backup client, which provides a
|
|
||||||
simple command line tool to create and restore backups.
|
|
||||||
|
|
||||||
Package: proxmox-backup-docs
|
|
||||||
Build-Profiles: <!nodoc>
|
|
||||||
Section: doc
|
|
||||||
Depends: libjs-extjs,
|
|
||||||
libjs-mathjax,
|
|
||||||
${misc:Depends},
|
|
||||||
Architecture: all
|
|
||||||
Description: Proxmox Backup Documentation
|
|
||||||
This package contains the Proxmox Backup Documentation files.
|
|
||||||
|
|
||||||
Package: proxmox-backup-file-restore
|
|
||||||
Architecture: any
|
|
||||||
Depends: ${misc:Depends},
|
|
||||||
${shlibs:Depends},
|
|
||||||
Recommends: pve-qemu-kvm (>= 5.0.0-9),
|
|
||||||
proxmox-backup-restore-image,
|
|
||||||
Description: Proxmox Backup single file restore tools for pxar and block device backups
|
|
||||||
This package contains the Proxmox Backup single file restore client for
|
|
||||||
restoring individual files and folders from both host/container and VM/block
|
|
||||||
device backups. It includes a block device restore driver using QEMU.
|
|
42
debian/debcargo.toml
vendored
42
debian/debcargo.toml
vendored
@ -1,42 +0,0 @@
|
|||||||
overlay = "."
|
|
||||||
crate_src_path = ".."
|
|
||||||
whitelist = ["tests/*.c"]
|
|
||||||
|
|
||||||
maintainer = "Proxmox Support Team <support@proxmox.com>"
|
|
||||||
|
|
||||||
[source]
|
|
||||||
vcs_git = "git://git.proxmox.com/git/proxmox-backup.git"
|
|
||||||
vcs_browser = "https://git.proxmox.com/?p=proxmox-backup.git;a=summary"
|
|
||||||
section = "admin"
|
|
||||||
build_depends = [
|
|
||||||
"bash-completion",
|
|
||||||
"debhelper (>= 12~)",
|
|
||||||
"fonts-dejavu-core <!nodoc>",
|
|
||||||
"fonts-lato <!nodoc>",
|
|
||||||
"fonts-open-sans <!nodoc>",
|
|
||||||
"graphviz <!nodoc>",
|
|
||||||
"latexmk <!nodoc>",
|
|
||||||
"patchelf",
|
|
||||||
"pve-eslint (>= 7.18.0-1)",
|
|
||||||
"python3-docutils",
|
|
||||||
"python3-pygments",
|
|
||||||
"python3-sphinx <!nodoc>",
|
|
||||||
"rsync",
|
|
||||||
"texlive-fonts-extra <!nodoc>",
|
|
||||||
"texlive-fonts-recommended <!nodoc>",
|
|
||||||
"texlive-xetex <!nodoc>",
|
|
||||||
"xindy <!nodoc>",
|
|
||||||
]
|
|
||||||
|
|
||||||
build_depends_excludes = [
|
|
||||||
"debhelper (>=11)",
|
|
||||||
]
|
|
||||||
|
|
||||||
[packages.lib]
|
|
||||||
depends = [
|
|
||||||
"libacl1-dev",
|
|
||||||
"libfuse3-dev",
|
|
||||||
"libsystemd-dev",
|
|
||||||
"uuid-dev",
|
|
||||||
"libsgutils2-dev",
|
|
||||||
]
|
|
36
debian/postinst
vendored
36
debian/postinst
vendored
@ -26,43 +26,7 @@ case "$1" in
|
|||||||
fi
|
fi
|
||||||
deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
|
deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
|
||||||
|
|
||||||
# FIXME: Remove with 1.1
|
|
||||||
if test -n "$2"; then
|
if test -n "$2"; then
|
||||||
if dpkg --compare-versions "$2" 'lt' '0.9.4-1'; then
|
|
||||||
if grep -s -q -P -e '^\s+verify-schedule ' /etc/proxmox-backup/datastore.cfg; then
|
|
||||||
echo "NOTE: drop all verify schedules from datastore config."
|
|
||||||
echo "You can now add more flexible verify jobs"
|
|
||||||
flock -w 30 /etc/proxmox-backup/.datastore.lck \
|
|
||||||
sed -i '/^\s\+verify-schedule /d' /etc/proxmox-backup/datastore.cfg || true
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
if dpkg --compare-versions "$2" 'le' '0.9.5-1'; then
|
|
||||||
chown --quiet backup:backup /var/log/proxmox-backup/api/auth.log || true
|
|
||||||
fi
|
|
||||||
if dpkg --compare-versions "$2" 'le' '0.9.7-1'; then
|
|
||||||
if [ -e /etc/proxmox-backup/remote.cfg ]; then
|
|
||||||
echo "NOTE: Switching over remote.cfg to new field names.."
|
|
||||||
flock -w 30 /etc/proxmox-backup/.remote.lck \
|
|
||||||
sed -i \
|
|
||||||
-e 's/^\s\+userid /\tauth-id /g' \
|
|
||||||
/etc/proxmox-backup/remote.cfg || true
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
if dpkg --compare-versions "$2" 'le' '1.0.14-1'; then
|
|
||||||
# FIXME: Remove with 2.0
|
|
||||||
if grep -s -q -P -e '^linux:' /etc/proxmox-backup/tape.cfg; then
|
|
||||||
echo "========="
|
|
||||||
echo "= NOTE: You have now unsupported 'linux' tape drives configured."
|
|
||||||
echo "= * Execute 'udevadm control --reload-rules && udevadm trigger' to update /dev"
|
|
||||||
echo "= * Edit '/etc/proxmox-backup/tape.cfg', remove 'linux' entries and re-add over CLI/GUI"
|
|
||||||
echo "========="
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
# FIXME: remove with 2.0
|
|
||||||
if [ -d "/var/lib/proxmox-backup/tape" ] &&
|
|
||||||
[ "$(stat --printf '%a' '/var/lib/proxmox-backup/tape')" != "750" ]; then
|
|
||||||
chmod 0750 /var/lib/proxmox-backup/tape || true
|
|
||||||
fi
|
|
||||||
# FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
|
# FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
|
||||||
if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
|
if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
|
||||||
echo "Fixing up termproxy user id in task log..."
|
echo "Fixing up termproxy user id in task log..."
|
||||||
|
8
debian/proxmox-backup-debug.bc
vendored
Normal file
8
debian/proxmox-backup-debug.bc
vendored
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
# proxmox-backup-debug bash completion
|
||||||
|
|
||||||
|
# see http://tiswww.case.edu/php/chet/bash/FAQ
|
||||||
|
# and __ltrim_colon_completions() in /usr/share/bash-completion/bash_completion
|
||||||
|
# this modifies global var, but I found no better way
|
||||||
|
COMP_WORDBREAKS=${COMP_WORDBREAKS//:}
|
||||||
|
|
||||||
|
complete -C 'proxmox-backup-debug bashcomplete' proxmox-backup-debug
|
1
debian/proxmox-backup-docs.links
vendored
1
debian/proxmox-backup-docs.links
vendored
@ -1,5 +1,6 @@
|
|||||||
/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/html/proxmox-backup.pdf
|
/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/html/proxmox-backup.pdf
|
||||||
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/prune-simulator/extjs
|
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/prune-simulator/extjs
|
||||||
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/lto-barcode/extjs
|
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/lto-barcode/extjs
|
||||||
|
/usr/share/fonts-font-awesome/ /usr/share/doc/proxmox-backup/html/lto-barcode/font-awesome
|
||||||
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/api-viewer/extjs
|
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/api-viewer/extjs
|
||||||
/usr/share/javascript/mathjax /usr/share/doc/proxmox-backup/html/_static/mathjax
|
/usr/share/javascript/mathjax /usr/share/doc/proxmox-backup/html/_static/mathjax
|
||||||
|
1
debian/proxmox-backup-server.bash-completion
vendored
1
debian/proxmox-backup-server.bash-completion
vendored
@ -1,4 +1,5 @@
|
|||||||
debian/proxmox-backup-manager.bc proxmox-backup-manager
|
debian/proxmox-backup-manager.bc proxmox-backup-manager
|
||||||
|
debian/proxmox-backup-debug.bc proxmox-backup-debug
|
||||||
debian/proxmox-tape.bc proxmox-tape
|
debian/proxmox-tape.bc proxmox-tape
|
||||||
debian/pmtx.bc pmtx
|
debian/pmtx.bc pmtx
|
||||||
debian/pmt.bc pmt
|
debian/pmt.bc pmt
|
||||||
|
3
debian/proxmox-backup-server.install
vendored
3
debian/proxmox-backup-server.install
vendored
@ -9,6 +9,7 @@ usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
|
|||||||
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
|
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
|
||||||
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-daily-update
|
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-daily-update
|
||||||
usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd
|
usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd
|
||||||
|
usr/sbin/proxmox-backup-debug
|
||||||
usr/sbin/proxmox-backup-manager
|
usr/sbin/proxmox-backup-manager
|
||||||
usr/bin/pmtx
|
usr/bin/pmtx
|
||||||
usr/bin/pmt
|
usr/bin/pmt
|
||||||
@ -17,6 +18,7 @@ usr/share/javascript/proxmox-backup/index.hbs
|
|||||||
usr/share/javascript/proxmox-backup/css/ext6-pbs.css
|
usr/share/javascript/proxmox-backup/css/ext6-pbs.css
|
||||||
usr/share/javascript/proxmox-backup/images
|
usr/share/javascript/proxmox-backup/images
|
||||||
usr/share/javascript/proxmox-backup/js/proxmox-backup-gui.js
|
usr/share/javascript/proxmox-backup/js/proxmox-backup-gui.js
|
||||||
|
usr/share/man/man1/proxmox-backup-debug.1
|
||||||
usr/share/man/man1/proxmox-backup-manager.1
|
usr/share/man/man1/proxmox-backup-manager.1
|
||||||
usr/share/man/man1/proxmox-backup-proxy.1
|
usr/share/man/man1/proxmox-backup-proxy.1
|
||||||
usr/share/man/man1/proxmox-tape.1
|
usr/share/man/man1/proxmox-tape.1
|
||||||
@ -31,6 +33,7 @@ usr/share/man/man5/verification.cfg.5
|
|||||||
usr/share/man/man5/media-pool.cfg.5
|
usr/share/man/man5/media-pool.cfg.5
|
||||||
usr/share/man/man5/tape.cfg.5
|
usr/share/man/man5/tape.cfg.5
|
||||||
usr/share/man/man5/tape-job.cfg.5
|
usr/share/man/man5/tape-job.cfg.5
|
||||||
|
usr/share/zsh/vendor-completions/_proxmox-backup-debug
|
||||||
usr/share/zsh/vendor-completions/_proxmox-backup-manager
|
usr/share/zsh/vendor-completions/_proxmox-backup-manager
|
||||||
usr/share/zsh/vendor-completions/_proxmox-tape
|
usr/share/zsh/vendor-completions/_proxmox-tape
|
||||||
usr/share/zsh/vendor-completions/_pmtx
|
usr/share/zsh/vendor-completions/_pmtx
|
||||||
|
8
debian/rules
vendored
8
debian/rules
vendored
@ -32,6 +32,9 @@ override_dh_auto_build:
|
|||||||
override_dh_missing:
|
override_dh_missing:
|
||||||
dh_missing --fail-missing
|
dh_missing --fail-missing
|
||||||
|
|
||||||
|
override_dh_auto_test:
|
||||||
|
# ignore here to avoid rebuilding the binaries with the wrong target
|
||||||
|
|
||||||
override_dh_auto_install:
|
override_dh_auto_install:
|
||||||
dh_auto_install -- \
|
dh_auto_install -- \
|
||||||
PROXY_USER=backup \
|
PROXY_USER=backup \
|
||||||
@ -45,11 +48,6 @@ override_dh_installsystemd:
|
|||||||
override_dh_fixperms:
|
override_dh_fixperms:
|
||||||
dh_fixperms --exclude sg-tape-cmd
|
dh_fixperms --exclude sg-tape-cmd
|
||||||
|
|
||||||
# workaround https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=933541
|
|
||||||
# TODO: remove once available (Debian 11 ?)
|
|
||||||
override_dh_dwz:
|
|
||||||
dh_dwz --no-dwz-multifile
|
|
||||||
|
|
||||||
override_dh_strip:
|
override_dh_strip:
|
||||||
dh_strip
|
dh_strip
|
||||||
for exe in $$(find \
|
for exe in $$(find \
|
||||||
|
@ -5,6 +5,7 @@ GENERATED_SYNOPSIS := \
|
|||||||
proxmox-backup-client/synopsis.rst \
|
proxmox-backup-client/synopsis.rst \
|
||||||
proxmox-backup-client/catalog-shell-synopsis.rst \
|
proxmox-backup-client/catalog-shell-synopsis.rst \
|
||||||
proxmox-backup-manager/synopsis.rst \
|
proxmox-backup-manager/synopsis.rst \
|
||||||
|
proxmox-backup-debug/synopsis.rst \
|
||||||
proxmox-file-restore/synopsis.rst \
|
proxmox-file-restore/synopsis.rst \
|
||||||
pxar/synopsis.rst \
|
pxar/synopsis.rst \
|
||||||
pmtx/synopsis.rst \
|
pmtx/synopsis.rst \
|
||||||
@ -27,7 +28,8 @@ MAN1_PAGES := \
|
|||||||
proxmox-backup-proxy.1 \
|
proxmox-backup-proxy.1 \
|
||||||
proxmox-backup-client.1 \
|
proxmox-backup-client.1 \
|
||||||
proxmox-backup-manager.1 \
|
proxmox-backup-manager.1 \
|
||||||
proxmox-file-restore.1
|
proxmox-file-restore.1 \
|
||||||
|
proxmox-backup-debug.1
|
||||||
|
|
||||||
MAN5_PAGES := \
|
MAN5_PAGES := \
|
||||||
media-pool.cfg.5 \
|
media-pool.cfg.5 \
|
||||||
@ -46,23 +48,35 @@ PRUNE_SIMULATOR_FILES := \
|
|||||||
prune-simulator/clear-trigger.png \
|
prune-simulator/clear-trigger.png \
|
||||||
prune-simulator/prune-simulator.js
|
prune-simulator/prune-simulator.js
|
||||||
|
|
||||||
|
PRUNE_SIMULATOR_JS_SOURCE := \
|
||||||
|
/usr/share/javascript/proxmox-widget-toolkit-dev/Toolkit.js \
|
||||||
|
prune-simulator/prune-simulator_source.js
|
||||||
|
|
||||||
|
LTO_BARCODE_JS_SOURCE := \
|
||||||
|
/usr/share/javascript/proxmox-widget-toolkit-dev/Toolkit.js \
|
||||||
|
lto-barcode/code39.js \
|
||||||
|
lto-barcode/prefix-field.js \
|
||||||
|
lto-barcode/label-style.js \
|
||||||
|
lto-barcode/tape-type.js \
|
||||||
|
lto-barcode/paper-size.js \
|
||||||
|
lto-barcode/page-layout.js \
|
||||||
|
lto-barcode/page-calibration.js \
|
||||||
|
lto-barcode/label-list.js \
|
||||||
|
lto-barcode/label-setup.js \
|
||||||
|
lto-barcode/lto-barcode.js
|
||||||
|
|
||||||
LTO_BARCODE_FILES := \
|
LTO_BARCODE_FILES := \
|
||||||
lto-barcode/index.html \
|
lto-barcode/index.html \
|
||||||
lto-barcode/code39.js \
|
lto-barcode/lto-barcode-generator.js
|
||||||
lto-barcode/prefix-field.js \
|
|
||||||
lto-barcode/label-style.js \
|
|
||||||
lto-barcode/tape-type.js \
|
|
||||||
lto-barcode/paper-size.js \
|
|
||||||
lto-barcode/page-layout.js \
|
|
||||||
lto-barcode/page-calibration.js \
|
|
||||||
lto-barcode/label-list.js \
|
|
||||||
lto-barcode/label-setup.js \
|
|
||||||
lto-barcode/lto-barcode.js
|
|
||||||
|
|
||||||
API_VIEWER_SOURCES= \
|
API_VIEWER_SOURCES= \
|
||||||
api-viewer/index.html \
|
api-viewer/index.html \
|
||||||
api-viewer/apidoc.js
|
api-viewer/apidoc.js
|
||||||
|
|
||||||
|
API_VIEWER_FILES := \
|
||||||
|
api-viewer/apidata.js \
|
||||||
|
/usr/share/javascript/proxmox-widget-toolkit-dev/APIViewer.js \
|
||||||
|
|
||||||
# Sphinx documentation setup
|
# Sphinx documentation setup
|
||||||
SPHINXOPTS =
|
SPHINXOPTS =
|
||||||
SPHINXBUILD = sphinx-build
|
SPHINXBUILD = sphinx-build
|
||||||
@ -187,6 +201,12 @@ proxmox-file-restore/synopsis.rst: ${COMPILEDIR}/proxmox-file-restore
|
|||||||
proxmox-file-restore.1: proxmox-file-restore/man1.rst proxmox-file-restore/description.rst proxmox-file-restore/synopsis.rst
|
proxmox-file-restore.1: proxmox-file-restore/man1.rst proxmox-file-restore/description.rst proxmox-file-restore/synopsis.rst
|
||||||
rst2man $< >$@
|
rst2man $< >$@
|
||||||
|
|
||||||
|
proxmox-backup-debug/synopsis.rst: ${COMPILEDIR}/proxmox-backup-debug
|
||||||
|
${COMPILEDIR}/proxmox-backup-debug printdoc > proxmox-backup-debug/synopsis.rst
|
||||||
|
|
||||||
|
proxmox-backup-debug.1: proxmox-backup-debug/man1.rst proxmox-backup-debug/description.rst proxmox-backup-debug/synopsis.rst
|
||||||
|
rst2man $< >$@
|
||||||
|
|
||||||
.PHONY: onlinehelpinfo
|
.PHONY: onlinehelpinfo
|
||||||
onlinehelpinfo:
|
onlinehelpinfo:
|
||||||
@echo "Generating OnlineHelpInfo.js..."
|
@echo "Generating OnlineHelpInfo.js..."
|
||||||
@ -196,8 +216,17 @@ onlinehelpinfo:
|
|||||||
api-viewer/apidata.js: ${COMPILEDIR}/docgen
|
api-viewer/apidata.js: ${COMPILEDIR}/docgen
|
||||||
${COMPILEDIR}/docgen apidata.js >$@
|
${COMPILEDIR}/docgen apidata.js >$@
|
||||||
|
|
||||||
api-viewer/apidoc.js: api-viewer/apidata.js api-viewer/PBSAPI.js
|
api-viewer/apidoc.js: ${API_VIEWER_FILES}
|
||||||
cat api-viewer/apidata.js api-viewer/PBSAPI.js >$@
|
cat ${API_VIEWER_FILES} >$@.tmp
|
||||||
|
mv $@.tmp $@
|
||||||
|
|
||||||
|
prune-simulator/prune-simulator.js: ${PRUNE_SIMULATOR_JS_SOURCE}
|
||||||
|
cat ${PRUNE_SIMULATOR_JS_SOURCE} >$@.tmp
|
||||||
|
mv $@.tmp $@
|
||||||
|
|
||||||
|
lto-barcode/lto-barcode-generator.js: ${LTO_BARCODE_JS_SOURCE}
|
||||||
|
cat ${LTO_BARCODE_JS_SOURCE} >$@.tmp
|
||||||
|
mv $@.tmp $@
|
||||||
|
|
||||||
.PHONY: html
|
.PHONY: html
|
||||||
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES} ${API_VIEWER_SOURCES}
|
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES} ${API_VIEWER_SOURCES}
|
||||||
@ -228,7 +257,7 @@ epub3: ${GENERATED_SYNOPSIS}
|
|||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -r -f *~ *.1 ${BUILDDIR} ${GENERATED_SYNOPSIS} api-viewer/apidata.js
|
rm -r -f *~ *.1 ${BUILDDIR} ${GENERATED_SYNOPSIS} api-viewer/apidata.js
|
||||||
rm -f api-viewer/apidoc.js lto-barcode/lto-barcode-generator.js
|
rm -f api-viewer/apidoc.js lto-barcode/lto-barcode-generator.js prune-simulator/prune-simulator.js
|
||||||
|
|
||||||
|
|
||||||
install_manual_pages: ${MAN1_PAGES} ${MAN5_PAGES}
|
install_manual_pages: ${MAN1_PAGES} ${MAN5_PAGES}
|
||||||
|
@ -1,526 +0,0 @@
|
|||||||
// avoid errors when running without development tools
|
|
||||||
if (!Ext.isDefined(Ext.global.console)) {
|
|
||||||
var console = {
|
|
||||||
dir: function() {},
|
|
||||||
log: function() {}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
Ext.onReady(function() {
|
|
||||||
|
|
||||||
Ext.define('pve-param-schema', {
|
|
||||||
extend: 'Ext.data.Model',
|
|
||||||
fields: [
|
|
||||||
'name', 'type', 'typetext', 'description', 'verbose_description',
|
|
||||||
'enum', 'minimum', 'maximum', 'minLength', 'maxLength',
|
|
||||||
'pattern', 'title', 'requires', 'format', 'default',
|
|
||||||
'disallow', 'extends', 'links',
|
|
||||||
{
|
|
||||||
name: 'optional',
|
|
||||||
type: 'boolean'
|
|
||||||
}
|
|
||||||
]
|
|
||||||
});
|
|
||||||
|
|
||||||
var store = Ext.define('pve-updated-treestore', {
|
|
||||||
extend: 'Ext.data.TreeStore',
|
|
||||||
model: Ext.define('pve-api-doc', {
|
|
||||||
extend: 'Ext.data.Model',
|
|
||||||
fields: [
|
|
||||||
'path', 'info', 'text',
|
|
||||||
]
|
|
||||||
}),
|
|
||||||
proxy: {
|
|
||||||
type: 'memory',
|
|
||||||
data: pbsapi
|
|
||||||
},
|
|
||||||
sorters: [{
|
|
||||||
property: 'leaf',
|
|
||||||
direction: 'ASC'
|
|
||||||
}, {
|
|
||||||
property: 'text',
|
|
||||||
direction: 'ASC'
|
|
||||||
}],
|
|
||||||
filterer: 'bottomup',
|
|
||||||
doFilter: function(node) {
|
|
||||||
this.filterNodes(node, this.getFilters().getFilterFn(), true);
|
|
||||||
},
|
|
||||||
|
|
||||||
filterNodes: function(node, filterFn, parentVisible) {
|
|
||||||
var me = this,
|
|
||||||
bottomUpFiltering = me.filterer === 'bottomup',
|
|
||||||
match = filterFn(node) && parentVisible || (node.isRoot() && !me.getRootVisible()),
|
|
||||||
childNodes = node.childNodes,
|
|
||||||
len = childNodes && childNodes.length, i, matchingChildren;
|
|
||||||
|
|
||||||
if (len) {
|
|
||||||
for (i = 0; i < len; ++i) {
|
|
||||||
matchingChildren = me.filterNodes(childNodes[i], filterFn, match || bottomUpFiltering) || matchingChildren;
|
|
||||||
}
|
|
||||||
if (bottomUpFiltering) {
|
|
||||||
match = matchingChildren || match;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
node.set("visible", match, me._silentOptions);
|
|
||||||
return match;
|
|
||||||
},
|
|
||||||
|
|
||||||
}).create();
|
|
||||||
|
|
||||||
var render_description = function(value, metaData, record) {
|
|
||||||
var pdef = record.data;
|
|
||||||
|
|
||||||
value = pdef.verbose_description || value;
|
|
||||||
|
|
||||||
// TODO: try to render asciidoc correctly
|
|
||||||
|
|
||||||
metaData.style = 'white-space:pre-wrap;'
|
|
||||||
|
|
||||||
return Ext.htmlEncode(value);
|
|
||||||
};
|
|
||||||
|
|
||||||
var render_type = function(value, metaData, record) {
|
|
||||||
var pdef = record.data;
|
|
||||||
|
|
||||||
return pdef['enum'] ? 'enum' : (pdef.type || 'string');
|
|
||||||
};
|
|
||||||
|
|
||||||
let render_simple_format = function(pdef, type_fallback) {
|
|
||||||
if (pdef.typetext)
|
|
||||||
return pdef.typetext;
|
|
||||||
|
|
||||||
if (pdef['enum'])
|
|
||||||
return pdef['enum'].join(' | ');
|
|
||||||
|
|
||||||
if (pdef.format)
|
|
||||||
return pdef.format;
|
|
||||||
|
|
||||||
if (pdef.pattern)
|
|
||||||
return pdef.pattern;
|
|
||||||
|
|
||||||
if (pdef.type === 'boolean')
|
|
||||||
return `<true|false>`;
|
|
||||||
|
|
||||||
if (type_fallback && pdef.type)
|
|
||||||
return `<${pdef.type}>`;
|
|
||||||
|
|
||||||
return;
|
|
||||||
};
|
|
||||||
|
|
||||||
let render_format = function(value, metaData, record) {
|
|
||||||
let pdef = record.data;
|
|
||||||
|
|
||||||
metaData.style = 'white-space:normal;'
|
|
||||||
|
|
||||||
if (pdef.type === 'array' && pdef.items) {
|
|
||||||
let format = render_simple_format(pdef.items, true);
|
|
||||||
return `[${Ext.htmlEncode(format)}, ...]`;
|
|
||||||
}
|
|
||||||
|
|
||||||
return Ext.htmlEncode(render_simple_format(pdef) || '');
|
|
||||||
};
|
|
||||||
|
|
||||||
var real_path = function(path) {
|
|
||||||
return path.replace(/^.*\/_upgrade_(\/)?/, "/");
|
|
||||||
};
|
|
||||||
|
|
||||||
var permission_text = function(permission) {
|
|
||||||
let permhtml = "";
|
|
||||||
|
|
||||||
if (permission.user) {
|
|
||||||
if (!permission.description) {
|
|
||||||
if (permission.user === 'world') {
|
|
||||||
permhtml += "Accessible without any authentication.";
|
|
||||||
} else if (permission.user === 'all') {
|
|
||||||
permhtml += "Accessible by all authenticated users.";
|
|
||||||
} else {
|
|
||||||
permhtml += 'Onyl accessible by user "' +
|
|
||||||
permission.user + '"';
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if (permission.check) {
|
|
||||||
permhtml += "<pre>Check: " +
|
|
||||||
Ext.htmlEncode(Ext.JSON.encode(permission.check)) + "</pre>";
|
|
||||||
} else if (permission.userParam) {
|
|
||||||
permhtml += `<div>Check if user matches parameter '${permission.userParam}'`;
|
|
||||||
} else if (permission.or) {
|
|
||||||
permhtml += "<div>Or<div style='padding-left: 10px;'>";
|
|
||||||
Ext.Array.each(permission.or, function(sub_permission) {
|
|
||||||
permhtml += permission_text(sub_permission);
|
|
||||||
})
|
|
||||||
permhtml += "</div></div>";
|
|
||||||
} else if (permission.and) {
|
|
||||||
permhtml += "<div>And<div style='padding-left: 10px;'>";
|
|
||||||
Ext.Array.each(permission.and, function(sub_permission) {
|
|
||||||
permhtml += permission_text(sub_permission);
|
|
||||||
})
|
|
||||||
permhtml += "</div></div>";
|
|
||||||
} else {
|
|
||||||
//console.log(permission);
|
|
||||||
permhtml += "Unknown syntax!";
|
|
||||||
}
|
|
||||||
|
|
||||||
return permhtml;
|
|
||||||
};
|
|
||||||
|
|
||||||
var render_docu = function(data) {
|
|
||||||
var md = data.info;
|
|
||||||
|
|
||||||
// console.dir(data);
|
|
||||||
|
|
||||||
var items = [];
|
|
||||||
|
|
||||||
var clicmdhash = {
|
|
||||||
GET: 'get',
|
|
||||||
POST: 'create',
|
|
||||||
PUT: 'set',
|
|
||||||
DELETE: 'delete'
|
|
||||||
};
|
|
||||||
|
|
||||||
Ext.Array.each(['GET', 'POST', 'PUT', 'DELETE'], function(method) {
|
|
||||||
var info = md[method];
|
|
||||||
if (info) {
|
|
||||||
|
|
||||||
var usage = "";
|
|
||||||
|
|
||||||
usage += "<table><tr><td>HTTP: </td><td>"
|
|
||||||
+ method + " " + real_path("/api2/json" + data.path) + "</td></tr>";
|
|
||||||
|
|
||||||
var sections = [
|
|
||||||
{
|
|
||||||
title: 'Description',
|
|
||||||
html: Ext.htmlEncode(info.description),
|
|
||||||
bodyPadding: 10
|
|
||||||
},
|
|
||||||
{
|
|
||||||
title: 'Usage',
|
|
||||||
html: usage,
|
|
||||||
bodyPadding: 10
|
|
||||||
}
|
|
||||||
];
|
|
||||||
|
|
||||||
if (info.parameters && info.parameters.properties) {
|
|
||||||
|
|
||||||
var pstore = Ext.create('Ext.data.Store', {
|
|
||||||
model: 'pve-param-schema',
|
|
||||||
proxy: {
|
|
||||||
type: 'memory'
|
|
||||||
},
|
|
||||||
groupField: 'optional',
|
|
||||||
sorters: [
|
|
||||||
{
|
|
||||||
property: 'name',
|
|
||||||
direction: 'ASC'
|
|
||||||
}
|
|
||||||
]
|
|
||||||
});
|
|
||||||
|
|
||||||
Ext.Object.each(info.parameters.properties, function(name, pdef) {
|
|
||||||
pdef.name = name;
|
|
||||||
pstore.add(pdef);
|
|
||||||
});
|
|
||||||
|
|
||||||
pstore.sort();
|
|
||||||
|
|
||||||
var groupingFeature = Ext.create('Ext.grid.feature.Grouping',{
|
|
||||||
enableGroupingMenu: false,
|
|
||||||
groupHeaderTpl: '<tpl if="groupValue">Optional</tpl><tpl if="!groupValue">Required</tpl>'
|
|
||||||
});
|
|
||||||
|
|
||||||
sections.push({
|
|
||||||
xtype: 'gridpanel',
|
|
||||||
title: 'Parameters',
|
|
||||||
features: [groupingFeature],
|
|
||||||
store: pstore,
|
|
||||||
viewConfig: {
|
|
||||||
trackOver: false,
|
|
||||||
stripeRows: true
|
|
||||||
},
|
|
||||||
columns: [
|
|
||||||
{
|
|
||||||
header: 'Name',
|
|
||||||
dataIndex: 'name',
|
|
||||||
flex: 1
|
|
||||||
},
|
|
||||||
{
|
|
||||||
header: 'Type',
|
|
||||||
dataIndex: 'type',
|
|
||||||
renderer: render_type,
|
|
||||||
flex: 1
|
|
||||||
},
|
|
||||||
{
|
|
||||||
header: 'Default',
|
|
||||||
dataIndex: 'default',
|
|
||||||
flex: 1
|
|
||||||
},
|
|
||||||
{
|
|
||||||
header: 'Format',
|
|
||||||
dataIndex: 'type',
|
|
||||||
renderer: render_format,
|
|
||||||
flex: 2
|
|
||||||
},
|
|
||||||
{
|
|
||||||
header: 'Description',
|
|
||||||
dataIndex: 'description',
|
|
||||||
renderer: render_description,
|
|
||||||
flex: 6
|
|
||||||
}
|
|
||||||
]
|
|
||||||
});
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
if (info.returns) {
|
|
||||||
|
|
||||||
var retinf = info.returns;
|
|
||||||
var rtype = retinf.type;
|
|
||||||
if (!rtype && retinf.items)
|
|
||||||
rtype = 'array';
|
|
||||||
if (!rtype)
|
|
||||||
rtype = 'object';
|
|
||||||
|
|
||||||
var rpstore = Ext.create('Ext.data.Store', {
|
|
||||||
model: 'pve-param-schema',
|
|
||||||
proxy: {
|
|
||||||
type: 'memory'
|
|
||||||
},
|
|
||||||
groupField: 'optional',
|
|
||||||
sorters: [
|
|
||||||
{
|
|
||||||
property: 'name',
|
|
||||||
direction: 'ASC'
|
|
||||||
}
|
|
||||||
]
|
|
||||||
});
|
|
||||||
|
|
||||||
var properties;
|
|
||||||
if (rtype === 'array' && retinf.items.properties) {
|
|
||||||
properties = retinf.items.properties;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (rtype === 'object' && retinf.properties) {
|
|
||||||
properties = retinf.properties;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ext.Object.each(properties, function(name, pdef) {
|
|
||||||
pdef.name = name;
|
|
||||||
rpstore.add(pdef);
|
|
||||||
});
|
|
||||||
|
|
||||||
rpstore.sort();
|
|
||||||
|
|
||||||
var groupingFeature = Ext.create('Ext.grid.feature.Grouping',{
|
|
||||||
enableGroupingMenu: false,
|
|
||||||
groupHeaderTpl: '<tpl if="groupValue">Optional</tpl><tpl if="!groupValue">Obligatory</tpl>'
|
|
||||||
});
|
|
||||||
var returnhtml;
|
|
||||||
if (retinf.items) {
|
|
||||||
returnhtml = '<pre>items: ' + Ext.htmlEncode(JSON.stringify(retinf.items, null, 4)) + '</pre>';
|
|
||||||
}
|
|
||||||
|
|
||||||
if (retinf.properties) {
|
|
||||||
returnhtml = returnhtml || '';
|
|
||||||
returnhtml += '<pre>properties:' + Ext.htmlEncode(JSON.stringify(retinf.properties, null, 4)) + '</pre>';
|
|
||||||
}
|
|
||||||
|
|
||||||
var rawSection = Ext.create('Ext.panel.Panel', {
|
|
||||||
bodyPadding: '0px 10px 10px 10px',
|
|
||||||
html: returnhtml,
|
|
||||||
hidden: true
|
|
||||||
});
|
|
||||||
|
|
||||||
sections.push({
|
|
||||||
xtype: 'gridpanel',
|
|
||||||
title: 'Returns: ' + rtype,
|
|
||||||
features: [groupingFeature],
|
|
||||||
store: rpstore,
|
|
||||||
viewConfig: {
|
|
||||||
trackOver: false,
|
|
||||||
stripeRows: true
|
|
||||||
},
|
|
||||||
columns: [
|
|
||||||
{
|
|
||||||
header: 'Name',
|
|
||||||
dataIndex: 'name',
|
|
||||||
flex: 1
|
|
||||||
},
|
|
||||||
{
|
|
||||||
header: 'Type',
|
|
||||||
dataIndex: 'type',
|
|
||||||
renderer: render_type,
|
|
||||||
flex: 1
|
|
||||||
},
|
|
||||||
{
|
|
||||||
header: 'Default',
|
|
||||||
dataIndex: 'default',
|
|
||||||
flex: 1
|
|
||||||
},
|
|
||||||
{
|
|
||||||
header: 'Format',
|
|
||||||
dataIndex: 'type',
|
|
||||||
renderer: render_format,
|
|
||||||
flex: 2
|
|
||||||
},
|
|
||||||
{
|
|
||||||
header: 'Description',
|
|
||||||
dataIndex: 'description',
|
|
||||||
renderer: render_description,
|
|
||||||
flex: 6
|
|
||||||
}
|
|
||||||
],
|
|
||||||
bbar: [
|
|
||||||
{
|
|
||||||
xtype: 'button',
|
|
||||||
text: 'Show RAW',
|
|
||||||
handler: function(btn) {
|
|
||||||
rawSection.setVisible(!rawSection.isVisible());
|
|
||||||
btn.setText(rawSection.isVisible() ? 'Hide RAW' : 'Show RAW');
|
|
||||||
}}
|
|
||||||
]
|
|
||||||
});
|
|
||||||
|
|
||||||
sections.push(rawSection);
|
|
||||||
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!data.path.match(/\/_upgrade_/)) {
|
|
||||||
var permhtml = '';
|
|
||||||
|
|
||||||
if (!info.permissions) {
|
|
||||||
permhtml = "Root only.";
|
|
||||||
} else {
|
|
||||||
if (info.permissions.description) {
|
|
||||||
permhtml += "<div style='white-space:pre-wrap;padding-bottom:10px;'>" +
|
|
||||||
Ext.htmlEncode(info.permissions.description) + "</div>";
|
|
||||||
}
|
|
||||||
permhtml += permission_text(info.permissions);
|
|
||||||
}
|
|
||||||
|
|
||||||
// we do not have this information for PBS api
|
|
||||||
//if (!info.allowtoken) {
|
|
||||||
// permhtml += "<br />This API endpoint is not available for API tokens."
|
|
||||||
//}
|
|
||||||
|
|
||||||
sections.push({
|
|
||||||
title: 'Required permissions',
|
|
||||||
bodyPadding: 10,
|
|
||||||
html: permhtml
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
items.push({
|
|
||||||
title: method,
|
|
||||||
autoScroll: true,
|
|
||||||
defaults: {
|
|
||||||
border: false
|
|
||||||
},
|
|
||||||
items: sections
|
|
||||||
});
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
var ct = Ext.getCmp('docview');
|
|
||||||
ct.setTitle("Path: " + real_path(data.path));
|
|
||||||
ct.removeAll(true);
|
|
||||||
ct.add(items);
|
|
||||||
ct.setActiveTab(0);
|
|
||||||
};
|
|
||||||
|
|
||||||
Ext.define('Ext.form.SearchField', {
|
|
||||||
extend: 'Ext.form.field.Text',
|
|
||||||
alias: 'widget.searchfield',
|
|
||||||
|
|
||||||
emptyText: 'Search...',
|
|
||||||
|
|
||||||
flex: 1,
|
|
||||||
|
|
||||||
inputType: 'search',
|
|
||||||
listeners: {
|
|
||||||
'change': function(){
|
|
||||||
|
|
||||||
var value = this.getValue();
|
|
||||||
if (!Ext.isEmpty(value)) {
|
|
||||||
store.filter({
|
|
||||||
property: 'path',
|
|
||||||
value: value,
|
|
||||||
anyMatch: true
|
|
||||||
});
|
|
||||||
} else {
|
|
||||||
store.clearFilter();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
var tree = Ext.create('Ext.tree.Panel', {
|
|
||||||
title: 'Resource Tree',
|
|
||||||
tbar: [
|
|
||||||
{
|
|
||||||
xtype: 'searchfield',
|
|
||||||
}
|
|
||||||
],
|
|
||||||
tools: [
|
|
||||||
{
|
|
||||||
type: 'expand',
|
|
||||||
tooltip: 'Expand all',
|
|
||||||
tooltipType: 'title',
|
|
||||||
callback: (tree) => tree.expandAll(),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
type: 'collapse',
|
|
||||||
tooltip: 'Collapse all',
|
|
||||||
tooltipType: 'title',
|
|
||||||
callback: (tree) => tree.collapseAll(),
|
|
||||||
},
|
|
||||||
],
|
|
||||||
store: store,
|
|
||||||
width: 200,
|
|
||||||
region: 'west',
|
|
||||||
split: true,
|
|
||||||
margins: '5 0 5 5',
|
|
||||||
rootVisible: false,
|
|
||||||
listeners: {
|
|
||||||
selectionchange: function(v, selections) {
|
|
||||||
if (!selections[0])
|
|
||||||
return;
|
|
||||||
var rec = selections[0];
|
|
||||||
render_docu(rec.data);
|
|
||||||
location.hash = '#' + rec.data.path;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
Ext.create('Ext.container.Viewport', {
|
|
||||||
layout: 'border',
|
|
||||||
renderTo: Ext.getBody(),
|
|
||||||
items: [
|
|
||||||
tree,
|
|
||||||
{
|
|
||||||
xtype: 'tabpanel',
|
|
||||||
title: 'Documentation',
|
|
||||||
id: 'docview',
|
|
||||||
region: 'center',
|
|
||||||
margins: '5 5 5 0',
|
|
||||||
layout: 'fit',
|
|
||||||
items: []
|
|
||||||
}
|
|
||||||
]
|
|
||||||
});
|
|
||||||
|
|
||||||
var deepLink = function() {
|
|
||||||
var path = window.location.hash.substring(1).replace(/\/\s*$/, '')
|
|
||||||
var endpoint = store.findNode('path', path);
|
|
||||||
|
|
||||||
if (endpoint) {
|
|
||||||
tree.getSelectionModel().select(endpoint);
|
|
||||||
tree.expandPath(endpoint.getPath());
|
|
||||||
render_docu(endpoint.data);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
window.onhashchange = deepLink;
|
|
||||||
|
|
||||||
deepLink();
|
|
||||||
|
|
||||||
});
|
|
@ -49,15 +49,31 @@ Environment Variables
|
|||||||
When set, this value is used for the password required for the backup server.
|
When set, this value is used for the password required for the backup server.
|
||||||
You can also set this to a API token secret.
|
You can also set this to a API token secret.
|
||||||
|
|
||||||
|
``PBS_PASSWORD_FD``, ``PBS_PASSWORD_FILE``, ``PBS_PASSWORD_CMD``
|
||||||
|
Like ``PBS_PASSWORD``, but read data from an open file descriptor, a file
|
||||||
|
name or from the `stdout` of a command, respectively. The first defined
|
||||||
|
environment variable from the order above is preferred.
|
||||||
|
|
||||||
``PBS_ENCRYPTION_PASSWORD``
|
``PBS_ENCRYPTION_PASSWORD``
|
||||||
When set, this value is used to access the secret encryption key (if
|
When set, this value is used to access the secret encryption key (if
|
||||||
protected by password).
|
protected by password).
|
||||||
|
|
||||||
|
``PBS_ENCRYPTION_PASSWORD_FD``, ``PBS_ENCRYPTION_PASSWORD_FILE``, ``PBS_ENCRYPTION_PASSWORD_CMD``
|
||||||
|
Like ``PBS_ENCRYPTION_PASSWORD``, but read data from an open file descriptor,
|
||||||
|
a file name or from the `stdout` of a command, respectively. The first
|
||||||
|
defined environment variable from the order above is preferred.
|
||||||
|
|
||||||
``PBS_FINGERPRINT`` When set, this value is used to verify the server
|
``PBS_FINGERPRINT`` When set, this value is used to verify the server
|
||||||
certificate (only used if the system CA certificates cannot validate the
|
certificate (only used if the system CA certificates cannot validate the
|
||||||
certificate).
|
certificate).
|
||||||
|
|
||||||
|
|
||||||
|
.. Note:: Passwords must be valid UTF8 an may not contain
|
||||||
|
newlines. For your convienience, we just use the first line as
|
||||||
|
password, so you can add arbitrary comments after the
|
||||||
|
first newline.
|
||||||
|
|
||||||
|
|
||||||
Output Format
|
Output Format
|
||||||
-------------
|
-------------
|
||||||
|
|
||||||
|
@ -21,3 +21,7 @@ Command Line Tools
|
|||||||
|
|
||||||
.. include:: pxar/description.rst
|
.. include:: pxar/description.rst
|
||||||
|
|
||||||
|
``proxmox-backup-debug``
|
||||||
|
~~~~~~~~
|
||||||
|
|
||||||
|
.. include:: proxmox-backup-debug/description.rst
|
||||||
|
12
docs/faq.rst
12
docs/faq.rst
@ -24,11 +24,13 @@ future plans to support 32-bit processors.
|
|||||||
How long will my Proxmox Backup Server version be supported?
|
How long will my Proxmox Backup Server version be supported?
|
||||||
------------------------------------------------------------
|
------------------------------------------------------------
|
||||||
|
|
||||||
+-----------------------+--------------------+---------------+------------+--------------------+
|
+-----------------------+----------------------+---------------+------------+--------------------+
|
||||||
|Proxmox Backup Version | Debian Version | First Release | Debian EOL | Proxmox Backup EOL |
|
|Proxmox Backup Version | Debian Version | First Release | Debian EOL | Proxmox Backup EOL |
|
||||||
+=======================+====================+===============+============+====================+
|
+=======================+======================+===============+============+====================+
|
||||||
|Proxmox Backup 1.x | Debian 10 (Buster) | 2020-11 | tba | tba |
|
|Proxmox Backup 2.x | Debian 11 (Bullseye) | 2021-07 | tba | tba |
|
||||||
+-----------------------+--------------------+---------------+------------+--------------------+
|
+-----------------------+----------------------+---------------+------------+--------------------+
|
||||||
|
|Proxmox Backup 1.x | Debian 10 (Buster) | 2020-11 | ~Q2/2022 | Q2-Q3/2022 |
|
||||||
|
+-----------------------+----------------------+---------------+------------+--------------------+
|
||||||
|
|
||||||
|
|
||||||
Can I copy or synchronize my datastore to another location?
|
Can I copy or synchronize my datastore to another location?
|
||||||
|
@ -51,7 +51,7 @@ data:
|
|||||||
|
|
||||||
* - ``MAGIC: [u8; 8]``
|
* - ``MAGIC: [u8; 8]``
|
||||||
* - ``CRC32: [u8; 4]``
|
* - ``CRC32: [u8; 4]``
|
||||||
* - ``ÌV: [u8; 16]``
|
* - ``IV: [u8; 16]``
|
||||||
* - ``TAG: [u8; 16]``
|
* - ``TAG: [u8; 16]``
|
||||||
* - ``Data: (max 16MiB)``
|
* - ``Data: (max 16MiB)``
|
||||||
|
|
||||||
|
@ -19,7 +19,7 @@ for various management tasks such as disk management.
|
|||||||
`Proxmox Backup`_ without the server part.
|
`Proxmox Backup`_ without the server part.
|
||||||
|
|
||||||
The disk image (ISO file) provided by Proxmox includes a complete Debian system
|
The disk image (ISO file) provided by Proxmox includes a complete Debian system
|
||||||
("buster" for version 1.x) as well as all necessary packages for the `Proxmox Backup`_ server.
|
as well as all necessary packages for the `Proxmox Backup`_ server.
|
||||||
|
|
||||||
The installer will guide you through the setup process and allow
|
The installer will guide you through the setup process and allow
|
||||||
you to partition the local disk(s), apply basic system configurations
|
you to partition the local disk(s), apply basic system configurations
|
||||||
|
@ -34,17 +34,7 @@
|
|||||||
</style>
|
</style>
|
||||||
<link rel="stylesheet" type="text/css" href="font-awesome/css/font-awesome.css"/>
|
<link rel="stylesheet" type="text/css" href="font-awesome/css/font-awesome.css"/>
|
||||||
<script type="text/javascript" src="extjs/ext-all.js"></script>
|
<script type="text/javascript" src="extjs/ext-all.js"></script>
|
||||||
|
<script type="text/javascript" src="lto-barcode-generator.js"></script>
|
||||||
<script type="text/javascript" src="code39.js"></script>
|
|
||||||
<script type="text/javascript" src="prefix-field.js"></script>
|
|
||||||
<script type="text/javascript" src="label-style.js"></script>
|
|
||||||
<script type="text/javascript" src="tape-type.js"></script>
|
|
||||||
<script type="text/javascript" src="paper-size.js"></script>
|
|
||||||
<script type="text/javascript" src="page-layout.js"></script>
|
|
||||||
<script type="text/javascript" src="page-calibration.js"></script>
|
|
||||||
<script type="text/javascript" src="label-list.js"></script>
|
|
||||||
<script type="text/javascript" src="label-setup.js"></script>
|
|
||||||
<script type="text/javascript" src="lto-barcode.js"></script>
|
|
||||||
</head>
|
</head>
|
||||||
<body>
|
<body>
|
||||||
</body>
|
</body>
|
||||||
|
@ -1,7 +1,5 @@
|
|||||||
// FIXME: HACK! Makes scrolling in number spinner work again. fixed in ExtJS >= 6.1
|
// for toolkit.js
|
||||||
if (Ext.isFirefox) {
|
function gettext(val) { return val; };
|
||||||
Ext.$eventNameMap.DOMMouseScroll = 'DOMMouseScroll';
|
|
||||||
}
|
|
||||||
|
|
||||||
function draw_labels(target_id, label_list, page_layout, calibration) {
|
function draw_labels(target_id, label_list, page_layout, calibration) {
|
||||||
let max_labels = compute_max_labels(page_layout);
|
let max_labels = compute_max_labels(page_layout);
|
||||||
|
@ -17,15 +17,13 @@ update``.
|
|||||||
.. code-block:: sources.list
|
.. code-block:: sources.list
|
||||||
:caption: File: ``/etc/apt/sources.list``
|
:caption: File: ``/etc/apt/sources.list``
|
||||||
|
|
||||||
deb http://ftp.debian.org/debian buster main contrib
|
deb http://ftp.debian.org/debian bullseye main contrib
|
||||||
deb http://ftp.debian.org/debian buster-updates main contrib
|
deb http://ftp.debian.org/debian bullseye-updates main contrib
|
||||||
|
|
||||||
# security updates
|
# security updates
|
||||||
deb http://security.debian.org/debian-security buster/updates main contrib
|
deb http://security.debian.org/debian-security bullseye-security main contrib
|
||||||
|
|
||||||
|
|
||||||
.. FIXME for 7.0: change security update suite to bullseye-security
|
|
||||||
|
|
||||||
In addition, you need a package repository from Proxmox to get Proxmox Backup
|
In addition, you need a package repository from Proxmox to get Proxmox Backup
|
||||||
updates.
|
updates.
|
||||||
|
|
||||||
@ -45,31 +43,21 @@ key with the following commands:
|
|||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# wget http://download.proxmox.com/debian/proxmox-ve-release-6.x.gpg -O /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
|
# wget https://enterprise.proxmox.com/debian/proxmox-release-bullseye.gpg -O /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
|
||||||
|
|
||||||
Verify the SHA512 checksum afterwards with:
|
Verify the SHA512 checksum afterwards with the expected output below:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# sha512sum /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
|
# sha512sum /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
|
||||||
|
7fb03ec8a1675723d2853b84aa4fdb49a46a3bb72b9951361488bfd19b29aab0a789a4f8c7406e71a69aabbc727c936d3549731c4659ffa1a08f44db8fdcebfa /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
|
||||||
|
|
||||||
The output should be:
|
and the md5sum, with the expected output below:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
acca6f416917e8e11490a08a1e2842d500b3a5d9f322c6319db0927b2901c3eae23cfb5cd5df6facf2b57399d3cfa52ad7769ebdd75d9b204549ca147da52626 /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
|
# md5sum /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
|
||||||
|
bcc35c7173e0845c0d6ad6470b70f50e /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
|
||||||
and the md5sum:
|
|
||||||
|
|
||||||
.. code-block:: console
|
|
||||||
|
|
||||||
# md5sum /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
|
|
||||||
|
|
||||||
Here, the output should be:
|
|
||||||
|
|
||||||
.. code-block:: console
|
|
||||||
|
|
||||||
f3f6c5a3a67baf38ad178e5ff1ee270c /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
|
|
||||||
|
|
||||||
.. _sysadmin_package_repos_enterprise:
|
.. _sysadmin_package_repos_enterprise:
|
||||||
|
|
||||||
@ -84,7 +72,7 @@ enabled by default:
|
|||||||
.. code-block:: sources.list
|
.. code-block:: sources.list
|
||||||
:caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
|
:caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
|
||||||
|
|
||||||
deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
|
deb https://enterprise.proxmox.com/debian/pbs bullseye pbs-enterprise
|
||||||
|
|
||||||
|
|
||||||
To never miss important security fixes, the superuser (``root@pam`` user) is
|
To never miss important security fixes, the superuser (``root@pam`` user) is
|
||||||
@ -114,15 +102,15 @@ We recommend to configure this repository in ``/etc/apt/sources.list``.
|
|||||||
.. code-block:: sources.list
|
.. code-block:: sources.list
|
||||||
:caption: File: ``/etc/apt/sources.list``
|
:caption: File: ``/etc/apt/sources.list``
|
||||||
|
|
||||||
deb http://ftp.debian.org/debian buster main contrib
|
deb http://ftp.debian.org/debian bullseye main contrib
|
||||||
deb http://ftp.debian.org/debian buster-updates main contrib
|
deb http://ftp.debian.org/debian bullseye-updates main contrib
|
||||||
|
|
||||||
# PBS pbs-no-subscription repository provided by proxmox.com,
|
# PBS pbs-no-subscription repository provided by proxmox.com,
|
||||||
# NOT recommended for production use
|
# NOT recommended for production use
|
||||||
deb http://download.proxmox.com/debian/pbs buster pbs-no-subscription
|
deb http://download.proxmox.com/debian/pbs bullseye pbs-no-subscription
|
||||||
|
|
||||||
# security updates
|
# security updates
|
||||||
deb http://security.debian.org/debian-security buster/updates main contrib
|
deb http://security.debian.org/debian-security bullseye-security main contrib
|
||||||
|
|
||||||
|
|
||||||
`Proxmox Backup`_ Test Repository
|
`Proxmox Backup`_ Test Repository
|
||||||
@ -140,7 +128,7 @@ You can access this repository by adding the following line to
|
|||||||
.. code-block:: sources.list
|
.. code-block:: sources.list
|
||||||
:caption: sources.list entry for ``pbstest``
|
:caption: sources.list entry for ``pbstest``
|
||||||
|
|
||||||
deb http://download.proxmox.com/debian/pbs buster pbstest
|
deb http://download.proxmox.com/debian/pbs bullseye pbstest
|
||||||
|
|
||||||
.. _package_repositories_client_only:
|
.. _package_repositories_client_only:
|
||||||
|
|
||||||
@ -161,6 +149,26 @@ APT-based Proxmox Backup Client Repository
|
|||||||
For modern Linux distributions using `apt` as package manager, like all Debian
|
For modern Linux distributions using `apt` as package manager, like all Debian
|
||||||
and Ubuntu Derivative do, you may be able to use the APT-based repository.
|
and Ubuntu Derivative do, you may be able to use the APT-based repository.
|
||||||
|
|
||||||
|
In order to configure this repository you need to first :ref:`setup the Proxmox
|
||||||
|
release key <package_repos_secure_apt>`. After that, add the repository URL to
|
||||||
|
the APT sources lists.
|
||||||
|
|
||||||
|
**Repositories for Debian 11 (Bullseye) based releases**
|
||||||
|
|
||||||
|
This repository is tested with:
|
||||||
|
|
||||||
|
- Debian Bullseye
|
||||||
|
|
||||||
|
Edit the file ``/etc/apt/sources.list.d/pbs-client.list`` and add the following
|
||||||
|
snipped
|
||||||
|
|
||||||
|
.. code-block:: sources.list
|
||||||
|
:caption: File: ``/etc/apt/sources.list``
|
||||||
|
|
||||||
|
deb http://download.proxmox.com/debian/pbs-client bullseye main
|
||||||
|
|
||||||
|
**Repositories for Debian 10 (Buster) based releases**
|
||||||
|
|
||||||
This repository is tested with:
|
This repository is tested with:
|
||||||
|
|
||||||
- Debian Buster
|
- Debian Buster
|
||||||
@ -168,9 +176,6 @@ This repository is tested with:
|
|||||||
|
|
||||||
It may work with older, and should work with more recent released versions.
|
It may work with older, and should work with more recent released versions.
|
||||||
|
|
||||||
In order to configure this repository you need to first :ref:`setup the Proxmox
|
|
||||||
release key <package_repos_secure_apt>`. After that, add the repository URL to
|
|
||||||
the APT sources lists.
|
|
||||||
Edit the file ``/etc/apt/sources.list.d/pbs-client.list`` and add the following
|
Edit the file ``/etc/apt/sources.list.d/pbs-client.list`` and add the following
|
||||||
snipped
|
snipped
|
||||||
|
|
||||||
|
14
docs/proxmox-backup-debug/description.rst
Normal file
14
docs/proxmox-backup-debug/description.rst
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
Implements debugging functionality to inspect Proxmox Backup datastore
|
||||||
|
files, verify the integrity of chunks.
|
||||||
|
|
||||||
|
Also contains an 'api' subcommand where arbitrary api paths can be called
|
||||||
|
(get/create/set/delete) as well as display their parameters (usage) and
|
||||||
|
their child-links (ls).
|
||||||
|
|
||||||
|
By default, it connects to the proxmox-backup-proxy on localhost via https,
|
||||||
|
but by setting the environment variable `PROXMOX_DEBUG_API_CODE` to `1` the
|
||||||
|
tool directly calls the corresponding code.
|
||||||
|
|
||||||
|
.. WARNING:: Using `PROXMOX_DEBUG_API_CODE` can be dangerous and is only intended
|
||||||
|
for debugging purposes. It is not intended for use on a production system.
|
||||||
|
|
33
docs/proxmox-backup-debug/man1.rst
Normal file
33
docs/proxmox-backup-debug/man1.rst
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
==========================
|
||||||
|
proxmox-backup-debug
|
||||||
|
==========================
|
||||||
|
|
||||||
|
.. include:: ../epilog.rst
|
||||||
|
|
||||||
|
-------------------------------------------------------------
|
||||||
|
Debugging command line tool for Backup and Restore
|
||||||
|
-------------------------------------------------------------
|
||||||
|
|
||||||
|
:Author: |AUTHOR|
|
||||||
|
:Version: Version |VERSION|
|
||||||
|
:Manual section: 1
|
||||||
|
|
||||||
|
|
||||||
|
Synopsis
|
||||||
|
==========
|
||||||
|
|
||||||
|
.. include:: synopsis.rst
|
||||||
|
|
||||||
|
Common Options
|
||||||
|
==============
|
||||||
|
|
||||||
|
.. include:: ../output-format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Description
|
||||||
|
============
|
||||||
|
|
||||||
|
.. include:: description.rst
|
||||||
|
|
||||||
|
|
||||||
|
.. include:: ../pbs-copyright.rst
|
@ -1,7 +1,5 @@
|
|||||||
// FIXME: HACK! Makes scrolling in number spinner work again. fixed in ExtJS >= 6.1
|
// for Toolkit.js
|
||||||
if (Ext.isFirefox) {
|
function gettext(val) { return val; };
|
||||||
Ext.$eventNameMap.DOMMouseScroll = 'DOMMouseScroll';
|
|
||||||
}
|
|
||||||
|
|
||||||
Ext.onReady(function() {
|
Ext.onReady(function() {
|
||||||
const NOW = new Date();
|
const NOW = new Date();
|
||||||
@ -37,7 +35,6 @@ Ext.onReady(function() {
|
|||||||
|
|
||||||
editable: true,
|
editable: true,
|
||||||
|
|
||||||
displayField: 'text',
|
|
||||||
valueField: 'value',
|
valueField: 'value',
|
||||||
queryMode: 'local',
|
queryMode: 'local',
|
||||||
|
|
@ -3,9 +3,6 @@
|
|||||||
Tape Backup
|
Tape Backup
|
||||||
===========
|
===========
|
||||||
|
|
||||||
.. CAUTION:: Tape Backup is a technical preview feature, not meant for
|
|
||||||
production use.
|
|
||||||
|
|
||||||
.. image:: images/screenshots/pbs-gui-tape-changer-overview.png
|
.. image:: images/screenshots/pbs-gui-tape-changer-overview.png
|
||||||
:align: right
|
:align: right
|
||||||
:alt: Tape Backup: Tape changer overview
|
:alt: Tape Backup: Tape changer overview
|
||||||
@ -848,6 +845,17 @@ Update Inventory
|
|||||||
Restore Catalog
|
Restore Catalog
|
||||||
~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
To restore a catalog from an existing tape, just insert the tape into the drive
|
||||||
|
and execute:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-tape catalog
|
||||||
|
|
||||||
|
|
||||||
|
You can restore from a tape even without an existing catalog, but only the
|
||||||
|
whole media set. If you do this, the catalog will be automatically created.
|
||||||
|
|
||||||
|
|
||||||
Encryption Key Management
|
Encryption Key Management
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
@ -164,3 +164,66 @@ Verification of encrypted chunks
|
|||||||
For encrypted chunks, only the checksum of the original (plaintext) data is
|
For encrypted chunks, only the checksum of the original (plaintext) data is
|
||||||
available, making it impossible for the server (without the encryption key), to
|
available, making it impossible for the server (without the encryption key), to
|
||||||
verify its content against it. Instead only the CRC-32 checksum gets checked.
|
verify its content against it. Instead only the CRC-32 checksum gets checked.
|
||||||
|
|
||||||
|
Troubleshooting
|
||||||
|
---------------
|
||||||
|
|
||||||
|
Index files(.fidx, .didx) contain information about how to rebuild a file, more
|
||||||
|
precisely, they contain an ordered list of references to the chunks the original
|
||||||
|
file was split up in. If there is something wrong with a snapshot it might be
|
||||||
|
useful to find out which chunks are referenced in this specific snapshot, and
|
||||||
|
check wheather all of them are present and intact. The command for getting the
|
||||||
|
list of referenced chunks could look something like this:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-debug inspect file drive-scsi0.img.fidx
|
||||||
|
|
||||||
|
The same command can be used to look at .blob file, without ``--decode`` just
|
||||||
|
the size and the encryption type, if any, is printed. If ``--decode`` is set the
|
||||||
|
blob file is decoded into the specified file('-' will decode it directly into
|
||||||
|
stdout).
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-debug inspect file qemu-server.conf.blob --decode -
|
||||||
|
|
||||||
|
would print the decoded contents of `qemu-server.conf.blob`. If the file you're
|
||||||
|
trying to inspect is encrypted, a path to the keyfile has to be provided using
|
||||||
|
``--keyfile``.
|
||||||
|
|
||||||
|
Checking in which index files a specific chunk file is referenced can be done
|
||||||
|
with:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-debug inspect chunk b531d3ffc9bd7c65748a61198c060678326a431db7eded874c327b7986e595e0 --reference-filter /path/in/a/datastore/directory
|
||||||
|
|
||||||
|
Here ``--reference-filter`` specifies where index files should be searched, this
|
||||||
|
can be an arbitrary path. If, for some reason, the filename of the chunk was
|
||||||
|
changed you can explicitly specify the digest using ``--digest``, by default the
|
||||||
|
chunk filename is used as the digest to look for. Specifying no
|
||||||
|
``--reference-filter`` will just print the CRC and encryption status of the
|
||||||
|
chunk. You can also decode chunks, to do so ``--decode`` has to be set. If the
|
||||||
|
chunk is encrypted a ``--keyfile`` has to be provided for decoding.
|
||||||
|
|
||||||
|
Restore without a running PBS
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
It is possible to restore spefiic files of snapshots without a running PBS using
|
||||||
|
the `recover` sub-command, provided you have access to the intact index and
|
||||||
|
chunk files. Note that you also need the corresponding key file if the backup
|
||||||
|
was encrypted.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-debug recover index drive-scsi0.img.fidx /path/to/.chunks
|
||||||
|
|
||||||
|
In above example the `/path/to/.chunks` argument is the path to the directory
|
||||||
|
that contains contains the chunks, and `drive-scsi0.img.fidx` is the index-file
|
||||||
|
of the file you'd lile to restore. Both paths can be absolute or relative. With
|
||||||
|
``--skip-crc`` it is possible to disable the crc checks of the chunks, this will
|
||||||
|
speed up the process slightly and allows for trying to restore (partially)
|
||||||
|
corrupt chunks. It's recommended to always try without the skip-CRC option
|
||||||
|
first.
|
||||||
|
|
||||||
|
@ -1 +1 @@
|
|||||||
deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
|
deb https://enterprise.proxmox.com/debian/pbs bullseye pbs-enterprise
|
||||||
|
@ -2,8 +2,8 @@ use std::io::Write;
|
|||||||
|
|
||||||
use anyhow::{Error};
|
use anyhow::{Error};
|
||||||
|
|
||||||
use proxmox_backup::api2::types::Authid;
|
use pbs_api_types::Authid;
|
||||||
use proxmox_backup::client::{HttpClient, HttpClientOptions, BackupReader};
|
use pbs_client::{HttpClient, HttpClientOptions, BackupReader};
|
||||||
|
|
||||||
pub struct DummyWriter {
|
pub struct DummyWriter {
|
||||||
bytes: usize,
|
bytes: usize,
|
||||||
@ -59,7 +59,7 @@ async fn run() -> Result<(), Error> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
|
if let Err(err) = pbs_runtime::main(run()) {
|
||||||
eprintln!("ERROR: {}", err);
|
eprintln!("ERROR: {}", err);
|
||||||
}
|
}
|
||||||
println!("DONE");
|
println!("DONE");
|
||||||
|
@ -69,7 +69,7 @@ fn send_request(
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
proxmox_backup::tools::runtime::main(run())
|
pbs_runtime::main(run())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> Result<(), Error> {
|
async fn run() -> Result<(), Error> {
|
||||||
|
@ -69,7 +69,7 @@ fn send_request(
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
proxmox_backup::tools::runtime::main(run())
|
pbs_runtime::main(run())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> Result<(), Error> {
|
async fn run() -> Result<(), Error> {
|
||||||
|
@ -6,10 +6,10 @@ use hyper::{Body, Request, Response};
|
|||||||
use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
|
use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
|
||||||
use tokio::net::{TcpListener, TcpStream};
|
use tokio::net::{TcpListener, TcpStream};
|
||||||
|
|
||||||
use proxmox_backup::configdir;
|
use pbs_buildcfg::configdir;
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
proxmox_backup::tools::runtime::main(run())
|
pbs_runtime::main(run())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> Result<(), Error> {
|
async fn run() -> Result<(), Error> {
|
||||||
|
@ -5,7 +5,7 @@ use hyper::{Body, Request, Response};
|
|||||||
use tokio::net::{TcpListener, TcpStream};
|
use tokio::net::{TcpListener, TcpStream};
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
proxmox_backup::tools::runtime::main(run())
|
pbs_runtime::main(run())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> Result<(), Error> {
|
async fn run() -> Result<(), Error> {
|
||||||
|
@ -5,7 +5,7 @@ extern crate proxmox_backup;
|
|||||||
use anyhow::{Error};
|
use anyhow::{Error};
|
||||||
use std::io::{Read, Write};
|
use std::io::{Read, Write};
|
||||||
|
|
||||||
use proxmox_backup::backup::*;
|
use pbs_datastore::Chunker;
|
||||||
|
|
||||||
struct ChunkWriter {
|
struct ChunkWriter {
|
||||||
chunker: Chunker,
|
chunker: Chunker,
|
||||||
|
@ -1,7 +1,6 @@
|
|||||||
extern crate proxmox_backup;
|
extern crate proxmox_backup;
|
||||||
|
|
||||||
//use proxmox_backup::backup::chunker::*;
|
use pbs_datastore::Chunker;
|
||||||
use proxmox_backup::backup::*;
|
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
|
|
||||||
|
@ -3,7 +3,7 @@ use futures::*;
|
|||||||
|
|
||||||
extern crate proxmox_backup;
|
extern crate proxmox_backup;
|
||||||
|
|
||||||
use proxmox_backup::backup::*;
|
use pbs_client::ChunkStream;
|
||||||
|
|
||||||
// Test Chunker with real data read from a file.
|
// Test Chunker with real data read from a file.
|
||||||
//
|
//
|
||||||
@ -13,7 +13,7 @@ use proxmox_backup::backup::*;
|
|||||||
// Note: I can currently get about 830MB/s
|
// Note: I can currently get about 830MB/s
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
|
if let Err(err) = pbs_runtime::main(run()) {
|
||||||
panic!("ERROR: {}", err);
|
panic!("ERROR: {}", err);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
use anyhow::{Error};
|
use anyhow::{Error};
|
||||||
|
|
||||||
use proxmox_backup::api2::types::Authid;
|
use pbs_client::{HttpClient, HttpClientOptions, BackupWriter};
|
||||||
use proxmox_backup::client::*;
|
use pbs_api_types::Authid;
|
||||||
|
|
||||||
async fn upload_speed() -> Result<f64, Error> {
|
async fn upload_speed() -> Result<f64, Error> {
|
||||||
|
|
||||||
@ -27,7 +27,7 @@ async fn upload_speed() -> Result<f64, Error> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
match proxmox_backup::tools::runtime::main(upload_speed()) {
|
match pbs_runtime::main(upload_speed()) {
|
||||||
Ok(mbs) => {
|
Ok(mbs) => {
|
||||||
println!("average upload speed: {} MB/s", mbs);
|
println!("average upload speed: {} MB/s", mbs);
|
||||||
}
|
}
|
||||||
|
20
pbs-api-types/Cargo.toml
Normal file
20
pbs-api-types/Cargo.toml
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
[package]
|
||||||
|
name = "pbs-api-types"
|
||||||
|
version = "0.1.0"
|
||||||
|
authors = ["Proxmox Support Team <support@proxmox.com>"]
|
||||||
|
edition = "2018"
|
||||||
|
description = "general API type helpers for PBS"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
anyhow = "1.0"
|
||||||
|
lazy_static = "1.4"
|
||||||
|
libc = "0.2"
|
||||||
|
nix = "0.19.1"
|
||||||
|
openssl = "0.10"
|
||||||
|
regex = "1.2"
|
||||||
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
|
|
||||||
|
proxmox = { version = "0.13.3", default-features = false, features = [ "api-macro" ] }
|
||||||
|
|
||||||
|
proxmox-systemd = { path = "../proxmox-systemd" }
|
||||||
|
pbs-tools = { path = "../pbs-tools" }
|
284
pbs-api-types/src/acl.rs
Normal file
284
pbs-api-types/src/acl.rs
Normal file
@ -0,0 +1,284 @@
|
|||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde::de::{value, IntoDeserializer};
|
||||||
|
|
||||||
|
use proxmox::api::api;
|
||||||
|
use proxmox::api::schema::{
|
||||||
|
ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema,
|
||||||
|
};
|
||||||
|
use proxmox::{constnamedbitmap, const_regex};
|
||||||
|
|
||||||
|
const_regex! {
|
||||||
|
pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
|
||||||
|
}
|
||||||
|
|
||||||
|
// define Privilege bitfield
|
||||||
|
|
||||||
|
constnamedbitmap! {
|
||||||
|
/// Contains a list of privilege name to privilege value mappings.
|
||||||
|
///
|
||||||
|
/// The names are used when displaying/persisting privileges anywhere, the values are used to
|
||||||
|
/// allow easy matching of privileges as bitflags.
|
||||||
|
PRIVILEGES: u64 => {
|
||||||
|
/// Sys.Audit allows knowing about the system and its status
|
||||||
|
PRIV_SYS_AUDIT("Sys.Audit");
|
||||||
|
/// Sys.Modify allows modifying system-level configuration
|
||||||
|
PRIV_SYS_MODIFY("Sys.Modify");
|
||||||
|
/// Sys.Modify allows to poweroff/reboot/.. the system
|
||||||
|
PRIV_SYS_POWER_MANAGEMENT("Sys.PowerManagement");
|
||||||
|
|
||||||
|
/// Datastore.Audit allows knowing about a datastore,
|
||||||
|
/// including reading the configuration entry and listing its contents
|
||||||
|
PRIV_DATASTORE_AUDIT("Datastore.Audit");
|
||||||
|
/// Datastore.Allocate allows creating or deleting datastores
|
||||||
|
PRIV_DATASTORE_ALLOCATE("Datastore.Allocate");
|
||||||
|
/// Datastore.Modify allows modifying a datastore and its contents
|
||||||
|
PRIV_DATASTORE_MODIFY("Datastore.Modify");
|
||||||
|
/// Datastore.Read allows reading arbitrary backup contents
|
||||||
|
PRIV_DATASTORE_READ("Datastore.Read");
|
||||||
|
/// Allows verifying a datastore
|
||||||
|
PRIV_DATASTORE_VERIFY("Datastore.Verify");
|
||||||
|
|
||||||
|
/// Datastore.Backup allows Datastore.Read|Verify and creating new snapshots,
|
||||||
|
/// but also requires backup ownership
|
||||||
|
PRIV_DATASTORE_BACKUP("Datastore.Backup");
|
||||||
|
/// Datastore.Prune allows deleting snapshots,
|
||||||
|
/// but also requires backup ownership
|
||||||
|
PRIV_DATASTORE_PRUNE("Datastore.Prune");
|
||||||
|
|
||||||
|
/// Permissions.Modify allows modifying ACLs
|
||||||
|
PRIV_PERMISSIONS_MODIFY("Permissions.Modify");
|
||||||
|
|
||||||
|
/// Remote.Audit allows reading remote.cfg and sync.cfg entries
|
||||||
|
PRIV_REMOTE_AUDIT("Remote.Audit");
|
||||||
|
/// Remote.Modify allows modifying remote.cfg
|
||||||
|
PRIV_REMOTE_MODIFY("Remote.Modify");
|
||||||
|
/// Remote.Read allows reading data from a configured `Remote`
|
||||||
|
PRIV_REMOTE_READ("Remote.Read");
|
||||||
|
|
||||||
|
/// Sys.Console allows access to the system's console
|
||||||
|
PRIV_SYS_CONSOLE("Sys.Console");
|
||||||
|
|
||||||
|
/// Tape.Audit allows reading tape backup configuration and status
|
||||||
|
PRIV_TAPE_AUDIT("Tape.Audit");
|
||||||
|
/// Tape.Modify allows modifying tape backup configuration
|
||||||
|
PRIV_TAPE_MODIFY("Tape.Modify");
|
||||||
|
/// Tape.Write allows writing tape media
|
||||||
|
PRIV_TAPE_WRITE("Tape.Write");
|
||||||
|
/// Tape.Read allows reading tape backup configuration and media contents
|
||||||
|
PRIV_TAPE_READ("Tape.Read");
|
||||||
|
|
||||||
|
/// Realm.Allocate allows viewing, creating, modifying and deleting realms
|
||||||
|
PRIV_REALM_ALLOCATE("Realm.Allocate");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Admin always has all privileges. It can do everything except a few actions
|
||||||
|
/// which are limited to the 'root@pam` superuser
|
||||||
|
pub const ROLE_ADMIN: u64 = std::u64::MAX;
|
||||||
|
|
||||||
|
/// NoAccess can be used to remove privileges from specific (sub-)paths
|
||||||
|
pub const ROLE_NO_ACCESS: u64 = 0;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Audit can view configuration and status information, but not modify it.
|
||||||
|
pub const ROLE_AUDIT: u64 = 0
|
||||||
|
| PRIV_SYS_AUDIT
|
||||||
|
| PRIV_DATASTORE_AUDIT;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Datastore.Admin can do anything on the datastore.
|
||||||
|
pub const ROLE_DATASTORE_ADMIN: u64 = 0
|
||||||
|
| PRIV_DATASTORE_AUDIT
|
||||||
|
| PRIV_DATASTORE_MODIFY
|
||||||
|
| PRIV_DATASTORE_READ
|
||||||
|
| PRIV_DATASTORE_VERIFY
|
||||||
|
| PRIV_DATASTORE_BACKUP
|
||||||
|
| PRIV_DATASTORE_PRUNE;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Datastore.Reader can read/verify datastore content and do restore
|
||||||
|
pub const ROLE_DATASTORE_READER: u64 = 0
|
||||||
|
| PRIV_DATASTORE_AUDIT
|
||||||
|
| PRIV_DATASTORE_VERIFY
|
||||||
|
| PRIV_DATASTORE_READ;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Datastore.Backup can do backup and restore, but no prune.
|
||||||
|
pub const ROLE_DATASTORE_BACKUP: u64 = 0
|
||||||
|
| PRIV_DATASTORE_BACKUP;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Datastore.PowerUser can do backup, restore, and prune.
|
||||||
|
pub const ROLE_DATASTORE_POWERUSER: u64 = 0
|
||||||
|
| PRIV_DATASTORE_PRUNE
|
||||||
|
| PRIV_DATASTORE_BACKUP;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Datastore.Audit can audit the datastore.
|
||||||
|
pub const ROLE_DATASTORE_AUDIT: u64 = 0
|
||||||
|
| PRIV_DATASTORE_AUDIT;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Remote.Audit can audit the remote
|
||||||
|
pub const ROLE_REMOTE_AUDIT: u64 = 0
|
||||||
|
| PRIV_REMOTE_AUDIT;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Remote.Admin can do anything on the remote.
|
||||||
|
pub const ROLE_REMOTE_ADMIN: u64 = 0
|
||||||
|
| PRIV_REMOTE_AUDIT
|
||||||
|
| PRIV_REMOTE_MODIFY
|
||||||
|
| PRIV_REMOTE_READ;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Remote.SyncOperator can do read and prune on the remote.
|
||||||
|
pub const ROLE_REMOTE_SYNC_OPERATOR: u64 = 0
|
||||||
|
| PRIV_REMOTE_AUDIT
|
||||||
|
| PRIV_REMOTE_READ;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Tape.Audit can audit the tape backup configuration and media content
|
||||||
|
pub const ROLE_TAPE_AUDIT: u64 = 0
|
||||||
|
| PRIV_TAPE_AUDIT;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Tape.Admin can do anything on the tape backup
|
||||||
|
pub const ROLE_TAPE_ADMIN: u64 = 0
|
||||||
|
| PRIV_TAPE_AUDIT
|
||||||
|
| PRIV_TAPE_MODIFY
|
||||||
|
| PRIV_TAPE_READ
|
||||||
|
| PRIV_TAPE_WRITE;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Tape.Operator can do tape backup and restore (but no configuration changes)
|
||||||
|
pub const ROLE_TAPE_OPERATOR: u64 = 0
|
||||||
|
| PRIV_TAPE_AUDIT
|
||||||
|
| PRIV_TAPE_READ
|
||||||
|
| PRIV_TAPE_WRITE;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Tape.Reader can do read and inspect tape content
|
||||||
|
pub const ROLE_TAPE_READER: u64 = 0
|
||||||
|
| PRIV_TAPE_AUDIT
|
||||||
|
| PRIV_TAPE_READ;
|
||||||
|
|
||||||
|
/// NoAccess can be used to remove privileges from specific (sub-)paths
|
||||||
|
pub const ROLE_NAME_NO_ACCESS: &str = "NoAccess";
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
type_text: "<role>",
|
||||||
|
)]
|
||||||
|
#[repr(u64)]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
/// Enum representing roles via their [PRIVILEGES] combination.
|
||||||
|
///
|
||||||
|
/// Since privileges are implemented as bitflags, each unique combination of privileges maps to a
|
||||||
|
/// single, unique `u64` value that is used in this enum definition.
|
||||||
|
pub enum Role {
|
||||||
|
/// Administrator
|
||||||
|
Admin = ROLE_ADMIN,
|
||||||
|
/// Auditor
|
||||||
|
Audit = ROLE_AUDIT,
|
||||||
|
/// Disable Access
|
||||||
|
NoAccess = ROLE_NO_ACCESS,
|
||||||
|
/// Datastore Administrator
|
||||||
|
DatastoreAdmin = ROLE_DATASTORE_ADMIN,
|
||||||
|
/// Datastore Reader (inspect datastore content and do restores)
|
||||||
|
DatastoreReader = ROLE_DATASTORE_READER,
|
||||||
|
/// Datastore Backup (backup and restore owned backups)
|
||||||
|
DatastoreBackup = ROLE_DATASTORE_BACKUP,
|
||||||
|
/// Datastore PowerUser (backup, restore and prune owned backup)
|
||||||
|
DatastorePowerUser = ROLE_DATASTORE_POWERUSER,
|
||||||
|
/// Datastore Auditor
|
||||||
|
DatastoreAudit = ROLE_DATASTORE_AUDIT,
|
||||||
|
/// Remote Auditor
|
||||||
|
RemoteAudit = ROLE_REMOTE_AUDIT,
|
||||||
|
/// Remote Administrator
|
||||||
|
RemoteAdmin = ROLE_REMOTE_ADMIN,
|
||||||
|
/// Syncronisation Opertator
|
||||||
|
RemoteSyncOperator = ROLE_REMOTE_SYNC_OPERATOR,
|
||||||
|
/// Tape Auditor
|
||||||
|
TapeAudit = ROLE_TAPE_AUDIT,
|
||||||
|
/// Tape Administrator
|
||||||
|
TapeAdmin = ROLE_TAPE_ADMIN,
|
||||||
|
/// Tape Operator
|
||||||
|
TapeOperator = ROLE_TAPE_OPERATOR,
|
||||||
|
/// Tape Reader
|
||||||
|
TapeReader = ROLE_TAPE_READER,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
impl FromStr for Role {
|
||||||
|
type Err = value::Error;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
Self::deserialize(s.into_deserializer())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const ACL_PATH_FORMAT: ApiStringFormat =
|
||||||
|
ApiStringFormat::Pattern(&ACL_PATH_REGEX);
|
||||||
|
|
||||||
|
pub const ACL_PATH_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"Access control path.")
|
||||||
|
.format(&ACL_PATH_FORMAT)
|
||||||
|
.min_length(1)
|
||||||
|
.max_length(128)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const ACL_PROPAGATE_SCHEMA: Schema = BooleanSchema::new(
|
||||||
|
"Allow to propagate (inherit) permissions.")
|
||||||
|
.default(true)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"Type of 'ugid' property.")
|
||||||
|
.format(&ApiStringFormat::Enum(&[
|
||||||
|
EnumEntry::new("user", "User"),
|
||||||
|
EnumEntry::new("group", "Group")]))
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
propagate: {
|
||||||
|
schema: ACL_PROPAGATE_SCHEMA,
|
||||||
|
},
|
||||||
|
path: {
|
||||||
|
schema: ACL_PATH_SCHEMA,
|
||||||
|
},
|
||||||
|
ugid_type: {
|
||||||
|
schema: ACL_UGID_TYPE_SCHEMA,
|
||||||
|
},
|
||||||
|
ugid: {
|
||||||
|
type: String,
|
||||||
|
description: "User or Group ID.",
|
||||||
|
},
|
||||||
|
roleid: {
|
||||||
|
type: Role,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
/// ACL list entry.
|
||||||
|
pub struct AclListItem {
|
||||||
|
pub path: String,
|
||||||
|
pub ugid: String,
|
||||||
|
pub ugid_type: String,
|
||||||
|
pub propagate: bool,
|
||||||
|
pub roleid: String,
|
||||||
|
}
|
57
pbs-api-types/src/crypto.rs
Normal file
57
pbs-api-types/src/crypto.rs
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
use std::fmt::{self, Display};
|
||||||
|
|
||||||
|
use anyhow::Error;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::api::api;
|
||||||
|
|
||||||
|
use pbs_tools::format::{as_fingerprint, bytes_as_fingerprint};
|
||||||
|
|
||||||
|
#[api(default: "encrypt")]
|
||||||
|
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
/// Defines whether data is encrypted (using an AEAD cipher), only signed, or neither.
|
||||||
|
pub enum CryptMode {
|
||||||
|
/// Don't encrypt.
|
||||||
|
None,
|
||||||
|
/// Encrypt.
|
||||||
|
Encrypt,
|
||||||
|
/// Only sign.
|
||||||
|
SignOnly,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Eq, PartialEq, Hash, Clone, Deserialize, Serialize)]
|
||||||
|
#[serde(transparent)]
|
||||||
|
/// 32-byte fingerprint, usually calculated with SHA256.
|
||||||
|
pub struct Fingerprint {
|
||||||
|
#[serde(with = "bytes_as_fingerprint")]
|
||||||
|
bytes: [u8; 32],
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Fingerprint {
|
||||||
|
pub fn new(bytes: [u8; 32]) -> Self {
|
||||||
|
Self { bytes }
|
||||||
|
}
|
||||||
|
pub fn bytes(&self) -> &[u8; 32] {
|
||||||
|
&self.bytes
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Display as short key ID
|
||||||
|
impl Display for Fingerprint {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
write!(f, "{}", as_fingerprint(&self.bytes[0..8]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::str::FromStr for Fingerprint {
|
||||||
|
type Err = Error;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Error> {
|
||||||
|
let mut tmp = s.to_string();
|
||||||
|
tmp.retain(|c| c != ':');
|
||||||
|
let bytes = proxmox::tools::hex_to_digest(&tmp)?;
|
||||||
|
Ok(Fingerprint::new(bytes))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
622
pbs-api-types/src/datastore.rs
Normal file
622
pbs-api-types/src/datastore.rs
Normal file
@ -0,0 +1,622 @@
|
|||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::api::api;
|
||||||
|
use proxmox::api::schema::{
|
||||||
|
ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType, Schema,
|
||||||
|
StringSchema, Updater,
|
||||||
|
};
|
||||||
|
|
||||||
|
use proxmox::const_regex;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
PROXMOX_SAFE_ID_FORMAT, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, CryptMode, UPID,
|
||||||
|
Fingerprint, Userid, Authid,
|
||||||
|
GC_SCHEDULE_SCHEMA, DATASTORE_NOTIFY_STRING_SCHEMA, PRUNE_SCHEDULE_SCHEMA,
|
||||||
|
|
||||||
|
};
|
||||||
|
|
||||||
|
const_regex!{
|
||||||
|
pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");
|
||||||
|
|
||||||
|
pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$");
|
||||||
|
|
||||||
|
pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$");
|
||||||
|
|
||||||
|
pub GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$");
|
||||||
|
|
||||||
|
pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";
|
||||||
|
|
||||||
|
pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$");
|
||||||
|
|
||||||
|
pub DATASTORE_MAP_REGEX = concat!(r"(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!());
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
|
||||||
|
|
||||||
|
pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name")
|
||||||
|
.min_length(1)
|
||||||
|
.max_length(4096)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema = StringSchema::new("Backup archive name.")
|
||||||
|
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX);
|
||||||
|
|
||||||
|
pub const BACKUP_ID_SCHEMA: Schema = StringSchema::new("Backup ID.")
|
||||||
|
.format(&BACKUP_ID_FORMAT)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const BACKUP_TYPE_SCHEMA: Schema = StringSchema::new("Backup type.")
|
||||||
|
.format(&ApiStringFormat::Enum(&[
|
||||||
|
EnumEntry::new("vm", "Virtual Machine Backup"),
|
||||||
|
EnumEntry::new("ct", "Container Backup"),
|
||||||
|
EnumEntry::new("host", "Host Backup"),
|
||||||
|
]))
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const BACKUP_TIME_SCHEMA: Schema = IntegerSchema::new("Backup time (Unix epoch.)")
|
||||||
|
.minimum(1_547_797_308)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
|
||||||
|
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||||
|
.min_length(3)
|
||||||
|
.max_length(32)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const CHUNK_DIGEST_SCHEMA: Schema = StringSchema::new("Chunk digest (SHA256).")
|
||||||
|
.format(&CHUNK_DIGEST_FORMAT)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const DATASTORE_MAP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DATASTORE_MAP_REGEX);
|
||||||
|
|
||||||
|
pub const DATASTORE_MAP_SCHEMA: Schema = StringSchema::new("Datastore mapping.")
|
||||||
|
.format(&DATASTORE_MAP_FORMAT)
|
||||||
|
.min_length(3)
|
||||||
|
.max_length(65)
|
||||||
|
.type_text("(<source>=)?<target>")
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const DATASTORE_MAP_ARRAY_SCHEMA: Schema = ArraySchema::new(
|
||||||
|
"Datastore mapping list.", &DATASTORE_MAP_SCHEMA)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"A list of Datastore mappings (or single datastore), comma separated. \
|
||||||
|
For example 'a=b,e' maps the source datastore 'a' to target 'b and \
|
||||||
|
all other sources to the default 'e'. If no default is given, only the \
|
||||||
|
specified sources are mapped.")
|
||||||
|
.format(&ApiStringFormat::PropertyString(&DATASTORE_MAP_ARRAY_SCHEMA))
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new("Number of daily backups to keep.")
|
||||||
|
.minimum(1)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema =
|
||||||
|
IntegerSchema::new("Number of hourly backups to keep.")
|
||||||
|
.minimum(1)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new("Number of backups to keep.")
|
||||||
|
.minimum(1)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema =
|
||||||
|
IntegerSchema::new("Number of monthly backups to keep.")
|
||||||
|
.minimum(1)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema =
|
||||||
|
IntegerSchema::new("Number of weekly backups to keep.")
|
||||||
|
.minimum(1)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema =
|
||||||
|
IntegerSchema::new("Number of yearly backups to keep.")
|
||||||
|
.minimum(1)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
"keep-last": {
|
||||||
|
schema: PRUNE_SCHEMA_KEEP_LAST,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"keep-hourly": {
|
||||||
|
schema: PRUNE_SCHEMA_KEEP_HOURLY,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"keep-daily": {
|
||||||
|
schema: PRUNE_SCHEMA_KEEP_DAILY,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"keep-weekly": {
|
||||||
|
schema: PRUNE_SCHEMA_KEEP_WEEKLY,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"keep-monthly": {
|
||||||
|
schema: PRUNE_SCHEMA_KEEP_MONTHLY,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"keep-yearly": {
|
||||||
|
schema: PRUNE_SCHEMA_KEEP_YEARLY,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize, Deserialize, Default)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
/// Common pruning options
|
||||||
|
pub struct PruneOptions {
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub keep_last: Option<u64>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub keep_hourly: Option<u64>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub keep_daily: Option<u64>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub keep_weekly: Option<u64>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub keep_monthly: Option<u64>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub keep_yearly: Option<u64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: DATASTORE_SCHEMA,
|
||||||
|
},
|
||||||
|
path: {
|
||||||
|
schema: DIR_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
"notify-user": {
|
||||||
|
optional: true,
|
||||||
|
type: Userid,
|
||||||
|
},
|
||||||
|
"notify": {
|
||||||
|
optional: true,
|
||||||
|
schema: DATASTORE_NOTIFY_STRING_SCHEMA,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
"gc-schedule": {
|
||||||
|
optional: true,
|
||||||
|
schema: GC_SCHEDULE_SCHEMA,
|
||||||
|
},
|
||||||
|
"prune-schedule": {
|
||||||
|
optional: true,
|
||||||
|
schema: PRUNE_SCHEDULE_SCHEMA,
|
||||||
|
},
|
||||||
|
"keep-last": {
|
||||||
|
optional: true,
|
||||||
|
schema: PRUNE_SCHEMA_KEEP_LAST,
|
||||||
|
},
|
||||||
|
"keep-hourly": {
|
||||||
|
optional: true,
|
||||||
|
schema: PRUNE_SCHEMA_KEEP_HOURLY,
|
||||||
|
},
|
||||||
|
"keep-daily": {
|
||||||
|
optional: true,
|
||||||
|
schema: PRUNE_SCHEMA_KEEP_DAILY,
|
||||||
|
},
|
||||||
|
"keep-weekly": {
|
||||||
|
optional: true,
|
||||||
|
schema: PRUNE_SCHEMA_KEEP_WEEKLY,
|
||||||
|
},
|
||||||
|
"keep-monthly": {
|
||||||
|
optional: true,
|
||||||
|
schema: PRUNE_SCHEMA_KEEP_MONTHLY,
|
||||||
|
},
|
||||||
|
"keep-yearly": {
|
||||||
|
optional: true,
|
||||||
|
schema: PRUNE_SCHEMA_KEEP_YEARLY,
|
||||||
|
},
|
||||||
|
"verify-new": {
|
||||||
|
description: "If enabled, all new backups will be verified right after completion.",
|
||||||
|
optional: true,
|
||||||
|
type: bool,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize,Updater)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Datastore configuration properties.
|
||||||
|
pub struct DataStoreConfig {
|
||||||
|
#[updater(skip)]
|
||||||
|
pub name: String,
|
||||||
|
#[updater(skip)]
|
||||||
|
pub path: String,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comment: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub gc_schedule: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub prune_schedule: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub keep_last: Option<u64>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub keep_hourly: Option<u64>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub keep_daily: Option<u64>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub keep_weekly: Option<u64>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub keep_monthly: Option<u64>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub keep_yearly: Option<u64>,
|
||||||
|
/// If enabled, all backups will be verified right after completion.
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub verify_new: Option<bool>,
|
||||||
|
/// Send job email notification to this user
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub notify_user: Option<Userid>,
|
||||||
|
/// Send notification only for job errors
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub notify: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
store: {
|
||||||
|
schema: DATASTORE_SCHEMA,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
/// Basic information about a datastore.
|
||||||
|
pub struct DataStoreListItem {
|
||||||
|
pub store: String,
|
||||||
|
pub comment: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
"filename": {
|
||||||
|
schema: BACKUP_ARCHIVE_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
"crypt-mode": {
|
||||||
|
type: CryptMode,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
/// Basic information about archive files inside a backup snapshot.
|
||||||
|
pub struct BackupContent {
|
||||||
|
pub filename: String,
|
||||||
|
/// Info if file is encrypted, signed, or neither.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub crypt_mode: Option<CryptMode>,
|
||||||
|
/// Archive size (from backup manifest).
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub size: Option<u64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
/// Result of a verify operation.
|
||||||
|
pub enum VerifyState {
|
||||||
|
/// Verification was successful
|
||||||
|
Ok,
|
||||||
|
/// Verification reported one or more errors
|
||||||
|
Failed,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
upid: {
|
||||||
|
type: UPID,
|
||||||
|
},
|
||||||
|
state: {
|
||||||
|
type: VerifyState,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
/// Task properties.
|
||||||
|
pub struct SnapshotVerifyState {
|
||||||
|
/// UPID of the verify task
|
||||||
|
pub upid: UPID,
|
||||||
|
/// State of the verification. Enum.
|
||||||
|
pub state: VerifyState,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
"backup-type": {
|
||||||
|
schema: BACKUP_TYPE_SCHEMA,
|
||||||
|
},
|
||||||
|
"backup-id": {
|
||||||
|
schema: BACKUP_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
"backup-time": {
|
||||||
|
schema: BACKUP_TIME_SCHEMA,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
verification: {
|
||||||
|
type: SnapshotVerifyState,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
fingerprint: {
|
||||||
|
type: String,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
files: {
|
||||||
|
items: {
|
||||||
|
schema: BACKUP_ARCHIVE_NAME_SCHEMA
|
||||||
|
},
|
||||||
|
},
|
||||||
|
owner: {
|
||||||
|
type: Authid,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
/// Basic information about backup snapshot.
|
||||||
|
pub struct SnapshotListItem {
|
||||||
|
pub backup_type: String, // enum
|
||||||
|
pub backup_id: String,
|
||||||
|
pub backup_time: i64,
|
||||||
|
/// The first line from manifest "notes"
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub comment: Option<String>,
|
||||||
|
/// The result of the last run verify task
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub verification: Option<SnapshotVerifyState>,
|
||||||
|
/// Fingerprint of encryption key
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub fingerprint: Option<Fingerprint>,
|
||||||
|
/// List of contained archive files.
|
||||||
|
pub files: Vec<BackupContent>,
|
||||||
|
/// Overall snapshot size (sum of all archive sizes).
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub size: Option<u64>,
|
||||||
|
/// The owner of the snapshots group
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub owner: Option<Authid>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
"backup-type": {
|
||||||
|
schema: BACKUP_TYPE_SCHEMA,
|
||||||
|
},
|
||||||
|
"backup-id": {
|
||||||
|
schema: BACKUP_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
"last-backup": {
|
||||||
|
schema: BACKUP_TIME_SCHEMA,
|
||||||
|
},
|
||||||
|
"backup-count": {
|
||||||
|
type: Integer,
|
||||||
|
},
|
||||||
|
files: {
|
||||||
|
items: {
|
||||||
|
schema: BACKUP_ARCHIVE_NAME_SCHEMA
|
||||||
|
},
|
||||||
|
},
|
||||||
|
owner: {
|
||||||
|
type: Authid,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
/// Basic information about a backup group.
|
||||||
|
pub struct GroupListItem {
|
||||||
|
pub backup_type: String, // enum
|
||||||
|
pub backup_id: String,
|
||||||
|
pub last_backup: i64,
|
||||||
|
/// Number of contained snapshots
|
||||||
|
pub backup_count: u64,
|
||||||
|
/// List of contained archive files.
|
||||||
|
pub files: Vec<String>,
|
||||||
|
/// The owner of group
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub owner: Option<Authid>,
|
||||||
|
/// The first line from group "notes"
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub comment: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
"backup-type": {
|
||||||
|
schema: BACKUP_TYPE_SCHEMA,
|
||||||
|
},
|
||||||
|
"backup-id": {
|
||||||
|
schema: BACKUP_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
"backup-time": {
|
||||||
|
schema: BACKUP_TIME_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
/// Prune result.
|
||||||
|
pub struct PruneListItem {
|
||||||
|
pub backup_type: String, // enum
|
||||||
|
pub backup_id: String,
|
||||||
|
pub backup_time: i64,
|
||||||
|
/// Keep snapshot
|
||||||
|
pub keep: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
ct: {
|
||||||
|
type: TypeCounts,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
host: {
|
||||||
|
type: TypeCounts,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
vm: {
|
||||||
|
type: TypeCounts,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
other: {
|
||||||
|
type: TypeCounts,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize, Deserialize, Default)]
|
||||||
|
/// Counts of groups/snapshots per BackupType.
|
||||||
|
pub struct Counts {
|
||||||
|
/// The counts for CT backups
|
||||||
|
pub ct: Option<TypeCounts>,
|
||||||
|
/// The counts for Host backups
|
||||||
|
pub host: Option<TypeCounts>,
|
||||||
|
/// The counts for VM backups
|
||||||
|
pub vm: Option<TypeCounts>,
|
||||||
|
/// The counts for other backup types
|
||||||
|
pub other: Option<TypeCounts>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Serialize, Deserialize, Default)]
|
||||||
|
/// Backup Type group/snapshot counts.
|
||||||
|
pub struct TypeCounts {
|
||||||
|
/// The number of groups of the type.
|
||||||
|
pub groups: u64,
|
||||||
|
/// The number of snapshots of the type.
|
||||||
|
pub snapshots: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
"upid": {
|
||||||
|
optional: true,
|
||||||
|
type: UPID,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Clone, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
/// Garbage collection status.
|
||||||
|
pub struct GarbageCollectionStatus {
|
||||||
|
pub upid: Option<String>,
|
||||||
|
/// Number of processed index files.
|
||||||
|
pub index_file_count: usize,
|
||||||
|
/// Sum of bytes referred by index files.
|
||||||
|
pub index_data_bytes: u64,
|
||||||
|
/// Bytes used on disk.
|
||||||
|
pub disk_bytes: u64,
|
||||||
|
/// Chunks used on disk.
|
||||||
|
pub disk_chunks: usize,
|
||||||
|
/// Sum of removed bytes.
|
||||||
|
pub removed_bytes: u64,
|
||||||
|
/// Number of removed chunks.
|
||||||
|
pub removed_chunks: usize,
|
||||||
|
/// Sum of pending bytes (pending removal - kept for safety).
|
||||||
|
pub pending_bytes: u64,
|
||||||
|
/// Number of pending chunks (pending removal - kept for safety).
|
||||||
|
pub pending_chunks: usize,
|
||||||
|
/// Number of chunks marked as .bad by verify that have been removed by GC.
|
||||||
|
pub removed_bad: usize,
|
||||||
|
/// Number of chunks still marked as .bad after garbage collection.
|
||||||
|
pub still_bad: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for GarbageCollectionStatus {
|
||||||
|
fn default() -> Self {
|
||||||
|
GarbageCollectionStatus {
|
||||||
|
upid: None,
|
||||||
|
index_file_count: 0,
|
||||||
|
index_data_bytes: 0,
|
||||||
|
disk_bytes: 0,
|
||||||
|
disk_chunks: 0,
|
||||||
|
removed_bytes: 0,
|
||||||
|
removed_chunks: 0,
|
||||||
|
pending_bytes: 0,
|
||||||
|
pending_chunks: 0,
|
||||||
|
removed_bad: 0,
|
||||||
|
still_bad: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
"gc-status": {
|
||||||
|
type: GarbageCollectionStatus,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
counts: {
|
||||||
|
type: Counts,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Overall Datastore status and useful information.
|
||||||
|
pub struct DataStoreStatus {
|
||||||
|
/// Total space (bytes).
|
||||||
|
pub total: u64,
|
||||||
|
/// Used space (bytes).
|
||||||
|
pub used: u64,
|
||||||
|
/// Available space (bytes).
|
||||||
|
pub avail: u64,
|
||||||
|
/// Status of last GC
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub gc_status: Option<GarbageCollectionStatus>,
|
||||||
|
/// Group/Snapshot counts
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub counts: Option<Counts>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE: ReturnType = ReturnType {
|
||||||
|
optional: false,
|
||||||
|
schema: &ArraySchema::new(
|
||||||
|
"Returns the list of snapshots.",
|
||||||
|
&SnapshotListItem::API_SCHEMA,
|
||||||
|
).schema(),
|
||||||
|
};
|
||||||
|
|
||||||
|
pub const ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE: ReturnType = ReturnType {
|
||||||
|
optional: false,
|
||||||
|
schema: &ArraySchema::new(
|
||||||
|
"Returns the list of archive files inside a backup snapshots.",
|
||||||
|
&BackupContent::API_SCHEMA,
|
||||||
|
).schema(),
|
||||||
|
};
|
||||||
|
|
||||||
|
pub const ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE: ReturnType = ReturnType {
|
||||||
|
optional: false,
|
||||||
|
schema: &ArraySchema::new(
|
||||||
|
"Returns the list of backup groups.",
|
||||||
|
&GroupListItem::API_SCHEMA,
|
||||||
|
).schema(),
|
||||||
|
};
|
||||||
|
|
||||||
|
pub const ADMIN_DATASTORE_PRUNE_RETURN_TYPE: ReturnType = ReturnType {
|
||||||
|
optional: false,
|
||||||
|
schema: &ArraySchema::new(
|
||||||
|
"Returns the list of snapshots and a flag indicating if there are kept or removed.",
|
||||||
|
&PruneListItem::API_SCHEMA,
|
||||||
|
).schema(),
|
||||||
|
};
|
@ -1,7 +1,8 @@
|
|||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::api;
|
use proxmox::api::api;
|
||||||
|
|
||||||
#[api()]
|
#[api]
|
||||||
#[derive(Serialize, Deserialize)]
|
#[derive(Serialize, Deserialize)]
|
||||||
#[serde(rename_all = "kebab-case")]
|
#[serde(rename_all = "kebab-case")]
|
||||||
/// General status information about a running VM file-restore daemon
|
/// General status information about a running VM file-restore daemon
|
||||||
@ -12,4 +13,3 @@ pub struct RestoreDaemonStatus {
|
|||||||
/// not set, as then the status call will have reset the timer before returning the value
|
/// not set, as then the status call will have reset the timer before returning the value
|
||||||
pub timeout: i64,
|
pub timeout: i64,
|
||||||
}
|
}
|
||||||
|
|
392
pbs-api-types/src/jobs.rs
Normal file
392
pbs-api-types/src/jobs.rs
Normal file
@ -0,0 +1,392 @@
|
|||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::const_regex;
|
||||||
|
|
||||||
|
use proxmox::api::{api, schema::*};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
Userid, Authid, REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
|
||||||
|
SINGLE_LINE_COMMENT_SCHEMA, PROXMOX_SAFE_ID_FORMAT, DATASTORE_SCHEMA,
|
||||||
|
};
|
||||||
|
|
||||||
|
const_regex!{
    /// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
    pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
    /// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:ACTUAL_JOB_ID'
    pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
}
|
||||||
|
|
||||||
|
pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
|
||||||
|
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||||
|
.min_length(3)
|
||||||
|
.max_length(32)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"Run sync job at specified schedule.")
|
||||||
|
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
|
||||||
|
.type_text("<calendar-event>")
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"Run garbage collection job at specified schedule.")
|
||||||
|
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
|
||||||
|
.type_text("<calendar-event>")
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"Run prune job at specified schedule.")
|
||||||
|
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
|
||||||
|
.type_text("<calendar-event>")
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"Run verify job at specified schedule.")
|
||||||
|
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
|
||||||
|
.type_text("<calendar-event>")
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
|
||||||
|
"Delete vanished backups. This remove the local copy if the remote backup was deleted.")
|
||||||
|
.default(true)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
"next-run": {
|
||||||
|
description: "Estimated time of the next run (UNIX epoch).",
|
||||||
|
optional: true,
|
||||||
|
type: Integer,
|
||||||
|
},
|
||||||
|
"last-run-state": {
|
||||||
|
description: "Result of the last run.",
|
||||||
|
optional: true,
|
||||||
|
type: String,
|
||||||
|
},
|
||||||
|
"last-run-upid": {
|
||||||
|
description: "Task UPID of the last run.",
|
||||||
|
optional: true,
|
||||||
|
type: String,
|
||||||
|
},
|
||||||
|
"last-run-endtime": {
|
||||||
|
description: "Endtime of the last run.",
|
||||||
|
optional: true,
|
||||||
|
type: Integer,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize,Default)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Job Scheduling Status
|
||||||
|
pub struct JobScheduleStatus {
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub next_run: Option<i64>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub last_run_state: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub last_run_upid: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub last_run_endtime: Option<i64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
/// When do we send notifications
|
||||||
|
pub enum Notify {
|
||||||
|
/// Never send notification
|
||||||
|
Never,
|
||||||
|
/// Send notifications for failed and successful jobs
|
||||||
|
Always,
|
||||||
|
/// Send notifications for failed jobs only
|
||||||
|
Error,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
gc: {
|
||||||
|
type: Notify,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
verify: {
|
||||||
|
type: Notify,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
sync: {
|
||||||
|
type: Notify,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
/// Datastore notify settings
|
||||||
|
pub struct DatastoreNotify {
|
||||||
|
/// Garbage collection settings
|
||||||
|
pub gc: Option<Notify>,
|
||||||
|
/// Verify job setting
|
||||||
|
pub verify: Option<Notify>,
|
||||||
|
/// Sync job setting
|
||||||
|
pub sync: Option<Notify>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"Datastore notification setting")
|
||||||
|
.format(&ApiStringFormat::PropertyString(&DatastoreNotify::API_SCHEMA))
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
|
||||||
|
"Do not verify backups that are already verified if their verification is not outdated.")
|
||||||
|
.default(true)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new(
|
||||||
|
"Days after that a verification becomes outdated")
|
||||||
|
.minimum(1)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
id: {
|
||||||
|
schema: JOB_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
store: {
|
||||||
|
schema: DATASTORE_SCHEMA,
|
||||||
|
},
|
||||||
|
"ignore-verified": {
|
||||||
|
optional: true,
|
||||||
|
schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
|
||||||
|
},
|
||||||
|
"outdated-after": {
|
||||||
|
optional: true,
|
||||||
|
schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
schedule: {
|
||||||
|
optional: true,
|
||||||
|
schema: VERIFICATION_SCHEDULE_SCHEMA,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize,Updater)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Verification Job
|
||||||
|
pub struct VerificationJobConfig {
|
||||||
|
/// unique ID to address this job
|
||||||
|
#[updater(skip)]
|
||||||
|
pub id: String,
|
||||||
|
/// the datastore ID this verificaiton job affects
|
||||||
|
pub store: String,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
/// if not set to false, check the age of the last snapshot verification to filter
|
||||||
|
/// out recent ones, depending on 'outdated_after' configuration.
|
||||||
|
pub ignore_verified: Option<bool>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
/// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false.
|
||||||
|
pub outdated_after: Option<i64>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comment: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
/// when to schedule this job in calendar event notation
|
||||||
|
pub schedule: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
config: {
|
||||||
|
type: VerificationJobConfig,
|
||||||
|
},
|
||||||
|
status: {
|
||||||
|
type: JobScheduleStatus,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Status of Verification Job
|
||||||
|
pub struct VerificationJobStatus {
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub config: VerificationJobConfig,
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub status: JobScheduleStatus,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
store: {
|
||||||
|
schema: DATASTORE_SCHEMA,
|
||||||
|
},
|
||||||
|
pool: {
|
||||||
|
schema: MEDIA_POOL_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
drive: {
|
||||||
|
schema: DRIVE_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
"eject-media": {
|
||||||
|
description: "Eject media upon job completion.",
|
||||||
|
type: bool,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"export-media-set": {
|
||||||
|
description: "Export media set upon job completion.",
|
||||||
|
type: bool,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"latest-only": {
|
||||||
|
description: "Backup latest snapshots only.",
|
||||||
|
type: bool,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"notify-user": {
|
||||||
|
optional: true,
|
||||||
|
type: Userid,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize,Clone,Updater)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Tape Backup Job Setup
|
||||||
|
pub struct TapeBackupJobSetup {
|
||||||
|
pub store: String,
|
||||||
|
pub pool: String,
|
||||||
|
pub drive: String,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub eject_media: Option<bool>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub export_media_set: Option<bool>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub latest_only: Option<bool>,
|
||||||
|
/// Send job email notification to this user
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub notify_user: Option<Userid>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
id: {
|
||||||
|
schema: JOB_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
setup: {
|
||||||
|
type: TapeBackupJobSetup,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
schedule: {
|
||||||
|
optional: true,
|
||||||
|
schema: SYNC_SCHEDULE_SCHEMA,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize,Clone,Updater)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Tape Backup Job
|
||||||
|
pub struct TapeBackupJobConfig {
|
||||||
|
#[updater(skip)]
|
||||||
|
pub id: String,
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub setup: TapeBackupJobSetup,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comment: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub schedule: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
config: {
|
||||||
|
type: TapeBackupJobConfig,
|
||||||
|
},
|
||||||
|
status: {
|
||||||
|
type: JobScheduleStatus,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Status of Tape Backup Job
|
||||||
|
pub struct TapeBackupJobStatus {
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub config: TapeBackupJobConfig,
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub status: JobScheduleStatus,
|
||||||
|
/// Next tape used (best guess)
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub next_media_label: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
id: {
|
||||||
|
schema: JOB_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
store: {
|
||||||
|
schema: DATASTORE_SCHEMA,
|
||||||
|
},
|
||||||
|
"owner": {
|
||||||
|
type: Authid,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
remote: {
|
||||||
|
schema: REMOTE_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
"remote-store": {
|
||||||
|
schema: DATASTORE_SCHEMA,
|
||||||
|
},
|
||||||
|
"remove-vanished": {
|
||||||
|
schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
schedule: {
|
||||||
|
optional: true,
|
||||||
|
schema: SYNC_SCHEDULE_SCHEMA,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize,Clone,Updater)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Sync Job
|
||||||
|
pub struct SyncJobConfig {
|
||||||
|
#[updater(skip)]
|
||||||
|
pub id: String,
|
||||||
|
pub store: String,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub owner: Option<Authid>,
|
||||||
|
pub remote: String,
|
||||||
|
pub remote_store: String,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub remove_vanished: Option<bool>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comment: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub schedule: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
config: {
|
||||||
|
type: SyncJobConfig,
|
||||||
|
},
|
||||||
|
status: {
|
||||||
|
type: JobScheduleStatus,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
|
||||||
|
#[derive(Serialize,Deserialize)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Status of Sync Job
|
||||||
|
pub struct SyncJobStatus {
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub config: SyncJobConfig,
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub status: JobScheduleStatus,
|
||||||
|
}
|
56
pbs-api-types/src/key_derivation.rs
Normal file
56
pbs-api-types/src/key_derivation.rs
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::api::api;
|
||||||
|
|
||||||
|
use crate::CERT_FINGERPRINT_SHA256_SCHEMA;
|
||||||
|
|
||||||
|
#[api(default: "scrypt")]
|
||||||
|
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
/// Key derivation function for password protected encryption keys.
|
||||||
|
pub enum Kdf {
|
||||||
|
/// Do not encrypt the key.
|
||||||
|
None,
|
||||||
|
/// Encrypt they key with a password using SCrypt.
|
||||||
|
Scrypt,
|
||||||
|
/// Encrtypt the Key with a password using PBKDF2
|
||||||
|
PBKDF2,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for Kdf {
|
||||||
|
#[inline]
|
||||||
|
fn default() -> Self {
|
||||||
|
Kdf::Scrypt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
kdf: {
|
||||||
|
type: Kdf,
|
||||||
|
},
|
||||||
|
fingerprint: {
|
||||||
|
schema: CERT_FINGERPRINT_SHA256_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Deserialize, Serialize)]
|
||||||
|
/// Encryption Key Information
|
||||||
|
pub struct KeyInfo {
|
||||||
|
/// Path to key (if stored in a file)
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub path: Option<String>,
|
||||||
|
pub kdf: Kdf,
|
||||||
|
/// Key creation time
|
||||||
|
pub created: i64,
|
||||||
|
/// Key modification time
|
||||||
|
pub modified: i64,
|
||||||
|
/// Key fingerprint
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub fingerprint: Option<String>,
|
||||||
|
/// Password hint
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub hint: Option<String>,
|
||||||
|
}
|
||||||
|
|
399
pbs-api-types/src/lib.rs
Normal file
399
pbs-api-types/src/lib.rs
Normal file
@ -0,0 +1,399 @@
|
|||||||
|
//! Basic API types used by most of the PBS code.
|
||||||
|
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use anyhow::bail;
|
||||||
|
|
||||||
|
use proxmox::api::api;
|
||||||
|
use proxmox::api::schema::{ApiStringFormat, ArraySchema, Schema, StringSchema};
|
||||||
|
use proxmox::const_regex;
|
||||||
|
use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE};
|
||||||
|
|
||||||
|
// Regex fragments shared across the crate. Exported as macros so they can
// be spliced into `const_regex!` definitions via `concat!`.

#[rustfmt::skip]
#[macro_export]
macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => { r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)" }; }

#[rustfmt::skip]
#[macro_export]
macro_rules! BACKUP_ID_RE { () => (r"[A-Za-z0-9_][A-Za-z0-9._\-]*") }

#[rustfmt::skip]
#[macro_export]
macro_rules! BACKUP_TYPE_RE { () => (r"(?:host|vm|ct)") }

#[rustfmt::skip]
#[macro_export]
macro_rules! BACKUP_TIME_RE { () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z") }

#[rustfmt::skip]
#[macro_export]
macro_rules! SNAPSHOT_PATH_REGEX_STR {
    () => (
        concat!(r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")")
    );
}
|
||||||
|
|
||||||
|
mod acl;
|
||||||
|
pub use acl::*;
|
||||||
|
|
||||||
|
mod datastore;
|
||||||
|
pub use datastore::*;
|
||||||
|
|
||||||
|
mod jobs;
|
||||||
|
pub use jobs::*;
|
||||||
|
|
||||||
|
mod key_derivation;
|
||||||
|
pub use key_derivation::{Kdf, KeyInfo};
|
||||||
|
|
||||||
|
mod network;
|
||||||
|
pub use network::*;
|
||||||
|
|
||||||
|
#[macro_use]
|
||||||
|
mod userid;
|
||||||
|
pub use userid::Authid;
|
||||||
|
pub use userid::Userid;
|
||||||
|
pub use userid::{Realm, RealmRef};
|
||||||
|
pub use userid::{Tokenname, TokennameRef};
|
||||||
|
pub use userid::{Username, UsernameRef};
|
||||||
|
pub use userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA};
|
||||||
|
|
||||||
|
#[macro_use]
|
||||||
|
mod user;
|
||||||
|
pub use user::*;
|
||||||
|
|
||||||
|
pub mod upid;
|
||||||
|
pub use upid::*;
|
||||||
|
|
||||||
|
mod crypto;
|
||||||
|
pub use crypto::{CryptMode, Fingerprint};
|
||||||
|
|
||||||
|
pub mod file_restore;
|
||||||
|
|
||||||
|
mod remote;
|
||||||
|
pub use remote::*;
|
||||||
|
|
||||||
|
mod tape;
|
||||||
|
pub use tape::*;
|
||||||
|
|
||||||
|
mod zfs;
|
||||||
|
pub use zfs::*;
|
||||||
|
|
||||||
|
|
||||||
|
// Crate-private regex fragments (DNS labels/names, CIDR forms) used by the
// `const_regex!` block below.
#[rustfmt::skip]
#[macro_use]
mod local_macros {
    macro_rules! DNS_LABEL { () => (r"(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
    macro_rules! DNS_NAME { () => (concat!(r"(?:(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!(), ")")) }
    macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) }
    macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) }
    macro_rules! DNS_ALIAS_LABEL { () => (r"(?:[a-zA-Z0-9_](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
    macro_rules! DNS_ALIAS_NAME {
        () => (concat!(r"(?:(?:", DNS_ALIAS_LABEL!() , r"\.)*", DNS_ALIAS_LABEL!(), ")"))
    }
}
|
||||||
|
|
||||||
|
const_regex! {
    pub IP_V4_REGEX = concat!(r"^", IPV4RE!(), r"$");
    pub IP_V6_REGEX = concat!(r"^", IPV6RE!(), r"$");
    pub IP_REGEX = concat!(r"^", IPRE!(), r"$");
    pub CIDR_V4_REGEX = concat!(r"^", CIDR_V4_REGEX_STR!(), r"$");
    pub CIDR_V6_REGEX = concat!(r"^", CIDR_V6_REGEX_STR!(), r"$");
    pub CIDR_REGEX = concat!(r"^(?:", CIDR_V4_REGEX_STR!(), "|", CIDR_V6_REGEX_STR!(), r")$");
    pub HOSTNAME_REGEX = r"^(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)$";
    pub DNS_NAME_REGEX = concat!(r"^", DNS_NAME!(), r"$");
    pub DNS_ALIAS_REGEX = concat!(r"^", DNS_ALIAS_NAME!(), r"$");
    pub DNS_NAME_OR_IP_REGEX = concat!(r"^(?:", DNS_NAME!(), "|", IPRE!(), r")$");

    pub SHA256_HEX_REGEX = r"^[a-f0-9]{64}$"; // fixme: define in common_regex ?

    pub PASSWORD_REGEX = r"^[[:^cntrl:]]*$"; // everything but control characters

    pub UUID_REGEX = r"^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}$";

    pub SYSTEMD_DATETIME_REGEX = r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$"; // fixme: define in common_regex ?

    pub FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";

    /// Regex for safe identifiers.
    ///
    /// This
    /// [article](https://dwheeler.com/essays/fixing-unix-linux-filenames.html)
    /// contains further information why it is reasonable to restrict
    /// names this way. This is not only useful for filenames, but for
    /// any identifier command line tools work with.
    pub PROXMOX_SAFE_ID_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r"$");

    pub SINGLE_LINE_COMMENT_REGEX = r"^[[:^cntrl:]]*$";

    // NOTE(review): the leading `^^` is a redundant double anchor; `^` is
    // zero-width so behavior is unchanged — kept as-is to stay byte-compatible.
    pub BACKUP_REPO_URL_REGEX = concat!(
        r"^^(?:(?:(",
        USER_ID_REGEX_STR!(), "|", APITOKEN_ID_REGEX_STR!(),
        ")@)?(",
        DNS_NAME!(), "|", IPRE_BRACKET!(),
        "):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$"
    );

    pub BLOCKDEVICE_NAME_REGEX = r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$";
    pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$");
}
|
||||||
|
|
||||||
|
/// Format: a single IPv4 address.
pub const IP_V4_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_V4_REGEX);
/// Format: a single IPv6 address.
pub const IP_V6_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_V6_REGEX);
/// Format: an IPv4 or IPv6 address.
pub const IP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_REGEX);
/// Format: IPv4 network in CIDR notation.
pub const CIDR_V4_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_V4_REGEX);
/// Format: IPv6 network in CIDR notation.
pub const CIDR_V6_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_V6_REGEX);
/// Format: IPv4 or IPv6 network in CIDR notation.
pub const CIDR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_REGEX);
/// Format: 64 lowercase hex digits (a sha256 digest).
pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
/// Format: any characters except control characters.
pub const PASSWORD_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PASSWORD_REGEX);
/// Format: lowercase hyphenated UUID.
pub const UUID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&UUID_REGEX);
/// Format: a block device name as found under /sys/block (e.g. sda, nvme0n1).
pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);
/// Format: Proxmox Backup Server subscription key.
pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX);
/// Format: systemd-style date or date-time ("YYYY-MM-DD[ hh:mm[:ss]]").
pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX);
/// Format: a single RFC1123 host name label.
pub const HOSTNAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOSTNAME_REGEX);

/// Format: a DNS alias name (labels may additionally contain underscores).
pub const DNS_ALIAS_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&DNS_ALIAS_REGEX);
|
||||||
|
|
||||||
|
/// Schema: DNS search domain (free-form string; no format validation here).
pub const SEARCH_DOMAIN_SCHEMA: Schema =
    StringSchema::new("Search domain for host-name lookup.").schema();

/// Schema: primary name server; must be a valid IPv4 or IPv6 address.
pub const FIRST_DNS_SERVER_SCHEMA: Schema =
    StringSchema::new("First name server IP address.")
    .format(&IP_FORMAT)
    .schema();

/// Schema: secondary name server; must be a valid IPv4 or IPv6 address.
pub const SECOND_DNS_SERVER_SCHEMA: Schema =
    StringSchema::new("Second name server IP address.")
    .format(&IP_FORMAT)
    .schema();

/// Schema: tertiary name server; must be a valid IPv4 or IPv6 address.
pub const THIRD_DNS_SERVER_SCHEMA: Schema =
    StringSchema::new("Third name server IP address.")
    .format(&IP_FORMAT)
    .schema();

/// Schema: a single RFC1123 host name label (no dots).
pub const HOSTNAME_SCHEMA: Schema = StringSchema::new("Hostname (as defined in RFC1123).")
    .format(&HOSTNAME_FORMAT)
    .schema();

/// Format: a fully qualified DNS name.
pub const DNS_NAME_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&DNS_NAME_REGEX);

/// Format: either a DNS name or an IP address.
pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX);

/// Schema: either a DNS name or an IP address.
pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP address.")
    .format(&DNS_NAME_OR_IP_FORMAT)
    .schema();
|
||||||
|
|
||||||
|
/// Schema: node name. Accepts only the literal "localhost" or the name of
/// the node this process is running on (`proxmox::tools::nodename()`);
/// anything else is rejected with a "no such node" error.
pub const NODE_SCHEMA: Schema = StringSchema::new("Node name (or 'localhost')")
    .format(&ApiStringFormat::VerifyFn(|node| {
        if node == "localhost" || node == proxmox::tools::nodename() {
            Ok(())
        } else {
            bail!("no such node '{}'", node);
        }
    }))
    .schema();
|
||||||
|
|
||||||
|
/// Schema: time zone name. Only checked to be a single printable line of
/// 2..=64 characters; validity against zone.tab is not enforced here.
pub const TIME_ZONE_SCHEMA: Schema = StringSchema::new(
    "Time zone. The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names.")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .min_length(2)
    .max_length(64)
    .schema();

/// Schema: a single block device name (e.g. "sda", "nvme0n1").
pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name (/sys/block/<name>).")
    .format(&BLOCKDEVICE_NAME_FORMAT)
    .min_length(3)
    .max_length(64)
    .schema();

/// Schema: array of block device names.
pub const DISK_ARRAY_SCHEMA: Schema = ArraySchema::new(
    "Disk name list.", &BLOCKDEVICE_NAME_SCHEMA)
    .schema();

/// Schema: comma separated disk list, parsed as a property string
/// against DISK_ARRAY_SCHEMA.
pub const DISK_LIST_SCHEMA: Schema = StringSchema::new(
    "A list of disk names, comma separated.")
    .format(&ApiStringFormat::PropertyString(&DISK_ARRAY_SCHEMA))
    .schema();
|
||||||
|
|
||||||
|
/// Schema: generic password — 1..=1024 non-control characters.
pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.")
    .format(&PASSWORD_FORMAT)
    .min_length(1)
    .max_length(1024)
    .schema();

/// Schema: PBS user password — stricter length limits (5..=64).
pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
    .format(&PASSWORD_FORMAT)
    .min_length(5)
    .max_length(64)
    .schema();

/// Schema: authentication realm name — a safe id of 2..=32 characters.
pub const REALM_ID_SCHEMA: Schema = StringSchema::new("Realm name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(2)
    .max_length(32)
    .schema();
|
||||||
|
|
||||||
|
/// Format: sha256 fingerprint — 32 colon-separated hex byte pairs.
pub const FINGERPRINT_SHA256_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&FINGERPRINT_SHA256_REGEX);

/// Schema: X509 certificate fingerprint (sha256).
pub const CERT_FINGERPRINT_SHA256_SCHEMA: Schema =
    StringSchema::new("X509 certificate fingerprint (sha256).")
    .format(&FINGERPRINT_SHA256_FORMAT)
    .schema();

/// Format: safe identifier (see PROXMOX_SAFE_ID_REGEX).
pub const PROXMOX_SAFE_ID_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);

/// Format: one line of text without control characters.
pub const SINGLE_LINE_COMMENT_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&SINGLE_LINE_COMMENT_REGEX);

/// Schema: single-line comment.
pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (single line).")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .schema();
|
||||||
|
|
||||||
|
/// Schema: subscription key — "pbs<tier letter>-" plus 10 hex digits,
/// 15..=16 characters total.
pub const SUBSCRIPTION_KEY_SCHEMA: Schema = StringSchema::new("Proxmox Backup Server subscription key.")
    .format(&SUBSCRIPTION_KEY_FORMAT)
    .min_length(15)
    .max_length(16)
    .schema();

/// Schema: service ID — free-form string of up to 256 characters.
pub const SERVICE_ID_SCHEMA: Schema = StringSchema::new("Service ID.")
    .max_length(256)
    .schema();

/// Schema: configuration digest used for optimistic concurrency control —
/// a sha256 hex digest of the current config file contents.
pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
    "Prevent changes if current configuration file has different \
    SHA256 digest. This can be used to prevent concurrent \
    modifications.",
)
    .format(&PVE_CONFIG_DIGEST_FORMAT)
    .schema();

/// API schema format definition for repository URLs
pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX);
|
||||||
|
|
||||||
|
|
||||||
|
// Complex type definitions
|
||||||
|
|
||||||
|
|
||||||
|
#[api()]
#[derive(Default, Serialize, Deserialize)]
/// Storage space usage information.
pub struct StorageStatus {
    /// Total space (bytes).
    pub total: u64,
    /// Used space (bytes).
    pub used: u64,
    /// Available space (bytes).
    // NOTE(review): presumably avail == total - used minus reserved space,
    // as reported by the underlying filesystem — confirm at the fill site.
    pub avail: u64,
}
|
||||||
|
|
||||||
|
/// Schema: password hint — a single line of 1..=64 printable characters.
pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .min_length(1)
    .max_length(64)
    .schema();
|
||||||
|
|
||||||
|
|
||||||
|
#[api]
#[derive(Deserialize, Serialize)]
/// RSA public key information
pub struct RsaPubKeyInfo {
    /// Path to key (if stored in a file)
    #[serde(skip_serializing_if="Option::is_none")]
    pub path: Option<String>,
    /// RSA exponent (decimal string, see the TryFrom impl below)
    pub exponent: String,
    /// Hex-encoded RSA modulus
    pub modulus: String,
    /// Key (modulus) length in bits
    pub length: usize,
}
|
||||||
|
|
||||||
|
impl std::convert::TryFrom<openssl::rsa::Rsa<openssl::pkey::Public>> for RsaPubKeyInfo {
|
||||||
|
type Error = anyhow::Error;
|
||||||
|
|
||||||
|
fn try_from(value: openssl::rsa::Rsa<openssl::pkey::Public>) -> Result<Self, Self::Error> {
|
||||||
|
let modulus = value.n().to_hex_str()?.to_string();
|
||||||
|
let exponent = value.e().to_dec_str()?.to_string();
|
||||||
|
let length = value.size() as usize * 8;
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
path: None,
|
||||||
|
exponent,
|
||||||
|
modulus,
|
||||||
|
length,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
#[derive(Debug, Clone, Serialize, Deserialize)]
// Field names are serialized in PascalCase (e.g. "Package", "OldVersion").
#[serde(rename_all = "PascalCase")]
/// Describes a package for which an update is available.
pub struct APTUpdateInfo {
    /// Package name
    pub package: String,
    /// Package title
    pub title: String,
    /// Package architecture
    pub arch: String,
    /// Human readable package description
    pub description: String,
    /// New version to be updated to
    pub version: String,
    /// Old version currently installed
    pub old_version: String,
    /// Package origin
    pub origin: String,
    /// Package priority in human-readable form
    pub priority: String,
    /// Package section
    pub section: String,
    /// URL under which the package's changelog can be retrieved
    pub change_log_url: String,
    /// Custom extra field for additional package information
    #[serde(skip_serializing_if="Option::is_none")]
    pub extra_info: Option<String>,
}
|
||||||
|
|
||||||
|
#[api()]
#[derive(Copy, Clone, Serialize, Deserialize)]
// Serialized in UPPERCASE: "MAX" / "AVERAGE".
#[serde(rename_all = "UPPERCASE")]
/// RRD consolidation mode — presumably selects whether queried statistics
/// return the maximum or the average per time step (confirm at call sites).
pub enum RRDMode {
    /// Maximum
    Max,
    /// Average
    Average,
}
|
||||||
|
|
||||||
|
|
||||||
|
#[api()]
#[repr(u64)]
#[derive(Copy, Clone, Serialize, Deserialize)]
// Serialized in lowercase: "hour", "day", ...
#[serde(rename_all = "lowercase")]
/// RRD time frame resolution. Each variant's numeric value is its
/// resolution (step size) in seconds; the comments give the covered span.
pub enum RRDTimeFrameResolution {
    /// 1 min => last 70 minutes
    Hour = 60,
    /// 30 min => last 35 hours
    Day = 60*30,
    /// 3 hours => about 8 days
    Week = 60*180,
    /// 12 hours => last 35 days
    Month = 60*720,
    /// 1 week => last 490 days
    Year = 60*10080,
}
|
||||||
|
|
||||||
|
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
// Serialized in lowercase: "reboot" / "shutdown".
#[serde(rename_all = "lowercase")]
/// Node Power command type.
pub enum NodePowerCommand {
    /// Restart the server
    Reboot,
    /// Shutdown the server
    Shutdown,
}
|
308
pbs-api-types/src/network.rs
Normal file
308
pbs-api-types/src/network.rs
Normal file
@ -0,0 +1,308 @@
|
|||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::api::{api, schema::*};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
PROXMOX_SAFE_ID_REGEX,
|
||||||
|
IP_V4_FORMAT, IP_V6_FORMAT, IP_FORMAT,
|
||||||
|
CIDR_V4_FORMAT, CIDR_V6_FORMAT, CIDR_FORMAT,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Format: network interface name — reuses the generic safe-id pattern.
pub const NETWORK_INTERFACE_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);

/// Schema: IPv4 address (max 15 chars: "255.255.255.255").
pub const IP_V4_SCHEMA: Schema =
    StringSchema::new("IPv4 address.")
    .format(&IP_V4_FORMAT)
    .max_length(15)
    .schema();

/// Schema: IPv6 address (max 39 chars: 8 groups of 4 hex digits + colons).
pub const IP_V6_SCHEMA: Schema =
    StringSchema::new("IPv6 address.")
    .format(&IP_V6_FORMAT)
    .max_length(39)
    .schema();

/// Schema: IPv4 or IPv6 address.
pub const IP_SCHEMA: Schema =
    StringSchema::new("IP (IPv4 or IPv6) address.")
    .format(&IP_FORMAT)
    .max_length(39)
    .schema();

/// Schema: IPv4 network in CIDR notation (max 18 chars: address + "/32").
pub const CIDR_V4_SCHEMA: Schema =
    StringSchema::new("IPv4 address with netmask (CIDR notation).")
    .format(&CIDR_V4_FORMAT)
    .max_length(18)
    .schema();

/// Schema: IPv6 network in CIDR notation (max 43 chars: address + "/128").
pub const CIDR_V6_SCHEMA: Schema =
    StringSchema::new("IPv6 address with netmask (CIDR notation).")
    .format(&CIDR_V6_FORMAT)
    .max_length(43)
    .schema();

/// Schema: IPv4 or IPv6 network in CIDR notation.
pub const CIDR_SCHEMA: Schema =
    StringSchema::new("IP address (IPv4 or IPv6) with netmask (CIDR notation).")
    .format(&CIDR_FORMAT)
    .max_length(43)
    .schema();
|
||||||
|
|
||||||
|
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
// Serialized in lowercase: "manual", "static", "dhcp", "loopback".
#[serde(rename_all = "lowercase")]
/// Interface configuration method
pub enum NetworkConfigMethod {
    /// Configuration is done manually using other tools
    Manual,
    /// Define interfaces with statically allocated addresses.
    Static,
    /// Obtain an address via DHCP
    DHCP,
    /// Define the loopback interface.
    Loopback,
}
|
||||||
|
|
||||||
|
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
// Serialized in kebab-case ("balance-rr", "active-backup", ...), except the
// explicitly renamed "802.3ad" variant below.
#[serde(rename_all = "kebab-case")]
// Variant names intentionally mirror the Linux bonding mode identifiers.
#[allow(non_camel_case_types)]
#[repr(u8)]
/// Linux Bond Mode
pub enum LinuxBondMode {
    /// Round-robin policy
    balance_rr = 0,
    /// Active-backup policy
    active_backup = 1,
    /// XOR policy
    balance_xor = 2,
    /// Broadcast policy
    broadcast = 3,
    /// IEEE 802.3ad Dynamic link aggregation
    #[serde(rename = "802.3ad")]
    ieee802_3ad = 4,
    /// Adaptive transmit load balancing
    balance_tlb = 5,
    /// Adaptive load balancing
    balance_alb = 6,
}
|
||||||
|
|
||||||
|
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
// Serialized in kebab-case, with explicit renames for the "+" spellings.
#[serde(rename_all = "kebab-case")]
// Variant names intentionally mirror the Linux xmit_hash_policy identifiers.
#[allow(non_camel_case_types)]
#[repr(u8)]
/// Bond Transmit Hash Policy for LACP (802.3ad)
pub enum BondXmitHashPolicy {
    /// Layer 2
    layer2 = 0,
    /// Layer 2+3
    #[serde(rename = "layer2+3")]
    layer2_3 = 1,
    /// Layer 3+4
    #[serde(rename = "layer3+4")]
    layer3_4 = 2,
}
|
||||||
|
|
||||||
|
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
// Serialized in lowercase: "loopback", "eth", "bridge", ...
#[serde(rename_all = "lowercase")]
/// Network interface type
pub enum NetworkInterfaceType {
    /// Loopback
    Loopback,
    /// Physical Ethernet device
    Eth,
    /// Linux Bridge
    Bridge,
    /// Linux Bond
    Bond,
    /// Linux VLAN (eth.10)
    Vlan,
    /// Interface Alias (eth:1)
    Alias,
    /// Unknown interface type
    Unknown,
}
|
||||||
|
|
||||||
|
/// Schema: network interface name — 1..=IFNAMSIZ-1 characters, matching the
/// kernel's interface name length limit.
pub const NETWORK_INTERFACE_NAME_SCHEMA: Schema = StringSchema::new("Network interface name.")
    .format(&NETWORK_INTERFACE_FORMAT)
    .min_length(1)
    .max_length(libc::IFNAMSIZ-1)
    .schema();

/// Schema: array of network interface names.
pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema = ArraySchema::new(
    "Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA)
    .schema();

/// Schema: comma separated interface list, parsed as a property string
/// against NETWORK_INTERFACE_ARRAY_SCHEMA.
pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema = StringSchema::new(
    "A list of network devices, comma separated.")
    .format(&ApiStringFormat::PropertyString(&NETWORK_INTERFACE_ARRAY_SCHEMA))
    .schema();
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: NETWORK_INTERFACE_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
"type": {
|
||||||
|
type: NetworkInterfaceType,
|
||||||
|
},
|
||||||
|
method: {
|
||||||
|
type: NetworkConfigMethod,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
method6: {
|
||||||
|
type: NetworkConfigMethod,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
cidr: {
|
||||||
|
schema: CIDR_V4_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
cidr6: {
|
||||||
|
schema: CIDR_V6_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
gateway: {
|
||||||
|
schema: IP_V4_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
gateway6: {
|
||||||
|
schema: IP_V6_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
options: {
|
||||||
|
description: "Option list (inet)",
|
||||||
|
type: Array,
|
||||||
|
items: {
|
||||||
|
description: "Optional attribute line.",
|
||||||
|
type: String,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
options6: {
|
||||||
|
description: "Option list (inet6)",
|
||||||
|
type: Array,
|
||||||
|
items: {
|
||||||
|
description: "Optional attribute line.",
|
||||||
|
type: String,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
comments: {
|
||||||
|
description: "Comments (inet, may span multiple lines)",
|
||||||
|
type: String,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
comments6: {
|
||||||
|
description: "Comments (inet6, may span multiple lines)",
|
||||||
|
type: String,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
bridge_ports: {
|
||||||
|
schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
slaves: {
|
||||||
|
schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
bond_mode: {
|
||||||
|
type: LinuxBondMode,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"bond-primary": {
|
||||||
|
schema: NETWORK_INTERFACE_NAME_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
bond_xmit_hash_policy: {
|
||||||
|
type: BondXmitHashPolicy,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
/// Network Interface configuration
|
||||||
|
pub struct Interface {
|
||||||
|
/// Autostart interface
|
||||||
|
#[serde(rename = "autostart")]
|
||||||
|
pub autostart: bool,
|
||||||
|
/// Interface is active (UP)
|
||||||
|
pub active: bool,
|
||||||
|
/// Interface name
|
||||||
|
pub name: String,
|
||||||
|
/// Interface type
|
||||||
|
#[serde(rename = "type")]
|
||||||
|
pub interface_type: NetworkInterfaceType,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub method: Option<NetworkConfigMethod>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub method6: Option<NetworkConfigMethod>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
/// IPv4 address with netmask
|
||||||
|
pub cidr: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
/// IPv4 gateway
|
||||||
|
pub gateway: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
/// IPv6 address with netmask
|
||||||
|
pub cidr6: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
/// IPv6 gateway
|
||||||
|
pub gateway6: Option<String>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if="Vec::is_empty")]
|
||||||
|
pub options: Vec<String>,
|
||||||
|
#[serde(skip_serializing_if="Vec::is_empty")]
|
||||||
|
pub options6: Vec<String>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comments: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comments6: Option<String>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
/// Maximum Transmission Unit
|
||||||
|
pub mtu: Option<u64>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub bridge_ports: Option<Vec<String>>,
|
||||||
|
/// Enable bridge vlan support.
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub bridge_vlan_aware: Option<bool>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub slaves: Option<Vec<String>>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub bond_mode: Option<LinuxBondMode>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
#[serde(rename = "bond-primary")]
|
||||||
|
pub bond_primary: Option<String>,
|
||||||
|
pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Interface {
|
||||||
|
pub fn new(name: String) -> Self {
|
||||||
|
Self {
|
||||||
|
name,
|
||||||
|
interface_type: NetworkInterfaceType::Unknown,
|
||||||
|
autostart: false,
|
||||||
|
active: false,
|
||||||
|
method: None,
|
||||||
|
method6: None,
|
||||||
|
cidr: None,
|
||||||
|
gateway: None,
|
||||||
|
cidr6: None,
|
||||||
|
gateway6: None,
|
||||||
|
options: Vec::new(),
|
||||||
|
options6: Vec::new(),
|
||||||
|
comments: None,
|
||||||
|
comments6: None,
|
||||||
|
mtu: None,
|
||||||
|
bridge_ports: None,
|
||||||
|
bridge_vlan_aware: None,
|
||||||
|
slaves: None,
|
||||||
|
bond_mode: None,
|
||||||
|
bond_primary: None,
|
||||||
|
bond_xmit_hash_policy: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
86
pbs-api-types/src/remote.rs
Normal file
86
pbs-api-types/src/remote.rs
Normal file
@ -0,0 +1,86 @@
|
|||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
use proxmox::api::{api, schema::*};
|
||||||
|
|
||||||
|
/// Schema: remote password or API token — 1..=1024 non-control characters.
pub const REMOTE_PASSWORD_SCHEMA: Schema = StringSchema::new("Password or auth token for remote host.")
    .format(&PASSWORD_FORMAT)
    .min_length(1)
    .max_length(1024)
    .schema();

/// Schema: same as REMOTE_PASSWORD_SCHEMA, but documents that the value is
/// persisted base64 encoded (see the `Remote::password` field).
pub const REMOTE_PASSWORD_BASE64_SCHEMA: Schema = StringSchema::new("Password or auth token for remote host (stored as base64 string).")
    .format(&PASSWORD_FORMAT)
    .min_length(1)
    .max_length(1024)
    .schema();

/// Schema: remote identifier — a safe id of 3..=32 characters.
pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();
|
||||||
|
|
||||||
|
|
||||||
|
#[api(
    properties: {
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        host: {
            schema: DNS_NAME_OR_IP_SCHEMA,
        },
        port: {
            optional: true,
            description: "The (optional) port",
            type: u16,
        },
        "auth-id": {
            type: Authid,
        },
        fingerprint: {
            optional: true,
            schema: CERT_FINGERPRINT_SHA256_SCHEMA,
        },
    },
)]
#[derive(Serialize,Deserialize,Updater)]
#[serde(rename_all = "kebab-case")]
/// Remote configuration properties.
pub struct RemoteConfig {
    // Optional single-line comment for this remote.
    #[serde(skip_serializing_if="Option::is_none")]
    pub comment: Option<String>,
    // DNS name or IP address of the remote server.
    pub host: String,
    // Optional TCP port; the default used when absent is not visible here.
    #[serde(skip_serializing_if="Option::is_none")]
    pub port: Option<u16>,
    // Serialized as "auth-id" (rename_all = "kebab-case").
    pub auth_id: Authid,
    // Optional sha256 certificate fingerprint — presumably used to verify
    // the remote's TLS certificate; confirm at the connection site.
    #[serde(skip_serializing_if="Option::is_none")]
    pub fingerprint: Option<String>,
}
|
||||||
|
|
||||||
|
#[api(
    properties: {
        name: {
            schema: REMOTE_ID_SCHEMA,
        },
        config: {
            type: RemoteConfig,
        },
        password: {
            schema: REMOTE_PASSWORD_BASE64_SCHEMA,
        },
    },
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Remote properties.
pub struct Remote {
    // Unique remote identifier (REMOTE_ID_SCHEMA).
    pub name: String,
    // Note: The stored password is base64 encoded
    #[serde(skip_serializing_if="String::is_empty")]
    #[serde(with = "proxmox::tools::serde::string_as_base64")]
    pub password: String,
    // RemoteConfig fields are flattened into this object on (de)serialization.
    #[serde(flatten)]
    pub config: RemoteConfig,
}
|
@ -10,10 +10,11 @@ use proxmox::api::{
|
|||||||
ArraySchema,
|
ArraySchema,
|
||||||
IntegerSchema,
|
IntegerSchema,
|
||||||
StringSchema,
|
StringSchema,
|
||||||
|
Updater,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::api2::types::{
|
use crate::{
|
||||||
PROXMOX_SAFE_ID_FORMAT,
|
PROXMOX_SAFE_ID_FORMAT,
|
||||||
OptionalDeviceIdentification,
|
OptionalDeviceIdentification,
|
||||||
};
|
};
|
||||||
@ -62,10 +63,11 @@ Import/Export, i.e. any media in those slots are considered to be
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
#[derive(Serialize,Deserialize)]
|
#[derive(Serialize,Deserialize,Updater)]
|
||||||
#[serde(rename_all = "kebab-case")]
|
#[serde(rename_all = "kebab-case")]
|
||||||
/// SCSI tape changer
|
/// SCSI tape changer
|
||||||
pub struct ScsiTapeChanger {
|
pub struct ScsiTapeChanger {
|
||||||
|
#[updater(skip)]
|
||||||
pub name: String,
|
pub name: String,
|
||||||
pub path: String,
|
pub path: String,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if="Option::is_none")]
|
@ -6,10 +6,10 @@ use serde::{Deserialize, Serialize};
|
|||||||
|
|
||||||
use proxmox::api::{
|
use proxmox::api::{
|
||||||
api,
|
api,
|
||||||
schema::{Schema, IntegerSchema, StringSchema},
|
schema::{Schema, IntegerSchema, StringSchema, Updater},
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::api2::types::{
|
use crate::{
|
||||||
PROXMOX_SAFE_ID_FORMAT,
|
PROXMOX_SAFE_ID_FORMAT,
|
||||||
CHANGER_NAME_SCHEMA,
|
CHANGER_NAME_SCHEMA,
|
||||||
OptionalDeviceIdentification,
|
OptionalDeviceIdentification,
|
||||||
@ -28,7 +28,7 @@ pub const LTO_DRIVE_PATH_SCHEMA: Schema = StringSchema::new(
|
|||||||
pub const CHANGER_DRIVENUM_SCHEMA: Schema = IntegerSchema::new(
|
pub const CHANGER_DRIVENUM_SCHEMA: Schema = IntegerSchema::new(
|
||||||
"Associated changer drive number (requires option changer)")
|
"Associated changer drive number (requires option changer)")
|
||||||
.minimum(0)
|
.minimum(0)
|
||||||
.maximum(8)
|
.maximum(255)
|
||||||
.default(0)
|
.default(0)
|
||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
@ -69,10 +69,11 @@ pub struct VirtualTapeDrive {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
)]
|
)]
|
||||||
#[derive(Serialize,Deserialize)]
|
#[derive(Serialize,Deserialize,Updater)]
|
||||||
#[serde(rename_all = "kebab-case")]
|
#[serde(rename_all = "kebab-case")]
|
||||||
/// Lto SCSI tape driver
|
/// Lto SCSI tape driver
|
||||||
pub struct LtoTapeDrive {
|
pub struct LtoTapeDrive {
|
||||||
|
#[updater(skip)]
|
||||||
pub name: String,
|
pub name: String,
|
||||||
pub path: String,
|
pub path: String,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if="Option::is_none")]
|
@ -1,17 +1,26 @@
|
|||||||
use ::serde::{Deserialize, Serialize};
|
use ::serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::{
|
use proxmox::{
|
||||||
api::api,
|
api::{api, schema::*},
|
||||||
tools::Uuid,
|
tools::Uuid,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::api2::types::{
|
use crate::{
|
||||||
MEDIA_UUID_SCHEMA,
|
UUID_FORMAT,
|
||||||
MEDIA_SET_UUID_SCHEMA,
|
|
||||||
MediaStatus,
|
MediaStatus,
|
||||||
MediaLocation,
|
MediaLocation,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
pub const MEDIA_SET_UUID_SCHEMA: Schema =
|
||||||
|
StringSchema::new("MediaSet Uuid (We use the all-zero Uuid to reseve an empty media for a specific pool).")
|
||||||
|
.format(&UUID_FORMAT)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const MEDIA_UUID_SCHEMA: Schema =
|
||||||
|
StringSchema::new("Media Uuid.")
|
||||||
|
.format(&UUID_FORMAT)
|
||||||
|
.schema();
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
properties: {
|
properties: {
|
||||||
"media-set-uuid": {
|
"media-set-uuid": {
|
@ -9,7 +9,7 @@ use proxmox::api::{
|
|||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::api2::types::{
|
use crate::{
|
||||||
PROXMOX_SAFE_ID_FORMAT,
|
PROXMOX_SAFE_ID_FORMAT,
|
||||||
CHANGER_NAME_SCHEMA,
|
CHANGER_NAME_SCHEMA,
|
||||||
};
|
};
|
||||||
@ -35,8 +35,8 @@ pub enum MediaLocation {
|
|||||||
proxmox::forward_deserialize_to_from_str!(MediaLocation);
|
proxmox::forward_deserialize_to_from_str!(MediaLocation);
|
||||||
proxmox::forward_serialize_to_display!(MediaLocation);
|
proxmox::forward_serialize_to_display!(MediaLocation);
|
||||||
|
|
||||||
impl MediaLocation {
|
impl proxmox::api::schema::ApiType for MediaLocation {
|
||||||
pub const API_SCHEMA: Schema = StringSchema::new(
|
const API_SCHEMA: Schema = StringSchema::new(
|
||||||
"Media location (e.g. 'offline', 'online-<changer_name>', 'vault-<vault_name>')")
|
"Media location (e.g. 'offline', 'online-<changer_name>', 'vault-<vault_name>')")
|
||||||
.format(&ApiStringFormat::VerifyFn(|text| {
|
.format(&ApiStringFormat::VerifyFn(|text| {
|
||||||
let location: MediaLocation = text.parse()?;
|
let location: MediaLocation = text.parse()?;
|
@ -4,28 +4,23 @@
|
|||||||
//! so we cannot use them directly for the API. Instead, we represent
|
//! so we cannot use them directly for the API. Instead, we represent
|
||||||
//! them as String.
|
//! them as String.
|
||||||
|
|
||||||
use anyhow::Error;
|
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use anyhow::Error;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::{
|
use proxmox::api::{
|
||||||
api,
|
api,
|
||||||
schema::{Schema, StringSchema, ApiStringFormat},
|
schema::{Schema, StringSchema, ApiStringFormat, Updater},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
use proxmox_systemd::time::{parse_calendar_event, parse_time_span, CalendarEvent, TimeSpan};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
tools::systemd::time::{
|
PROXMOX_SAFE_ID_FORMAT,
|
||||||
CalendarEvent,
|
SINGLE_LINE_COMMENT_FORMAT,
|
||||||
TimeSpan,
|
SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
parse_time_span,
|
TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
||||||
parse_calendar_event,
|
|
||||||
},
|
|
||||||
api2::types::{
|
|
||||||
PROXMOX_SAFE_ID_FORMAT,
|
|
||||||
SINGLE_LINE_COMMENT_FORMAT,
|
|
||||||
SINGLE_LINE_COMMENT_SCHEMA,
|
|
||||||
TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
pub const MEDIA_POOL_NAME_SCHEMA: Schema = StringSchema::new("Media pool name.")
|
pub const MEDIA_POOL_NAME_SCHEMA: Schema = StringSchema::new("Media pool name.")
|
||||||
@ -138,10 +133,11 @@ impl std::str::FromStr for RetentionPolicy {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
#[derive(Serialize,Deserialize)]
|
#[derive(Serialize,Deserialize,Updater)]
|
||||||
/// Media pool configuration
|
/// Media pool configuration
|
||||||
pub struct MediaPoolConfig {
|
pub struct MediaPoolConfig {
|
||||||
/// The pool name
|
/// The pool name
|
||||||
|
#[updater(skip)]
|
||||||
pub name: String,
|
pub name: String,
|
||||||
/// Media Set allocation policy
|
/// Media Set allocation policy
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if="Option::is_none")]
|
94
pbs-api-types/src/tape/mod.rs
Normal file
94
pbs-api-types/src/tape/mod.rs
Normal file
@ -0,0 +1,94 @@
|
|||||||
|
//! Types for tape backup API
|
||||||
|
|
||||||
|
mod device;
|
||||||
|
pub use device::*;
|
||||||
|
|
||||||
|
mod changer;
|
||||||
|
pub use changer::*;
|
||||||
|
|
||||||
|
mod drive;
|
||||||
|
pub use drive::*;
|
||||||
|
|
||||||
|
mod media_pool;
|
||||||
|
pub use media_pool::*;
|
||||||
|
|
||||||
|
mod media_status;
|
||||||
|
pub use media_status::*;
|
||||||
|
|
||||||
|
mod media_location;
|
||||||
|
|
||||||
|
pub use media_location::*;
|
||||||
|
|
||||||
|
mod media;
|
||||||
|
pub use media::*;
|
||||||
|
|
||||||
|
use ::serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::api::api;
|
||||||
|
use proxmox::api::schema::{Schema, StringSchema, ApiStringFormat};
|
||||||
|
use proxmox::tools::Uuid;
|
||||||
|
|
||||||
|
use proxmox::const_regex;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
FINGERPRINT_SHA256_FORMAT, BACKUP_ID_SCHEMA, BACKUP_TYPE_SCHEMA,
|
||||||
|
};
|
||||||
|
|
||||||
|
const_regex!{
|
||||||
|
pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":", SNAPSHOT_PATH_REGEX_STR!(), r"$");
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat =
|
||||||
|
ApiStringFormat::Pattern(&TAPE_RESTORE_SNAPSHOT_REGEX);
|
||||||
|
|
||||||
|
pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"Tape encryption key fingerprint (sha256)."
|
||||||
|
)
|
||||||
|
.format(&FINGERPRINT_SHA256_FORMAT)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"A snapshot in the format: 'store:type/id/time")
|
||||||
|
.format(&TAPE_RESTORE_SNAPSHOT_FORMAT)
|
||||||
|
.type_text("store:type/id/time")
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
pool: {
|
||||||
|
schema: MEDIA_POOL_NAME_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"label-text": {
|
||||||
|
schema: MEDIA_LABEL_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"media": {
|
||||||
|
schema: MEDIA_UUID_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"media-set": {
|
||||||
|
schema: MEDIA_SET_UUID_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"backup-type": {
|
||||||
|
schema: BACKUP_TYPE_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"backup-id": {
|
||||||
|
schema: BACKUP_ID_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Content list filter parameters
|
||||||
|
pub struct MediaContentListFilter {
|
||||||
|
pub pool: Option<String>,
|
||||||
|
pub label_text: Option<String>,
|
||||||
|
pub media: Option<Uuid>,
|
||||||
|
pub media_set: Option<Uuid>,
|
||||||
|
pub backup_type: Option<String>,
|
||||||
|
pub backup_id: Option<String>,
|
||||||
|
}
|
203
pbs-api-types/src/upid.rs
Normal file
203
pbs-api-types/src/upid.rs
Normal file
@ -0,0 +1,203 @@
|
|||||||
|
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||||
|
|
||||||
|
use anyhow::{bail, Error};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::api::api;
|
||||||
|
use proxmox::api::schema::{ApiStringFormat, ApiType, Schema, StringSchema, ArraySchema, ReturnType};
|
||||||
|
use proxmox::const_regex;
|
||||||
|
use proxmox::sys::linux::procfs;
|
||||||
|
|
||||||
|
use crate::Authid;
|
||||||
|
|
||||||
|
/// Unique Process/Task Identifier
|
||||||
|
///
|
||||||
|
/// We use this to uniquely identify worker task. UPIDs have a short
|
||||||
|
/// string repesentaion, which gives additional information about the
|
||||||
|
/// type of the task. for example:
|
||||||
|
/// ```text
|
||||||
|
/// UPID:{node}:{pid}:{pstart}:{task_id}:{starttime}:{worker_type}:{worker_id}:{userid}:
|
||||||
|
/// UPID:elsa:00004F37:0039E469:00000000:5CA78B83:garbage_collection::root@pam:
|
||||||
|
/// ```
|
||||||
|
/// Please note that we use tokio, so a single thread can run multiple
|
||||||
|
/// tasks.
|
||||||
|
// #[api] - manually implemented API type
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct UPID {
|
||||||
|
/// The Unix PID
|
||||||
|
pub pid: libc::pid_t,
|
||||||
|
/// The Unix process start time from `/proc/pid/stat`
|
||||||
|
pub pstart: u64,
|
||||||
|
/// The task start time (Epoch)
|
||||||
|
pub starttime: i64,
|
||||||
|
/// The task ID (inside the process/thread)
|
||||||
|
pub task_id: usize,
|
||||||
|
/// Worker type (arbitrary ASCII string)
|
||||||
|
pub worker_type: String,
|
||||||
|
/// Worker ID (arbitrary ASCII string)
|
||||||
|
pub worker_id: Option<String>,
|
||||||
|
/// The authenticated entity who started the task
|
||||||
|
pub auth_id: Authid,
|
||||||
|
/// The node name.
|
||||||
|
pub node: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
proxmox::forward_serialize_to_display!(UPID);
|
||||||
|
proxmox::forward_deserialize_to_from_str!(UPID);
|
||||||
|
|
||||||
|
const_regex! {
|
||||||
|
pub PROXMOX_UPID_REGEX = concat!(
|
||||||
|
r"^UPID:(?P<node>[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?):(?P<pid>[0-9A-Fa-f]{8}):",
|
||||||
|
r"(?P<pstart>[0-9A-Fa-f]{8,9}):(?P<task_id>[0-9A-Fa-f]{8,16}):(?P<starttime>[0-9A-Fa-f]{8}):",
|
||||||
|
r"(?P<wtype>[^:\s]+):(?P<wid>[^:\s]*):(?P<authid>[^:\s]+):$"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const PROXMOX_UPID_FORMAT: ApiStringFormat =
|
||||||
|
ApiStringFormat::Pattern(&PROXMOX_UPID_REGEX);
|
||||||
|
|
||||||
|
pub const UPID_SCHEMA: Schema = StringSchema::new("Unique Process/Task Identifier")
|
||||||
|
.min_length("UPID:N:12345678:12345678:12345678:::".len())
|
||||||
|
.max_length(128) // arbitrary
|
||||||
|
.format(&PROXMOX_UPID_FORMAT)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
impl ApiType for UPID {
|
||||||
|
const API_SCHEMA: Schema = UPID_SCHEMA;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl UPID {
|
||||||
|
/// Create a new UPID
|
||||||
|
pub fn new(
|
||||||
|
worker_type: &str,
|
||||||
|
worker_id: Option<String>,
|
||||||
|
auth_id: Authid,
|
||||||
|
) -> Result<Self, Error> {
|
||||||
|
|
||||||
|
let pid = unsafe { libc::getpid() };
|
||||||
|
|
||||||
|
let bad: &[_] = &['/', ':', ' '];
|
||||||
|
|
||||||
|
if worker_type.contains(bad) {
|
||||||
|
bail!("illegal characters in worker type '{}'", worker_type);
|
||||||
|
}
|
||||||
|
|
||||||
|
static WORKER_TASK_NEXT_ID: AtomicUsize = AtomicUsize::new(0);
|
||||||
|
|
||||||
|
let task_id = WORKER_TASK_NEXT_ID.fetch_add(1, Ordering::SeqCst);
|
||||||
|
|
||||||
|
Ok(UPID {
|
||||||
|
pid,
|
||||||
|
pstart: procfs::PidStat::read_from_pid(nix::unistd::Pid::from_raw(pid))?.starttime,
|
||||||
|
starttime: proxmox::tools::time::epoch_i64(),
|
||||||
|
task_id,
|
||||||
|
worker_type: worker_type.to_owned(),
|
||||||
|
worker_id,
|
||||||
|
auth_id,
|
||||||
|
node: proxmox::tools::nodename().to_owned(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
impl std::str::FromStr for UPID {
|
||||||
|
type Err = Error;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
if let Some(cap) = PROXMOX_UPID_REGEX.captures(s) {
|
||||||
|
|
||||||
|
let worker_id = if cap["wid"].is_empty() {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
let wid = proxmox_systemd::unescape_unit(&cap["wid"])?;
|
||||||
|
Some(wid)
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(UPID {
|
||||||
|
pid: i32::from_str_radix(&cap["pid"], 16).unwrap(),
|
||||||
|
pstart: u64::from_str_radix(&cap["pstart"], 16).unwrap(),
|
||||||
|
starttime: i64::from_str_radix(&cap["starttime"], 16).unwrap(),
|
||||||
|
task_id: usize::from_str_radix(&cap["task_id"], 16).unwrap(),
|
||||||
|
worker_type: cap["wtype"].to_string(),
|
||||||
|
worker_id,
|
||||||
|
auth_id: cap["authid"].parse()?,
|
||||||
|
node: cap["node"].to_string(),
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
bail!("unable to parse UPID '{}'", s);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::fmt::Display for UPID {
|
||||||
|
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||||
|
|
||||||
|
let wid = if let Some(ref id) = self.worker_id {
|
||||||
|
proxmox_systemd::escape_unit(id, false)
|
||||||
|
} else {
|
||||||
|
String::new()
|
||||||
|
};
|
||||||
|
|
||||||
|
// Note: pstart can be > 32bit if uptime > 497 days, so this can result in
|
||||||
|
// more that 8 characters for pstart
|
||||||
|
|
||||||
|
write!(f, "UPID:{}:{:08X}:{:08X}:{:08X}:{:08X}:{}:{}:{}:",
|
||||||
|
self.node, self.pid, self.pstart, self.task_id, self.starttime, self.worker_type, wid, self.auth_id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
pub enum TaskStateType {
|
||||||
|
/// Ok
|
||||||
|
OK,
|
||||||
|
/// Warning
|
||||||
|
Warning,
|
||||||
|
/// Error
|
||||||
|
Error,
|
||||||
|
/// Unknown
|
||||||
|
Unknown,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
upid: { schema: UPID::API_SCHEMA },
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
/// Task properties.
|
||||||
|
pub struct TaskListItem {
|
||||||
|
pub upid: String,
|
||||||
|
/// The node name where the task is running on.
|
||||||
|
pub node: String,
|
||||||
|
/// The Unix PID
|
||||||
|
pub pid: i64,
|
||||||
|
/// The task start time (Epoch)
|
||||||
|
pub pstart: u64,
|
||||||
|
/// The task start time (Epoch)
|
||||||
|
pub starttime: i64,
|
||||||
|
/// Worker type (arbitrary ASCII string)
|
||||||
|
pub worker_type: String,
|
||||||
|
/// Worker ID (arbitrary ASCII string)
|
||||||
|
pub worker_id: Option<String>,
|
||||||
|
/// The authenticated entity who started the task
|
||||||
|
pub user: Authid,
|
||||||
|
/// The task end time (Epoch)
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub endtime: Option<i64>,
|
||||||
|
/// Task end status
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub status: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType {
|
||||||
|
optional: false,
|
||||||
|
schema: &ArraySchema::new(
|
||||||
|
"A list of tasks.",
|
||||||
|
&TaskListItem::API_SCHEMA,
|
||||||
|
).schema(),
|
||||||
|
};
|
||||||
|
|
208
pbs-api-types/src/user.rs
Normal file
208
pbs-api-types/src/user.rs
Normal file
@ -0,0 +1,208 @@
|
|||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::api::api;
|
||||||
|
use proxmox::api::schema::{
|
||||||
|
BooleanSchema, IntegerSchema, Schema, StringSchema, Updater,
|
||||||
|
};
|
||||||
|
|
||||||
|
use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA};
|
||||||
|
use super::userid::{Authid, Userid, PROXMOX_TOKEN_ID_SCHEMA};
|
||||||
|
|
||||||
|
pub const ENABLE_USER_SCHEMA: Schema = BooleanSchema::new(
|
||||||
|
"Enable the account (default). You can set this to '0' to disable the account.")
|
||||||
|
.default(true)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const EXPIRE_USER_SCHEMA: Schema = IntegerSchema::new(
|
||||||
|
"Account expiration date (seconds since epoch). '0' means no expiration date.")
|
||||||
|
.default(0)
|
||||||
|
.minimum(0)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const FIRST_NAME_SCHEMA: Schema = StringSchema::new("First name.")
|
||||||
|
.format(&SINGLE_LINE_COMMENT_FORMAT)
|
||||||
|
.min_length(2)
|
||||||
|
.max_length(64)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const LAST_NAME_SCHEMA: Schema = StringSchema::new("Last name.")
|
||||||
|
.format(&SINGLE_LINE_COMMENT_FORMAT)
|
||||||
|
.min_length(2)
|
||||||
|
.max_length(64)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
|
||||||
|
.format(&SINGLE_LINE_COMMENT_FORMAT)
|
||||||
|
.min_length(2)
|
||||||
|
.max_length(64)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
userid: {
|
||||||
|
type: Userid,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
enable: {
|
||||||
|
optional: true,
|
||||||
|
schema: ENABLE_USER_SCHEMA,
|
||||||
|
},
|
||||||
|
expire: {
|
||||||
|
optional: true,
|
||||||
|
schema: EXPIRE_USER_SCHEMA,
|
||||||
|
},
|
||||||
|
firstname: {
|
||||||
|
optional: true,
|
||||||
|
schema: FIRST_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
lastname: {
|
||||||
|
schema: LAST_NAME_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
email: {
|
||||||
|
schema: EMAIL_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
tokens: {
|
||||||
|
type: Array,
|
||||||
|
optional: true,
|
||||||
|
description: "List of user's API tokens.",
|
||||||
|
items: {
|
||||||
|
type: ApiToken
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize)]
|
||||||
|
/// User properties with added list of ApiTokens
|
||||||
|
pub struct UserWithTokens {
|
||||||
|
pub userid: Userid,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comment: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub enable: Option<bool>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub expire: Option<i64>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub firstname: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub lastname: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub email: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Vec::is_empty", default)]
|
||||||
|
pub tokens: Vec<ApiToken>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
tokenid: {
|
||||||
|
schema: PROXMOX_TOKEN_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
enable: {
|
||||||
|
optional: true,
|
||||||
|
schema: ENABLE_USER_SCHEMA,
|
||||||
|
},
|
||||||
|
expire: {
|
||||||
|
optional: true,
|
||||||
|
schema: EXPIRE_USER_SCHEMA,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize)]
|
||||||
|
/// ApiToken properties.
|
||||||
|
pub struct ApiToken {
|
||||||
|
pub tokenid: Authid,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comment: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub enable: Option<bool>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub expire: Option<i64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ApiToken {
|
||||||
|
pub fn is_active(&self) -> bool {
|
||||||
|
if !self.enable.unwrap_or(true) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if let Some(expire) = self.expire {
|
||||||
|
let now = proxmox::tools::time::epoch_i64();
|
||||||
|
if expire > 0 && expire <= now {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
userid: {
|
||||||
|
type: Userid,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
enable: {
|
||||||
|
optional: true,
|
||||||
|
schema: ENABLE_USER_SCHEMA,
|
||||||
|
},
|
||||||
|
expire: {
|
||||||
|
optional: true,
|
||||||
|
schema: EXPIRE_USER_SCHEMA,
|
||||||
|
},
|
||||||
|
firstname: {
|
||||||
|
optional: true,
|
||||||
|
schema: FIRST_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
lastname: {
|
||||||
|
schema: LAST_NAME_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
email: {
|
||||||
|
schema: EMAIL_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize,Updater)]
|
||||||
|
/// User properties.
|
||||||
|
pub struct User {
|
||||||
|
#[updater(skip)]
|
||||||
|
pub userid: Userid,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comment: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub enable: Option<bool>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub expire: Option<i64>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub firstname: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub lastname: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub email: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl User {
|
||||||
|
pub fn is_active(&self) -> bool {
|
||||||
|
if !self.enable.unwrap_or(true) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if let Some(expire) = self.expire {
|
||||||
|
let now = proxmox::tools::time::epoch_i64();
|
||||||
|
if expire > 0 && expire <= now {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
true
|
||||||
|
}
|
||||||
|
}
|
@ -30,7 +30,7 @@ use lazy_static::lazy_static;
|
|||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::api;
|
use proxmox::api::api;
|
||||||
use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema};
|
use proxmox::api::schema::{ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType};
|
||||||
use proxmox::const_regex;
|
use proxmox::const_regex;
|
||||||
|
|
||||||
// we only allow a limited set of characters
|
// we only allow a limited set of characters
|
||||||
@ -38,10 +38,15 @@ use proxmox::const_regex;
|
|||||||
// colon separated lists)!
|
// colon separated lists)!
|
||||||
// slash is not allowed because it is used as pve API delimiter
|
// slash is not allowed because it is used as pve API delimiter
|
||||||
// also see "man useradd"
|
// also see "man useradd"
|
||||||
|
#[macro_export]
|
||||||
macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }
|
macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }
|
||||||
|
#[macro_export]
|
||||||
macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }
|
macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }
|
||||||
|
#[macro_export]
|
||||||
macro_rules! TOKEN_NAME_REGEX_STR { () => (PROXMOX_SAFE_ID_REGEX_STR!()) }
|
macro_rules! TOKEN_NAME_REGEX_STR { () => (PROXMOX_SAFE_ID_REGEX_STR!()) }
|
||||||
|
#[macro_export]
|
||||||
macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) }
|
macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) }
|
||||||
|
#[macro_export]
|
||||||
macro_rules! APITOKEN_ID_REGEX_STR { () => (concat!(USER_ID_REGEX_STR!() , r"!", TOKEN_NAME_REGEX_STR!())) }
|
macro_rules! APITOKEN_ID_REGEX_STR { () => (concat!(USER_ID_REGEX_STR!() , r"!", TOKEN_NAME_REGEX_STR!())) }
|
||||||
|
|
||||||
const_regex! {
|
const_regex! {
|
||||||
@ -93,7 +98,6 @@ pub const PROXMOX_AUTH_REALM_STRING_SCHEMA: StringSchema =
|
|||||||
.max_length(32);
|
.max_length(32);
|
||||||
pub const PROXMOX_AUTH_REALM_SCHEMA: Schema = PROXMOX_AUTH_REALM_STRING_SCHEMA.schema();
|
pub const PROXMOX_AUTH_REALM_SCHEMA: Schema = PROXMOX_AUTH_REALM_STRING_SCHEMA.schema();
|
||||||
|
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
type: String,
|
type: String,
|
||||||
format: &PROXMOX_USER_NAME_FORMAT,
|
format: &PROXMOX_USER_NAME_FORMAT,
|
||||||
@ -393,19 +397,21 @@ impl<'a> TryFrom<&'a str> for &'a TokennameRef {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// A complete user id consisting of a user name and a realm
|
/// A complete user id consisting of a user name and a realm
|
||||||
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
#[derive(Clone, Debug, PartialEq, Eq, Hash, UpdaterType)]
|
||||||
pub struct Userid {
|
pub struct Userid {
|
||||||
data: String,
|
data: String,
|
||||||
name_len: usize,
|
name_len: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Userid {
|
impl ApiType for Userid {
|
||||||
pub const API_SCHEMA: Schema = StringSchema::new("User ID")
|
const API_SCHEMA: Schema = StringSchema::new("User ID")
|
||||||
.format(&PROXMOX_USER_ID_FORMAT)
|
.format(&PROXMOX_USER_ID_FORMAT)
|
||||||
.min_length(3)
|
.min_length(3)
|
||||||
.max_length(64)
|
.max_length(64)
|
||||||
.schema();
|
.schema();
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Userid {
|
||||||
const fn new(data: String, name_len: usize) -> Self {
|
const fn new(data: String, name_len: usize) -> Self {
|
||||||
Self { data, name_len }
|
Self { data, name_len }
|
||||||
}
|
}
|
||||||
@ -522,19 +528,21 @@ impl PartialEq<String> for Userid {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// A complete authentication id consisting of a user id and an optional token name.
|
/// A complete authentication id consisting of a user id and an optional token name.
|
||||||
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
|
#[derive(Clone, Debug, Eq, PartialEq, Hash, UpdaterType)]
|
||||||
pub struct Authid {
|
pub struct Authid {
|
||||||
user: Userid,
|
user: Userid,
|
||||||
tokenname: Option<Tokenname>
|
tokenname: Option<Tokenname>
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Authid {
|
impl ApiType for Authid {
|
||||||
pub const API_SCHEMA: Schema = StringSchema::new("Authentication ID")
|
const API_SCHEMA: Schema = StringSchema::new("Authentication ID")
|
||||||
.format(&PROXMOX_AUTH_ID_FORMAT)
|
.format(&PROXMOX_AUTH_ID_FORMAT)
|
||||||
.min_length(3)
|
.min_length(3)
|
||||||
.max_length(64)
|
.max_length(64)
|
||||||
.schema();
|
.schema();
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Authid {
|
||||||
const fn new(user: Userid, tokenname: Option<Tokenname>) -> Self {
|
const fn new(user: Userid, tokenname: Option<Tokenname>) -> Self {
|
||||||
Self { user, tokenname }
|
Self { user, tokenname }
|
||||||
}
|
}
|
81
pbs-api-types/src/zfs.rs
Normal file
81
pbs-api-types/src/zfs.rs
Normal file
@ -0,0 +1,81 @@
|
|||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::api::{api, schema::*};
|
||||||
|
|
||||||
|
use proxmox::const_regex;
|
||||||
|
|
||||||
|
const_regex! {
|
||||||
|
pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new(
|
||||||
|
"Pool sector size exponent.")
|
||||||
|
.minimum(9)
|
||||||
|
.maximum(16)
|
||||||
|
.default(12)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const ZPOOL_NAME_SCHEMA: Schema = StringSchema::new("ZFS Pool Name")
|
||||||
|
.format(&ApiStringFormat::Pattern(&ZPOOL_NAME_REGEX))
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
#[api(default: "On")]
|
||||||
|
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
/// The ZFS compression algorithm to use.
|
||||||
|
pub enum ZfsCompressionType {
|
||||||
|
/// Gnu Zip
|
||||||
|
Gzip,
|
||||||
|
/// LZ4
|
||||||
|
Lz4,
|
||||||
|
/// LZJB
|
||||||
|
Lzjb,
|
||||||
|
/// ZLE
|
||||||
|
Zle,
|
||||||
|
/// ZStd
|
||||||
|
ZStd,
|
||||||
|
/// Enable compression using the default algorithm.
|
||||||
|
On,
|
||||||
|
/// Disable compression.
|
||||||
|
Off,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
/// The ZFS RAID level to use.
|
||||||
|
pub enum ZfsRaidLevel {
|
||||||
|
/// Single Disk
|
||||||
|
Single,
|
||||||
|
/// Mirror
|
||||||
|
Mirror,
|
||||||
|
/// Raid10
|
||||||
|
Raid10,
|
||||||
|
/// RaidZ
|
||||||
|
RaidZ,
|
||||||
|
/// RaidZ2
|
||||||
|
RaidZ2,
|
||||||
|
/// RaidZ3
|
||||||
|
RaidZ3,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// zpool list item
|
||||||
|
pub struct ZpoolListItem {
|
||||||
|
/// zpool name
|
||||||
|
pub name: String,
|
||||||
|
/// Health
|
||||||
|
pub health: String,
|
||||||
|
/// Total size
|
||||||
|
pub size: u64,
|
||||||
|
/// Used size
|
||||||
|
pub alloc: u64,
|
||||||
|
/// Free space
|
||||||
|
pub free: u64,
|
||||||
|
/// ZFS fragnentation level
|
||||||
|
pub frag: u64,
|
||||||
|
/// ZFS deduplication ratio
|
||||||
|
pub dedup: f64,
|
||||||
|
}
|
9
pbs-buildcfg/Cargo.toml
Normal file
9
pbs-buildcfg/Cargo.toml
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
[package]
|
||||||
|
name = "pbs-buildcfg"
|
||||||
|
version = "2.0.10"
|
||||||
|
authors = ["Proxmox Support Team <support@proxmox.com>"]
|
||||||
|
edition = "2018"
|
||||||
|
description = "macros used for pbs related paths such as configdir and rundir"
|
||||||
|
build = "build.rs"
|
||||||
|
|
||||||
|
[dependencies]
|
24
pbs-buildcfg/build.rs
Normal file
24
pbs-buildcfg/build.rs
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
// build.rs
|
||||||
|
use std::env;
|
||||||
|
use std::process::Command;
|
||||||
|
|
||||||
|
fn main() {
|
||||||
|
let repoid = match env::var("REPOID") {
|
||||||
|
Ok(repoid) => repoid,
|
||||||
|
Err(_) => {
|
||||||
|
match Command::new("git")
|
||||||
|
.args(&["rev-parse", "HEAD"])
|
||||||
|
.output()
|
||||||
|
{
|
||||||
|
Ok(output) => {
|
||||||
|
String::from_utf8(output.stdout).unwrap()
|
||||||
|
}
|
||||||
|
Err(err) => {
|
||||||
|
panic!("git rev-parse failed: {}", err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
println!("cargo:rustc-env=REPOID={}", repoid);
|
||||||
|
}
|
@ -1,9 +1,24 @@
|
|||||||
//! Exports configuration data from the build system
|
//! Exports configuration data from the build system
|
||||||
|
|
||||||
|
pub const PROXMOX_PKG_VERSION: &str =
|
||||||
|
concat!(
|
||||||
|
env!("CARGO_PKG_VERSION_MAJOR"),
|
||||||
|
".",
|
||||||
|
env!("CARGO_PKG_VERSION_MINOR"),
|
||||||
|
);
|
||||||
|
pub const PROXMOX_PKG_RELEASE: &str = env!("CARGO_PKG_VERSION_PATCH");
|
||||||
|
pub const PROXMOX_PKG_REPOID: &str = env!("REPOID");
|
||||||
|
|
||||||
|
|
||||||
/// The configured configuration directory
|
/// The configured configuration directory
|
||||||
pub const CONFIGDIR: &str = "/etc/proxmox-backup";
|
pub const CONFIGDIR: &str = "/etc/proxmox-backup";
|
||||||
pub const JS_DIR: &str = "/usr/share/javascript/proxmox-backup";
|
pub const JS_DIR: &str = "/usr/share/javascript/proxmox-backup";
|
||||||
|
|
||||||
|
/// Unix system user used by proxmox-backup-proxy
|
||||||
|
pub const BACKUP_USER_NAME: &str = "backup";
|
||||||
|
/// Unix system group used by proxmox-backup-proxy
|
||||||
|
pub const BACKUP_GROUP_NAME: &str = "backup";
|
||||||
|
|
||||||
#[macro_export]
|
#[macro_export]
|
||||||
macro_rules! PROXMOX_BACKUP_RUN_DIR_M { () => ("/run/proxmox-backup") }
|
macro_rules! PROXMOX_BACKUP_RUN_DIR_M { () => ("/run/proxmox-backup") }
|
||||||
|
|
||||||
@ -56,7 +71,7 @@ pub const PROXMOX_BACKUP_KERNEL_FN: &str =
|
|||||||
/// This is a simply way to get the full path for configuration files.
|
/// This is a simply way to get the full path for configuration files.
|
||||||
/// #### Example:
|
/// #### Example:
|
||||||
/// ```
|
/// ```
|
||||||
/// # #[macro_use] extern crate proxmox_backup;
|
/// use pbs_buildcfg::configdir;
|
||||||
/// let cert_path = configdir!("/proxy.pfx");
|
/// let cert_path = configdir!("/proxy.pfx");
|
||||||
/// ```
|
/// ```
|
||||||
#[macro_export]
|
#[macro_export]
|
||||||
@ -70,6 +85,6 @@ macro_rules! configdir {
|
|||||||
#[macro_export]
|
#[macro_export]
|
||||||
macro_rules! rundir {
|
macro_rules! rundir {
|
||||||
($subdir:expr) => {
|
($subdir:expr) => {
|
||||||
concat!(PROXMOX_BACKUP_RUN_DIR_M!(), $subdir)
|
concat!($crate::PROXMOX_BACKUP_RUN_DIR_M!(), $subdir)
|
||||||
};
|
};
|
||||||
}
|
}
|
40
pbs-client/Cargo.toml
Normal file
40
pbs-client/Cargo.toml
Normal file
@ -0,0 +1,40 @@
|
|||||||
|
[package]
|
||||||
|
name = "pbs-client"
|
||||||
|
version = "0.1.0"
|
||||||
|
authors = ["Wolfgang Bumiller <w.bumiller@proxmox.com>"]
|
||||||
|
edition = "2018"
|
||||||
|
description = "The main proxmox backup client crate"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
anyhow = "1.0"
|
||||||
|
bitflags = "1.2.1"
|
||||||
|
bytes = "1.0"
|
||||||
|
futures = "0.3"
|
||||||
|
h2 = { version = "0.3", features = [ "stream" ] }
|
||||||
|
http = "0.2"
|
||||||
|
hyper = { version = "0.14", features = [ "full" ] }
|
||||||
|
lazy_static = "1.4"
|
||||||
|
libc = "0.2"
|
||||||
|
nix = "0.19.1"
|
||||||
|
openssl = "0.10"
|
||||||
|
percent-encoding = "2.1"
|
||||||
|
pin-project-lite = "0.2"
|
||||||
|
regex = "1.2"
|
||||||
|
rustyline = "7"
|
||||||
|
serde_json = "1.0"
|
||||||
|
tokio = { version = "1.6", features = [ "fs", "signal" ] }
|
||||||
|
tokio-stream = "0.1.0"
|
||||||
|
tower-service = "0.3.0"
|
||||||
|
xdg = "2.2"
|
||||||
|
|
||||||
|
pathpatterns = "0.1.2"
|
||||||
|
proxmox = { version = "0.13.3", default-features = false, features = [ "cli" ] }
|
||||||
|
proxmox-fuse = "0.1.1"
|
||||||
|
proxmox-http = { version = "0.4.0", features = [ "client", "http-helpers", "websocket" ] }
|
||||||
|
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
|
||||||
|
|
||||||
|
pbs-api-types = { path = "../pbs-api-types" }
|
||||||
|
pbs-buildcfg = { path = "../pbs-buildcfg" }
|
||||||
|
pbs-datastore = { path = "../pbs-datastore" }
|
||||||
|
pbs-runtime = { path = "../pbs-runtime" }
|
||||||
|
pbs-tools = { path = "../pbs-tools" }
|
@ -9,10 +9,15 @@ use serde_json::{json, Value};
|
|||||||
|
|
||||||
use proxmox::tools::digest_to_hex;
|
use proxmox::tools::digest_to_hex;
|
||||||
|
|
||||||
use crate::{
|
use pbs_tools::crypt_config::CryptConfig;
|
||||||
tools::compute_file_csum,
|
use pbs_tools::sha::sha256;
|
||||||
backup::*,
|
use pbs_datastore::{PROXMOX_BACKUP_READER_PROTOCOL_ID_V1, BackupManifest};
|
||||||
};
|
use pbs_datastore::data_blob::DataBlob;
|
||||||
|
use pbs_datastore::data_blob_reader::DataBlobReader;
|
||||||
|
use pbs_datastore::dynamic_index::DynamicIndexReader;
|
||||||
|
use pbs_datastore::fixed_index::FixedIndexReader;
|
||||||
|
use pbs_datastore::index::IndexFile;
|
||||||
|
use pbs_datastore::manifest::MANIFEST_BLOB_NAME;
|
||||||
|
|
||||||
use super::{HttpClient, H2Client};
|
use super::{HttpClient, H2Client};
|
||||||
|
|
||||||
@ -148,7 +153,7 @@ impl BackupReader {
|
|||||||
&self,
|
&self,
|
||||||
manifest: &BackupManifest,
|
manifest: &BackupManifest,
|
||||||
name: &str,
|
name: &str,
|
||||||
) -> Result<DataBlobReader<File>, Error> {
|
) -> Result<DataBlobReader<'_, File>, Error> {
|
||||||
|
|
||||||
let mut tmpfile = std::fs::OpenOptions::new()
|
let mut tmpfile = std::fs::OpenOptions::new()
|
||||||
.write(true)
|
.write(true)
|
||||||
@ -158,7 +163,8 @@ impl BackupReader {
|
|||||||
|
|
||||||
self.download(name, &mut tmpfile).await?;
|
self.download(name, &mut tmpfile).await?;
|
||||||
|
|
||||||
let (csum, size) = compute_file_csum(&mut tmpfile)?;
|
tmpfile.seek(SeekFrom::Start(0))?;
|
||||||
|
let (csum, size) = sha256(&mut tmpfile)?;
|
||||||
manifest.verify_file(name, &csum, size)?;
|
manifest.verify_file(name, &csum, size)?;
|
||||||
|
|
||||||
tmpfile.seek(SeekFrom::Start(0))?;
|
tmpfile.seek(SeekFrom::Start(0))?;
|
@ -3,12 +3,7 @@ use std::fmt;
|
|||||||
|
|
||||||
use anyhow::{format_err, Error};
|
use anyhow::{format_err, Error};
|
||||||
|
|
||||||
use proxmox::api::schema::*;
|
use pbs_api_types::{BACKUP_REPO_URL_REGEX, IP_V6_REGEX, Authid, Userid};
|
||||||
|
|
||||||
use crate::api2::types::*;
|
|
||||||
|
|
||||||
/// API schema format definition for repository URLs
|
|
||||||
pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX);
|
|
||||||
|
|
||||||
/// Reference remote backup locations
|
/// Reference remote backup locations
|
||||||
///
|
///
|
@ -1,12 +1,12 @@
|
|||||||
use std::collections::HashSet;
|
use std::collections::HashSet;
|
||||||
|
use std::future::Future;
|
||||||
use std::os::unix::fs::OpenOptionsExt;
|
use std::os::unix::fs::OpenOptionsExt;
|
||||||
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
|
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
|
||||||
use std::sync::{Arc, Mutex};
|
use std::sync::{Arc, Mutex};
|
||||||
|
|
||||||
use anyhow::{bail, format_err, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
use futures::future::AbortHandle;
|
use futures::future::{self, AbortHandle, Either, FutureExt, TryFutureExt};
|
||||||
use futures::stream::Stream;
|
use futures::stream::{Stream, StreamExt, TryStreamExt};
|
||||||
use futures::*;
|
|
||||||
use serde_json::{json, Value};
|
use serde_json::{json, Value};
|
||||||
use tokio::io::AsyncReadExt;
|
use tokio::io::AsyncReadExt;
|
||||||
use tokio::sync::{mpsc, oneshot};
|
use tokio::sync::{mpsc, oneshot};
|
||||||
@ -14,9 +14,16 @@ use tokio_stream::wrappers::ReceiverStream;
|
|||||||
|
|
||||||
use proxmox::tools::digest_to_hex;
|
use proxmox::tools::digest_to_hex;
|
||||||
|
|
||||||
|
use pbs_tools::crypt_config::CryptConfig;
|
||||||
|
use pbs_tools::format::HumanByte;
|
||||||
|
use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1};
|
||||||
|
use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
|
||||||
|
use pbs_datastore::dynamic_index::DynamicIndexReader;
|
||||||
|
use pbs_datastore::fixed_index::FixedIndexReader;
|
||||||
|
use pbs_datastore::index::IndexFile;
|
||||||
|
use pbs_datastore::manifest::{ArchiveType, BackupManifest, MANIFEST_BLOB_NAME};
|
||||||
|
|
||||||
use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo};
|
use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo};
|
||||||
use crate::backup::*;
|
|
||||||
use crate::tools::format::HumanByte;
|
|
||||||
|
|
||||||
use super::{H2Client, HttpClient};
|
use super::{H2Client, HttpClient};
|
||||||
|
|
||||||
@ -282,7 +289,7 @@ impl BackupWriter {
|
|||||||
|
|
||||||
if let Some(manifest) = options.previous_manifest {
|
if let Some(manifest) = options.previous_manifest {
|
||||||
// try, but ignore errors
|
// try, but ignore errors
|
||||||
match archive_type(archive_name) {
|
match ArchiveType::from_path(archive_name) {
|
||||||
Ok(ArchiveType::FixedIndex) => {
|
Ok(ArchiveType::FixedIndex) => {
|
||||||
let _ = self
|
let _ = self
|
||||||
.download_previous_fixed_index(
|
.download_previous_fixed_index(
|
||||||
@ -333,7 +340,7 @@ impl BackupWriter {
|
|||||||
let archive = if self.verbose {
|
let archive = if self.verbose {
|
||||||
archive_name.to_string()
|
archive_name.to_string()
|
||||||
} else {
|
} else {
|
||||||
crate::tools::format::strip_server_file_extension(archive_name)
|
pbs_tools::format::strip_server_file_extension(archive_name)
|
||||||
};
|
};
|
||||||
if archive_name != CATALOG_NAME {
|
if archive_name != CATALOG_NAME {
|
||||||
let speed: HumanByte =
|
let speed: HumanByte =
|
||||||
@ -452,7 +459,7 @@ impl BackupWriter {
|
|||||||
.and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option<h2::client::ResponseFuture>)| {
|
.and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option<h2::client::ResponseFuture>)| {
|
||||||
match (response, merged_chunk_info) {
|
match (response, merged_chunk_info) {
|
||||||
(Some(response), MergedChunkInfo::Known(list)) => {
|
(Some(response), MergedChunkInfo::Known(list)) => {
|
||||||
future::Either::Left(
|
Either::Left(
|
||||||
response
|
response
|
||||||
.map_err(Error::from)
|
.map_err(Error::from)
|
||||||
.and_then(H2Client::h2api_response)
|
.and_then(H2Client::h2api_response)
|
||||||
@ -462,7 +469,7 @@ impl BackupWriter {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
(None, MergedChunkInfo::Known(list)) => {
|
(None, MergedChunkInfo::Known(list)) => {
|
||||||
future::Either::Right(future::ok(MergedChunkInfo::Known(list)))
|
Either::Right(future::ok(MergedChunkInfo::Known(list)))
|
||||||
}
|
}
|
||||||
_ => unreachable!(),
|
_ => unreachable!(),
|
||||||
}
|
}
|
||||||
@ -735,7 +742,7 @@ impl BackupWriter {
|
|||||||
|
|
||||||
let new_info = MergedChunkInfo::Known(vec![(offset, digest)]);
|
let new_info = MergedChunkInfo::Known(vec![(offset, digest)]);
|
||||||
|
|
||||||
future::Either::Left(h2.send_request(request, upload_data).and_then(
|
Either::Left(h2.send_request(request, upload_data).and_then(
|
||||||
move |response| async move {
|
move |response| async move {
|
||||||
upload_queue
|
upload_queue
|
||||||
.send((new_info, Some(response)))
|
.send((new_info, Some(response)))
|
||||||
@ -746,7 +753,7 @@ impl BackupWriter {
|
|||||||
},
|
},
|
||||||
))
|
))
|
||||||
} else {
|
} else {
|
||||||
future::Either::Right(async move {
|
Either::Right(async move {
|
||||||
upload_queue
|
upload_queue
|
||||||
.send((merged_chunk_info, None))
|
.send((merged_chunk_info, None))
|
||||||
.await
|
.await
|
@ -18,13 +18,14 @@ use proxmox::api::cli::{self, CliCommand, CliCommandMap, CliHelper, CommandLineI
|
|||||||
use proxmox::tools::fs::{create_path, CreateOptions};
|
use proxmox::tools::fs::{create_path, CreateOptions};
|
||||||
use pxar::{EntryKind, Metadata};
|
use pxar::{EntryKind, Metadata};
|
||||||
|
|
||||||
use crate::backup::catalog::{self, DirEntryAttribute};
|
use pbs_runtime::block_in_place;
|
||||||
use crate::pxar::fuse::{Accessor, FileEntry};
|
use pbs_datastore::catalog::{self, DirEntryAttribute};
|
||||||
use crate::pxar::Flags;
|
use pbs_tools::ops::ControlFlow;
|
||||||
use crate::tools::runtime::block_in_place;
|
|
||||||
use crate::tools::ControlFlow;
|
|
||||||
|
|
||||||
type CatalogReader = crate::backup::CatalogReader<std::fs::File>;
|
use crate::pxar::Flags;
|
||||||
|
use crate::pxar::fuse::{Accessor, FileEntry};
|
||||||
|
|
||||||
|
type CatalogReader = pbs_datastore::catalog::CatalogReader<std::fs::File>;
|
||||||
|
|
||||||
const MAX_SYMLINK_COUNT: usize = 40;
|
const MAX_SYMLINK_COUNT: usize = 40;
|
||||||
|
|
||||||
@ -78,13 +79,13 @@ pub fn catalog_shell_cli() -> CommandLineInterface {
|
|||||||
"restore-selected",
|
"restore-selected",
|
||||||
CliCommand::new(&API_METHOD_RESTORE_SELECTED_COMMAND)
|
CliCommand::new(&API_METHOD_RESTORE_SELECTED_COMMAND)
|
||||||
.arg_param(&["target"])
|
.arg_param(&["target"])
|
||||||
.completion_cb("target", crate::tools::complete_file_name),
|
.completion_cb("target", pbs_tools::fs::complete_file_name),
|
||||||
)
|
)
|
||||||
.insert(
|
.insert(
|
||||||
"restore",
|
"restore",
|
||||||
CliCommand::new(&API_METHOD_RESTORE_COMMAND)
|
CliCommand::new(&API_METHOD_RESTORE_COMMAND)
|
||||||
.arg_param(&["target"])
|
.arg_param(&["target"])
|
||||||
.completion_cb("target", crate::tools::complete_file_name),
|
.completion_cb("target", pbs_tools::fs::complete_file_name),
|
||||||
)
|
)
|
||||||
.insert(
|
.insert(
|
||||||
"find",
|
"find",
|
||||||
@ -985,7 +986,8 @@ impl Shell {
|
|||||||
.metadata()
|
.metadata()
|
||||||
.clone();
|
.clone();
|
||||||
|
|
||||||
let extractor = crate::pxar::extract::Extractor::new(rootdir, root_meta, true, Flags::DEFAULT);
|
let extractor =
|
||||||
|
crate::pxar::extract::Extractor::new(rootdir, root_meta, true, Flags::DEFAULT);
|
||||||
|
|
||||||
let mut extractor = ExtractorState::new(
|
let mut extractor = ExtractorState::new(
|
||||||
&mut self.catalog,
|
&mut self.catalog,
|
@ -6,7 +6,7 @@ use anyhow::{Error};
|
|||||||
use futures::ready;
|
use futures::ready;
|
||||||
use futures::stream::{Stream, TryStream};
|
use futures::stream::{Stream, TryStream};
|
||||||
|
|
||||||
use super::Chunker;
|
use pbs_datastore::Chunker;
|
||||||
|
|
||||||
/// Split input stream into dynamic sized chunks
|
/// Split input stream into dynamic sized chunks
|
||||||
pub struct ChunkStream<S: Unpin> {
|
pub struct ChunkStream<S: Unpin> {
|
230
pbs-client/src/dynamic_index.rs
Normal file
230
pbs-client/src/dynamic_index.rs
Normal file
@ -0,0 +1,230 @@
|
|||||||
|
use std::io::{self, Seek, SeekFrom};
|
||||||
|
use std::ops::Range;
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
use std::task::Context;
|
||||||
|
use std::pin::Pin;
|
||||||
|
|
||||||
|
use anyhow::{bail, format_err, Error};
|
||||||
|
|
||||||
|
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
|
||||||
|
|
||||||
|
use pbs_datastore::dynamic_index::DynamicIndexReader;
|
||||||
|
use pbs_datastore::read_chunk::ReadChunk;
|
||||||
|
use pbs_datastore::index::IndexFile;
|
||||||
|
use pbs_tools::lru_cache::LruCache;
|
||||||
|
|
||||||
|
struct CachedChunk {
|
||||||
|
range: Range<u64>,
|
||||||
|
data: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl CachedChunk {
|
||||||
|
/// Perform sanity checks on the range and data size:
|
||||||
|
pub fn new(range: Range<u64>, data: Vec<u8>) -> Result<Self, Error> {
|
||||||
|
if data.len() as u64 != range.end - range.start {
|
||||||
|
bail!(
|
||||||
|
"read chunk with wrong size ({} != {})",
|
||||||
|
data.len(),
|
||||||
|
range.end - range.start,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Ok(Self { range, data })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct BufferedDynamicReader<S> {
|
||||||
|
store: S,
|
||||||
|
index: DynamicIndexReader,
|
||||||
|
archive_size: u64,
|
||||||
|
read_buffer: Vec<u8>,
|
||||||
|
buffered_chunk_idx: usize,
|
||||||
|
buffered_chunk_start: u64,
|
||||||
|
read_offset: u64,
|
||||||
|
lru_cache: LruCache<usize, CachedChunk>,
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ChunkCacher<'a, S> {
|
||||||
|
store: &'a mut S,
|
||||||
|
index: &'a DynamicIndexReader,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a, S: ReadChunk> pbs_tools::lru_cache::Cacher<usize, CachedChunk> for ChunkCacher<'a, S> {
|
||||||
|
fn fetch(&mut self, index: usize) -> Result<Option<CachedChunk>, Error> {
|
||||||
|
let info = match self.index.chunk_info(index) {
|
||||||
|
Some(info) => info,
|
||||||
|
None => bail!("chunk index out of range"),
|
||||||
|
};
|
||||||
|
let range = info.range;
|
||||||
|
let data = self.store.read_chunk(&info.digest)?;
|
||||||
|
CachedChunk::new(range, data).map(Some)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S: ReadChunk> BufferedDynamicReader<S> {
|
||||||
|
pub fn new(index: DynamicIndexReader, store: S) -> Self {
|
||||||
|
let archive_size = index.index_bytes();
|
||||||
|
Self {
|
||||||
|
store,
|
||||||
|
index,
|
||||||
|
archive_size,
|
||||||
|
read_buffer: Vec::with_capacity(1024 * 1024),
|
||||||
|
buffered_chunk_idx: 0,
|
||||||
|
buffered_chunk_start: 0,
|
||||||
|
read_offset: 0,
|
||||||
|
lru_cache: LruCache::new(32),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn archive_size(&self) -> u64 {
|
||||||
|
self.archive_size
|
||||||
|
}
|
||||||
|
|
||||||
|
fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
|
||||||
|
//let (start, end, data) = self.lru_cache.access(
|
||||||
|
let cached_chunk = self.lru_cache.access(
|
||||||
|
idx,
|
||||||
|
&mut ChunkCacher {
|
||||||
|
store: &mut self.store,
|
||||||
|
index: &self.index,
|
||||||
|
},
|
||||||
|
)?.ok_or_else(|| format_err!("chunk not found by cacher"))?;
|
||||||
|
|
||||||
|
// fixme: avoid copy
|
||||||
|
self.read_buffer.clear();
|
||||||
|
self.read_buffer.extend_from_slice(&cached_chunk.data);
|
||||||
|
|
||||||
|
self.buffered_chunk_idx = idx;
|
||||||
|
|
||||||
|
self.buffered_chunk_start = cached_chunk.range.start;
|
||||||
|
//println!("BUFFER {} {}", self.buffered_chunk_start, end);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S: ReadChunk> pbs_tools::io::BufferedRead for BufferedDynamicReader<S> {
|
||||||
|
fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error> {
|
||||||
|
if offset == self.archive_size {
|
||||||
|
return Ok(&self.read_buffer[0..0]);
|
||||||
|
}
|
||||||
|
|
||||||
|
let buffer_len = self.read_buffer.len();
|
||||||
|
let index = &self.index;
|
||||||
|
|
||||||
|
// optimization for sequential read
|
||||||
|
if buffer_len > 0
|
||||||
|
&& ((self.buffered_chunk_idx + 1) < index.index().len())
|
||||||
|
&& (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
|
||||||
|
{
|
||||||
|
let next_idx = self.buffered_chunk_idx + 1;
|
||||||
|
let next_end = index.chunk_end(next_idx);
|
||||||
|
if offset < next_end {
|
||||||
|
self.buffer_chunk(next_idx)?;
|
||||||
|
let buffer_offset = (offset - self.buffered_chunk_start) as usize;
|
||||||
|
return Ok(&self.read_buffer[buffer_offset..]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (buffer_len == 0)
|
||||||
|
|| (offset < self.buffered_chunk_start)
|
||||||
|
|| (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
|
||||||
|
{
|
||||||
|
let end_idx = index.index().len() - 1;
|
||||||
|
let end = index.chunk_end(end_idx);
|
||||||
|
let idx = index.binary_search(0, 0, end_idx, end, offset)?;
|
||||||
|
self.buffer_chunk(idx)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let buffer_offset = (offset - self.buffered_chunk_start) as usize;
|
||||||
|
Ok(&self.read_buffer[buffer_offset..])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S: ReadChunk> std::io::Read for BufferedDynamicReader<S> {
|
||||||
|
fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
|
||||||
|
use pbs_tools::io::BufferedRead;
|
||||||
|
use std::io::{Error, ErrorKind};
|
||||||
|
|
||||||
|
let data = match self.buffered_read(self.read_offset) {
|
||||||
|
Ok(v) => v,
|
||||||
|
Err(err) => return Err(Error::new(ErrorKind::Other, err.to_string())),
|
||||||
|
};
|
||||||
|
|
||||||
|
let n = if data.len() > buf.len() {
|
||||||
|
buf.len()
|
||||||
|
} else {
|
||||||
|
data.len()
|
||||||
|
};
|
||||||
|
|
||||||
|
buf[0..n].copy_from_slice(&data[0..n]);
|
||||||
|
|
||||||
|
self.read_offset += n as u64;
|
||||||
|
|
||||||
|
Ok(n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S: ReadChunk> std::io::Seek for BufferedDynamicReader<S> {
|
||||||
|
fn seek(&mut self, pos: SeekFrom) -> Result<u64, std::io::Error> {
|
||||||
|
let new_offset = match pos {
|
||||||
|
SeekFrom::Start(start_offset) => start_offset as i64,
|
||||||
|
SeekFrom::End(end_offset) => (self.archive_size as i64) + end_offset,
|
||||||
|
SeekFrom::Current(offset) => (self.read_offset as i64) + offset,
|
||||||
|
};
|
||||||
|
|
||||||
|
use std::io::{Error, ErrorKind};
|
||||||
|
if (new_offset < 0) || (new_offset > (self.archive_size as i64)) {
|
||||||
|
return Err(Error::new(
|
||||||
|
ErrorKind::Other,
|
||||||
|
format!(
|
||||||
|
"seek is out of range {} ([0..{}])",
|
||||||
|
new_offset, self.archive_size
|
||||||
|
),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
self.read_offset = new_offset as u64;
|
||||||
|
|
||||||
|
Ok(self.read_offset)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
|
||||||
|
/// async use!
|
||||||
|
///
|
||||||
|
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
|
||||||
|
/// so that we can properly access it from multiple threads simultaneously while not issuing
|
||||||
|
/// duplicate simultaneous reads over http.
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct LocalDynamicReadAt<R: ReadChunk> {
|
||||||
|
inner: Arc<Mutex<BufferedDynamicReader<R>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<R: ReadChunk> LocalDynamicReadAt<R> {
|
||||||
|
pub fn new(inner: BufferedDynamicReader<R>) -> Self {
|
||||||
|
Self {
|
||||||
|
inner: Arc::new(Mutex::new(inner)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<R: ReadChunk> ReadAt for LocalDynamicReadAt<R> {
|
||||||
|
fn start_read_at<'a>(
|
||||||
|
self: Pin<&'a Self>,
|
||||||
|
_cx: &mut Context,
|
||||||
|
buf: &'a mut [u8],
|
||||||
|
offset: u64,
|
||||||
|
) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
|
||||||
|
use std::io::Read;
|
||||||
|
MaybeReady::Ready(tokio::task::block_in_place(move || {
|
||||||
|
let mut reader = self.inner.lock().unwrap();
|
||||||
|
reader.seek(SeekFrom::Start(offset))?;
|
||||||
|
Ok(reader.read(buf)?)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn poll_complete<'a>(
|
||||||
|
self: Pin<&'a Self>,
|
||||||
|
_op: ReadAtOperation<'a>,
|
||||||
|
) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
|
||||||
|
panic!("LocalDynamicReadAt::start_read_at returned Pending");
|
||||||
|
}
|
||||||
|
}
|
@ -23,14 +23,14 @@ use proxmox::{
|
|||||||
use proxmox_http::client::HttpsConnector;
|
use proxmox_http::client::HttpsConnector;
|
||||||
use proxmox_http::uri::build_authority;
|
use proxmox_http::uri::build_authority;
|
||||||
|
|
||||||
|
use pbs_api_types::{Authid, Userid};
|
||||||
|
use pbs_tools::broadcast_future::BroadcastFuture;
|
||||||
|
use pbs_tools::json::json_object_to_query;
|
||||||
|
use pbs_tools::ticket;
|
||||||
|
use pbs_tools::percent_encoding::DEFAULT_ENCODE_SET;
|
||||||
|
|
||||||
use super::pipe_to_stream::PipeToSendStream;
|
use super::pipe_to_stream::PipeToSendStream;
|
||||||
use crate::api2::types::{Authid, Userid};
|
use super::PROXMOX_BACKUP_TCP_KEEPALIVE_TIME;
|
||||||
use crate::tools::{
|
|
||||||
self,
|
|
||||||
BroadcastFuture,
|
|
||||||
DEFAULT_ENCODE_SET,
|
|
||||||
PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
|
|
||||||
};
|
|
||||||
|
|
||||||
/// Timeout used for several HTTP operations that are expected to finish quickly but may block in
|
/// Timeout used for several HTTP operations that are expected to finish quickly but may block in
|
||||||
/// certain error conditions. Keep it generous, to avoid false-positive under high load.
|
/// certain error conditions. Keep it generous, to avoid false-positive under high load.
|
||||||
@ -236,7 +236,7 @@ fn store_ticket_info(prefix: &str, server: &str, username: &str, ticket: &str, t
|
|||||||
|
|
||||||
let mut new_data = json!({});
|
let mut new_data = json!({});
|
||||||
|
|
||||||
let ticket_lifetime = tools::ticket::TICKET_LIFETIME - 60;
|
let ticket_lifetime = ticket::TICKET_LIFETIME - 60;
|
||||||
|
|
||||||
let empty = serde_json::map::Map::new();
|
let empty = serde_json::map::Map::new();
|
||||||
for (server, info) in data.as_object().unwrap_or(&empty) {
|
for (server, info) in data.as_object().unwrap_or(&empty) {
|
||||||
@ -262,7 +262,7 @@ fn load_ticket_info(prefix: &str, server: &str, userid: &Userid) -> Option<(Stri
|
|||||||
let path = base.place_runtime_file("tickets").ok()?;
|
let path = base.place_runtime_file("tickets").ok()?;
|
||||||
let data = file_get_json(&path, None).ok()?;
|
let data = file_get_json(&path, None).ok()?;
|
||||||
let now = proxmox::tools::time::epoch_i64();
|
let now = proxmox::tools::time::epoch_i64();
|
||||||
let ticket_lifetime = tools::ticket::TICKET_LIFETIME - 60;
|
let ticket_lifetime = ticket::TICKET_LIFETIME - 60;
|
||||||
let uinfo = data[server][userid.as_str()].as_object()?;
|
let uinfo = data[server][userid.as_str()].as_object()?;
|
||||||
let timestamp = uinfo["timestamp"].as_i64()?;
|
let timestamp = uinfo["timestamp"].as_i64()?;
|
||||||
let age = now - timestamp;
|
let age = now - timestamp;
|
||||||
@ -640,7 +640,7 @@ impl HttpClient {
|
|||||||
) -> Result<Value, Error> {
|
) -> Result<Value, Error> {
|
||||||
|
|
||||||
let query = match data {
|
let query = match data {
|
||||||
Some(data) => Some(tools::json_object_to_query(data)?),
|
Some(data) => Some(json_object_to_query(data)?),
|
||||||
None => None,
|
None => None,
|
||||||
};
|
};
|
||||||
let url = build_uri(&self.server, self.port, path, query)?;
|
let url = build_uri(&self.server, self.port, path, query)?;
|
||||||
@ -788,7 +788,7 @@ impl HttpClient {
|
|||||||
.body(Body::from(data.to_string()))?;
|
.body(Body::from(data.to_string()))?;
|
||||||
Ok(request)
|
Ok(request)
|
||||||
} else {
|
} else {
|
||||||
let query = tools::json_object_to_query(data)?;
|
let query = json_object_to_query(data)?;
|
||||||
let url = build_uri(server, port, path, Some(query))?;
|
let url = build_uri(server, port, path, Some(query))?;
|
||||||
let request = Request::builder()
|
let request = Request::builder()
|
||||||
.method(method)
|
.method(method)
|
||||||
@ -991,7 +991,7 @@ impl H2Client {
|
|||||||
let content_type = content_type.unwrap_or("application/x-www-form-urlencoded");
|
let content_type = content_type.unwrap_or("application/x-www-form-urlencoded");
|
||||||
let query = match param {
|
let query = match param {
|
||||||
Some(param) => {
|
Some(param) => {
|
||||||
let query = tools::json_object_to_query(param)?;
|
let query = json_object_to_query(param)?;
|
||||||
// We detected problem with hyper around 6000 characters - so we try to keep on the safe side
|
// We detected problem with hyper around 6000 characters - so we try to keep on the safe side
|
||||||
if query.len() > 4096 {
|
if query.len() > 4096 {
|
||||||
bail!("h2 query data too large ({} bytes) - please encode data inside body", query.len());
|
bail!("h2 query data too large ({} bytes) - please encode data inside body", query.len());
|
@ -5,13 +5,15 @@
|
|||||||
|
|
||||||
use anyhow::Error;
|
use anyhow::Error;
|
||||||
|
|
||||||
use crate::{
|
use pbs_api_types::{Authid, Userid};
|
||||||
api2::types::{Userid, Authid},
|
use pbs_tools::ticket::Ticket;
|
||||||
tools::ticket::Ticket,
|
use pbs_tools::cert::CertInfo;
|
||||||
auth_helpers::private_auth_key,
|
use pbs_tools::auth::private_auth_key;
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
|
pub mod catalog_shell;
|
||||||
|
pub mod dynamic_index;
|
||||||
|
pub mod pxar;
|
||||||
|
pub mod tools;
|
||||||
|
|
||||||
mod merge_known_chunks;
|
mod merge_known_chunks;
|
||||||
pub mod pipe_to_stream;
|
pub mod pipe_to_stream;
|
||||||
@ -43,7 +45,10 @@ pub use backup_repo::*;
|
|||||||
mod backup_specification;
|
mod backup_specification;
|
||||||
pub use backup_specification::*;
|
pub use backup_specification::*;
|
||||||
|
|
||||||
pub mod pull;
|
mod chunk_stream;
|
||||||
|
pub use chunk_stream::{ChunkStream, FixedChunkStream};
|
||||||
|
|
||||||
|
pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120;
|
||||||
|
|
||||||
/// Connect to localhost:8007 as root@pam
|
/// Connect to localhost:8007 as root@pam
|
||||||
///
|
///
|
||||||
@ -55,7 +60,7 @@ pub fn connect_to_localhost() -> Result<HttpClient, Error> {
|
|||||||
let client = if uid.is_root() {
|
let client = if uid.is_root() {
|
||||||
let ticket = Ticket::new("PBS", Userid::root_userid())?
|
let ticket = Ticket::new("PBS", Userid::root_userid())?
|
||||||
.sign(private_auth_key(), None)?;
|
.sign(private_auth_key(), None)?;
|
||||||
let fingerprint = crate::tools::cert::CertInfo::new()?.fingerprint()?;
|
let fingerprint = CertInfo::new()?.fingerprint()?;
|
||||||
let options = HttpClientOptions::new_non_interactive(ticket, Some(fingerprint));
|
let options = HttpClientOptions::new_non_interactive(ticket, Some(fingerprint));
|
||||||
|
|
||||||
HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)?
|
HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)?
|
@ -1,11 +1,11 @@
|
|||||||
use std::pin::Pin;
|
use std::pin::Pin;
|
||||||
use std::task::{Context, Poll};
|
use std::task::{Context, Poll};
|
||||||
|
|
||||||
use anyhow::{Error};
|
use anyhow::Error;
|
||||||
use futures::*;
|
use futures::{ready, Stream};
|
||||||
use pin_project::pin_project;
|
use pin_project_lite::pin_project;
|
||||||
|
|
||||||
use crate::backup::ChunkInfo;
|
use pbs_datastore::data_blob::ChunkInfo;
|
||||||
|
|
||||||
pub enum MergedChunkInfo {
|
pub enum MergedChunkInfo {
|
||||||
Known(Vec<(u64, [u8; 32])>),
|
Known(Vec<(u64, [u8; 32])>),
|
||||||
@ -16,11 +16,12 @@ pub trait MergeKnownChunks: Sized {
|
|||||||
fn merge_known_chunks(self) -> MergeKnownChunksQueue<Self>;
|
fn merge_known_chunks(self) -> MergeKnownChunksQueue<Self>;
|
||||||
}
|
}
|
||||||
|
|
||||||
#[pin_project]
|
pin_project! {
|
||||||
pub struct MergeKnownChunksQueue<S> {
|
pub struct MergeKnownChunksQueue<S> {
|
||||||
#[pin]
|
#[pin]
|
||||||
input: S,
|
input: S,
|
||||||
buffer: Option<MergedChunkInfo>,
|
buffer: Option<MergedChunkInfo>,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<S> MergeKnownChunks for S
|
impl<S> MergeKnownChunks for S
|
@ -5,8 +5,8 @@
|
|||||||
use std::pin::Pin;
|
use std::pin::Pin;
|
||||||
use std::task::{Context, Poll};
|
use std::task::{Context, Poll};
|
||||||
|
|
||||||
use bytes::Bytes;
|
|
||||||
use anyhow::{format_err, Error};
|
use anyhow::{format_err, Error};
|
||||||
|
use bytes::Bytes;
|
||||||
use futures::{ready, Future};
|
use futures::{ready, Future};
|
||||||
use h2::SendStream;
|
use h2::SendStream;
|
||||||
|
|
@ -23,12 +23,15 @@ use proxmox::c_str;
|
|||||||
use proxmox::sys::error::SysError;
|
use proxmox::sys::error::SysError;
|
||||||
use proxmox::tools::fd::RawFdNum;
|
use proxmox::tools::fd::RawFdNum;
|
||||||
use proxmox::tools::vec;
|
use proxmox::tools::vec;
|
||||||
|
use proxmox::tools::fd::Fd;
|
||||||
|
|
||||||
|
use pbs_datastore::catalog::BackupCatalogWriter;
|
||||||
|
use pbs_tools::{acl, fs, xattr};
|
||||||
|
use pbs_tools::str::strip_ascii_whitespace;
|
||||||
|
|
||||||
use crate::pxar::catalog::BackupCatalogWriter;
|
|
||||||
use crate::pxar::metadata::errno_is_unsupported;
|
use crate::pxar::metadata::errno_is_unsupported;
|
||||||
use crate::pxar::Flags;
|
use crate::pxar::Flags;
|
||||||
use crate::pxar::tools::assert_single_path_component;
|
use crate::pxar::tools::assert_single_path_component;
|
||||||
use crate::tools::{acl, fs, xattr, Fd};
|
|
||||||
|
|
||||||
/// Pxar options for creating a pxar archive/stream
|
/// Pxar options for creating a pxar archive/stream
|
||||||
#[derive(Default, Clone)]
|
#[derive(Default, Clone)]
|
||||||
@ -360,7 +363,7 @@ impl Archiver {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let line = crate::tools::strip_ascii_whitespace(&line);
|
let line = strip_ascii_whitespace(&line);
|
||||||
|
|
||||||
if line.is_empty() || line[0] == b'#' {
|
if line.is_empty() || line[0] == b'#' {
|
||||||
continue;
|
continue;
|
@ -27,12 +27,12 @@ use proxmox::tools::{
|
|||||||
io::{sparse_copy, sparse_copy_async},
|
io::{sparse_copy, sparse_copy_async},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
use pbs_tools::zip::{ZipEncoder, ZipEntry};
|
||||||
|
|
||||||
use crate::pxar::dir_stack::PxarDirStack;
|
use crate::pxar::dir_stack::PxarDirStack;
|
||||||
use crate::pxar::metadata;
|
use crate::pxar::metadata;
|
||||||
use crate::pxar::Flags;
|
use crate::pxar::Flags;
|
||||||
|
|
||||||
use crate::tools::zip::{ZipEncoder, ZipEntry};
|
|
||||||
|
|
||||||
pub struct PxarExtractOptions<'a> {
|
pub struct PxarExtractOptions<'a> {
|
||||||
pub match_list: &'a[MatchEntry],
|
pub match_list: &'a[MatchEntry],
|
||||||
pub extract_match_default: bool,
|
pub extract_match_default: bool,
|
||||||
@ -215,7 +215,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Common state for file extraction.
|
/// Common state for file extraction.
|
||||||
pub(crate) struct Extractor {
|
pub struct Extractor {
|
||||||
feature_flags: Flags,
|
feature_flags: Flags,
|
||||||
allow_existing_dirs: bool,
|
allow_existing_dirs: bool,
|
||||||
dir_stack: PxarDirStack,
|
dir_stack: PxarDirStack,
|
@ -26,7 +26,7 @@ use pxar::accessor::{self, EntryRangeInfo, ReadAt};
|
|||||||
use proxmox_fuse::requests::{self, FuseRequest};
|
use proxmox_fuse::requests::{self, FuseRequest};
|
||||||
use proxmox_fuse::{EntryParam, Fuse, ReplyBufState, Request, ROOT_ID};
|
use proxmox_fuse::{EntryParam, Fuse, ReplyBufState, Request, ROOT_ID};
|
||||||
|
|
||||||
use crate::tools::xattr;
|
use pbs_tools::xattr;
|
||||||
|
|
||||||
/// We mark inodes for regular files this way so we know how to access them.
|
/// We mark inodes for regular files this way so we know how to access them.
|
||||||
const NON_DIRECTORY_INODE: u64 = 1u64 << 63;
|
const NON_DIRECTORY_INODE: u64 = 1u64 << 63;
|
@ -13,9 +13,10 @@ use proxmox::c_result;
|
|||||||
use proxmox::sys::error::SysError;
|
use proxmox::sys::error::SysError;
|
||||||
use proxmox::tools::fd::RawFdNum;
|
use proxmox::tools::fd::RawFdNum;
|
||||||
|
|
||||||
|
use pbs_tools::{acl, fs, xattr};
|
||||||
|
|
||||||
use crate::pxar::tools::perms_from_metadata;
|
use crate::pxar::tools::perms_from_metadata;
|
||||||
use crate::pxar::Flags;
|
use crate::pxar::Flags;
|
||||||
use crate::tools::{acl, fs, xattr};
|
|
||||||
|
|
||||||
//
|
//
|
||||||
// utility functions
|
// utility functions
|
@ -47,12 +47,11 @@
|
|||||||
//! (user, group, acl, ...) because this is already defined by the
|
//! (user, group, acl, ...) because this is already defined by the
|
||||||
//! linked `ENTRY`.
|
//! linked `ENTRY`.
|
||||||
|
|
||||||
pub mod catalog;
|
|
||||||
pub(crate) mod create;
|
pub(crate) mod create;
|
||||||
pub(crate) mod dir_stack;
|
pub(crate) mod dir_stack;
|
||||||
pub(crate) mod extract;
|
pub(crate) mod extract;
|
||||||
pub(crate) mod metadata;
|
|
||||||
pub mod fuse;
|
pub mod fuse;
|
||||||
|
pub(crate) mod metadata;
|
||||||
pub(crate) mod tools;
|
pub(crate) mod tools;
|
||||||
|
|
||||||
mod flags;
|
mod flags;
|
@ -12,11 +12,9 @@ use nix::dir::Dir;
|
|||||||
use nix::fcntl::OFlag;
|
use nix::fcntl::OFlag;
|
||||||
use nix::sys::stat::Mode;
|
use nix::sys::stat::Mode;
|
||||||
|
|
||||||
use crate::backup::CatalogWriter;
|
use pbs_datastore::catalog::CatalogWriter;
|
||||||
use crate::tools::{
|
use pbs_tools::sync::StdChannelWriter;
|
||||||
StdChannelWriter,
|
use pbs_tools::tokio::TokioWriterAdapter;
|
||||||
TokioWriterAdapter,
|
|
||||||
};
|
|
||||||
|
|
||||||
/// Stream implementation to encode and upload .pxar archives.
|
/// Stream implementation to encode and upload .pxar archives.
|
||||||
///
|
///
|
||||||
@ -113,7 +111,7 @@ impl Stream for PxarBackupStream {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
match crate::tools::runtime::block_in_place(|| self.rx.as_ref().unwrap().recv()) {
|
match pbs_runtime::block_in_place(|| self.rx.as_ref().unwrap().recv()) {
|
||||||
Ok(data) => Poll::Ready(Some(data)),
|
Ok(data) => Poll::Ready(Some(data)),
|
||||||
Err(_) => {
|
Err(_) => {
|
||||||
let error = self.error.lock().unwrap();
|
let error = self.error.lock().unwrap();
|
@ -5,9 +5,14 @@ use std::sync::{Arc, Mutex};
|
|||||||
|
|
||||||
use anyhow::{bail, Error};
|
use anyhow::{bail, Error};
|
||||||
|
|
||||||
|
use pbs_tools::crypt_config::CryptConfig;
|
||||||
|
use pbs_api_types::CryptMode;
|
||||||
|
use pbs_datastore::data_blob::DataBlob;
|
||||||
|
use pbs_datastore::read_chunk::ReadChunk;
|
||||||
|
use pbs_datastore::read_chunk::AsyncReadChunk;
|
||||||
|
use pbs_runtime::block_on;
|
||||||
|
|
||||||
use super::BackupReader;
|
use super::BackupReader;
|
||||||
use crate::backup::{AsyncReadChunk, CryptConfig, CryptMode, DataBlob, ReadChunk};
|
|
||||||
use crate::tools::runtime::block_on;
|
|
||||||
|
|
||||||
/// Read chunks from remote host using ``BackupReader``
|
/// Read chunks from remote host using ``BackupReader``
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
@ -7,15 +7,9 @@ use futures::*;
|
|||||||
|
|
||||||
use proxmox::api::cli::format_and_print_result;
|
use proxmox::api::cli::format_and_print_result;
|
||||||
|
|
||||||
use super::HttpClient;
|
use pbs_tools::percent_encoding::percent_encode_component;
|
||||||
use crate::{
|
|
||||||
server::{
|
|
||||||
worker_is_active_local,
|
|
||||||
UPID,
|
|
||||||
},
|
|
||||||
tools,
|
|
||||||
};
|
|
||||||
|
|
||||||
|
use super::HttpClient;
|
||||||
|
|
||||||
/// Display task log on console
|
/// Display task log on console
|
||||||
///
|
///
|
||||||
@ -54,13 +48,13 @@ pub async fn display_task_log(
|
|||||||
|
|
||||||
let abort = abort_count.load(Ordering::Relaxed);
|
let abort = abort_count.load(Ordering::Relaxed);
|
||||||
if abort > 0 {
|
if abort > 0 {
|
||||||
let path = format!("api2/json/nodes/localhost/tasks/{}", tools::percent_encode_component(upid_str));
|
let path = format!("api2/json/nodes/localhost/tasks/{}", percent_encode_component(upid_str));
|
||||||
let _ = client.delete(&path, None).await?;
|
let _ = client.delete(&path, None).await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
let param = json!({ "start": start, "limit": limit, "test-status": true });
|
let param = json!({ "start": start, "limit": limit, "test-status": true });
|
||||||
|
|
||||||
let path = format!("api2/json/nodes/localhost/tasks/{}/log", tools::percent_encode_component(upid_str));
|
let path = format!("api2/json/nodes/localhost/tasks/{}/log", percent_encode_component(upid_str));
|
||||||
let result = client.get(&path, Some(param)).await?;
|
let result = client.get(&path, Some(param)).await?;
|
||||||
|
|
||||||
let active = result["active"].as_bool().unwrap();
|
let active = result["active"].as_bool().unwrap();
|
||||||
@ -121,23 +115,3 @@ pub async fn view_task_result(
|
|||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Wait for a locally spanned worker task
|
|
||||||
///
|
|
||||||
/// Note: local workers should print logs to stdout, so there is no
|
|
||||||
/// need to fetch/display logs. We just wait for the worker to finish.
|
|
||||||
pub async fn wait_for_local_worker(upid_str: &str) -> Result<(), Error> {
|
|
||||||
|
|
||||||
let upid: UPID = upid_str.parse()?;
|
|
||||||
|
|
||||||
let sleep_duration = core::time::Duration::new(0, 100_000_000);
|
|
||||||
|
|
||||||
loop {
|
|
||||||
if worker_is_active_local(&upid) {
|
|
||||||
tokio::time::sleep(sleep_duration).await;
|
|
||||||
} else {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
@ -10,7 +10,7 @@ use proxmox::api::schema::*;
|
|||||||
use proxmox::sys::linux::tty;
|
use proxmox::sys::linux::tty;
|
||||||
use proxmox::tools::fs::file_get_contents;
|
use proxmox::tools::fs::file_get_contents;
|
||||||
|
|
||||||
use proxmox_backup::backup::CryptMode;
|
use pbs_api_types::CryptMode;
|
||||||
|
|
||||||
pub const DEFAULT_ENCRYPTION_KEY_FILE_NAME: &str = "encryption-key.json";
|
pub const DEFAULT_ENCRYPTION_KEY_FILE_NAME: &str = "encryption-key.json";
|
||||||
pub const DEFAULT_MASTER_PUBKEY_FILE_NAME: &str = "master-public.pem";
|
pub const DEFAULT_MASTER_PUBKEY_FILE_NAME: &str = "master-public.pem";
|
||||||
@ -343,13 +343,8 @@ pub(crate) unsafe fn set_test_default_master_pubkey(value: Result<Option<Vec<u8>
|
|||||||
pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
|
pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
|
||||||
// fixme: implement other input methods
|
// fixme: implement other input methods
|
||||||
|
|
||||||
use std::env::VarError::*;
|
if let Some(password) = super::get_secret_from_env("PBS_ENCRYPTION_PASSWORD")? {
|
||||||
match std::env::var("PBS_ENCRYPTION_PASSWORD") {
|
return Ok(password.as_bytes().to_vec());
|
||||||
Ok(p) => return Ok(p.as_bytes().to_vec()),
|
|
||||||
Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
|
|
||||||
Err(NotPresent) => {
|
|
||||||
// Try another method
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we're on a TTY, query the user for a password
|
// If we're on a TTY, query the user for a password
|
||||||
@ -360,6 +355,20 @@ pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
|
|||||||
bail!("no password input mechanism available");
|
bail!("no password input mechanism available");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
fn create_testdir(name: &str) -> Result<String, Error> {
|
||||||
|
// FIXME:
|
||||||
|
//let mut testdir: PathBuf = format!("{}/testout", env!("CARGO_TARGET_TMPDIR")).into();
|
||||||
|
let mut testdir: PathBuf = "./target/testout".to_string().into();
|
||||||
|
testdir.push(std::module_path!());
|
||||||
|
testdir.push(name);
|
||||||
|
|
||||||
|
let _ = std::fs::remove_dir_all(&testdir);
|
||||||
|
let _ = std::fs::create_dir_all(&testdir);
|
||||||
|
|
||||||
|
Ok(testdir.to_str().unwrap().to_string())
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
// WARNING: there must only be one test for crypto_parameters as the default key handling is not
|
// WARNING: there must only be one test for crypto_parameters as the default key handling is not
|
||||||
// safe w.r.t. concurrency
|
// safe w.r.t. concurrency
|
||||||
@ -373,9 +382,11 @@ fn test_crypto_parameters_handling() -> Result<(), Error> {
|
|||||||
let some_master_key = vec![3;1];
|
let some_master_key = vec![3;1];
|
||||||
let default_master_key = vec![4;1];
|
let default_master_key = vec![4;1];
|
||||||
|
|
||||||
let keypath = "./target/testout/keyfile.test";
|
let testdir = create_testdir("key_source")?;
|
||||||
let master_keypath = "./target/testout/masterkeyfile.test";
|
|
||||||
let invalid_keypath = "./target/testout/invalid_keyfile.test";
|
let keypath = format!("{}/keyfile.test", testdir);
|
||||||
|
let master_keypath = format!("{}/masterkeyfile.test", testdir);
|
||||||
|
let invalid_keypath = format!("{}/invalid_keyfile.test", testdir);
|
||||||
|
|
||||||
let no_key_res = CryptoParams {
|
let no_key_res = CryptoParams {
|
||||||
enc_key: None,
|
enc_key: None,
|
@ -1,5 +1,10 @@
|
|||||||
//! Shared tools useful for common CLI clients.
|
//! Shared tools useful for common CLI clients.
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
use std::fs::File;
|
||||||
|
use std::os::unix::io::FromRawFd;
|
||||||
|
use std::env::VarError::{NotUnicode, NotPresent};
|
||||||
|
use std::io::{BufReader, BufRead};
|
||||||
|
use std::process::Command;
|
||||||
|
|
||||||
use anyhow::{bail, format_err, Context, Error};
|
use anyhow::{bail, format_err, Context, Error};
|
||||||
use serde_json::{json, Value};
|
use serde_json::{json, Value};
|
||||||
@ -7,15 +12,15 @@ use xdg::BaseDirectories;
|
|||||||
|
|
||||||
use proxmox::{
|
use proxmox::{
|
||||||
api::schema::*,
|
api::schema::*,
|
||||||
|
api::cli::shellword_split,
|
||||||
tools::fs::file_get_json,
|
tools::fs::file_get_json,
|
||||||
};
|
};
|
||||||
|
|
||||||
use proxmox_backup::api2::access::user::UserWithTokens;
|
use pbs_api_types::{BACKUP_REPO_URL, Authid, UserWithTokens};
|
||||||
use proxmox_backup::api2::types::*;
|
use pbs_datastore::BackupDir;
|
||||||
use proxmox_backup::backup::BackupDir;
|
use pbs_tools::json::json_object_to_query;
|
||||||
use proxmox_backup::buildcfg;
|
|
||||||
use proxmox_backup::client::*;
|
use crate::{BackupRepository, HttpClient, HttpClientOptions};
|
||||||
use proxmox_backup::tools;
|
|
||||||
|
|
||||||
pub mod key_source;
|
pub mod key_source;
|
||||||
|
|
||||||
@ -33,6 +38,80 @@ pub const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new("Chunk size in KB. Must
|
|||||||
.default(4096)
|
.default(4096)
|
||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
|
/// Helper to read a secret through a environment variable (ENV).
|
||||||
|
///
|
||||||
|
/// Tries the following variable names in order and returns the value
|
||||||
|
/// it will resolve for the first defined one:
|
||||||
|
///
|
||||||
|
/// BASE_NAME => use value from ENV(BASE_NAME) directly as secret
|
||||||
|
/// BASE_NAME_FD => read the secret from the specified file descriptor
|
||||||
|
/// BASE_NAME_FILE => read the secret from the specified file name
|
||||||
|
/// BASE_NAME_CMD => read the secret from specified command first line of output on stdout
|
||||||
|
///
|
||||||
|
/// Only return the first line of data (without CRLF).
|
||||||
|
pub fn get_secret_from_env(base_name: &str) -> Result<Option<String>, Error> {
|
||||||
|
|
||||||
|
let firstline = |data: String| -> String {
|
||||||
|
match data.lines().next() {
|
||||||
|
Some(line) => line.to_string(),
|
||||||
|
None => String::new(),
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let firstline_file = |file: &mut File| -> Result<String, Error> {
|
||||||
|
let reader = BufReader::new(file);
|
||||||
|
match reader.lines().next() {
|
||||||
|
Some(Ok(line)) => Ok(line),
|
||||||
|
Some(Err(err)) => Err(err.into()),
|
||||||
|
None => Ok(String::new()),
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
match std::env::var(base_name) {
|
||||||
|
Ok(p) => return Ok(Some(firstline(p))),
|
||||||
|
Err(NotUnicode(_)) => bail!(format!("{} contains bad characters", base_name)),
|
||||||
|
Err(NotPresent) => {},
|
||||||
|
};
|
||||||
|
|
||||||
|
let env_name = format!("{}_FD", base_name);
|
||||||
|
match std::env::var(&env_name) {
|
||||||
|
Ok(fd_str) => {
|
||||||
|
let fd: i32 = fd_str.parse()
|
||||||
|
.map_err(|err| format_err!("unable to parse file descriptor in ENV({}): {}", env_name, err))?;
|
||||||
|
let mut file = unsafe { File::from_raw_fd(fd) };
|
||||||
|
return Ok(Some(firstline_file(&mut file)?));
|
||||||
|
}
|
||||||
|
Err(NotUnicode(_)) => bail!(format!("{} contains bad characters", env_name)),
|
||||||
|
Err(NotPresent) => {},
|
||||||
|
}
|
||||||
|
|
||||||
|
let env_name = format!("{}_FILE", base_name);
|
||||||
|
match std::env::var(&env_name) {
|
||||||
|
Ok(filename) => {
|
||||||
|
let mut file = std::fs::File::open(filename)
|
||||||
|
.map_err(|err| format_err!("unable to open file in ENV({}): {}", env_name, err))?;
|
||||||
|
return Ok(Some(firstline_file(&mut file)?));
|
||||||
|
}
|
||||||
|
Err(NotUnicode(_)) => bail!(format!("{} contains bad characters", env_name)),
|
||||||
|
Err(NotPresent) => {},
|
||||||
|
}
|
||||||
|
|
||||||
|
let env_name = format!("{}_CMD", base_name);
|
||||||
|
match std::env::var(&env_name) {
|
||||||
|
Ok(ref command) => {
|
||||||
|
let args = shellword_split(command)?;
|
||||||
|
let mut command = Command::new(&args[0]);
|
||||||
|
command.args(&args[1..]);
|
||||||
|
let output = pbs_tools::run_command(command, None)?;
|
||||||
|
return Ok(Some(firstline(output)));
|
||||||
|
}
|
||||||
|
Err(NotUnicode(_)) => bail!(format!("{} contains bad characters", env_name)),
|
||||||
|
Err(NotPresent) => {},
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
|
|
||||||
pub fn get_default_repository() -> Option<String> {
|
pub fn get_default_repository() -> Option<String> {
|
||||||
std::env::var("PBS_REPOSITORY").ok()
|
std::env::var("PBS_REPOSITORY").ok()
|
||||||
}
|
}
|
||||||
@ -65,13 +144,7 @@ pub fn connect(repo: &BackupRepository) -> Result<HttpClient, Error> {
|
|||||||
fn connect_do(server: &str, port: u16, auth_id: &Authid) -> Result<HttpClient, Error> {
|
fn connect_do(server: &str, port: u16, auth_id: &Authid) -> Result<HttpClient, Error> {
|
||||||
let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
|
let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
|
||||||
|
|
||||||
use std::env::VarError::*;
|
let password = get_secret_from_env(ENV_VAR_PBS_PASSWORD)?;
|
||||||
let password = match std::env::var(ENV_VAR_PBS_PASSWORD) {
|
|
||||||
Ok(p) => Some(p),
|
|
||||||
Err(NotUnicode(_)) => bail!(format!("{} contains bad characters", ENV_VAR_PBS_PASSWORD)),
|
|
||||||
Err(NotPresent) => None,
|
|
||||||
};
|
|
||||||
|
|
||||||
let options = HttpClientOptions::new_interactive(password, fingerprint);
|
let options = HttpClientOptions::new_interactive(password, fingerprint);
|
||||||
|
|
||||||
HttpClient::new(server, port, auth_id, options)
|
HttpClient::new(server, port, auth_id, options)
|
||||||
@ -81,7 +154,7 @@ fn connect_do(server: &str, port: u16, auth_id: &Authid) -> Result<HttpClient, E
|
|||||||
pub async fn try_get(repo: &BackupRepository, url: &str) -> Value {
|
pub async fn try_get(repo: &BackupRepository, url: &str) -> Value {
|
||||||
|
|
||||||
let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
|
let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
|
||||||
let password = std::env::var(ENV_VAR_PBS_PASSWORD).ok();
|
let password = get_secret_from_env(ENV_VAR_PBS_PASSWORD).unwrap_or(None);
|
||||||
|
|
||||||
// ticket cache, but no questions asked
|
// ticket cache, but no questions asked
|
||||||
let options = HttpClientOptions::new_interactive(password, fingerprint)
|
let options = HttpClientOptions::new_interactive(password, fingerprint)
|
||||||
@ -106,7 +179,7 @@ pub async fn try_get(repo: &BackupRepository, url: &str) -> Value {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
pub fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
||||||
proxmox_backup::tools::runtime::main(async { complete_backup_group_do(param).await })
|
pbs_runtime::main(async { complete_backup_group_do(param).await })
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {
|
pub async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {
|
||||||
@ -136,7 +209,7 @@ pub async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<St
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
pub fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
||||||
proxmox_backup::tools::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
|
pbs_runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
pub async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
||||||
@ -155,7 +228,7 @@ pub async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, St
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
pub fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
||||||
proxmox_backup::tools::runtime::main(async { complete_backup_snapshot_do(param).await })
|
pbs_runtime::main(async { complete_backup_snapshot_do(param).await })
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {
|
pub async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {
|
||||||
@ -187,7 +260,7 @@ pub async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
pub fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
||||||
proxmox_backup::tools::runtime::main(async { complete_server_file_name_do(param).await })
|
pbs_runtime::main(async { complete_server_file_name_do(param).await })
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {
|
pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {
|
||||||
@ -209,7 +282,7 @@ pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Ve
|
|||||||
_ => return result,
|
_ => return result,
|
||||||
};
|
};
|
||||||
|
|
||||||
let query = tools::json_object_to_query(json!({
|
let query = json_object_to_query(json!({
|
||||||
"backup-type": snapshot.group().backup_type(),
|
"backup-type": snapshot.group().backup_type(),
|
||||||
"backup-id": snapshot.group().backup_id(),
|
"backup-id": snapshot.group().backup_id(),
|
||||||
"backup-time": snapshot.backup_time(),
|
"backup-time": snapshot.backup_time(),
|
||||||
@ -233,7 +306,7 @@ pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Ve
|
|||||||
pub fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
pub fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
||||||
complete_server_file_name(arg, param)
|
complete_server_file_name(arg, param)
|
||||||
.iter()
|
.iter()
|
||||||
.map(|v| tools::format::strip_server_file_extension(&v))
|
.map(|v| pbs_tools::format::strip_server_file_extension(&v))
|
||||||
.collect()
|
.collect()
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -242,7 +315,7 @@ pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) ->
|
|||||||
.iter()
|
.iter()
|
||||||
.filter_map(|name| {
|
.filter_map(|name| {
|
||||||
if name.ends_with(".pxar.didx") {
|
if name.ends_with(".pxar.didx") {
|
||||||
Some(tools::format::strip_server_file_extension(name))
|
Some(pbs_tools::format::strip_server_file_extension(name))
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
@ -255,7 +328,7 @@ pub fn complete_img_archive_name(arg: &str, param: &HashMap<String, String>) ->
|
|||||||
.iter()
|
.iter()
|
||||||
.filter_map(|name| {
|
.filter_map(|name| {
|
||||||
if name.ends_with(".img.fidx") {
|
if name.ends_with(".img.fidx") {
|
||||||
Some(tools::format::strip_server_file_extension(name))
|
Some(pbs_tools::format::strip_server_file_extension(name))
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
@ -278,7 +351,7 @@ pub fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn complete_auth_id(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
pub fn complete_auth_id(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
||||||
proxmox_backup::tools::runtime::main(async { complete_auth_id_do(param).await })
|
pbs_runtime::main(async { complete_auth_id_do(param).await })
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn complete_auth_id_do(param: &HashMap<String, String>) -> Vec<String> {
|
pub async fn complete_auth_id_do(param: &HashMap<String, String>) -> Vec<String> {
|
||||||
@ -340,7 +413,7 @@ pub fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec
|
|||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
let files = tools::complete_file_name(data[1], param);
|
let files = pbs_tools::fs::complete_file_name(data[1], param);
|
||||||
|
|
||||||
for file in files {
|
for file in files {
|
||||||
result.push(format!("{}:{}", data[0], file));
|
result.push(format!("{}:{}", data[0], file));
|
||||||
@ -373,15 +446,3 @@ pub fn place_xdg_file(
|
|||||||
.and_then(|base| base.place_config_file(file_name).map_err(Error::from))
|
.and_then(|base| base.place_config_file(file_name).map_err(Error::from))
|
||||||
.with_context(|| format!("failed to place {} in xdg home", description))
|
.with_context(|| format!("failed to place {} in xdg home", description))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a runtime dir owned by the current user.
|
|
||||||
/// Note that XDG_RUNTIME_DIR is not always available, especially for non-login users like
|
|
||||||
/// "www-data", so we use a custom one in /run/proxmox-backup/<uid> instead.
|
|
||||||
pub fn get_user_run_dir() -> Result<std::path::PathBuf, Error> {
|
|
||||||
let uid = nix::unistd::Uid::current();
|
|
||||||
let mut path: std::path::PathBuf = buildcfg::PROXMOX_BACKUP_RUN_DIR.into();
|
|
||||||
path.push(uid.to_string());
|
|
||||||
tools::create_run_dir()?;
|
|
||||||
std::fs::create_dir_all(&path)?;
|
|
||||||
Ok(path)
|
|
||||||
}
|
|
@ -1,21 +1,18 @@
|
|||||||
|
use std::pin::Pin;
|
||||||
|
use std::task::{Context, Poll};
|
||||||
|
|
||||||
use anyhow::{bail, format_err, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
use futures::*;
|
use futures::*;
|
||||||
|
|
||||||
use core::task::Context;
|
|
||||||
use std::pin::Pin;
|
|
||||||
use std::task::Poll;
|
|
||||||
|
|
||||||
use http::Uri;
|
use http::Uri;
|
||||||
use http::{Request, Response};
|
use http::{Request, Response};
|
||||||
use hyper::client::connect::{Connected, Connection};
|
use hyper::client::connect::{Connected, Connection};
|
||||||
use hyper::client::Client;
|
use hyper::client::Client;
|
||||||
use hyper::Body;
|
use hyper::Body;
|
||||||
use pin_project::pin_project;
|
use pin_project_lite::pin_project;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf};
|
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf};
|
||||||
use tokio::net::UnixStream;
|
use tokio::net::UnixStream;
|
||||||
|
|
||||||
use crate::tools;
|
|
||||||
use proxmox::api::error::HttpError;
|
use proxmox::api::error::HttpError;
|
||||||
|
|
||||||
pub const DEFAULT_VSOCK_PORT: u16 = 807;
|
pub const DEFAULT_VSOCK_PORT: u16 = 807;
|
||||||
@ -23,11 +20,12 @@ pub const DEFAULT_VSOCK_PORT: u16 = 807;
|
|||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
struct VsockConnector;
|
struct VsockConnector;
|
||||||
|
|
||||||
#[pin_project]
|
pin_project! {
|
||||||
/// Wrapper around UnixStream so we can implement hyper::client::connect::Connection
|
/// Wrapper around UnixStream so we can implement hyper::client::connect::Connection
|
||||||
struct UnixConnection {
|
struct UnixConnection {
|
||||||
#[pin]
|
#[pin]
|
||||||
stream: UnixStream,
|
stream: UnixStream,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl tower_service::Service<Uri> for VsockConnector {
|
impl tower_service::Service<Uri> for VsockConnector {
|
||||||
@ -242,7 +240,7 @@ impl VsockClient {
|
|||||||
let request = builder.body(Body::from(data.to_string()))?;
|
let request = builder.body(Body::from(data.to_string()))?;
|
||||||
return Ok(request);
|
return Ok(request);
|
||||||
} else {
|
} else {
|
||||||
let query = tools::json_object_to_query(data)?;
|
let query = pbs_tools::json::json_object_to_query(data)?;
|
||||||
let url: Uri =
|
let url: Uri =
|
||||||
format!("vsock://{}:{}/{}?{}", self.cid, self.port, path, query).parse()?;
|
format!("vsock://{}:{}/{}?{}", self.cid, self.port, path, query).parse()?;
|
||||||
let builder = make_builder("application/x-www-form-urlencoded", &url);
|
let builder = make_builder("application/x-www-form-urlencoded", &url);
|
23
pbs-config/Cargo.toml
Normal file
23
pbs-config/Cargo.toml
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
[package]
|
||||||
|
name = "pbs-config"
|
||||||
|
version = "0.1.0"
|
||||||
|
authors = ["Proxmox Support Team <support@proxmox.com>"]
|
||||||
|
edition = "2018"
|
||||||
|
description = "Configuration file management for PBS"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
libc = "0.2"
|
||||||
|
anyhow = "1.0"
|
||||||
|
lazy_static = "1.4"
|
||||||
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
|
serde_json = "1.0"
|
||||||
|
openssl = "0.10"
|
||||||
|
nix = "0.19.1"
|
||||||
|
regex = "1.2"
|
||||||
|
once_cell = "1.3.1"
|
||||||
|
|
||||||
|
proxmox = { version = "0.13.3", default-features = false, features = [ "cli" ] }
|
||||||
|
|
||||||
|
pbs-api-types = { path = "../pbs-api-types" }
|
||||||
|
pbs-buildcfg = { path = "../pbs-buildcfg" }
|
||||||
|
pbs-tools = { path = "../pbs-tools" }
|
@ -8,228 +8,11 @@ use anyhow::{bail, Error};
|
|||||||
|
|
||||||
use lazy_static::lazy_static;
|
use lazy_static::lazy_static;
|
||||||
|
|
||||||
use ::serde::{Deserialize, Serialize};
|
use proxmox::api::schema::{Schema, StringSchema, ApiStringFormat, ApiType};
|
||||||
use serde::de::{value, IntoDeserializer};
|
|
||||||
|
|
||||||
use proxmox::api::{api, schema::*};
|
use pbs_api_types::{Authid, Userid, Role, ROLE_NAME_NO_ACCESS};
|
||||||
use proxmox::constnamedbitmap;
|
|
||||||
use proxmox::tools::{fs::replace_file, fs::CreateOptions};
|
|
||||||
|
|
||||||
use crate::api2::types::{Authid, Userid};
|
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
|
||||||
|
|
||||||
// define Privilege bitfield
|
|
||||||
|
|
||||||
constnamedbitmap! {
|
|
||||||
/// Contains a list of privilege name to privilege value mappings.
|
|
||||||
///
|
|
||||||
/// The names are used when displaying/persisting privileges anywhere, the values are used to
|
|
||||||
/// allow easy matching of privileges as bitflags.
|
|
||||||
PRIVILEGES: u64 => {
|
|
||||||
/// Sys.Audit allows knowing about the system and its status
|
|
||||||
PRIV_SYS_AUDIT("Sys.Audit");
|
|
||||||
/// Sys.Modify allows modifying system-level configuration
|
|
||||||
PRIV_SYS_MODIFY("Sys.Modify");
|
|
||||||
/// Sys.Modify allows to poweroff/reboot/.. the system
|
|
||||||
PRIV_SYS_POWER_MANAGEMENT("Sys.PowerManagement");
|
|
||||||
|
|
||||||
/// Datastore.Audit allows knowing about a datastore,
|
|
||||||
/// including reading the configuration entry and listing its contents
|
|
||||||
PRIV_DATASTORE_AUDIT("Datastore.Audit");
|
|
||||||
/// Datastore.Allocate allows creating or deleting datastores
|
|
||||||
PRIV_DATASTORE_ALLOCATE("Datastore.Allocate");
|
|
||||||
/// Datastore.Modify allows modifying a datastore and its contents
|
|
||||||
PRIV_DATASTORE_MODIFY("Datastore.Modify");
|
|
||||||
/// Datastore.Read allows reading arbitrary backup contents
|
|
||||||
PRIV_DATASTORE_READ("Datastore.Read");
|
|
||||||
/// Allows verifying a datastore
|
|
||||||
PRIV_DATASTORE_VERIFY("Datastore.Verify");
|
|
||||||
|
|
||||||
/// Datastore.Backup allows Datastore.Read|Verify and creating new snapshots,
|
|
||||||
/// but also requires backup ownership
|
|
||||||
PRIV_DATASTORE_BACKUP("Datastore.Backup");
|
|
||||||
/// Datastore.Prune allows deleting snapshots,
|
|
||||||
/// but also requires backup ownership
|
|
||||||
PRIV_DATASTORE_PRUNE("Datastore.Prune");
|
|
||||||
|
|
||||||
/// Permissions.Modify allows modifying ACLs
|
|
||||||
PRIV_PERMISSIONS_MODIFY("Permissions.Modify");
|
|
||||||
|
|
||||||
/// Remote.Audit allows reading remote.cfg and sync.cfg entries
|
|
||||||
PRIV_REMOTE_AUDIT("Remote.Audit");
|
|
||||||
/// Remote.Modify allows modifying remote.cfg
|
|
||||||
PRIV_REMOTE_MODIFY("Remote.Modify");
|
|
||||||
/// Remote.Read allows reading data from a configured `Remote`
|
|
||||||
PRIV_REMOTE_READ("Remote.Read");
|
|
||||||
|
|
||||||
/// Sys.Console allows access to the system's console
|
|
||||||
PRIV_SYS_CONSOLE("Sys.Console");
|
|
||||||
|
|
||||||
/// Tape.Audit allows reading tape backup configuration and status
|
|
||||||
PRIV_TAPE_AUDIT("Tape.Audit");
|
|
||||||
/// Tape.Modify allows modifying tape backup configuration
|
|
||||||
PRIV_TAPE_MODIFY("Tape.Modify");
|
|
||||||
/// Tape.Write allows writing tape media
|
|
||||||
PRIV_TAPE_WRITE("Tape.Write");
|
|
||||||
/// Tape.Read allows reading tape backup configuration and media contents
|
|
||||||
PRIV_TAPE_READ("Tape.Read");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Admin always has all privileges. It can do everything except a few actions
|
|
||||||
/// which are limited to the 'root@pam` superuser
|
|
||||||
pub const ROLE_ADMIN: u64 = std::u64::MAX;
|
|
||||||
|
|
||||||
/// NoAccess can be used to remove privileges from specific (sub-)paths
|
|
||||||
pub const ROLE_NO_ACCESS: u64 = 0;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Audit can view configuration and status information, but not modify it.
|
|
||||||
pub const ROLE_AUDIT: u64 = 0
|
|
||||||
| PRIV_SYS_AUDIT
|
|
||||||
| PRIV_DATASTORE_AUDIT;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Datastore.Admin can do anything on the datastore.
|
|
||||||
pub const ROLE_DATASTORE_ADMIN: u64 = 0
|
|
||||||
| PRIV_DATASTORE_AUDIT
|
|
||||||
| PRIV_DATASTORE_MODIFY
|
|
||||||
| PRIV_DATASTORE_READ
|
|
||||||
| PRIV_DATASTORE_VERIFY
|
|
||||||
| PRIV_DATASTORE_BACKUP
|
|
||||||
| PRIV_DATASTORE_PRUNE;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Datastore.Reader can read/verify datastore content and do restore
|
|
||||||
pub const ROLE_DATASTORE_READER: u64 = 0
|
|
||||||
| PRIV_DATASTORE_AUDIT
|
|
||||||
| PRIV_DATASTORE_VERIFY
|
|
||||||
| PRIV_DATASTORE_READ;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Datastore.Backup can do backup and restore, but no prune.
|
|
||||||
pub const ROLE_DATASTORE_BACKUP: u64 = 0
|
|
||||||
| PRIV_DATASTORE_BACKUP;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Datastore.PowerUser can do backup, restore, and prune.
|
|
||||||
pub const ROLE_DATASTORE_POWERUSER: u64 = 0
|
|
||||||
| PRIV_DATASTORE_PRUNE
|
|
||||||
| PRIV_DATASTORE_BACKUP;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Datastore.Audit can audit the datastore.
|
|
||||||
pub const ROLE_DATASTORE_AUDIT: u64 = 0
|
|
||||||
| PRIV_DATASTORE_AUDIT;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Remote.Audit can audit the remote
|
|
||||||
pub const ROLE_REMOTE_AUDIT: u64 = 0
|
|
||||||
| PRIV_REMOTE_AUDIT;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Remote.Admin can do anything on the remote.
|
|
||||||
pub const ROLE_REMOTE_ADMIN: u64 = 0
|
|
||||||
| PRIV_REMOTE_AUDIT
|
|
||||||
| PRIV_REMOTE_MODIFY
|
|
||||||
| PRIV_REMOTE_READ;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Remote.SyncOperator can do read and prune on the remote.
|
|
||||||
pub const ROLE_REMOTE_SYNC_OPERATOR: u64 = 0
|
|
||||||
| PRIV_REMOTE_AUDIT
|
|
||||||
| PRIV_REMOTE_READ;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Tape.Audit can audit the tape backup configuration and media content
|
|
||||||
pub const ROLE_TAPE_AUDIT: u64 = 0
|
|
||||||
| PRIV_TAPE_AUDIT;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Tape.Admin can do anything on the tape backup
|
|
||||||
pub const ROLE_TAPE_ADMIN: u64 = 0
|
|
||||||
| PRIV_TAPE_AUDIT
|
|
||||||
| PRIV_TAPE_MODIFY
|
|
||||||
| PRIV_TAPE_READ
|
|
||||||
| PRIV_TAPE_WRITE;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Tape.Operator can do tape backup and restore (but no configuration changes)
|
|
||||||
pub const ROLE_TAPE_OPERATOR: u64 = 0
|
|
||||||
| PRIV_TAPE_AUDIT
|
|
||||||
| PRIV_TAPE_READ
|
|
||||||
| PRIV_TAPE_WRITE;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Tape.Reader can do read and inspect tape content
|
|
||||||
pub const ROLE_TAPE_READER: u64 = 0
|
|
||||||
| PRIV_TAPE_AUDIT
|
|
||||||
| PRIV_TAPE_READ;
|
|
||||||
|
|
||||||
/// NoAccess can be used to remove privileges from specific (sub-)paths
|
|
||||||
pub const ROLE_NAME_NO_ACCESS: &str = "NoAccess";
|
|
||||||
|
|
||||||
#[api(
|
|
||||||
type_text: "<role>",
|
|
||||||
)]
|
|
||||||
#[repr(u64)]
|
|
||||||
#[derive(Serialize, Deserialize)]
|
|
||||||
/// Enum representing roles via their [PRIVILEGES] combination.
|
|
||||||
///
|
|
||||||
/// Since privileges are implemented as bitflags, each unique combination of privileges maps to a
|
|
||||||
/// single, unique `u64` value that is used in this enum definition.
|
|
||||||
pub enum Role {
|
|
||||||
/// Administrator
|
|
||||||
Admin = ROLE_ADMIN,
|
|
||||||
/// Auditor
|
|
||||||
Audit = ROLE_AUDIT,
|
|
||||||
/// Disable Access
|
|
||||||
NoAccess = ROLE_NO_ACCESS,
|
|
||||||
/// Datastore Administrator
|
|
||||||
DatastoreAdmin = ROLE_DATASTORE_ADMIN,
|
|
||||||
/// Datastore Reader (inspect datastore content and do restores)
|
|
||||||
DatastoreReader = ROLE_DATASTORE_READER,
|
|
||||||
/// Datastore Backup (backup and restore owned backups)
|
|
||||||
DatastoreBackup = ROLE_DATASTORE_BACKUP,
|
|
||||||
/// Datastore PowerUser (backup, restore and prune owned backup)
|
|
||||||
DatastorePowerUser = ROLE_DATASTORE_POWERUSER,
|
|
||||||
/// Datastore Auditor
|
|
||||||
DatastoreAudit = ROLE_DATASTORE_AUDIT,
|
|
||||||
/// Remote Auditor
|
|
||||||
RemoteAudit = ROLE_REMOTE_AUDIT,
|
|
||||||
/// Remote Administrator
|
|
||||||
RemoteAdmin = ROLE_REMOTE_ADMIN,
|
|
||||||
/// Syncronisation Opertator
|
|
||||||
RemoteSyncOperator = ROLE_REMOTE_SYNC_OPERATOR,
|
|
||||||
/// Tape Auditor
|
|
||||||
TapeAudit = ROLE_TAPE_AUDIT,
|
|
||||||
/// Tape Administrator
|
|
||||||
TapeAdmin = ROLE_TAPE_ADMIN,
|
|
||||||
/// Tape Operator
|
|
||||||
TapeOperator = ROLE_TAPE_OPERATOR,
|
|
||||||
/// Tape Reader
|
|
||||||
TapeReader = ROLE_TAPE_READER,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FromStr for Role {
|
|
||||||
type Err = value::Error;
|
|
||||||
|
|
||||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
|
||||||
Self::deserialize(s.into_deserializer())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
lazy_static! {
|
lazy_static! {
|
||||||
/// Map of pre-defined [Roles](Role) to their associated [privileges](PRIVILEGES) combination and
|
/// Map of pre-defined [Roles](Role) to their associated [privileges](PRIVILEGES) combination and
|
||||||
@ -251,7 +34,7 @@ lazy_static! {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn split_acl_path(path: &str) -> Vec<&str> {
|
pub fn split_acl_path(path: &str) -> Vec<&str> {
|
||||||
let items = path.split('/');
|
let items = path.split('/');
|
||||||
|
|
||||||
let mut components = vec![];
|
let mut components = vec![];
|
||||||
@ -283,11 +66,17 @@ pub fn check_acl_path(path: &str) -> Result<(), Error> {
|
|||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
match components[1] {
|
match components[1] {
|
||||||
"acl" | "users" => {
|
"acl" | "users" | "domains" => {
|
||||||
if components_len == 2 {
|
if components_len == 2 {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// /access/openid/{endpoint}
|
||||||
|
"openid" => {
|
||||||
|
if components_len <= 3 {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
}
|
||||||
_ => {}
|
_ => {}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -386,8 +175,8 @@ pub struct AclTree {
|
|||||||
/// Node representing ACLs for a certain ACL path.
|
/// Node representing ACLs for a certain ACL path.
|
||||||
#[derive(Default)]
|
#[derive(Default)]
|
||||||
pub struct AclTreeNode {
|
pub struct AclTreeNode {
|
||||||
/// [User](crate::config::user::User) or
|
/// [User](pbs_api_types::User) or
|
||||||
/// [Token](crate::config::user::ApiToken) ACLs for this node.
|
/// [Token](pbs_api_types::ApiToken) ACLs for this node.
|
||||||
pub users: HashMap<Authid, HashMap<String, bool>>,
|
pub users: HashMap<Authid, HashMap<String, bool>>,
|
||||||
/// `Group` ACLs for this node (not yet implemented)
|
/// `Group` ACLs for this node (not yet implemented)
|
||||||
pub groups: HashMap<String, HashMap<String, bool>>,
|
pub groups: HashMap<String, HashMap<String, bool>>,
|
||||||
@ -406,9 +195,9 @@ impl AclTreeNode {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Returns applicable [Role] and their propagation status for a given
|
/// Returns applicable [Role] and their propagation status for a given
|
||||||
/// [Authid](crate::api2::types::Authid).
|
/// [Authid](pbs_api_types::Authid).
|
||||||
///
|
///
|
||||||
/// If the `Authid` is a [User](crate::config::user::User) that has no specific `Roles` configured on this node,
|
/// If the `Authid` is a [User](pbs_api_types::User) that has no specific `Roles` configured on this node,
|
||||||
/// applicable `Group` roles will be returned instead.
|
/// applicable `Group` roles will be returned instead.
|
||||||
///
|
///
|
||||||
/// If `leaf` is `false`, only those roles where the propagate flag in the ACL is set to `true`
|
/// If `leaf` is `false`, only those roles where the propagate flag in the ACL is set to `true`
|
||||||
@ -783,8 +572,8 @@ impl AclTree {
|
|||||||
Ok((tree, digest))
|
Ok((tree, digest))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
/// This is used for testing
|
||||||
pub(crate) fn from_raw(raw: &str) -> Result<Self, Error> {
|
pub fn from_raw(raw: &str) -> Result<Self, Error> {
|
||||||
let mut tree = Self::new();
|
let mut tree = Self::new();
|
||||||
for (linenr, line) in raw.lines().enumerate() {
|
for (linenr, line) in raw.lines().enumerate() {
|
||||||
let line = line.trim();
|
let line = line.trim();
|
||||||
@ -837,6 +626,11 @@ pub const ACL_CFG_FILENAME: &str = "/etc/proxmox-backup/acl.cfg";
|
|||||||
/// Path used to lock the [AclTree] when modifying.
|
/// Path used to lock the [AclTree] when modifying.
|
||||||
pub const ACL_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.acl.lck";
|
pub const ACL_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.acl.lck";
|
||||||
|
|
||||||
|
/// Get exclusive lock
|
||||||
|
pub fn lock_config() -> Result<BackupLockGuard, Error> {
|
||||||
|
open_backup_lockfile(ACL_CFG_LOCKFILE, None, true)
|
||||||
|
}
|
||||||
|
|
||||||
/// Reads the [AclTree] from the [default path](ACL_CFG_FILENAME).
|
/// Reads the [AclTree] from the [default path](ACL_CFG_FILENAME).
|
||||||
pub fn config() -> Result<(AclTree, [u8; 32]), Error> {
|
pub fn config() -> Result<(AclTree, [u8; 32]), Error> {
|
||||||
let path = PathBuf::from(ACL_CFG_FILENAME);
|
let path = PathBuf::from(ACL_CFG_FILENAME);
|
||||||
@ -903,18 +697,7 @@ pub fn save_config(acl: &AclTree) -> Result<(), Error> {
|
|||||||
|
|
||||||
acl.write_config(&mut raw)?;
|
acl.write_config(&mut raw)?;
|
||||||
|
|
||||||
let backup_user = crate::backup::backup_user()?;
|
replace_backup_config(ACL_CFG_FILENAME, &raw)
|
||||||
let mode = nix::sys::stat::Mode::from_bits_truncate(0o0640);
|
|
||||||
// set the correct owner/group/permissions while saving file
|
|
||||||
// owner(rw) = root, group(r)= backup
|
|
||||||
let options = CreateOptions::new()
|
|
||||||
.perm(mode)
|
|
||||||
.owner(nix::unistd::ROOT)
|
|
||||||
.group(backup_user.gid);
|
|
||||||
|
|
||||||
replace_file(ACL_CFG_FILENAME, &raw, options)?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
@ -922,7 +705,7 @@ mod test {
|
|||||||
use super::AclTree;
|
use super::AclTree;
|
||||||
use anyhow::Error;
|
use anyhow::Error;
|
||||||
|
|
||||||
use crate::api2::types::Authid;
|
use pbs_api_types::Authid;
|
||||||
|
|
||||||
fn check_roles(tree: &AclTree, auth_id: &Authid, path: &str, expected_roles: &str) {
|
fn check_roles(tree: &AclTree, auth_id: &Authid, path: &str, expected_roles: &str) {
|
||||||
let path_vec = super::split_acl_path(path);
|
let path_vec = super::split_acl_path(path);
|
@ -7,10 +7,12 @@ use anyhow::{Error, bail};
|
|||||||
use proxmox::api::section_config::SectionConfigData;
|
use proxmox::api::section_config::SectionConfigData;
|
||||||
use lazy_static::lazy_static;
|
use lazy_static::lazy_static;
|
||||||
use proxmox::api::UserInformation;
|
use proxmox::api::UserInformation;
|
||||||
|
use proxmox::tools::time::epoch_i64;
|
||||||
|
|
||||||
use super::acl::{AclTree, ROLE_NAMES, ROLE_ADMIN};
|
use pbs_api_types::{Authid, Userid, User, ApiToken, ROLE_ADMIN};
|
||||||
use super::user::{ApiToken, User};
|
|
||||||
use crate::api2::types::{Authid, Userid};
|
use crate::acl::{AclTree, ROLE_NAMES};
|
||||||
|
use crate::memcom::Memcom;
|
||||||
|
|
||||||
/// Cache User/Group/Token/Acl configuration data for fast permission tests
|
/// Cache User/Group/Token/Acl configuration data for fast permission tests
|
||||||
pub struct CachedUserInfo {
|
pub struct CachedUserInfo {
|
||||||
@ -18,16 +20,15 @@ pub struct CachedUserInfo {
|
|||||||
acl_tree: Arc<AclTree>,
|
acl_tree: Arc<AclTree>,
|
||||||
}
|
}
|
||||||
|
|
||||||
fn now() -> i64 { unsafe { libc::time(std::ptr::null_mut()) } }
|
|
||||||
|
|
||||||
struct ConfigCache {
|
struct ConfigCache {
|
||||||
data: Option<Arc<CachedUserInfo>>,
|
data: Option<Arc<CachedUserInfo>>,
|
||||||
last_update: i64,
|
last_update: i64,
|
||||||
|
last_user_cache_generation: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
lazy_static! {
|
lazy_static! {
|
||||||
static ref CACHED_CONFIG: RwLock<ConfigCache> = RwLock::new(
|
static ref CACHED_CONFIG: RwLock<ConfigCache> = RwLock::new(
|
||||||
ConfigCache { data: None, last_update: 0 }
|
ConfigCache { data: None, last_update: 0, last_user_cache_generation: 0 }
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -35,10 +36,16 @@ impl CachedUserInfo {
|
|||||||
|
|
||||||
/// Returns a cached instance (up to 5 seconds old).
|
/// Returns a cached instance (up to 5 seconds old).
|
||||||
pub fn new() -> Result<Arc<Self>, Error> {
|
pub fn new() -> Result<Arc<Self>, Error> {
|
||||||
let now = now();
|
let now = epoch_i64();
|
||||||
|
|
||||||
|
let memcom = Memcom::new()?;
|
||||||
|
let user_cache_generation = memcom.user_cache_generation();
|
||||||
|
|
||||||
{ // limit scope
|
{ // limit scope
|
||||||
let cache = CACHED_CONFIG.read().unwrap();
|
let cache = CACHED_CONFIG.read().unwrap();
|
||||||
if (now - cache.last_update) < 5 {
|
if (user_cache_generation == cache.last_user_cache_generation) &&
|
||||||
|
((now - cache.last_update) < 5)
|
||||||
|
{
|
||||||
if let Some(ref config) = cache.data {
|
if let Some(ref config) = cache.data {
|
||||||
return Ok(config.clone());
|
return Ok(config.clone());
|
||||||
}
|
}
|
||||||
@ -46,53 +53,47 @@ impl CachedUserInfo {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let config = Arc::new(CachedUserInfo {
|
let config = Arc::new(CachedUserInfo {
|
||||||
user_cfg: super::user::cached_config()?,
|
user_cfg: crate::user::cached_config()?,
|
||||||
acl_tree: super::acl::cached_config()?,
|
acl_tree: crate::acl::cached_config()?,
|
||||||
});
|
});
|
||||||
|
|
||||||
let mut cache = CACHED_CONFIG.write().unwrap();
|
let mut cache = CACHED_CONFIG.write().unwrap();
|
||||||
cache.last_update = now;
|
cache.last_update = now;
|
||||||
|
cache.last_user_cache_generation = user_cache_generation;
|
||||||
cache.data = Some(config.clone());
|
cache.data = Some(config.clone());
|
||||||
|
|
||||||
Ok(config)
|
Ok(config)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
/// Only exposed for testing
|
||||||
pub(crate) fn test_new(user_cfg: SectionConfigData, acl_tree: AclTree) -> Self {
|
#[doc(hidden)]
|
||||||
|
pub fn test_new(user_cfg: SectionConfigData, acl_tree: AclTree) -> Self {
|
||||||
Self {
|
Self {
|
||||||
user_cfg: Arc::new(user_cfg),
|
user_cfg: Arc::new(user_cfg),
|
||||||
acl_tree: Arc::new(acl_tree),
|
acl_tree: Arc::new(acl_tree),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Test if a user_id is enabled and not expired
|
||||||
|
pub fn is_active_user_id(&self, userid: &Userid) -> bool {
|
||||||
|
if let Ok(info) = self.user_cfg.lookup::<User>("user", userid.as_str()) {
|
||||||
|
info.is_active()
|
||||||
|
} else {
|
||||||
|
false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Test if a authentication id is enabled and not expired
|
/// Test if a authentication id is enabled and not expired
|
||||||
pub fn is_active_auth_id(&self, auth_id: &Authid) -> bool {
|
pub fn is_active_auth_id(&self, auth_id: &Authid) -> bool {
|
||||||
let userid = auth_id.user();
|
let userid = auth_id.user();
|
||||||
|
|
||||||
if let Ok(info) = self.user_cfg.lookup::<User>("user", userid.as_str()) {
|
if !self.is_active_user_id(userid) {
|
||||||
if !info.enable.unwrap_or(true) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
if let Some(expire) = info.expire {
|
|
||||||
if expire > 0 && expire <= now() {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
if auth_id.is_token() {
|
if auth_id.is_token() {
|
||||||
if let Ok(info) = self.user_cfg.lookup::<ApiToken>("token", &auth_id.to_string()) {
|
if let Ok(info) = self.user_cfg.lookup::<ApiToken>("token", &auth_id.to_string()) {
|
||||||
if !info.enable.unwrap_or(true) {
|
return info.is_active();
|
||||||
return false;
|
|
||||||
}
|
|
||||||
if let Some(expire) = info.expire {
|
|
||||||
if expire > 0 && expire <= now() {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
} else {
|
} else {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
100
pbs-config/src/datastore.rs
Normal file
100
pbs-config/src/datastore.rs
Normal file
@ -0,0 +1,100 @@
|
|||||||
|
use anyhow::{Error};
|
||||||
|
use lazy_static::lazy_static;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use proxmox::api::{
|
||||||
|
schema::{ApiType, Schema},
|
||||||
|
section_config::{
|
||||||
|
SectionConfig,
|
||||||
|
SectionConfigData,
|
||||||
|
SectionConfigPlugin,
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
use pbs_api_types::{DataStoreConfig, DATASTORE_SCHEMA};
|
||||||
|
|
||||||
|
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
|
||||||
|
|
||||||
|
lazy_static! {
|
||||||
|
pub static ref CONFIG: SectionConfig = init();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn init() -> SectionConfig {
|
||||||
|
let obj_schema = match DataStoreConfig::API_SCHEMA {
|
||||||
|
Schema::Object(ref obj_schema) => obj_schema,
|
||||||
|
_ => unreachable!(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let plugin = SectionConfigPlugin::new("datastore".to_string(), Some(String::from("name")), obj_schema);
|
||||||
|
let mut config = SectionConfig::new(&DATASTORE_SCHEMA);
|
||||||
|
config.register_plugin(plugin);
|
||||||
|
|
||||||
|
config
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const DATASTORE_CFG_FILENAME: &str = "/etc/proxmox-backup/datastore.cfg";
|
||||||
|
pub const DATASTORE_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.datastore.lck";
|
||||||
|
|
||||||
|
/// Get exclusive lock
|
||||||
|
pub fn lock_config() -> Result<BackupLockGuard, Error> {
|
||||||
|
open_backup_lockfile(DATASTORE_CFG_LOCKFILE, None, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
||||||
|
|
||||||
|
let content = proxmox::tools::fs::file_read_optional_string(DATASTORE_CFG_FILENAME)?
|
||||||
|
.unwrap_or_else(|| "".to_string());
|
||||||
|
|
||||||
|
let digest = openssl::sha::sha256(content.as_bytes());
|
||||||
|
let data = CONFIG.parse(DATASTORE_CFG_FILENAME, &content)?;
|
||||||
|
Ok((data, digest))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
||||||
|
let raw = CONFIG.write(DATASTORE_CFG_FILENAME, &config)?;
|
||||||
|
replace_backup_config(DATASTORE_CFG_FILENAME, raw.as_bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
// shell completion helper
|
||||||
|
pub fn complete_datastore_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
|
||||||
|
match config() {
|
||||||
|
Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
|
||||||
|
Err(_) => return vec![],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn complete_acl_path(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
|
||||||
|
let mut list = Vec::new();
|
||||||
|
|
||||||
|
list.push(String::from("/"));
|
||||||
|
list.push(String::from("/datastore"));
|
||||||
|
list.push(String::from("/datastore/"));
|
||||||
|
|
||||||
|
if let Ok((data, _digest)) = config() {
|
||||||
|
for id in data.sections.keys() {
|
||||||
|
list.push(format!("/datastore/{}", id));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
list.push(String::from("/remote"));
|
||||||
|
list.push(String::from("/remote/"));
|
||||||
|
|
||||||
|
list.push(String::from("/tape"));
|
||||||
|
list.push(String::from("/tape/"));
|
||||||
|
list.push(String::from("/tape/drive"));
|
||||||
|
list.push(String::from("/tape/drive/"));
|
||||||
|
list.push(String::from("/tape/changer"));
|
||||||
|
list.push(String::from("/tape/changer/"));
|
||||||
|
list.push(String::from("/tape/pool"));
|
||||||
|
list.push(String::from("/tape/pool/"));
|
||||||
|
list.push(String::from("/tape/job"));
|
||||||
|
list.push(String::from("/tape/job/"));
|
||||||
|
|
||||||
|
list
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn complete_calendar_event(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
|
||||||
|
// just give some hints about possible values
|
||||||
|
["minutely", "hourly", "daily", "mon..fri", "0:0"]
|
||||||
|
.iter().map(|s| String::from(*s)).collect()
|
||||||
|
}
|
136
pbs-config/src/domains.rs
Normal file
136
pbs-config/src/domains.rs
Normal file
@ -0,0 +1,136 @@
|
|||||||
|
use anyhow::{Error};
|
||||||
|
use lazy_static::lazy_static;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use serde::{Serialize, Deserialize};
|
||||||
|
|
||||||
|
use proxmox::api::{
|
||||||
|
api,
|
||||||
|
schema::*,
|
||||||
|
section_config::{
|
||||||
|
SectionConfig,
|
||||||
|
SectionConfigData,
|
||||||
|
SectionConfigPlugin,
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
use pbs_api_types::{REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA};
|
||||||
|
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
|
||||||
|
|
||||||
|
lazy_static! {
|
||||||
|
pub static ref CONFIG: SectionConfig = init();
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
/// Use the value of this attribute/claim as unique user name. It is
|
||||||
|
/// up to the identity provider to guarantee the uniqueness. The
|
||||||
|
/// OpenID specification only guarantees that Subject ('sub') is unique. Also
|
||||||
|
/// make sure that the user is not allowed to change that attribute by
|
||||||
|
/// himself!
|
||||||
|
pub enum OpenIdUserAttribute {
|
||||||
|
/// Subject (OpenId 'sub' claim)
|
||||||
|
Subject,
|
||||||
|
/// Username (OpenId 'preferred_username' claim)
|
||||||
|
Username,
|
||||||
|
/// Email (OpenId 'email' claim)
|
||||||
|
Email,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
realm: {
|
||||||
|
schema: REALM_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
"client-key": {
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
autocreate: {
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
"username-claim": {
|
||||||
|
type: OpenIdUserAttribute,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize,Updater)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// OpenID configuration properties.
|
||||||
|
pub struct OpenIdRealmConfig {
|
||||||
|
#[updater(skip)]
|
||||||
|
pub realm: String,
|
||||||
|
/// OpenID Issuer Url
|
||||||
|
pub issuer_url: String,
|
||||||
|
/// OpenID Client ID
|
||||||
|
pub client_id: String,
|
||||||
|
/// OpenID Client Key
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub client_key: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comment: Option<String>,
|
||||||
|
/// Automatically create users if they do not exist.
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub autocreate: Option<bool>,
|
||||||
|
#[updater(skip)]
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub username_claim: Option<OpenIdUserAttribute>,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn init() -> SectionConfig {
|
||||||
|
let obj_schema = match OpenIdRealmConfig::API_SCHEMA {
|
||||||
|
Schema::Object(ref obj_schema) => obj_schema,
|
||||||
|
_ => unreachable!(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let plugin = SectionConfigPlugin::new("openid".to_string(), Some(String::from("realm")), obj_schema);
|
||||||
|
let mut config = SectionConfig::new(&REALM_ID_SCHEMA);
|
||||||
|
config.register_plugin(plugin);
|
||||||
|
|
||||||
|
config
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const DOMAINS_CFG_FILENAME: &str = "/etc/proxmox-backup/domains.cfg";
|
||||||
|
pub const DOMAINS_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.domains.lck";
|
||||||
|
|
||||||
|
/// Get exclusive lock
|
||||||
|
pub fn lock_config() -> Result<BackupLockGuard, Error> {
|
||||||
|
open_backup_lockfile(DOMAINS_CFG_LOCKFILE, None, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
||||||
|
|
||||||
|
let content = proxmox::tools::fs::file_read_optional_string(DOMAINS_CFG_FILENAME)?
|
||||||
|
.unwrap_or_else(|| "".to_string());
|
||||||
|
|
||||||
|
let digest = openssl::sha::sha256(content.as_bytes());
|
||||||
|
let data = CONFIG.parse(DOMAINS_CFG_FILENAME, &content)?;
|
||||||
|
Ok((data, digest))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
||||||
|
let raw = CONFIG.write(DOMAINS_CFG_FILENAME, &config)?;
|
||||||
|
replace_backup_config(DOMAINS_CFG_FILENAME, raw.as_bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
// shell completion helper
|
||||||
|
pub fn complete_realm_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
|
||||||
|
match config() {
|
||||||
|
Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
|
||||||
|
Err(_) => return vec![],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn complete_openid_realm_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
|
||||||
|
match config() {
|
||||||
|
Ok((data, _digest)) => data.sections.iter()
|
||||||
|
.filter_map(|(id, (t, _))| if t == "openid" { Some(id.to_string()) } else { None })
|
||||||
|
.collect(),
|
||||||
|
Err(_) => return vec![],
|
||||||
|
}
|
||||||
|
}
|
@ -25,22 +25,15 @@ use proxmox::{
|
|||||||
SectionConfigPlugin,
|
SectionConfigPlugin,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
tools::fs::{
|
|
||||||
open_file_locked,
|
|
||||||
replace_file,
|
|
||||||
CreateOptions,
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
|
||||||
api2::types::{
|
|
||||||
DRIVE_NAME_SCHEMA,
|
use pbs_api_types::{
|
||||||
VirtualTapeDrive,
|
DRIVE_NAME_SCHEMA, VirtualTapeDrive, LtoTapeDrive, ScsiTapeChanger,
|
||||||
LtoTapeDrive,
|
|
||||||
ScsiTapeChanger,
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
lazy_static! {
|
lazy_static! {
|
||||||
/// Static [`SectionConfig`] to access parser/writer functions.
|
/// Static [`SectionConfig`] to access parser/writer functions.
|
||||||
pub static ref CONFIG: SectionConfig = init();
|
pub static ref CONFIG: SectionConfig = init();
|
||||||
@ -79,8 +72,8 @@ pub const DRIVE_CFG_FILENAME: &str = "/etc/proxmox-backup/tape.cfg";
|
|||||||
pub const DRIVE_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.tape.lck";
|
pub const DRIVE_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.tape.lck";
|
||||||
|
|
||||||
/// Get exclusive lock
|
/// Get exclusive lock
|
||||||
pub fn lock() -> Result<std::fs::File, Error> {
|
pub fn lock() -> Result<BackupLockGuard, Error> {
|
||||||
open_file_locked(DRIVE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)
|
open_backup_lockfile(DRIVE_CFG_LOCKFILE, None, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Read and parse the configuration file
|
/// Read and parse the configuration file
|
||||||
@ -97,19 +90,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
|||||||
/// Save the configuration file
|
/// Save the configuration file
|
||||||
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
||||||
let raw = CONFIG.write(DRIVE_CFG_FILENAME, &config)?;
|
let raw = CONFIG.write(DRIVE_CFG_FILENAME, &config)?;
|
||||||
|
replace_backup_config(DRIVE_CFG_FILENAME, raw.as_bytes())
|
||||||
let backup_user = crate::backup::backup_user()?;
|
|
||||||
let mode = nix::sys::stat::Mode::from_bits_truncate(0o0640);
|
|
||||||
// set the correct owner/group/permissions while saving file
|
|
||||||
// owner(rw) = root, group(r)= backup
|
|
||||||
let options = CreateOptions::new()
|
|
||||||
.perm(mode)
|
|
||||||
.owner(nix::unistd::ROOT)
|
|
||||||
.group(backup_user.gid);
|
|
||||||
|
|
||||||
replace_file(DRIVE_CFG_FILENAME, raw.as_bytes(), options)?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Check if the specified drive name exists in the config.
|
/// Check if the specified drive name exists in the config.
|
@ -1,15 +1,15 @@
|
|||||||
use anyhow::{bail, format_err, Context, Error};
|
|
||||||
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use crate::backup::{CryptConfig, Fingerprint};
|
|
||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
|
|
||||||
|
use anyhow::{bail, format_err, Context, Error};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
|
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
|
||||||
use proxmox::try_block;
|
use proxmox::try_block;
|
||||||
|
|
||||||
use crate::api2::types::{KeyInfo, Kdf};
|
use pbs_api_types::{Kdf, KeyInfo, Fingerprint};
|
||||||
|
|
||||||
|
use pbs_tools::crypt_config::CryptConfig;
|
||||||
|
|
||||||
/// Key derivation function configuration
|
/// Key derivation function configuration
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug)]
|
#[derive(Deserialize, Serialize, Clone, Debug)]
|
||||||
@ -100,7 +100,7 @@ impl From<&KeyConfig> for KeyInfo {
|
|||||||
fingerprint: key_config
|
fingerprint: key_config
|
||||||
.fingerprint
|
.fingerprint
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.map(|fp| crate::tools::format::as_fingerprint(fp.bytes())),
|
.map(|fp| pbs_tools::format::as_fingerprint(fp.bytes())),
|
||||||
hint: key_config.hint.clone(),
|
hint: key_config.hint.clone(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -120,7 +120,7 @@ impl KeyConfig {
|
|||||||
pub fn without_password(raw_key: [u8; 32]) -> Result<Self, Error> {
|
pub fn without_password(raw_key: [u8; 32]) -> Result<Self, Error> {
|
||||||
// always compute fingerprint
|
// always compute fingerprint
|
||||||
let crypt_config = CryptConfig::new(raw_key.clone())?;
|
let crypt_config = CryptConfig::new(raw_key.clone())?;
|
||||||
let fingerprint = Some(crypt_config.fingerprint());
|
let fingerprint = Some(Fingerprint::new(crypt_config.fingerprint()));
|
||||||
|
|
||||||
let created = proxmox::tools::time::epoch_i64();
|
let created = proxmox::tools::time::epoch_i64();
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
@ -187,7 +187,7 @@ impl KeyConfig {
|
|||||||
|
|
||||||
// always compute fingerprint
|
// always compute fingerprint
|
||||||
let crypt_config = CryptConfig::new(raw_key.clone())?;
|
let crypt_config = CryptConfig::new(raw_key.clone())?;
|
||||||
let fingerprint = Some(crypt_config.fingerprint());
|
let fingerprint = Some(Fingerprint::new(crypt_config.fingerprint()));
|
||||||
|
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
kdf: Some(kdf),
|
kdf: Some(kdf),
|
||||||
@ -258,7 +258,7 @@ impl KeyConfig {
|
|||||||
result.copy_from_slice(&key);
|
result.copy_from_slice(&key);
|
||||||
|
|
||||||
let crypt_config = CryptConfig::new(result.clone())?;
|
let crypt_config = CryptConfig::new(result.clone())?;
|
||||||
let fingerprint = crypt_config.fingerprint();
|
let fingerprint = Fingerprint::new(crypt_config.fingerprint());
|
||||||
if let Some(ref stored_fingerprint) = self.fingerprint {
|
if let Some(ref stored_fingerprint) = self.fingerprint {
|
||||||
if &fingerprint != stored_fingerprint {
|
if &fingerprint != stored_fingerprint {
|
||||||
bail!(
|
bail!(
|
106
pbs-config/src/lib.rs
Normal file
106
pbs-config/src/lib.rs
Normal file
@ -0,0 +1,106 @@
|
|||||||
|
pub mod acl;
|
||||||
|
mod cached_user_info;
|
||||||
|
pub use cached_user_info::CachedUserInfo;
|
||||||
|
pub mod datastore;
|
||||||
|
pub mod domains;
|
||||||
|
pub mod drive;
|
||||||
|
pub mod key_config;
|
||||||
|
pub mod media_pool;
|
||||||
|
pub mod network;
|
||||||
|
pub mod remote;
|
||||||
|
pub mod sync;
|
||||||
|
pub mod tape_encryption_keys;
|
||||||
|
pub mod tape_job;
|
||||||
|
pub mod token_shadow;
|
||||||
|
pub mod user;
|
||||||
|
pub mod verify;
|
||||||
|
|
||||||
|
pub(crate) mod memcom;
|
||||||
|
|
||||||
|
use anyhow::{format_err, Error};
|
||||||
|
|
||||||
|
pub use pbs_buildcfg::{BACKUP_USER_NAME, BACKUP_GROUP_NAME};
|
||||||
|
|
||||||
|
/// Return User info for the 'backup' user (``getpwnam_r(3)``)
|
||||||
|
pub fn backup_user() -> Result<nix::unistd::User, Error> {
|
||||||
|
pbs_tools::sys::query_user(BACKUP_USER_NAME)?
|
||||||
|
.ok_or_else(|| format_err!("Unable to lookup '{}' user.", BACKUP_USER_NAME))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return Group info for the 'backup' group (``getgrnam(3)``)
|
||||||
|
pub fn backup_group() -> Result<nix::unistd::Group, Error> {
|
||||||
|
pbs_tools::sys::query_group(BACKUP_GROUP_NAME)?
|
||||||
|
.ok_or_else(|| format_err!("Unable to lookup '{}' group.", BACKUP_GROUP_NAME))
|
||||||
|
}
|
||||||
|
pub struct BackupLockGuard(Option<std::fs::File>);
|
||||||
|
|
||||||
|
#[doc(hidden)]
|
||||||
|
/// Note: do not use for production code, this is only intended for tests
|
||||||
|
pub unsafe fn create_mocked_lock() -> BackupLockGuard {
|
||||||
|
BackupLockGuard(None)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Open or create a lock file owned by user "backup" and lock it.
|
||||||
|
///
|
||||||
|
/// Owner/Group of the file is set to backup/backup.
|
||||||
|
/// File mode is 0660.
|
||||||
|
/// Default timeout is 10 seconds.
|
||||||
|
///
|
||||||
|
/// Note: This method needs to be called by user "root" or "backup".
|
||||||
|
pub fn open_backup_lockfile<P: AsRef<std::path::Path>>(
|
||||||
|
path: P,
|
||||||
|
timeout: Option<std::time::Duration>,
|
||||||
|
exclusive: bool,
|
||||||
|
) -> Result<BackupLockGuard, Error> {
|
||||||
|
let user = backup_user()?;
|
||||||
|
let options = proxmox::tools::fs::CreateOptions::new()
|
||||||
|
.perm(nix::sys::stat::Mode::from_bits_truncate(0o660))
|
||||||
|
.owner(user.uid)
|
||||||
|
.group(user.gid);
|
||||||
|
|
||||||
|
let timeout = timeout.unwrap_or(std::time::Duration::new(10, 0));
|
||||||
|
|
||||||
|
let file = proxmox::tools::fs::open_file_locked(&path, timeout, exclusive, options)?;
|
||||||
|
Ok(BackupLockGuard(Some(file)))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Atomically write data to file owned by "root:backup" with permission "0640"
|
||||||
|
///
|
||||||
|
/// Only the superuser can write those files, but group 'backup' can read them.
|
||||||
|
pub fn replace_backup_config<P: AsRef<std::path::Path>>(
|
||||||
|
path: P,
|
||||||
|
data: &[u8],
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let backup_user = backup_user()?;
|
||||||
|
let mode = nix::sys::stat::Mode::from_bits_truncate(0o0640);
|
||||||
|
// set the correct owner/group/permissions while saving file
|
||||||
|
// owner(rw) = root, group(r)= backup
|
||||||
|
let options = proxmox::tools::fs::CreateOptions::new()
|
||||||
|
.perm(mode)
|
||||||
|
.owner(nix::unistd::ROOT)
|
||||||
|
.group(backup_user.gid);
|
||||||
|
|
||||||
|
proxmox::tools::fs::replace_file(path, data, options)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Atomically write data to file owned by "root:root" with permission "0600"
|
||||||
|
///
|
||||||
|
/// Only the superuser can read and write those files.
|
||||||
|
pub fn replace_secret_config<P: AsRef<std::path::Path>>(
|
||||||
|
path: P,
|
||||||
|
data: &[u8],
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
|
||||||
|
// set the correct owner/group/permissions while saving file
|
||||||
|
// owner(rw) = root, group(r)= root
|
||||||
|
let options = proxmox::tools::fs::CreateOptions::new()
|
||||||
|
.perm(mode)
|
||||||
|
.owner(nix::unistd::ROOT)
|
||||||
|
.group(nix::unistd::Gid::from_raw(0));
|
||||||
|
|
||||||
|
proxmox::tools::fs::replace_file(path, data, options)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
@ -20,19 +20,11 @@ use proxmox::{
|
|||||||
SectionConfigPlugin,
|
SectionConfigPlugin,
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
tools::fs::{
|
|
||||||
open_file_locked,
|
|
||||||
replace_file,
|
|
||||||
CreateOptions,
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use pbs_api_types::{MEDIA_POOL_NAME_SCHEMA, MediaPoolConfig};
|
||||||
api2::types::{
|
|
||||||
MEDIA_POOL_NAME_SCHEMA,
|
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
|
||||||
MediaPoolConfig,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
lazy_static! {
|
lazy_static! {
|
||||||
/// Static [`SectionConfig`] to access parser/writer functions.
|
/// Static [`SectionConfig`] to access parser/writer functions.
|
||||||
@ -57,10 +49,9 @@ pub const MEDIA_POOL_CFG_FILENAME: &str = "/etc/proxmox-backup/media-pool.cfg";
|
|||||||
/// Lock file name (used to prevent concurrent access)
|
/// Lock file name (used to prevent concurrent access)
|
||||||
pub const MEDIA_POOL_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.media-pool.lck";
|
pub const MEDIA_POOL_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.media-pool.lck";
|
||||||
|
|
||||||
|
|
||||||
/// Get exclusive lock
|
/// Get exclusive lock
|
||||||
pub fn lock() -> Result<std::fs::File, Error> {
|
pub fn lock() -> Result<BackupLockGuard, Error> {
|
||||||
open_file_locked(MEDIA_POOL_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)
|
open_backup_lockfile(MEDIA_POOL_CFG_LOCKFILE, None, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Read and parse the configuration file
|
/// Read and parse the configuration file
|
||||||
@ -77,19 +68,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
|||||||
/// Save the configuration file
|
/// Save the configuration file
|
||||||
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
||||||
let raw = CONFIG.write(MEDIA_POOL_CFG_FILENAME, &config)?;
|
let raw = CONFIG.write(MEDIA_POOL_CFG_FILENAME, &config)?;
|
||||||
|
replace_backup_config(MEDIA_POOL_CFG_FILENAME, raw.as_bytes())
|
||||||
let backup_user = crate::backup::backup_user()?;
|
|
||||||
let mode = nix::sys::stat::Mode::from_bits_truncate(0o0640);
|
|
||||||
// set the correct owner/group/permissions while saving file
|
|
||||||
// owner(rw) = root, group(r)= backup
|
|
||||||
let options = CreateOptions::new()
|
|
||||||
.perm(mode)
|
|
||||||
.owner(nix::unistd::ROOT)
|
|
||||||
.group(backup_user.gid);
|
|
||||||
|
|
||||||
replace_file(MEDIA_POOL_CFG_FILENAME, raw.as_bytes(), options)?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// shell completion helper
|
// shell completion helper
|
81
pbs-config/src/memcom.rs
Normal file
81
pbs-config/src/memcom.rs
Normal file
@ -0,0 +1,81 @@
|
|||||||
|
//! Memory based communication channel between proxy & daemon for things such as cache
|
||||||
|
//! invalidation.
|
||||||
|
|
||||||
|
use std::os::unix::io::AsRawFd;
|
||||||
|
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use anyhow::Error;
|
||||||
|
use nix::fcntl::OFlag;
|
||||||
|
use nix::sys::mman::{MapFlags, ProtFlags};
|
||||||
|
use nix::sys::stat::Mode;
|
||||||
|
use once_cell::sync::OnceCell;
|
||||||
|
|
||||||
|
use proxmox::tools::fs::CreateOptions;
|
||||||
|
use proxmox::tools::mmap::Mmap;
|
||||||
|
|
||||||
|
/// In-memory communication channel.
///
/// Wraps a shared memory mapping; the mapped page is interpreted as a
/// [`Head`] by the accessor methods.
pub struct Memcom {
    // Shared mapping of the first page of `MEMCOM_FILE_PATH`.
    mmap: Mmap<u8>,
}
|
||||||
|
|
||||||
|
// `repr(C)` gives the struct a stable layout — it is shared between
// processes through the memory-mapped file.
#[repr(C)]
struct Head {
    // User (user.cfg) cache generation/version.
    user_cache_generation: AtomicUsize,
}
|
||||||
|
|
||||||
|
// Process-wide singleton, lazily created by `Memcom::new`.
static INSTANCE: OnceCell<Arc<Memcom>> = OnceCell::new();

// Backing file for the shared memory mapping (lives in the runtime dir).
const MEMCOM_FILE_PATH: &str = pbs_buildcfg::rundir!("/proxmox-backup-memcom");
// One zeroed page, used as the file's initial content on first creation.
const EMPTY_PAGE: [u8; 4096] = [0u8; 4096];
|
||||||
|
|
||||||
|
impl Memcom {
|
||||||
|
/// Open the memory based communication channel singleton.
|
||||||
|
pub fn new() -> Result<Arc<Self>, Error> {
|
||||||
|
INSTANCE.get_or_try_init(Self::open).map(Arc::clone)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Actual work of `new`:
|
||||||
|
fn open() -> Result<Arc<Self>, Error> {
|
||||||
|
let user = crate::backup_user()?;
|
||||||
|
let options = CreateOptions::new()
|
||||||
|
.perm(Mode::from_bits_truncate(0o660))
|
||||||
|
.owner(user.uid)
|
||||||
|
.group(user.gid);
|
||||||
|
|
||||||
|
let file = proxmox::tools::fs::atomic_open_or_create_file(
|
||||||
|
MEMCOM_FILE_PATH,
|
||||||
|
OFlag::O_RDWR | OFlag::O_CLOEXEC,
|
||||||
|
&EMPTY_PAGE, options)?;
|
||||||
|
|
||||||
|
let mmap = unsafe {
|
||||||
|
Mmap::<u8>::map_fd(
|
||||||
|
file.as_raw_fd(),
|
||||||
|
0,
|
||||||
|
4096,
|
||||||
|
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
|
||||||
|
MapFlags::MAP_SHARED | MapFlags::MAP_NORESERVE | MapFlags::MAP_POPULATE,
|
||||||
|
)?
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(Arc::new(Self { mmap }))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shortcut to get the mapped `Head` as a `Head`.
|
||||||
|
fn head(&self) -> &Head {
|
||||||
|
unsafe { &*(self.mmap.as_ptr() as *const u8 as *const Head) }
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the user cache generation number.
|
||||||
|
pub fn user_cache_generation(&self) -> usize {
|
||||||
|
self.head().user_cache_generation.load(Ordering::Acquire)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Increase the user cache generation number.
|
||||||
|
pub fn increase_user_cache_generation(&self) {
|
||||||
|
self.head()
|
||||||
|
.user_cache_generation
|
||||||
|
.fetch_add(1, Ordering::AcqRel);
|
||||||
|
}
|
||||||
|
}
|
@ -188,7 +188,7 @@ pub fn compute_file_diff(filename: &str, shadow: &str) -> Result<String, Error>
|
|||||||
.output()
|
.output()
|
||||||
.map_err(|err| format_err!("failed to execute diff - {}", err))?;
|
.map_err(|err| format_err!("failed to execute diff - {}", err))?;
|
||||||
|
|
||||||
let diff = crate::tools::command_output_as_string(output, Some(|c| c == 0 || c == 1))
|
let diff = pbs_tools::command_output_as_string(output, Some(|c| c == 0 || c == 1))
|
||||||
.map_err(|err| format_err!("diff failed: {}", err))?;
|
.map_err(|err| format_err!("diff failed: {}", err))?;
|
||||||
|
|
||||||
Ok(diff)
|
Ok(diff)
|
||||||
@ -209,7 +209,7 @@ pub fn network_reload() -> Result<(), Error> {
|
|||||||
.output()
|
.output()
|
||||||
.map_err(|err| format_err!("failed to execute 'ifreload' - {}", err))?;
|
.map_err(|err| format_err!("failed to execute 'ifreload' - {}", err))?;
|
||||||
|
|
||||||
crate::tools::command_output(output, None)
|
pbs_tools::command_output(output, None)
|
||||||
.map_err(|err| format_err!("ifreload failed: {}", err))?;
|
.map_err(|err| format_err!("ifreload failed: {}", err))?;
|
||||||
|
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user