Compare commits
283 Commits
Author | SHA1 | Date | |
---|---|---|---|
bd00ff10e4 | |||
149b969d9a | |||
56d3b59c71 | |||
c1e6efa8e1 | |||
3b5473a682 | |||
4954d3130b | |||
064497756e | |||
ce3c7a1bda | |||
50a39bbc1f | |||
154d01b042 | |||
1f3352018b | |||
b721783c48 | |||
76ee3085a4 | |||
5d5a53059f | |||
77d8c593b3 | |||
c450a3cafd | |||
f8f4d7cab4 | |||
91abfef049 | |||
963b7ec51b | |||
16aab0c137 | |||
bf8b8be976 | |||
e201104d0b | |||
d63db0863d | |||
7a36833103 | |||
ca6e66aa5a | |||
94a6b33680 | |||
2d5287fbbc | |||
6eb756bcab | |||
5647219049 | |||
b810972823 | |||
3a07cdf574 | |||
193ec30c2b | |||
c94723062c | |||
0eadfdf670 | |||
ebf8ce20bc | |||
e7acdde758 | |||
f2c9da2349 | |||
ff344655e2 | |||
3490d9460c | |||
fdf9373f9e | |||
ba80611324 | |||
07a579c632 | |||
188a37fbed | |||
f251367c33 | |||
ac4e399a10 | |||
4fe77c36df | |||
118515dbd0 | |||
42ba4cd399 | |||
ab1c07a622 | |||
930a71460f | |||
a58a5cf795 | |||
92a8f0bc82 | |||
bf298a16ef | |||
9a1b24b6b1 | |||
ea67cd70c9 | |||
281a5dd1fc | |||
df3b3d1798 | |||
f5e2b4726d | |||
daaeea8b4b | |||
6f6df501a0 | |||
d5790a9f27 | |||
860eaec58f | |||
26e949d5fe | |||
ac7dbba458 | |||
7c2431d42c | |||
25c1420a12 | |||
c1a1e1ae8f | |||
a9df9df25d | |||
10beed1199 | |||
df32530750 | |||
062edce27f | |||
efd2713aa8 | |||
8a21566c8a | |||
c8c5c7f571 | |||
91357c2034 | |||
097ccfe1d5 | |||
61ef4ae8cb | |||
01ae7bfaf2 | |||
1b52122a1f | |||
1d9bc184f5 | |||
5f83d3f636 | |||
71e534631f | |||
6e9e6c7a54 | |||
e2e7560d5e | |||
0ceb97538a | |||
3e276f6fb6 | |||
2b00c5abca | |||
15cc41b6cb | |||
729bd1fd16 | |||
9a7431e2e0 | |||
52fbc86fc9 | |||
afe6c79ce3 | |||
9407810fe1 | |||
c42a54795d | |||
96ec3801a9 | |||
c4707d0c1d | |||
24f9af9e0f | |||
a0172d766b | |||
09f999337a | |||
e3eb062c09 | |||
de21d4efdc | |||
d5f58006d3 | |||
cb80ffc1de | |||
1859a0eb8b | |||
9e7132c0b3 | |||
bf013be1c4 | |||
b935209584 | |||
efd4ddc17b | |||
e511e0e553 | |||
610150a4b4 | |||
485b2438ac | |||
bfd12e871f | |||
0c136bfab1 | |||
245e2aea23 | |||
b9d588ffde | |||
e4bc3e0e8d | |||
2419dc0de9 | |||
68fd9ca6d6 | |||
4beb7d2dbe | |||
2bc1250c28 | |||
9b1e2ae83c | |||
9b5ecbe2ff | |||
342ed4aea0 | |||
d4e9d5470e | |||
5c1cabdea1 | |||
38517ca053 | |||
e33758d1b8 | |||
aba6189c4f | |||
adcc21716b | |||
87e17fb4d1 | |||
8292d3d20e | |||
5cc7d89139 | |||
343392613d | |||
de91418b79 | |||
fe9c47ab4f | |||
02db72678f | |||
db4b469285 | |||
92c5cf42d1 | |||
e9558f290a | |||
572e6594d2 | |||
88691284d8 | |||
85c622807e | |||
9d42e0475b | |||
181a335bfa | |||
0a33951e9e | |||
7a356a748a | |||
1c402740a2 | |||
e0a19d3313 | |||
6b8329ee34 | |||
1d4448998a | |||
d6473f5359 | |||
f5f9ec81d2 | |||
fea950155f | |||
ef2944bc24 | |||
934c8724e2 | |||
98eb435d90 | |||
bd10af6eda | |||
7f381a6246 | |||
c17fbbbc07 | |||
ac2ca6c341 | |||
d26865c52c | |||
2b05008a11 | |||
45700e2ecf | |||
f84304235b | |||
0ca41155b2 | |||
a291ab59ba | |||
fce7cd0d36 | |||
658357c5a8 | |||
7484fce24d | |||
f28a713e2b | |||
a9017805b7 | |||
2e3f94e12f | |||
d531c7ae61 | |||
7df1580fa6 | |||
58f70bccbb | |||
fae4f6c509 | |||
ddafb28572 | |||
642c7b9915 | |||
5a8726e6d2 | |||
b3f279e2d9 | |||
82f5ad18f0 | |||
bacc99c7f8 | |||
6728d0977b | |||
bff7c027c9 | |||
79b3113361 | |||
5885767b91 | |||
ec08247e5c | |||
400f081487 | |||
03664514ab | |||
c68fa58a59 | |||
426dda0730 | |||
eb37d4ece2 | |||
1198f8d4e6 | |||
4b709ade68 | |||
fa49d0fde9 | |||
1d44f175c6 | |||
890b88cbef | |||
27709b49d5 | |||
7ccbce03d3 | |||
5fb852afed | |||
60589e6066 | |||
717ce40612 | |||
75442e813e | |||
853c55a049 | |||
6ef1b649d9 | |||
e3f3359c86 | |||
0e1edf19b1 | |||
de55fff226 | |||
b3a67f1f14 | |||
3cc23ca6cc | |||
3def6bfc64 | |||
18e8bc17e4 | |||
f66d66aafe | |||
7380c48dff | |||
0191759316 | |||
dbc42e6f75 | |||
d1c3bc5350 | |||
a97301350f | |||
09340f28f5 | |||
20497c6346 | |||
d0f7d0d9c1 | |||
608806e884 | |||
48176b0a77 | |||
3483a3b3a1 | |||
347e0d4c57 | |||
ae9b5c077a | |||
747446eb50 | |||
e1c8c27f47 | |||
63cec1622a | |||
31142ef291 | |||
058b4b9708 | |||
9a1330c72e | |||
0a6df20986 | |||
6680878b5c | |||
593043ed53 | |||
038f385089 | |||
b914b94773 | |||
2194bc59c8 | |||
a98a288e2d | |||
49e25688f1 | |||
d7eedbd24b | |||
5b17a02da4 | |||
8735247f29 | |||
0d5d15c9d1 | |||
2e44983a37 | |||
c76ff4b472 | |||
aaf4f40285 | |||
e64f77b716 | |||
fd1b65cc3c | |||
11148dce43 | |||
38da8ca1bc | |||
a0ffd4a413 | |||
450105b0c3 | |||
b62edce929 | |||
67678ec39c | |||
bf95fba72e | |||
d265420025 | |||
01a080215d | |||
8cf445ecc4 | |||
20def38e96 | |||
be5b43cb87 | |||
6f0565fa60 | |||
99940358e3 | |||
53daae8e89 | |||
8a23ea4656 | |||
c95c1c83b0 | |||
b446fa14c5 | |||
6d5d305d9d | |||
af2eb422d5 | |||
bbd57396d7 | |||
0fd55b08d9 | |||
619cd5cbcb | |||
1ec0d70d09 | |||
c8449217dc | |||
f7348a23cd | |||
ae18c436dd | |||
b0e20a71e2 | |||
b9700a9fe5 | |||
81867f0539 | |||
0a33fba49c | |||
049a22a3a3 | |||
4d4f94dedf | |||
a844fa0ba0 |
39 Cargo.toml
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "proxmox-backup"
|
||||
version = "2.0.10"
|
||||
version = "2.1.2"
|
||||
authors = [
|
||||
"Dietmar Maurer <dietmar@proxmox.com>",
|
||||
"Dominik Csapak <d.csapak@proxmox.com>",
|
||||
@ -25,9 +25,8 @@ members = [
|
||||
"pbs-config",
|
||||
"pbs-datastore",
|
||||
"pbs-fuse-loop",
|
||||
"pbs-runtime",
|
||||
"proxmox-rest-server",
|
||||
"proxmox-systemd",
|
||||
"proxmox-rrd",
|
||||
"pbs-tape",
|
||||
"pbs-tools",
|
||||
|
||||
@ -44,9 +43,10 @@ path = "src/lib.rs"
|
||||
|
||||
[dependencies]
|
||||
apt-pkg-native = "0.3.2"
|
||||
base64 = "0.12"
|
||||
base64 = "0.13"
|
||||
bitflags = "1.2.1"
|
||||
bytes = "1.0"
|
||||
cidr = "0.2.1"
|
||||
crc32fast = "1"
|
||||
endian_trait = { version = "0.6", features = ["arrays"] }
|
||||
env_logger = "0.7"
|
||||
@ -56,6 +56,7 @@ thiserror = "1.0"
|
||||
futures = "0.3"
|
||||
h2 = { version = "0.3", features = [ "stream" ] }
|
||||
handlebars = "3.0"
|
||||
hex = "0.4.3"
|
||||
http = "0.2"
|
||||
hyper = { version = "0.14", features = [ "full" ] }
|
||||
lazy_static = "1.4"
|
||||
@ -79,11 +80,10 @@ tokio-openssl = "0.6.1"
|
||||
tokio-stream = "0.1.0"
|
||||
tokio-util = { version = "0.6", features = [ "codec", "io" ] }
|
||||
tower-service = "0.3.0"
|
||||
udev = ">= 0.3, <0.5"
|
||||
udev = "0.4"
|
||||
url = "2.1"
|
||||
#valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
|
||||
walkdir = "2"
|
||||
webauthn-rs = "0.2.5"
|
||||
xdg = "2.2"
|
||||
nom = "5.1"
|
||||
crossbeam-channel = "0.5"
|
||||
@ -94,20 +94,31 @@ zstd = { version = "0.6", features = [ "bindgen" ] }
|
||||
pathpatterns = "0.1.2"
|
||||
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
|
||||
|
||||
proxmox = { version = "0.13.3", features = [ "sortable-macro", "api-macro", "cli", "router", "tfa" ] }
|
||||
proxmox-acme-rs = "0.2.1"
|
||||
proxmox-apt = "0.7.0"
|
||||
proxmox-http = { version = "0.4.0", features = [ "client", "http-helpers", "websocket" ] }
|
||||
proxmox-openid = "0.7.0"
|
||||
proxmox = { version = "0.15.3", features = [ "sortable-macro" ] }
|
||||
proxmox-http = { version = "0.5.4", features = [ "client", "http-helpers", "websocket" ] }
|
||||
proxmox-io = "1"
|
||||
proxmox-lang = "1"
|
||||
proxmox-router = { version = "1.1", features = [ "cli" ] }
|
||||
proxmox-schema = { version = "1", features = [ "api-macro" ] }
|
||||
proxmox-section-config = "1"
|
||||
proxmox-tfa = { version = "1.3", features = [ "api", "api-types" ] }
|
||||
proxmox-time = "1"
|
||||
proxmox-uuid = "1"
|
||||
proxmox-shared-memory = "0.1.1"
|
||||
proxmox-sys = "0.1.2"
|
||||
|
||||
proxmox-acme-rs = "0.3"
|
||||
proxmox-apt = "0.8.0"
|
||||
proxmox-async = "0.2"
|
||||
proxmox-openid = "0.9.0"
|
||||
|
||||
pbs-api-types = { path = "pbs-api-types" }
|
||||
pbs-buildcfg = { path = "pbs-buildcfg" }
|
||||
pbs-client = { path = "pbs-client" }
|
||||
pbs-config = { path = "pbs-config" }
|
||||
pbs-datastore = { path = "pbs-datastore" }
|
||||
pbs-runtime = { path = "pbs-runtime" }
|
||||
proxmox-rest-server = { path = "proxmox-rest-server" }
|
||||
proxmox-systemd = { path = "proxmox-systemd" }
|
||||
proxmox-rrd = { path = "proxmox-rrd" }
|
||||
pbs-tools = { path = "pbs-tools" }
|
||||
pbs-tape = { path = "pbs-tape" }
|
||||
|
||||
@ -116,6 +127,8 @@ pbs-tape = { path = "pbs-tape" }
|
||||
[patch.crates-io]
|
||||
#proxmox = { path = "../proxmox/proxmox" }
|
||||
#proxmox-http = { path = "../proxmox/proxmox-http" }
|
||||
#proxmox-tfa = { path = "../proxmox/proxmox-tfa" }
|
||||
#proxmox-schema = { path = "../proxmox/proxmox-schema" }
|
||||
#pxar = { path = "../pxar" }
|
||||
|
||||
[features]
|
||||
|
13 Makefile
@ -38,9 +38,8 @@ SUBCRATES := \
|
||||
pbs-config \
|
||||
pbs-datastore \
|
||||
pbs-fuse-loop \
|
||||
pbs-runtime \
|
||||
proxmox-rest-server \
|
||||
proxmox-systemd \
|
||||
proxmox-rrd \
|
||||
pbs-tape \
|
||||
pbs-tools \
|
||||
proxmox-backup-banner \
|
||||
@ -171,14 +170,11 @@ cargo-build:
|
||||
$(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-cargo-build
|
||||
.do-cargo-build:
|
||||
$(CARGO) build $(CARGO_BUILD_ARGS) \
|
||||
--bin proxmox-backup-api \
|
||||
--bin proxmox-backup-proxy \
|
||||
--bin proxmox-backup-manager \
|
||||
--bin docgen \
|
||||
--package proxmox-backup-banner \
|
||||
--bin proxmox-backup-banner \
|
||||
--package proxmox-backup-client \
|
||||
--bin proxmox-backup-client \
|
||||
--bin dump-catalog-shell-cli \
|
||||
--bin proxmox-backup-debug \
|
||||
--package proxmox-file-restore \
|
||||
--bin proxmox-file-restore \
|
||||
@ -190,7 +186,10 @@ $(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-
|
||||
--package proxmox-restore-daemon \
|
||||
--bin proxmox-restore-daemon \
|
||||
--package proxmox-backup \
|
||||
--bin dump-catalog-shell-cli \
|
||||
--bin docgen \
|
||||
--bin proxmox-backup-api \
|
||||
--bin proxmox-backup-manager \
|
||||
--bin proxmox-backup-proxy \
|
||||
--bin proxmox-daily-update \
|
||||
--bin proxmox-file-restore \
|
||||
--bin proxmox-tape \
|
||||
|
150 debian/changelog (vendored)
@ -1,4 +1,152 @@
|
||||
rust-proxmox-backup (2.0.10-1) UNRELEASED; urgency=medium
|
||||
rust-proxmox-backup (2.1.2-1) bullseye; urgency=medium
|
||||
|
||||
* docs: backup-client: fix wrong reference
|
||||
|
||||
* docs: remotes: note that protected flags will not be synced
|
||||
|
||||
* sync job: correctly apply rate limit
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Tue, 23 Nov 2021 13:56:15 +0100
|
||||
|
||||
rust-proxmox-backup (2.1.1-2) bullseye; urgency=medium
|
||||
|
||||
* docs: update and add traffic control related screenshots
|
||||
|
||||
* docs: mention traffic control (bandwidth limits) for sync jobs
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Mon, 22 Nov 2021 16:07:39 +0100
|
||||
|
||||
rust-proxmox-backup (2.1.1-1) bullseye; urgency=medium
|
||||
|
||||
* fix proxmox-backup-manager sync-job list
|
||||
|
||||
* ui, api: sync-job: allow one to configure a rate limit
|
||||
|
||||
* api: snapshot list: set default for 'protected' flag
|
||||
|
||||
* ui: datastore content: rework rendering protection state
|
||||
|
||||
* docs: update traffic control docs (use HumanBytes)
|
||||
|
||||
* ui: traffic-control: include ipv6 in 'all' networks
|
||||
|
||||
* ui: traffic-control edit: add spaces between networks for more
|
||||
readability
|
||||
|
||||
* tape: fix passing-through key-fingerprint
|
||||
|
||||
* avoid a bogus error regarding logrotate-path due to a reversed check
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Mon, 22 Nov 2021 12:24:31 +0100
|
||||
|
||||
rust-proxmox-backup (2.1.0-1) bullseye; urgency=medium
|
||||
|
||||
* rest server: make successful-ticket auth log a debug one to avoid
|
||||
syslog spam
|
||||
|
||||
* traffic-controls: add API/CLI to show current traffic
|
||||
|
||||
* docs: add traffic control section
|
||||
|
||||
* ui: use TFA widgets from widget toolkit
|
||||
|
||||
* sync: allow pulling groups selectively
|
||||
|
||||
* fix #3533: tape backup: filter groups according to config
|
||||
|
||||
* proxmox-tape: add missing notify-user option to backup command
|
||||
|
||||
* openid: allow arbitrary username-claims
|
||||
|
||||
* openid: support configuring the prompt, scopes and ACR values
|
||||
|
||||
* use human-byte for traffic-control rate-in/out and burst-in/out config
|
||||
|
||||
* ui: add traffic control view and editor
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Sat, 20 Nov 2021 22:44:07 +0100
|
||||
|
||||
rust-proxmox-backup (2.0.14-1) bullseye; urgency=medium
|
||||
|
||||
* fix directory permission problems
|
||||
|
||||
* add traffic control configuration config with API
|
||||
|
||||
* proxmox-backup-proxy: implement traffic control
|
||||
|
||||
* proxmox-backup-client: add rate/burst parameter to backup/restore CLI
|
||||
|
||||
* openid_login: verify that firstname, lastname and email fit our
|
||||
schema definitions
|
||||
|
||||
* docs: add info about protection flag to client docs
|
||||
|
||||
* fix #3602: ui: datastore/Content: add action to set protection status
|
||||
|
||||
* ui: add protected icon to snapshot (if they are protected)
|
||||
|
||||
* ui: PruneInputPanel: add keepReason 'protected' for protected backups
|
||||
|
||||
* proxmox-backup-client: add 'protected' commands
|
||||
|
||||
* acme: interpret no TOS as accepted
|
||||
|
||||
* acme: new_account: prevent replacing existing accounts
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Fri, 12 Nov 2021 08:04:55 +0100
|
||||
|
||||
rust-proxmox-backup (2.0.13-1) bullseye; urgency=medium
|
||||
|
||||
* tape: simplify export_media_set for pool writer
|
||||
|
||||
* tape: improve export_media error message for not found tape
|
||||
|
||||
* rest-server: use hashmap for parameter errors
|
||||
|
||||
* proxmox-rrd: use new file format with higher resolution
|
||||
|
||||
* proxmox-rrd: use a journal to reduce amount of bytes written
|
||||
|
||||
* use new fsync parameter to replace_file and atomic_open_or_create
|
||||
|
||||
* docs: language and formatting fixup
|
||||
|
||||
* docs: Update for new features/functionality
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Thu, 21 Oct 2021 08:17:00 +0200
|
||||
|
||||
rust-proxmox-backup (2.0.12-1) bullseye; urgency=medium
|
||||
|
||||
* proxmox-backup-proxy: clean up old tasks when their reference was rotated
|
||||
out of the task-log index
|
||||
|
||||
* api daemons: fix sending log-reopen command
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Tue, 19 Oct 2021 10:48:28 +0200
|
||||
|
||||
rust-proxmox-backup (2.0.11-1) bullseye; urgency=medium
|
||||
|
||||
* drop artificial limits for task-UPID length
|
||||
|
||||
* tools: smart: only throw error for the fatal usage errors of smartctl
|
||||
|
||||
* api: improve returning errors for extjs formatter
|
||||
|
||||
* proxmox-rest-server: improve logging
|
||||
|
||||
* subscription: switch verification domain over to shop.proxmox.com
|
||||
|
||||
* rest-server/daemon: use new sd_notify_barrier helper for handling
|
||||
synchronization with systemd on service reloading
|
||||
|
||||
* ui: datastore/Content: add empty text for no snapshots
|
||||
|
||||
* ui: datastore/Content: move first store-load into activate listener to
|
||||
ensure we've a proper loading mask for better UX
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Tue, 05 Oct 2021 16:34:14 +0200
|
||||
|
||||
rust-proxmox-backup (2.0.10-1) bullseye; urgency=medium
|
||||
|
||||
* ui: fix order of prune keep reasons
|
||||
|
||||
|
59 debian/control (vendored)
@ -8,9 +8,10 @@ Build-Depends: debhelper (>= 12),
|
||||
libstd-rust-dev,
|
||||
librust-anyhow-1+default-dev,
|
||||
librust-apt-pkg-native-0.3+default-dev (>= 0.3.2-~~),
|
||||
librust-base64-0.12+default-dev,
|
||||
librust-base64-0.13+default-dev,
|
||||
librust-bitflags-1+default-dev (>= 1.2.1-~~),
|
||||
librust-bytes-1+default-dev,
|
||||
librust-cidr-0.2+default-dev (>= 0.2.1-~~),
|
||||
librust-crc32fast-1+default-dev,
|
||||
librust-crossbeam-channel-0.5+default-dev,
|
||||
librust-endian-trait-0.6+arrays-dev,
|
||||
@ -22,9 +23,10 @@ Build-Depends: debhelper (>= 12),
|
||||
librust-h2-0.3+default-dev,
|
||||
librust-h2-0.3+stream-dev,
|
||||
librust-handlebars-3+default-dev,
|
||||
librust-hex-0.4+default-dev (>= 0.4.3-~~),
|
||||
librust-http-0.2+default-dev,
|
||||
librust-hyper-0.14+default-dev,
|
||||
librust-hyper-0.14+full-dev,
|
||||
librust-hyper-0.14+default-dev (>= 0.14.5-~~),
|
||||
librust-hyper-0.14+full-dev (>= 0.14.5-~~),
|
||||
librust-lazy-static-1+default-dev (>= 1.4-~~),
|
||||
librust-libc-0.2+default-dev,
|
||||
librust-log-0.4+default-dev,
|
||||
@ -38,26 +40,43 @@ Build-Depends: debhelper (>= 12),
|
||||
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
|
||||
librust-percent-encoding-2+default-dev (>= 2.1-~~),
|
||||
librust-pin-project-lite-0.2+default-dev,
|
||||
librust-proxmox-0.13+api-macro-dev,
|
||||
librust-proxmox-0.13+cli-dev,
|
||||
librust-proxmox-0.13+default-dev,
|
||||
librust-proxmox-0.13+router-dev,
|
||||
librust-proxmox-0.13+sortable-macro-dev,
|
||||
librust-proxmox-0.13+tfa-dev,
|
||||
librust-proxmox-acme-rs-0.2+default-dev (>= 0.2.1-~~),
|
||||
librust-proxmox-apt-0.7+default-dev,
|
||||
librust-proxmox-0.15+default-dev (>= 0.15.3-~~),
|
||||
librust-proxmox-0.15+sortable-macro-dev (>= 0.15.3-~~),
|
||||
librust-proxmox-0.15+tokio-dev (>= 0.15.3-~~),
|
||||
librust-proxmox-acme-rs-0.3+default-dev,
|
||||
librust-proxmox-apt-0.8+default-dev,
|
||||
librust-proxmox-async-0.2+default-dev,
|
||||
librust-proxmox-borrow-1+default-dev,
|
||||
librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
|
||||
librust-proxmox-http-0.4+client-dev,
|
||||
librust-proxmox-http-0.4+default-dev ,
|
||||
librust-proxmox-http-0.4+http-helpers-dev,
|
||||
librust-proxmox-http-0.4+websocket-dev,
|
||||
librust-proxmox-openid-0.7+default-dev,
|
||||
librust-proxmox-http-0.5+client-dev (>= 0.5.4-~~),
|
||||
librust-proxmox-http-0.5+default-dev (>= 0.5.4-~~),
|
||||
librust-proxmox-http-0.5+http-helpers-dev (>= 0.5.4-~~),
|
||||
librust-proxmox-http-0.5+websocket-dev (>= 0.5.4-~~),
|
||||
librust-proxmox-io-1+default-dev,
|
||||
librust-proxmox-io-1+tokio-dev,
|
||||
librust-proxmox-lang-1+default-dev,
|
||||
librust-proxmox-openid-0.9+default-dev,
|
||||
librust-proxmox-router-1+cli-dev (>= 1.1-~~),
|
||||
librust-proxmox-router-1+default-dev (>= 1.1-~~),
|
||||
librust-proxmox-schema-1+api-macro-dev (>= 1.0.1-~~),
|
||||
librust-proxmox-schema-1+default-dev (>= 1.0.1-~~),
|
||||
librust-proxmox-schema-1+upid-api-impl-dev (>= 1.0.1-~~),
|
||||
librust-proxmox-section-config-1+default-dev,
|
||||
librust-proxmox-shared-memory-0.1+default-dev (>= 0.1.1-~~),
|
||||
librust-proxmox-sys-0.1+default-dev (>= 0.1.2-~~),
|
||||
librust-proxmox-tfa-1+api-dev (>= 1.3-~~),
|
||||
librust-proxmox-tfa-1+api-types-dev (>= 1.3-~~),
|
||||
librust-proxmox-tfa-1+default-dev (>= 1.3-~~),
|
||||
librust-proxmox-time-1+default-dev (>= 1.1-~~),
|
||||
librust-proxmox-uuid-1+default-dev,
|
||||
librust-proxmox-uuid-1+serde-dev,
|
||||
librust-pxar-0.10+default-dev (>= 0.10.1-~~),
|
||||
librust-pxar-0.10+tokio-io-dev (>= 0.10.1-~~),
|
||||
librust-regex-1+default-dev (>= 1.2-~~),
|
||||
librust-rustyline-7+default-dev,
|
||||
librust-serde-1+default-dev,
|
||||
librust-serde-1+derive-dev,
|
||||
librust-serde-cbor-0.11+default-dev (>= 0.11.1-~~),
|
||||
librust-serde-json-1+default-dev,
|
||||
librust-siphasher-0.3+default-dev,
|
||||
librust-syslog-4+default-dev,
|
||||
@ -73,6 +92,7 @@ Build-Depends: debhelper (>= 12),
|
||||
librust-tokio-1+rt-dev (>= 1.6-~~),
|
||||
librust-tokio-1+rt-multi-thread-dev (>= 1.6-~~),
|
||||
librust-tokio-1+signal-dev (>= 1.6-~~),
|
||||
librust-tokio-1+sync-dev (>= 1.6-~~),
|
||||
librust-tokio-1+time-dev (>= 1.6-~~),
|
||||
librust-tokio-openssl-0.6+default-dev (>= 0.6.1-~~),
|
||||
librust-tokio-stream-0.1+default-dev,
|
||||
@ -80,16 +100,15 @@ Build-Depends: debhelper (>= 12),
|
||||
librust-tokio-util-0.6+default-dev,
|
||||
librust-tokio-util-0.6+io-dev,
|
||||
librust-tower-service-0.3+default-dev,
|
||||
librust-udev-0.4+default-dev | librust-udev-0.3+default-dev,
|
||||
librust-udev-0.4+default-dev,
|
||||
librust-url-2+default-dev (>= 2.1-~~),
|
||||
librust-walkdir-2+default-dev,
|
||||
librust-webauthn-rs-0.2+default-dev (>= 0.2.5-~~),
|
||||
librust-xdg-2+default-dev (>= 2.2-~~),
|
||||
librust-zstd-0.6+bindgen-dev,
|
||||
librust-zstd-0.6+default-dev,
|
||||
libacl1-dev,
|
||||
libfuse3-dev,
|
||||
libsystemd-dev,
|
||||
libsystemd-dev (>= 246-~~),
|
||||
uuid-dev,
|
||||
libsgutils2-dev,
|
||||
bash-completion,
|
||||
@ -131,7 +150,7 @@ Depends: fonts-font-awesome,
|
||||
postfix | mail-transport-agent,
|
||||
proxmox-backup-docs,
|
||||
proxmox-mini-journalreader,
|
||||
proxmox-widget-toolkit (>= 3.3-2),
|
||||
proxmox-widget-toolkit (>= 3.4-3),
|
||||
pve-xtermjs (>= 4.7.0-1),
|
||||
sg3-utils,
|
||||
smartmontools,
|
||||
|
31 debian/postinst (vendored)
@ -4,6 +4,14 @@ set -e
|
||||
|
||||
#DEBHELPER#
|
||||
|
||||
update_sync_job() {
|
||||
job="$1"
|
||||
|
||||
echo "Updating sync job '$job' to make old 'remove-vanished' default explicit.."
|
||||
proxmox-backup-manager sync-job update "$job" --remove-vanished true \
|
||||
|| echo "Failed, please check sync.cfg manually!"
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
configure)
|
||||
# need to have user backup in the tape group
|
||||
@ -32,6 +40,29 @@ case "$1" in
|
||||
echo "Fixing up termproxy user id in task log..."
|
||||
flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active || true
|
||||
fi
|
||||
|
||||
if dpkg --compare-versions "$2" 'lt' '7.1-1' && test -e /etc/proxmox-backup/sync.cfg; then
|
||||
prev_job=""
|
||||
|
||||
# read from HERE doc because POSIX sh limitations
|
||||
while read -r key value; do
|
||||
if test "$key" = "sync:"; then
|
||||
if test -n "$prev_job"; then
|
||||
# previous job doesn't have an explicit value
|
||||
update_sync_job "$prev_job"
|
||||
fi
|
||||
prev_job=$value
|
||||
else
|
||||
prev_job=""
|
||||
fi
|
||||
done <<EOF
|
||||
$(grep -e '^sync:' -e 'remove-vanished' /etc/proxmox-backup/sync.cfg)
|
||||
EOF
|
||||
if test -n "$prev_job"; then
|
||||
# last job doesn't have an explicit value
|
||||
update_sync_job "$prev_job"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
;;
|
||||
|
||||
|
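To illustrate the postinst loop above: for a hypothetical ``/etc/proxmox-backup/sync.cfg`` (the job names and options here are invented for this sketch), only the job without an explicit ``remove-vanished`` line gets rewritten:

    sync: pull-store1
        remote pbs-remote
        remove-vanished false

    sync: pull-store2
        remote pbs-remote

Here the grep output fed into the while-loop would be ``sync: pull-store1``, ``remove-vanished false`` and ``sync: pull-store2``; only ``pull-store2`` lacks an explicit value, so the script would run ``proxmox-backup-manager sync-job update pull-store2 --remove-vanished true``.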
@ -1,31 +1,33 @@
|
||||
Backup Client Usage
|
||||
===================
|
||||
|
||||
The command line client is called :command:`proxmox-backup-client`.
|
||||
The command line client for Proxmox Backup Server is called
|
||||
:command:`proxmox-backup-client`.
|
||||
|
||||
.. _client_repository:
|
||||
|
||||
Backup Repository Locations
|
||||
---------------------------
|
||||
|
||||
The client uses the following notation to specify a datastore repository
|
||||
on the backup server.
|
||||
The client uses the following format to specify a datastore repository
|
||||
on the backup server (where username is specified in the form of user@realm):
|
||||
|
||||
[[username@]server[:port]:]datastore
|
||||
|
||||
The default value for ``username`` is ``root@pam``. If no server is specified,
|
||||
the default is the local host (``localhost``).
|
||||
|
||||
You can specify a port if your backup server is only reachable on a different
|
||||
port (e.g. with NAT and port forwarding).
|
||||
You can specify a port if your backup server is only reachable on a non-default
|
||||
port (for example, with NAT and port forwarding configurations).
|
||||
|
||||
Note that if the server is an IPv6 address, you have to write it with square
|
||||
Note that if the server uses an IPv6 address, you have to write it with square
|
||||
brackets (for example, `[fe80::01]`).
|
||||
|
||||
You can pass the repository with the ``--repository`` command line option, or
|
||||
by setting the ``PBS_REPOSITORY`` environment variable.
|
||||
|
||||
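For instance (a sketch only; the datastore name ``store1`` and the ``snapshot list`` subcommand are chosen purely for illustration):

.. code-block:: console

   # proxmox-backup-client snapshot list --repository backup-server:store1
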
Here some examples of valid repositories and the real values
|
||||
Below are some examples of valid repositories and their corresponding real
|
||||
values:
|
||||
|
||||
================================ ================== ================== ===========
|
||||
Example User Host:Port Datastore
|
||||
@ -46,8 +48,8 @@ Environment Variables
|
||||
The default backup repository.
|
||||
|
||||
``PBS_PASSWORD``
|
||||
When set, this value is used for the password required for the backup server.
|
||||
You can also set this to a API token secret.
|
||||
When set, this value is used as the password for the backup server.
|
||||
You can also set this to an API token secret.
|
||||
|
||||
``PBS_PASSWORD_FD``, ``PBS_PASSWORD_FILE``, ``PBS_PASSWORD_CMD``
|
||||
Like ``PBS_PASSWORD``, but read data from an open file descriptor, a file
|
||||
@ -63,15 +65,14 @@ Environment Variables
|
||||
a file name or from the `stdout` of a command, respectively. The first
|
||||
defined environment variable from the order above is preferred.
|
||||
|
||||
``PBS_FINGERPRINT`` When set, this value is used to verify the server
|
||||
certificate (only used if the system CA certificates cannot validate the
|
||||
certificate).
|
||||
``PBS_FINGERPRINT``
|
||||
When set, this value is used to verify the server certificate (only used if
|
||||
the system CA certificates cannot validate the certificate).
|
||||
|
||||
|
||||
.. Note:: Passwords must be valid UTF8 an may not contain
|
||||
newlines. For your convienience, we just use the first line as
|
||||
password, so you can add arbitrary comments after the
|
||||
first newline.
|
||||
.. Note:: Passwords must be valid UTF-8 and may not contain newlines. For your
|
||||
convenience, Proxmox Backup Server only uses the first line as password, so
|
||||
you can add arbitrary comments after the first newline.
|
||||
|
||||
|
||||
Output Format
|
||||
@ -86,14 +87,15 @@ Creating Backups
|
||||
----------------
|
||||
|
||||
This section explains how to create a backup from within the machine. This can
|
||||
be a physical host, a virtual machine, or a container. Such backups may contain file
|
||||
and image archives. There are no restrictions in this case.
|
||||
be a physical host, a virtual machine, or a container. Such backups may contain
|
||||
file and image archives. There are no restrictions in this case.
|
||||
|
||||
.. note:: If you want to backup virtual machines or containers on Proxmox VE, see :ref:`pve-integration`.
|
||||
.. Note:: If you want to backup virtual machines or containers on Proxmox VE,
|
||||
see :ref:`pve-integration`.
|
||||
|
||||
For the following example you need to have a backup server set up, working
|
||||
credentials and need to know the repository name.
|
||||
In the following examples we use ``backup-server:store1``.
|
||||
For the following example, you need to have a backup server set up, have working
|
||||
credentials, and know the repository name.
|
||||
In the following examples, we use ``backup-server:store1``.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@ -107,12 +109,12 @@ In the following examples we use ``backup-server:store1``.
|
||||
Uploaded 12129 chunks in 87 seconds (564 MB/s).
|
||||
End Time: 2019-12-03T10:36:29+01:00
|
||||
|
||||
This will prompt you for a password and then uploads a file archive named
|
||||
This will prompt you for a password, then upload a file archive named
|
||||
``root.pxar`` containing all the files in the ``/`` directory.
|
||||
|
||||
.. Caution:: Please note that the proxmox-backup-client does not
|
||||
.. Caution:: Please note that proxmox-backup-client does not
|
||||
automatically include mount points. Instead, you will see a short
|
||||
``skip mount point`` notice for each of them. The idea is to
|
||||
``skip mount point`` message for each of them. The idea is to
|
||||
create a separate file archive for each mounted disk. You can
|
||||
explicitly include them using the ``--include-dev`` option
|
||||
(i.e. ``--include-dev /boot/efi``). You can use this option
|
||||
@ -120,18 +122,18 @@ This will prompt you for a password and then uploads a file archive named
|
||||
|
||||
The ``--repository`` option can get quite long and is used by all
|
||||
commands. You can avoid having to enter this value by setting the
|
||||
environment variable ``PBS_REPOSITORY``. Note that if you would like this to remain set
|
||||
over multiple sessions, you should instead add the below line to your
|
||||
environment variable ``PBS_REPOSITORY``. Note that if you would like this to
|
||||
remain set over multiple sessions, you should instead add the below line to your
|
||||
``.bashrc`` file.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# export PBS_REPOSITORY=backup-server:store1
|
||||
|
||||
After this you can execute all commands without specifying the ``--repository``
|
||||
option.
|
||||
After this, you can execute all commands without having to specify the
|
||||
``--repository`` option.
|
||||
|
||||
One single backup is allowed to contain more than one archive. For example, if
|
||||
A single backup is allowed to contain more than one archive. For example, if
|
||||
you want to back up two disks mounted at ``/mnt/disk1`` and ``/mnt/disk2``:
|
||||
|
||||
.. code-block:: console
|
||||
@ -146,26 +148,26 @@ archive source at the client. The format is:
|
||||
|
||||
<archive-name>.<type>:<source-path>
|
||||
|
||||
Common types are ``.pxar`` for file archives, and ``.img`` for block
|
||||
device images. To create a backup of a block device run the following command:
|
||||
Common types are ``.pxar`` for file archives and ``.img`` for block
|
||||
device images. To create a backup of a block device, run the following command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client backup mydata.img:/dev/mylvm/mydata
|
||||
|
||||
|
||||
Excluding files/folders from a backup
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Excluding Files/Directories from a Backup
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Sometimes it is desired to exclude certain files or folders from a backup archive.
|
||||
Sometimes it is desired to exclude certain files or directories from a backup archive.
|
||||
To tell the Proxmox Backup client when and how to ignore files and directories,
|
||||
place a text file called ``.pxarexclude`` in the filesystem hierarchy.
|
||||
place a text file named ``.pxarexclude`` in the filesystem hierarchy.
|
||||
Whenever the backup client encounters such a file in a directory, it interprets
|
||||
each line as glob match patterns for files and directories that are to be excluded
|
||||
each line as a glob match pattern for files and directories that are to be excluded
|
||||
from the backup.
|
||||
|
||||
The file must contain a single glob pattern per line. Empty lines are ignored.
|
||||
The same is true for lines starting with ``#``, which indicates a comment.
|
||||
The file must contain a single glob pattern per line. Empty lines and lines
|
||||
starting with ``#`` (indicating a comment) are ignored.
|
||||
A ``!`` at the beginning of a line reverses the glob match pattern from an exclusion
|
||||
to an explicit inclusion. This makes it possible to exclude all entries in a
|
||||
directory except for a few single files/subdirectories.
|
||||
@ -176,23 +178,24 @@ the given patterns. It is only possible to match files in this directory and its
|
||||
``\`` is used to escape special glob characters.
|
||||
``?`` matches any single character.
|
||||
``*`` matches any character, including an empty string.
|
||||
``**`` is used to match subdirectories. It can be used to, for example, exclude
|
||||
all files ending in ``.tmp`` within the directory or subdirectories with the
|
||||
following pattern ``**/*.tmp``.
|
||||
``**`` is used to match the current directory and subdirectories. For example, with
|
||||
the pattern ``**/*.tmp``, it would exclude all files ending in ``.tmp`` within
|
||||
a directory and its subdirectories.
|
||||
``[...]`` matches a single character from any of the provided characters within
|
||||
the brackets. ``[!...]`` does the complementary and matches any single character
|
||||
not contained within the brackets. It is also possible to specify ranges with two
|
||||
characters separated by ``-``. For example, ``[a-z]`` matches any lowercase
|
||||
alphabetic character and ``[0-9]`` matches any one single digit.
|
||||
alphabetic character, and ``[0-9]`` matches any single digit.
|
||||
|
||||
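As an illustration of these rules, a small, hypothetical ``.pxarexclude`` file could look like this (all paths are invented for the sketch)::

    **/*.tmp
    /local/cache/*
    !/local/cache/keep

This would exclude all ``.tmp`` files anywhere below the directory, exclude everything inside ``local/cache``, but keep the ``keep`` subdirectory, because the later ``!`` entry overrides the earlier exclusion.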
The order of the glob match patterns defines whether a file is included or
|
||||
excluded, that is to say later entries override previous ones.
|
||||
excluded, that is to say, later entries override earlier ones.
|
||||
This is also true for match patterns encountered deeper down the directory tree,
|
||||
which can override a previous exclusion.
|
||||
Be aware that excluded directories will **not** be read by the backup client.
|
||||
Thus, a ``.pxarexclude`` file in an excluded subdirectory will have no effect.
|
||||
``.pxarexclude`` files are treated as regular files and will be included in the
|
||||
backup archive.
|
||||
|
||||
.. Note:: Excluded directories will **not** be read by the backup client. Thus,
|
||||
a ``.pxarexclude`` file in an excluded subdirectory will have no effect.
|
||||
``.pxarexclude`` files are treated as regular files and will be included in
|
||||
the backup archive.
|
||||
|
||||
For example, consider the following directory structure:
|
||||
|
||||
@ -280,7 +283,7 @@ You can avoid entering the passwords by setting the environment
|
||||
variables ``PBS_PASSWORD`` and ``PBS_ENCRYPTION_PASSWORD``.
|
||||
|
||||
|
||||
Using a master key to store and recover encryption keys
|
||||
Using a Master Key to Store and Recover Encryption Keys
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
You can also use ``proxmox-backup-client key`` to create an RSA public/private
|
||||
@ -360,7 +363,7 @@ To set up a master key:
|
||||
keep keys ordered and in a place that is separate from the contents being
|
||||
backed up. It can happen, for example, that you back up an entire system, using
|
||||
a key on that system. If the system then becomes inaccessible for any reason
|
||||
and needs to be restored, this will not be possible as the encryption key will be
|
||||
and needs to be restored, this will not be possible, as the encryption key will be
|
||||
lost along with the broken system.
|
||||
|
||||
It is recommended that you keep your master key safe, but easily accessible, in
|
||||
@ -382,10 +385,10 @@ version of your master key. The following command sends the output of the
|
||||
Restoring Data
|
||||
--------------
|
||||
|
||||
The regular creation of backups is a necessary step to avoiding data
|
||||
loss. More importantly, however, is the restoration. It is good practice to perform
|
||||
periodic recovery tests to ensure that you can access the data in
|
||||
case of problems.
|
||||
The regular creation of backups is a necessary step in avoiding data loss. More
|
||||
importantly, however, is the restoration. It is good practice to perform
|
||||
periodic recovery tests to ensure that you can access the data in case of
|
||||
disaster.
|
||||
|
||||
First, you need to find the snapshot which you want to restore. The snapshot
|
||||
list command provides a list of all the snapshots on the server:
|
||||
@ -444,23 +447,22 @@ to use the interactive recovery shell.
|
||||
|
||||
The interactive recovery shell is a minimal command line interface that
|
||||
utilizes the metadata stored in the catalog to quickly list, navigate and
|
||||
search files in a file archive.
|
||||
search for files in a file archive.
|
||||
To restore files, you can select them individually or match them with a glob
|
||||
pattern.
|
||||
|
||||
Using the catalog for navigation reduces the overhead considerably because only
|
||||
the catalog needs to be downloaded and, optionally, decrypted.
|
||||
The actual chunks are only accessed if the metadata in the catalog is not enough
|
||||
or for the actual restore.
|
||||
The actual chunks are only accessed if the metadata in the catalog is
|
||||
insufficient or for the actual restore.
|
||||
|
||||
Similar to common UNIX shells ``cd`` and ``ls`` are the commands used to change
|
||||
Similar to common UNIX shells, ``cd`` and ``ls`` are the commands used to change
|
||||
working directory and list directory contents in the archive.
|
||||
``pwd`` shows the full path of the current working directory with respect to the
|
||||
archive root.
|
||||
|
||||
Being able to quickly search the contents of the archive is a commonly needed feature.
|
||||
That's where the catalog is most valuable.
|
||||
For example:
|
||||
The ability to quickly search the contents of the archive is a commonly required
|
||||
feature. That's where the catalog is most valuable. For example:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@ -471,8 +473,8 @@ For example:
|
||||
pxar:/ > restore-selected /target/path
|
||||
...
|
||||
|
||||
This will find and print all files ending in ``.txt`` located in ``etc/`` or a
|
||||
subdirectory and add the corresponding pattern to the list for subsequent restores.
|
||||
This will find and print all files ending in ``.txt`` located in ``etc/`` or its
|
||||
subdirectories, and add the corresponding pattern to the list for subsequent restores.
|
||||
``list-selected`` shows these patterns and ``restore-selected`` finally restores
|
||||
all files in the archive matching the patterns to ``/target/path`` on the local
|
||||
host. This will scan the whole archive.
|
||||
@ -513,7 +515,7 @@ This allows you to access the full contents of the archive in a seamless manner.
|
||||
load on your host, depending on the operations you perform on the mounted
|
||||
filesystem.
|
||||
|
||||
To unmount the filesystem use the ``umount`` command on the mountpoint:
|
||||
To unmount the filesystem, use the ``umount`` command on the mount point:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@ -522,7 +524,7 @@ To unmount the filesystem use the ``umount`` command on the mountpoint:
|
||||
Login and Logout
|
||||
----------------
|
||||
|
||||
The client tool prompts you to enter the logon password as soon as you
|
||||
The client tool prompts you to enter the login password as soon as you
|
||||
want to access the backup server. The server checks your credentials
|
||||
and responds with a ticket that is valid for two hours. The client
|
||||
tool automatically stores that ticket and uses it for further requests
|
||||
@ -652,6 +654,25 @@ shows the list of existing snapshots and what actions prune would take.
|
||||
in the chunk-store. The chunk-store still contains the data blocks. To free
|
||||
space you need to perform :ref:`client_garbage-collection`.
|
||||
|
||||
It is also possible to protect single snapshots from being pruned or deleted:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client snapshot protected update <snapshot> true
|
||||
|
||||
This will set the protected flag on the snapshot and prevent pruning or manual
|
||||
deletion of this snapshot until the flag is removed again with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client snapshot protected update <snapshot> false
|
||||
|
||||
When a group with a protected snapshot is deleted, only the non-protected
|
||||
ones are removed and the group will remain.
|
||||
|
||||
.. note:: This flag will not be synced when using pull or sync jobs. If you
|
||||
want to protect a synced snapshot, you have to manually do this again on
|
||||
the target backup server.
|
||||
|
||||
.. _client_garbage-collection:
|
||||
|
||||
@ -677,7 +698,7 @@ unused data blocks are removed.
|
||||
(access time) property. Filesystems are mounted with the ``relatime`` option
|
||||
by default. This results in a better performance by only updating the
|
||||
``atime`` property if the last access has been at least 24 hours ago. The
|
||||
downside is, that touching a chunk within these 24 hours will not always
|
||||
downside is that touching a chunk within these 24 hours will not always
|
||||
update its ``atime`` property.
|
||||
|
||||
Chunks in the grace period will be logged at the end of the garbage
|
||||
@ -701,8 +722,8 @@ unused data blocks are removed.
|
||||
Average chunk size: 2486565
|
||||
TASK OK
|
||||
|
||||
|
||||
.. todo:: howto run garbage-collection at regular intervals (cron)
|
||||
Garbage collection can also be scheduled using ``proxmox-backup-manager`` or
|
||||
from the Proxmox Backup Server's web interface.
|
||||
|
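For example (a sketch; the datastore name and schedule value are illustrative, and the exact option name should be checked with ``proxmox-backup-manager datastore update --help``):

.. code-block:: console

   # proxmox-backup-manager datastore update store1 --gc-schedule 'daily'
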
||||
Benchmarking
|
||||
------------
|
||||
|
@ -1,10 +1,10 @@
|
||||
Backup Protocol
|
||||
===============
|
||||
|
||||
Proxmox Backup Server uses a REST based API. While the management
|
||||
interface use normal HTTP, the actual backup and restore interface use
|
||||
Proxmox Backup Server uses a REST-based API. While the management
|
||||
interface uses normal HTTP, the actual backup and restore interface uses
|
||||
HTTP/2 for improved performance. Both HTTP and HTTP/2 are well known
|
||||
standards, so the following section assumes that you are familiar on
|
||||
standards, so the following section assumes that you are familiar with
|
||||
how to use them.
|
||||
|
||||
|
||||
@ -13,35 +13,35 @@ Backup Protocol API
|
||||
|
||||
To start a new backup, the API call ``GET /api2/json/backup`` needs to
|
||||
be upgraded to a HTTP/2 connection using
|
||||
``proxmox-backup-protocol-v1`` as protocol name::
|
||||
``proxmox-backup-protocol-v1`` as the protocol name::
|
||||
|
||||
GET /api2/json/backup HTTP/1.1
|
||||
UPGRADE: proxmox-backup-protocol-v1
|
||||
|
||||
The server replies with HTTP 101 Switching Protocol status code,
|
||||
and you can then issue REST commands on that updated HTTP/2 connection.
|
||||
The server replies with the ``HTTP 101 Switching Protocol`` status code,
|
||||
and you can then issue REST commands on the updated HTTP/2 connection.
|
||||
|
||||
The backup protocol allows you to upload three different kind of files:
|
||||
|
||||
- Chunks and blobs (binary data)
|
||||
|
||||
- Fixed Indexes (List of chunks with fixed size)
|
||||
- Fixed indexes (List of chunks with fixed size)
|
||||
|
||||
- Dynamic Indexes (List of chunk with variable size)
|
||||
- Dynamic indexes (List of chunks with variable size)
|
||||
|
||||
The following section gives a short introduction how to upload such
|
||||
The following section provides a short introduction on how to upload such
|
||||
files. Please use the `API Viewer <api-viewer/index.html>`_ for
|
||||
details about available REST commands.
|
||||
details about the available REST commands.
|
||||
|
||||
|
||||
Upload Blobs
|
||||
~~~~~~~~~~~~
|
||||
|
||||
Uploading blobs is done using ``POST /blob``. The HTTP body contains the
|
||||
data encoded as :ref:`Data Blob <data-blob-format>`).
|
||||
Blobs are uploaded using ``POST /blob``. The HTTP body contains the
|
||||
data encoded as :ref:`Data Blob <data-blob-format>`.
|
||||
|
||||
The file name needs to end with ``.blob``, and is automatically added
|
||||
to the backup manifest.
|
||||
The file name must end with ``.blob``, and is automatically added
|
||||
to the backup manifest, following the call to ``POST /finish``.
|
||||
|
||||
|
||||
Upload Chunks
|
||||
@ -56,40 +56,41 @@ encoded as :ref:`Data Blob <data-blob-format>`).
|
||||
Upload Fixed Indexes
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Fixed indexes are use to store VM image data. The VM image is split
|
||||
Fixed indexes are used to store VM image data. The VM image is split
|
||||
into equally sized chunks, which are uploaded individually. The index
|
||||
file simply contains a list to chunk digests.
|
||||
file simply contains a list of chunk digests.
|
||||
|
||||
You create a fixed index with ``POST /fixed_index``. Then upload
|
||||
You create a fixed index with ``POST /fixed_index``. Then, upload
|
||||
chunks with ``POST /fixed_chunk``, and append them to the index with
|
||||
``PUT /fixed_index``. When finished, you need to close the index using
|
||||
``POST /fixed_close``.
|
||||
|
||||
The file name needs to end with ``.fidx``, and is automatically added
|
||||
to the backup manifest.
|
||||
to the backup manifest, following the call to ``POST /finish``.
|
||||
|
||||
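Put together, the sequence of requests for a single fixed index could be sketched as follows (parameter and body details are omitted; consult the API viewer for the exact fields)::

    POST /fixed_index    -> create the index
    POST /fixed_chunk    -> upload each chunk (repeat as needed)
    PUT  /fixed_index    -> append the digests of the uploaded chunks
    POST /fixed_close    -> close the index
    POST /finish         -> commit the backup
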
|
||||
Upload Dynamic Indexes
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Dynamic indexes are use to store file archive data. The archive data
|
||||
Dynamic indexes are used to store file archive data. The archive data
|
||||
is split into dynamically sized chunks, which are uploaded
|
||||
individually. The index file simply contains a list to chunk digests
|
||||
individually. The index file simply contains a list of chunk digests
|
||||
and offsets.
|
||||
|
||||
You create a dynamic sized index with ``POST /dynamic_index``. Then
|
||||
You can create a dynamically sized index with ``POST /dynamic_index``. Then,
|
||||
upload chunks with ``POST /dynamic_chunk``, and append them to the index with
|
||||
``PUT /dynamic_index``. When finished, you need to close the index using
|
||||
``POST /dynamic_close``.
|
||||
|
||||
The filename needs to end with ``.didx``, and is automatically added
|
||||
to the backup manifest.
|
||||
to the backup manifest, following the call to ``POST /finish``.
|
||||
|
||||
|
||||
Finish Backup
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
Once you have uploaded all data, you need to call ``POST
|
||||
/finish``. This commits all data and ends the backup protocol.
|
||||
Once you have uploaded all data, you need to call ``POST /finish``. This
|
||||
commits all data and ends the backup protocol.
|
||||
|
||||
|
||||
Restore/Reader Protocol API
|
||||
@ -102,39 +103,39 @@ be upgraded to a HTTP/2 connection using
|
||||
GET /api2/json/reader HTTP/1.1
|
||||
UPGRADE: proxmox-backup-reader-protocol-v1
|
||||
|
||||
The server replies with HTTP 101 Switching Protocol status code,
|
||||
The server replies with the ``HTTP 101 Switching Protocol`` status code,
|
||||
and you can then issue REST commands on that updated HTTP/2 connection.
|
||||
|
||||
The reader protocol allows you to download three different kind of files:
|
||||
The reader protocol allows you to download three different kinds of files:
|
||||
|
||||
- Chunks and blobs (binary data)
|
||||
|
||||
- Fixed Indexes (List of chunks with fixed size)
|
||||
- Fixed indexes (list of chunks with fixed size)
|
||||
|
||||
- Dynamic Indexes (List of chunk with variable size)
|
||||
- Dynamic indexes (list of chunks with variable size)
|
||||
|
||||
The following section gives a short introduction how to download such
|
||||
The following section provides a short introduction on how to download such
|
||||
files. Please use the `API Viewer <api-viewer/index.html>`_ for details about
|
||||
available REST commands.
|
||||
the available REST commands.
|
||||
|
||||
|
||||
Download Blobs
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
Downloading blobs is done using ``GET /download``. The HTTP body contains the
|
||||
Blobs are downloaded using ``GET /download``. The HTTP body contains the
|
||||
data encoded as :ref:`Data Blob <data-blob-format>`.
|
||||
|
||||
|
||||
Download Chunks
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
Downloading chunks is done using ``GET /chunk``. The HTTP body contains the
|
||||
data encoded as :ref:`Data Blob <data-blob-format>`).
|
||||
Chunks are downloaded using ``GET /chunk``. The HTTP body contains the
|
||||
data encoded as :ref:`Data Blob <data-blob-format>`.
|
||||
|
||||
|
||||
Download Index Files
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Downloading index files is done using ``GET /download``. The HTTP body
|
||||
Index files are downloaded using ``GET /download``. The HTTP body
|
||||
contains the data encoded as :ref:`Fixed Index <fixed-index-format>`
|
||||
or :ref:`Dynamic Index <dynamic-index-format>`.
|
||||
|
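At this level, a typical restore read sequence could be sketched as follows (the ordering and the "by digest" detail are assumptions; see the API viewer for specifics)::

    GET /download   -> fetch the index file (.fidx or .didx) or a blob
    GET /chunk      -> fetch each chunk referenced by the index, by digest
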
@ -37,7 +37,7 @@ Each field can contain multiple values in the following formats:
|
||||
* and a combination of the above: e.g., 01,05..10,12/02
|
||||
* or a `*` for every possible value: e.g., \*:00
|
||||
|
||||
There are some special values that have specific meaning:
|
||||
There are some special values that have a specific meaning:
|
||||
|
||||
================================= ==============================
|
||||
Value Syntax
|
||||
@ -81,19 +81,19 @@ Not all features of systemd calendar events are implemented:
|
||||
|
||||
* no Unix timestamps (e.g. `@12345`): instead use date and time to specify
|
||||
a specific point in time
|
||||
* no timezone: all schedules use the set timezone on the server
|
||||
* no timezone: all schedules use the timezone of the server
|
||||
* no sub-second resolution
|
||||
* no reverse day syntax (e.g. 2020-03~01)
|
||||
* no repetition of ranges (e.g. 1..10/2)
|
||||
|
||||
Notes on scheduling
|
||||
Notes on Scheduling
|
||||
-------------------
|
||||
|
||||
In `Proxmox Backup`_ scheduling for most tasks is done in the
|
||||
In `Proxmox Backup`_, scheduling for most tasks is done in the
|
||||
`proxmox-backup-proxy`. This daemon checks all job schedules
|
||||
if they are due every minute. This means that even if
|
||||
every minute, to see if any are due. This means that even though
|
||||
`calendar events` can contain seconds, it will only be checked
|
||||
once a minute.
|
||||
once per minute.
|
||||
|
||||
Also, all schedules will be checked against the timezone set
|
||||
in the `Proxmox Backup`_ server.
|
||||
|
@ -10,7 +10,7 @@ Command Syntax

Catalog Shell Commands
~~~~~~~~~~~~~~~~~~~~~~

Those command are available when you start an interactive restore shell:
The following commands are available in an interactive restore shell:

.. code-block:: console

@ -2,13 +2,13 @@ This file contains the access control list for the Proxmox Backup
Server API.

Each line starts with ``acl:``, followed by 4 additional values
separated by collon.
separated by colon.

:propagate: Propagate permissions down the hierachrchy
:propagate: Propagate permissions down the hierarchy

:path: The object path

:User/Token: List of users and token
:User/Token: List of users and tokens

:Role: List of assigned roles
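A single, hypothetical entry in this format could look as follows (the user, path and role are invented for illustration)::

    acl:1:/datastore/store1:john@pbs:DatastoreAdmin

where the leading ``1`` sets the propagate flag.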

@ -1,5 +1,5 @@
The file contains a list of datastore configuration sections. Each
section starts with a header ``datastore: <name>``, followed by the
This file contains a list of datastore configuration sections. Each
section starts with the header ``datastore: <name>``, followed by the
datastore configuration options.

::

@ -1,4 +1,4 @@
Each entry starts with a header ``pool: <name>``, followed by the
Each entry starts with the header ``pool: <name>``, followed by the
media pool configuration options.

::

@ -1,6 +1,6 @@
This file contains information used to access remote servers.

Each entry starts with a header ``remote: <name>``, followed by the
Each entry starts with the header ``remote: <name>``, followed by the
remote configuration options.

::

@ -1,4 +1,4 @@
Each entry starts with a header ``sync: <name>``, followed by the
Each entry starts with the header ``sync: <name>``, followed by the
job configuration options.

::

@ -1,4 +1,4 @@
Each entry starts with a header ``backup: <name>``, followed by the
Each entry starts with the header ``backup: <name>``, followed by the
job configuration options.

::

@ -1,7 +1,7 @@
Each LTO drive configuration section starts with a header ``lto: <name>``,
Each LTO drive configuration section starts with the header ``lto: <name>``,
followed by the drive configuration options.

Tape changer configurations starts with ``changer: <name>``,
Tape changer configurations start with the header ``changer: <name>``,
followed by the changer configuration options.

::
@ -18,5 +18,5 @@ followed by the changer configuration options.
You can use the ``proxmox-tape drive`` and ``proxmox-tape changer``
commands to manipulate this file.

.. NOTE:: The ``virtual:`` drive type is experimental and onyl used
.. NOTE:: The ``virtual:`` drive type is experimental and should only be used
for debugging.

@ -1,9 +1,9 @@
This file contains the list of API users and API tokens.

Each user configuration section starts with a header ``user: <name>``,
Each user configuration section starts with the header ``user: <name>``,
followed by the user configuration options.

API token configuration starts with a header ``token:
API token configuration starts with the header ``token:
<userid!token_name>``, followed by the token configuration. The data
used to authenticate tokens is stored in a separate file
(``token.shadow``).

@ -1,4 +1,4 @@
Each entry starts with a header ``verification: <name>``, followed by the
Each entry starts with the header ``verification: <name>``, followed by the
job configuration options.

::

@ -1,7 +1,7 @@
Configuration Files
===================

All Proxmox Backup Server configuration files resides inside directory
All Proxmox Backup Server configuration files reside in the directory
``/etc/proxmox-backup/``.

|
@ -13,7 +13,6 @@
|
||||
.. _Proxmox: https://www.proxmox.com
|
||||
.. _Proxmox Community Forum: https://forum.proxmox.com
|
||||
.. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
|
||||
.. FIXME
|
||||
.. _Proxmox Backup: https://pbs.proxmox.com/wiki/index.php/Main_Page
|
||||
.. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
|
||||
.. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
|
||||
@ -23,6 +22,7 @@
|
||||
.. _Virtual machine: https://en.wikipedia.org/wiki/Virtual_machine
|
||||
.. _APT: http://en.wikipedia.org/wiki/Advanced_Packaging_Tool
|
||||
.. _QEMU: https://www.qemu.org/
|
||||
.. _LXC: https://linuxcontainers.org/lxc/introduction/
|
||||
|
||||
.. _Client-server model: https://en.wikipedia.org/wiki/Client-server_model
|
||||
.. _AE: https://en.wikipedia.org/wiki/Authenticated_encryption
|
||||
|
@ -69,6 +69,6 @@ be able to read the data.
|
||||
Is the backup incremental/deduplicated?
|
||||
---------------------------------------
|
||||
|
||||
With Proxmox Backup Server, backups are sent incremental and data is
|
||||
deduplicated on the server.
|
||||
This minimizes both the storage consumed and the network impact.
|
||||
With Proxmox Backup Server, backups are sent incrementally to the server, and
|
||||
data is then deduplicated on the server. This minimizes both the storage
|
||||
consumed and the impact on the network.
|
||||
|
@ -14,7 +14,8 @@ Proxmox File Archive Format (``.pxar``)
|
||||
Data Blob Format (``.blob``)
|
||||
----------------------------
|
||||
|
||||
The data blob format is used to store small binary data. The magic number decides the exact format:
|
||||
The data blob format is used to store small binary data. The magic number
|
||||
decides the exact format:
|
||||
|
||||
.. list-table::
|
||||
:widths: auto
|
||||
@ -32,7 +33,8 @@ The data blob format is used to store small binary data. The magic number decide
|
||||
- encrypted
|
||||
- compressed
|
||||
|
||||
Compression algorithm is ``zstd``. Encryption cipher is ``AES_256_GCM``.
|
||||
The compression algorithm used is ``zstd``. The encryption cipher is
|
||||
``AES_256_GCM``.
|
||||
|
||||
Unencrypted blobs use the following format:
|
||||
|
||||
@ -43,9 +45,9 @@ Unencrypted blobs use the following format:
|
||||
* - ``CRC32: [u8; 4]``
|
||||
* - ``Data: (max 16MiB)``
|
||||
|
||||
Encrypted blobs additionally contains a 16 byte IV, followed by a 16
|
||||
byte Authenticated Encyryption (AE) tag, followed by the encrypted
|
||||
data:
|
||||
Encrypted blobs additionally contain a 16 byte initialization vector (IV),
|
||||
followed by a 16 byte authenticated encryption (AE) tag, followed by the
|
||||
encrypted data:
|
||||
|
||||
.. list-table::
|
||||
|
||||
@ -72,19 +74,19 @@ All numbers are stored as little-endian.
|
||||
* - ``ctime: i64``,
|
||||
- Creation Time (epoch)
|
||||
* - ``index_csum: [u8; 32]``,
|
||||
- Sha256 over the index (without header) ``SHA256(digest1||digest2||...)``
|
||||
- SHA-256 over the index (without header) ``SHA256(digest1||digest2||...)``
|
||||
* - ``size: u64``,
|
||||
- Image size
|
||||
* - ``chunk_size: u64``,
|
||||
- Chunk size
|
||||
* - ``reserved: [u8; 4016]``,
|
||||
- overall header size is one page (4096 bytes)
|
||||
- Overall header size is one page (4096 bytes)
|
||||
* - ``digest1: [u8; 32]``
|
||||
- first chunk digest
|
||||
- First chunk digest
|
||||
* - ``digest2: [u8; 32]``
|
||||
- next chunk
|
||||
- Second chunk digest
|
||||
* - ...
|
||||
- next chunk ...
|
||||
- Next chunk digest ...
|
||||
|
||||
|
||||
.. _dynamic-index-format:
|
||||
@ -103,16 +105,16 @@ All numbers are stored as little-endian.
|
||||
* - ``ctime: i64``,
|
||||
- Creation Time (epoch)
|
||||
* - ``index_csum: [u8; 32]``,
|
||||
- Sha256 over the index (without header) ``SHA256(offset1||digest1||offset2||digest2||...)``
|
||||
- SHA-256 over the index (without header) ``SHA256(offset1||digest1||offset2||digest2||...)``
|
||||
* - ``reserved: [u8; 4032]``,
|
||||
- Overall header size is one page (4096 bytes)
|
||||
* - ``offset1: u64``
|
||||
- End of first chunk
|
||||
* - ``digest1: [u8; 32]``
|
||||
- first chunk digest
|
||||
- First chunk digest
|
||||
* - ``offset2: u64``
|
||||
- End of second chunk
|
||||
* - ``digest2: [u8; 32]``
|
||||
- second chunk digest
|
||||
- Second chunk digest
|
||||
* - ...
|
||||
- next chunk offset/digest
|
||||
- Next chunk offset/digest
|
||||
|
@ -11,7 +11,7 @@ Glossary
|
||||
`Container`_
|
||||
|
||||
A container is an isolated user space. Programs run directly on
|
||||
the host's kernel, but with limited access to the host resources.
|
||||
the host's kernel, but with limited access to the host's resources.
|
||||
|
||||
Datastore
|
||||
|
||||
@ -23,19 +23,19 @@ Glossary
|
||||
Rust is a new, fast and memory-efficient system programming
|
||||
language. It has no runtime or garbage collector. Rust’s rich type
|
||||
system and ownership model guarantee memory-safety and
|
||||
thread-safety. I can eliminate many classes of bugs
|
||||
thread-safety. This can eliminate many classes of bugs
|
||||
at compile-time.
|
||||
|
||||
`Sphinx`_
|
||||
|
||||
Is a tool that makes it easy to create intelligent and
|
||||
beautiful documentation. It was originally created for the
|
||||
documentation of the Python programming language. It has excellent facilities for the
|
||||
Is a tool that makes it easy to create intelligent and nicely formatted
|
||||
documentation. It was originally created for the documentation of the
|
||||
Python programming language. It has excellent facilities for the
|
||||
documentation of software projects in a range of languages.
|
||||
|
||||
`reStructuredText`_
|
||||
|
||||
Is an easy-to-read, what-you-see-is-what-you-get plaintext
|
||||
Is an easy-to-read, what-you-see-is-what-you-get, plaintext
|
||||
markup syntax and parser system.
|
||||
|
||||
`FUSE`
|
||||
|
docs/gui.rst (52 lines changed)
@ -8,8 +8,9 @@ tools. The web interface also provides a built-in console, so if you prefer the
|
||||
command line or need some extra control, you have this option.
|
||||
|
||||
The web interface can be accessed via https://youripaddress:8007. The default
|
||||
login is `root`, and the password is the one specified during the installation
|
||||
process.
|
||||
login is `root`, and the password is either the one specified during the
|
||||
installation process or the password of the root user, in case of installation
|
||||
on top of Debian.
|
||||
|
||||
|
||||
Features
|
||||
@ -48,12 +49,13 @@ GUI Overview
|
||||
|
||||
The Proxmox Backup Server web interface consists of 3 main sections:
|
||||
|
||||
* **Header**: At the top. This shows version information, and contains buttons to view
|
||||
documentation, monitor running tasks, set the language and logout.
|
||||
* **Sidebar**: On the left. This contains the configuration options for
|
||||
* **Header**: At the top. This shows version information and contains buttons to
|
||||
view documentation, monitor running tasks, set the language, configure various
|
||||
display settings, and logout.
|
||||
* **Sidebar**: On the left. This contains the administration options for
|
||||
the server.
|
||||
* **Configuration Panel**: In the center. This contains the control interface for the
|
||||
configuration options in the *Sidebar*.
|
||||
* **Configuration Panel**: In the center. This contains the respective control
|
||||
interfaces for the administration options in the *Sidebar*.
|
||||
|
||||
|
||||
Sidebar
|
||||
@ -74,12 +76,14 @@ previous and currently running tasks, and subscription information.
|
||||
Configuration
|
||||
^^^^^^^^^^^^^
|
||||
|
||||
The Configuration section contains some system configuration options, such as
|
||||
time and network configuration. It also contains the following subsections:
|
||||
The Configuration section contains some system options, such as time, network,
|
||||
WebAuthn, and HTTP proxy configuration. It also contains the following
|
||||
subsections:
|
||||
|
||||
* **Access Control**: Add and manage users, API tokens, and the permissions
|
||||
associated with these items
|
||||
* **Remotes**: Add, edit and remove remotes (see :term:`Remote`)
|
||||
* **Certificates**: Manage ACME accounts and create SSL certificates.
|
||||
* **Subscription**: Upload a subscription key, view subscription status and
|
||||
access a text-based system report.
|
||||
|
||||
@ -98,6 +102,7 @@ tasks and information. These are:
|
||||
resource usage statistics
|
||||
* **Services**: Manage and monitor system services
|
||||
* **Updates**: An interface for upgrading packages
|
||||
* **Repositories**: An interface for configuring APT repositories
|
||||
* **Syslog**: View log messages from the server
|
||||
* **Tasks**: Task history with multiple filter options
|
||||
|
||||
@ -119,11 +124,20 @@ Tape Backup
|
||||
:align: right
|
||||
:alt: Tape Backup: Tape changer overview
|
||||
|
||||
The `Tape Backup`_ section contains a top panel, managing tape media sets,
|
||||
inventories, drives, changers and the tape backup jobs itself.
|
||||
The `Tape Backup`_ section contains a top panel, with options for managing tape
|
||||
media sets, inventories, drives, changers, encryption keys, and the tape backup
|
||||
jobs themselves. The tabs are as follows:
|
||||
|
||||
It also contains a subsection per standalone drive and per changer, with a
|
||||
status and management view for those devices.
|
||||
* **Content**: Information on the contents of the tape backup
|
||||
* **Inventory**: Manage the tapes attached to the system
|
||||
* **Changers**: Manage tape loading devices
|
||||
* **Drives**: Manage drives used for reading and writing to tapes
|
||||
* **Media Pools**: Manage logical pools of tapes
|
||||
* **Encryption Keys**: Manage tape backup encryption keys
|
||||
* **Backup Jobs**: Manage tape backup jobs
|
||||
|
||||
The section also contains a subsection per standalone drive and per changer,
|
||||
with a status and management view for those devices.
|
||||
|
||||
Datastore
|
||||
^^^^^^^^^
|
||||
@ -133,9 +147,9 @@ Datastore
|
||||
:alt: Datastore Configuration
|
||||
|
||||
The Datastore section contains interfaces for creating and managing
|
||||
datastores. It contains a button to create a new datastore on the server, as
|
||||
well as a subsection for each datastore on the system, in which you can use the
|
||||
top panel to view:
|
||||
datastores. It also contains a button for creating a new datastore on the
|
||||
server, as well as a subsection for each datastore on the system, in which you
|
||||
can use the top panel to view:
|
||||
|
||||
* **Summary**: Access a range of datastore usage statistics
|
||||
* **Content**: Information on the datastore's backup groups and their respective
|
||||
@ -144,5 +158,7 @@ top panel to view:
|
||||
collection <client_garbage-collection>` operations, and run garbage collection
|
||||
manually
|
||||
* **Sync Jobs**: Create, manage and run :ref:`syncjobs` from remote servers
|
||||
* **Verify Jobs**: Create, manage and run :ref:`maintenance_verification` jobs on the
|
||||
datastore
|
||||
* **Verify Jobs**: Create, manage and run :ref:`maintenance_verification` jobs
|
||||
on the datastore
|
||||
* **Options**: Configure notification and verification settings
|
||||
* **Permissions**: Manage permissions on the datastore
|
||||
|
(Screenshot updated; binary file not shown. Before: 21 KiB, after: 24 KiB.)
(New screenshot added: docs/images/screenshots/pbs-gui-traffic-control-add.png, 32 KiB; binary file not shown.)
@ -19,24 +19,24 @@ for various management tasks such as disk management.
|
||||
`Proxmox Backup`_ without the server part.
|
||||
|
||||
The disk image (ISO file) provided by Proxmox includes a complete Debian system
|
||||
as well as all necessary packages for the `Proxmox Backup`_ server.
|
||||
as well as all necessary packages for the `Proxmox Backup`_ Server.
|
||||
|
||||
The installer will guide you through the setup process and allow
|
||||
you to partition the local disk(s), apply basic system configurations
|
||||
(e.g. timezone, language, network), and install all required packages.
|
||||
you to partition the local disk(s), apply basic system configuration
|
||||
(for example timezone, language, network), and install all required packages.
|
||||
The provided ISO will get you started in just a few minutes, and is the
|
||||
recommended method for new and existing users.
|
||||
|
||||
Alternatively, `Proxmox Backup`_ server can be installed on top of an
|
||||
Alternatively, `Proxmox Backup`_ Server can be installed on top of an
|
||||
existing Debian system.
|
||||
|
||||
Install `Proxmox Backup`_ with the Installer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Install `Proxmox Backup`_ Server using the Installer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Download the ISO from |DOWNLOADS|.
|
||||
It includes the following:
|
||||
|
||||
* The `Proxmox Backup`_ server installer, which partitions the local
|
||||
* The `Proxmox Backup`_ Server installer, which partitions the local
|
||||
disk(s) with ext4, xfs or ZFS, and installs the operating system
|
||||
|
||||
* Complete operating system (Debian Linux, 64-bit)
|
||||
@ -63,7 +63,7 @@ standard Debian installation. After configuring the
|
||||
# apt-get update
|
||||
# apt-get install proxmox-backup-server
|
||||
|
||||
The commands above keep the current (Debian) kernel and install a minimal
|
||||
The above commands keep the current (Debian) kernel and install a minimal
|
||||
set of required packages.
|
||||
|
||||
If you want to install the same set of packages as the installer
|
||||
|
@ -4,15 +4,15 @@ Introduction
|
||||
What is Proxmox Backup Server?
|
||||
------------------------------
|
||||
|
||||
Proxmox Backup Server is an enterprise-class, client-server backup software
|
||||
package that backs up :term:`virtual machine`\ s, :term:`container`\ s, and
|
||||
Proxmox Backup Server is an enterprise-class, client-server backup solution that
|
||||
is capable of backing up :term:`virtual machine`\ s, :term:`container`\ s, and
|
||||
physical hosts. It is specially optimized for the `Proxmox Virtual Environment`_
|
||||
platform and allows you to back up your data securely, even between remote
|
||||
sites, providing easy management with a web-based user interface.
|
||||
sites, providing easy management through a web-based user interface.
|
||||
|
||||
It supports deduplication, compression, and authenticated
|
||||
encryption (AE_). Using :term:`Rust` as the implementation language guarantees high
|
||||
performance, low resource usage, and a safe, high-quality codebase.
|
||||
encryption (AE_). Using :term:`Rust` as the implementation language guarantees
|
||||
high performance, low resource usage, and a safe, high-quality codebase.
|
||||
|
||||
Proxmox Backup uses state of the art cryptography for both client-server
|
||||
communication and backup content :ref:`encryption <client_encryption>`. All
|
||||
@ -28,22 +28,23 @@ Proxmox Backup Server uses a `client-server model`_. The server stores the
|
||||
backup data and provides an API to create and manage datastores. With the
|
||||
API, it's also possible to manage disks and other server-side resources.
|
||||
|
||||
The backup client uses this API to access the backed up data. With the command
|
||||
line tool ``proxmox-backup-client`` you can create backups and restore data.
|
||||
For QEMU_ with `Proxmox Virtual Environment`_ we deliver an integrated client.
|
||||
The backup client uses this API to access the backed up data. You can use the
|
||||
``proxmox-backup-client`` command line tool to create and restore file backups.
|
||||
For QEMU_ and LXC_ within `Proxmox Virtual Environment`_, we deliver an
|
||||
integrated client.
|
||||
|
||||
A single backup is allowed to contain several archives. For example, when you
|
||||
backup a :term:`virtual machine`, each disk is stored as a separate archive
|
||||
inside that backup. The VM configuration itself is stored as an extra file.
|
||||
This way, it's easy to access and restore only important parts of the backup,
|
||||
without the need to scan the whole backup.
|
||||
This way, it's easy to access and restore only the important parts of the
|
||||
backup, without the need to scan the whole backup.
|
||||
|
||||
|
||||
Main Features
|
||||
-------------
|
||||
|
||||
:Support for Proxmox VE: The `Proxmox Virtual Environment`_ is fully
|
||||
supported and you can easily backup :term:`virtual machine`\ s and
|
||||
supported, and you can easily backup :term:`virtual machine`\ s and
|
||||
:term:`container`\ s.
|
||||
|
||||
:Performance: The whole software stack is written in :term:`Rust`,
|
||||
@ -70,6 +71,10 @@ Main Features
|
||||
modern hardware. In addition to client-side encryption, all data is
|
||||
transferred via a secure TLS connection.
|
||||
|
||||
:Tape backup: For long-term archiving of data, Proxmox Backup Server also
|
||||
provides extensive support for backing up to tape and managing tape
|
||||
libraries.
|
||||
|
||||
:Web interface: Manage the Proxmox Backup Server with the integrated, web-based
|
||||
user interface.
|
||||
|
||||
@ -80,7 +85,7 @@ Main Features
|
||||
backup-clients.
|
||||
|
||||
:Enterprise Support: Proxmox Server Solutions GmbH offers enterprise support in
|
||||
form of `Proxmox Backup Server Subscription Plans
|
||||
the form of `Proxmox Backup Server Subscription Plans
|
||||
<https://www.proxmox.com/en/proxmox-backup-server/pricing>`_. Users at every
|
||||
subscription level get access to the Proxmox Backup :ref:`Enterprise
|
||||
Repository <sysadmin_package_repos_enterprise>`. In addition, with a Basic,
|
||||
@ -173,7 +178,7 @@ Bug Tracker
|
||||
~~~~~~~~~~~
|
||||
|
||||
Proxmox runs a public bug tracker at `<https://bugzilla.proxmox.com>`_. If an
|
||||
issue appears, file your report there. An issue can be a bug as well as a
|
||||
issue appears, file your report there. An issue can be a bug, as well as a
|
||||
request for a new feature or enhancement. The bug tracker helps to keep track
|
||||
of the issue and will send a notification once it has been solved.
|
||||
|
||||
@ -224,5 +229,6 @@ requirements.
|
||||
|
||||
In July 2020, we released the first beta version of Proxmox Backup
|
||||
Server, followed by the first stable version in November 2020. With support for
|
||||
incremental, fully deduplicated backups, Proxmox Backup significantly reduces
|
||||
network load and saves valuable storage space.
|
||||
encryption and incremental, fully deduplicated backups, Proxmox Backup offers a
|
||||
secure environment, which significantly reduces network load and saves valuable
|
||||
storage space.
|
||||
|
@ -4,17 +4,17 @@
|
||||
ZFS on Linux
|
||||
------------
|
||||
|
||||
ZFS is a combined file system and logical volume manager designed by
|
||||
ZFS is a combined file system and logical volume manager, designed by
|
||||
Sun Microsystems. There is no need to manually compile ZFS modules - all
|
||||
packages are included.
|
||||
|
||||
By using ZFS, it's possible to achieve maximum enterprise features with
|
||||
low budget hardware, but also high performance systems by leveraging
|
||||
SSD caching or even SSD only setups. ZFS can replace cost intense
|
||||
hardware raid cards by moderate CPU and memory load combined with easy
|
||||
low budget hardware, and also high performance systems by leveraging
|
||||
SSD caching or even SSD only setups. ZFS can replace expensive
|
||||
hardware raid cards with moderate CPU and memory load, combined with easy
|
||||
management.
|
||||
|
||||
General ZFS advantages
|
||||
General advantages of ZFS:
|
||||
|
||||
* Easy configuration and management with GUI and CLI.
|
||||
* Reliable
|
||||
@ -34,18 +34,18 @@ General ZFS advantages
|
||||
Hardware
|
||||
~~~~~~~~~
|
||||
|
||||
ZFS depends heavily on memory, so you need at least 8GB to start. In
|
||||
practice, use as much you can get for your hardware/budget. To prevent
|
||||
ZFS depends heavily on memory, so it's recommended to have at least 8GB to
|
||||
start. In practice, use as much as you can get for your hardware/budget. To prevent
|
||||
data corruption, we recommend the use of high quality ECC RAM.
|
||||
|
||||
If you use a dedicated cache and/or log disk, you should use an
|
||||
enterprise class SSD (e.g. Intel SSD DC S3700 Series). This can
|
||||
enterprise class SSD (for example, Intel SSD DC S3700 Series). This can
|
||||
increase the overall performance significantly.
|
||||
|
||||
IMPORTANT: Do not use ZFS on top of hardware controller which has its
|
||||
IMPORTANT: Do not use ZFS on top of a hardware controller which has its
|
||||
own cache management. ZFS needs to directly communicate with disks. An
|
||||
HBA adapter is the way to go, or something like LSI controller flashed
|
||||
in ``IT`` mode.
|
||||
HBA adapter or something like an LSI controller flashed in ``IT`` mode is
|
||||
recommended.
|
||||
|
||||
|
||||
ZFS Administration
|
||||
@ -53,7 +53,7 @@ ZFS Administration
|
||||
|
||||
This section gives you some usage examples for common tasks. ZFS
|
||||
itself is really powerful and provides many options. The main commands
|
||||
to manage ZFS are `zfs` and `zpool`. Both commands come with great
|
||||
to manage ZFS are `zfs` and `zpool`. Both commands come with extensive
|
||||
manual pages, which can be read with:
|
||||
|
||||
.. code-block:: console
|
||||
@ -123,7 +123,7 @@ Create a new pool with cache (L2ARC)
|
||||
It is possible to use a dedicated cache drive partition to increase
|
||||
the performance (use SSD).
|
||||
|
||||
As `<device>` it is possible to use more devices, like it's shown in
|
||||
For `<device>`, you can use multiple devices, as is shown in
|
||||
"Create a new pool with RAID*".
|
||||
|
||||
.. code-block:: console
|
||||
@ -136,7 +136,7 @@ Create a new pool with log (ZIL)
|
||||
It is possible to use a dedicated cache drive partition to increase
|
||||
the performance (SSD).
|
||||
|
||||
As `<device>` it is possible to use more devices, like it's shown in
|
||||
For `<device>`, you can use multiple devices, as is shown in
|
||||
"Create a new pool with RAID*".
|
||||
|
||||
.. code-block:: console
|
||||
@ -146,8 +146,9 @@ As `<device>` it is possible to use more devices, like it's shown in
|
||||
Add cache and log to an existing pool
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If you have a pool without cache and log. First partition the SSD in
|
||||
2 partition with `parted` or `gdisk`
|
||||
You can add cache and log devices to a pool after its creation. In this example,
|
||||
we will use a single drive for both cache and log. First, you need to create
|
||||
2 partitions on the SSD with `parted` or `gdisk`
|
||||
|
||||
.. important:: Always use GPT partition tables.
|
||||
|
||||
@ -171,12 +172,12 @@ Changing a failed device
|
||||
Changing a failed bootable device
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Depending on how Proxmox Backup was installed it is either using `grub` or `systemd-boot`
|
||||
as bootloader.
|
||||
Depending on how Proxmox Backup was installed, it is either using `grub` or
|
||||
`systemd-boot` as a bootloader.
|
||||
|
||||
The first steps of copying the partition table, reissuing GUIDs and replacing
|
||||
the ZFS partition are the same. To make the system bootable from the new disk,
|
||||
different steps are needed which depend on the bootloader in use.
|
||||
In either case, the first steps of copying the partition table, reissuing GUIDs
|
||||
and replacing the ZFS partition are the same. To make the system bootable from
|
||||
the new disk, different steps are needed which depend on the bootloader in use.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@ -207,7 +208,7 @@ Usually `grub.cfg` is located in `/boot/grub/grub.cfg`
|
||||
# grub-mkconfig -o /path/to/grub.cfg
|
||||
|
||||
|
||||
Activate E-Mail Notification
|
||||
Activate e-mail notification
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
ZFS comes with an event daemon, which monitors events generated by the
|
||||
@ -219,24 +220,24 @@ and you can install it using `apt-get`:
|
||||
|
||||
# apt-get install zfs-zed
|
||||
|
||||
To activate the daemon it is necessary to edit `/etc/zfs/zed.d/zed.rc` with your
|
||||
favorite editor, and uncomment the `ZED_EMAIL_ADDR` setting:
|
||||
To activate the daemon, it is necessary to uncomment the `ZED_EMAIL_ADDR`
|
||||
setting in the file `/etc/zfs/zed.d/zed.rc`.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
ZED_EMAIL_ADDR="root"
|
||||
|
||||
Please note Proxmox Backup forwards mails to `root` to the email address
|
||||
Please note that Proxmox Backup forwards mails to `root` to the email address
|
||||
configured for the root user.
|
||||
|
||||
IMPORTANT: The only setting that is required is `ZED_EMAIL_ADDR`. All
|
||||
other settings are optional.
|
||||
|
||||
Limit ZFS Memory Usage
|
||||
Limit ZFS memory usage
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
It is good to use at most 50 percent (which is the default) of the
|
||||
system memory for ZFS ARC to prevent performance shortage of the
|
||||
system memory for ZFS ARC, to prevent performance degradation of the
|
||||
host. Use your preferred editor to change the configuration in
|
||||
`/etc/modprobe.d/zfs.conf` and insert:
|
||||
|
||||
@ -244,25 +245,40 @@ host. Use your preferred editor to change the configuration in
|
||||
|
||||
options zfs zfs_arc_max=8589934592
|
||||
|
||||
This example setting limits the usage to 8GB.
|
||||
The above example limits the usage to 8 GiB ('8 * 2^30^').
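If you want a different limit, the value in bytes can be worked out with simple
shell arithmetic. This is only an illustrative calculation (the 16 GiB figure is
an arbitrary example, not a recommendation):

.. code-block:: console

    # echo $((16 * 1024 * 1024 * 1024))
    17179869184

The resulting number is the value to place after `zfs_arc_max=` in
`/etc/modprobe.d/zfs.conf`.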
|
||||
|
||||
.. IMPORTANT:: If your root file system is ZFS you must update your initramfs every time this value changes:
|
||||
.. IMPORTANT:: In case your desired `zfs_arc_max` value is lower than or equal
|
||||
to `zfs_arc_min` (which defaults to 1/32 of the system memory), `zfs_arc_max`
|
||||
will be ignored. Thus, for it to work in this case, you must set
|
||||
`zfs_arc_min` to at most `zfs_arc_max - 1`. This would require updating the
|
||||
configuration in `/etc/modprobe.d/zfs.conf`, with:
|
||||
|
||||
.. code-block:: console
|
||||
options zfs zfs_arc_min=8589934591
|
||||
options zfs zfs_arc_max=8589934592
|
||||
|
||||
This example setting limits the usage to 8 GiB ('8 * 2^30^') on
|
||||
systems with more than 256 GiB of total memory, where simply setting
|
||||
`zfs_arc_max` alone would not work.
|
||||
|
||||
.. IMPORTANT:: If your root file system is ZFS, you must update your initramfs
|
||||
every time this value changes.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# update-initramfs -u
|
||||
|
||||
|
||||
SWAP on ZFS
|
||||
Swap on ZFS
|
||||
^^^^^^^^^^^
|
||||
|
||||
Swap-space created on a zvol may generate some troubles, like blocking the
|
||||
Swap-space created on a zvol may cause some issues, such as blocking the
|
||||
server or generating a high IO load, often seen when starting a Backup
|
||||
to an external Storage.
|
||||
|
||||
We strongly recommend to use enough memory, so that you normally do not
|
||||
We strongly recommend using enough memory, so that you normally do not
|
||||
run into low memory situations. Should you need or want to add swap, it is
|
||||
preferred to create a partition on a physical disk and use it as swap device.
|
||||
preferred to create a partition on a physical disk and use it as a swap device.
|
||||
You can leave some space free for this purpose in the advanced options of the
|
||||
installer. Additionally, you can lower the `swappiness` value.
|
||||
A good value for servers is 10:
|
||||
@ -291,7 +307,7 @@ an editor of your choice and add the following line:
|
||||
vm.swappiness = 100 The kernel will swap aggressively.
|
||||
==================== ===============================================================
|
||||
|
||||
ZFS Compression
|
||||
ZFS compression
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
To activate compression:
|
||||
@ -300,10 +316,11 @@ To activate compression:
|
||||
# zpool set compression=lz4 <pool>
|
||||
|
||||
We recommend using the `lz4` algorithm, since it adds very little CPU overhead.
|
||||
Other algorithms such as `lzjb` and `gzip-N` (where `N` is an integer `1-9` representing
|
||||
the compression ratio, 1 is fastest and 9 is best compression) are also available.
|
||||
Depending on the algorithm and how compressible the data is, having compression enabled can even increase
|
||||
I/O performance.
|
||||
Other algorithms such as `lzjb` and `gzip-N` (where `N` is an integer from `1-9`
|
||||
representing the compression ratio, where 1 is fastest and 9 is best
|
||||
compression) are also available. Depending on the algorithm and how
|
||||
compressible the data is, having compression enabled can even increase I/O
|
||||
performance.
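As an illustration (the dataset name is a placeholder), an alternative algorithm
can also be set on a single dataset instead of the whole pool, and the current
setting can be queried afterwards:

.. code-block:: console

    # zfs set compression=gzip-6 <pool>/<dataset>
    # zfs get compression <pool>/<dataset>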
|
||||
|
||||
You can disable compression at any time with:
|
||||
.. code-block:: console
|
||||
@ -314,26 +331,26 @@ Only new blocks will be affected by this change.
|
||||
|
||||
.. _local_zfs_special_device:
|
||||
|
||||
ZFS Special Device
|
||||
ZFS special device
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Since version 0.8.0 ZFS supports `special` devices. A `special` device in a
|
||||
Since version 0.8.0, ZFS supports `special` devices. A `special` device in a
|
||||
pool is used to store metadata, deduplication tables, and optionally small
|
||||
file blocks.
|
||||
|
||||
A `special` device can improve the speed of a pool consisting of slow spinning
|
||||
hard disks with a lot of metadata changes. For example workloads that involve
|
||||
hard disks with a lot of metadata changes. For example, workloads that involve
|
||||
creating, updating or deleting a large number of files will benefit from the
|
||||
presence of a `special` device. ZFS datasets can also be configured to store
|
||||
whole small files on the `special` device which can further improve the
|
||||
small files on the `special` device, which can further improve the
|
||||
performance. Use fast SSDs for the `special` device.
|
||||
|
||||
.. IMPORTANT:: The redundancy of the `special` device should match the one of the
|
||||
pool, since the `special` device is a point of failure for the whole pool.
|
||||
pool, since the `special` device is a point of failure for the entire pool.
|
||||
|
||||
.. WARNING:: Adding a `special` device to a pool cannot be undone!
|
||||
|
||||
Create a pool with `special` device and RAID-1:
|
||||
To create a pool with `special` device and RAID-1:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@ -346,8 +363,8 @@ Adding a `special` device to an existing pool with RAID-1:
|
||||
# zpool add <pool> special mirror <device1> <device2>
|
||||
|
||||
ZFS datasets expose the `special_small_blocks=<size>` property. `size` can be
|
||||
`0` to disable storing small file blocks on the `special` device or a power of
|
||||
two in the range between `512B` to `128K`. After setting the property new file
|
||||
`0` to disable storing small file blocks on the `special` device, or a power of
|
||||
two in the range from `512B` to `128K`. After setting this property, new file
|
||||
blocks smaller than `size` will be allocated on the `special` device.
|
||||
|
||||
.. IMPORTANT:: If the value for `special_small_blocks` is greater than or equal to
|
||||
@ -355,10 +372,10 @@ blocks smaller than `size` will be allocated on the `special` device.
|
||||
the `special` device, so be careful!
|
||||
|
||||
Setting the `special_small_blocks` property on a pool will change the default
|
||||
value of that property for all child ZFS datasets (for example all containers
|
||||
value of that property for all child ZFS datasets (for example, all containers
|
||||
in the pool will opt in for small file blocks).
|
||||
|
||||
Opt in for all file smaller than 4K-blocks pool-wide:
|
||||
Opt in for all files smaller than 4K-blocks pool-wide:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@ -379,10 +396,15 @@ Opt out from small file blocks for a single dataset:
|
||||
Troubleshooting
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
Corrupted cachefile
|
||||
Corrupt cache file
|
||||
""""""""""""""""""
|
||||
|
||||
In case of a corrupted ZFS cachefile, some volumes may not be mounted during
|
||||
boot until mounted manually later.
|
||||
`zfs-import-cache.service` imports ZFS pools using the ZFS cache file. If this
|
||||
file becomes corrupted, the service won't be able to import the pools whose
|
||||
entries it cannot read from that file.
|
||||
|
||||
As a result, in case of a corrupted ZFS cache file, some volumes may not be
|
||||
mounted during boot and must be mounted manually later.
|
||||
|
||||
For each pool, run:
|
||||
|
||||
@ -390,16 +412,13 @@ For each pool, run:
|
||||
|
||||
# zpool set cachefile=/etc/zfs/zpool.cache POOLNAME
|
||||
|
||||
and afterwards update the `initramfs` by running:
|
||||
then, update the `initramfs` by running:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# update-initramfs -u -k all
|
||||
|
||||
and finally reboot your node.
|
||||
|
||||
Sometimes the ZFS cachefile can get corrupted, and `zfs-import-cache.service`
|
||||
doesn't import the pools that aren't present in the cachefile.
|
||||
and finally, reboot the node.
|
||||
|
||||
Another workaround to this problem is enabling the `zfs-import-scan.service`,
|
||||
which searches and imports pools via device scanning (usually slower).
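As a sketch, that service can be enabled with systemd (the unit name is taken
from the paragraph above):

.. code-block:: console

    # systemctl enable --now zfs-import-scan.service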
|
||||
|
@ -14,15 +14,15 @@ following retention options are available:
|
||||
|
||||
``keep-hourly <N>``
|
||||
Keep backups for the last ``<N>`` hours. If there is more than one
|
||||
backup for a single hour, only the latest is kept.
|
||||
backup for a single hour, only the latest is retained.
|
||||
|
||||
``keep-daily <N>``
|
||||
Keep backups for the last ``<N>`` days. If there is more than one
|
||||
backup for a single day, only the latest is kept.
|
||||
backup for a single day, only the latest is retained.
|
||||
|
||||
``keep-weekly <N>``
|
||||
Keep backups for the last ``<N>`` weeks. If there is more than one
|
||||
backup for a single week, only the latest is kept.
|
||||
backup for a single week, only the latest is retained.
|
||||
|
||||
.. note:: Weeks start on Monday and end on Sunday. The software
|
||||
uses the `ISO week date`_ system and handles weeks at
|
||||
@ -30,17 +30,17 @@ following retention options are available:
|
||||
|
||||
``keep-monthly <N>``
|
||||
Keep backups for the last ``<N>`` months. If there is more than one
|
||||
backup for a single month, only the latest is kept.
|
||||
backup for a single month, only the latest is retained.
|
||||
|
||||
``keep-yearly <N>``
|
||||
Keep backups for the last ``<N>`` years. If there is more than one
|
||||
backup for a single year, only the latest is kept.
|
||||
backup for a single year, only the latest is retained.
|
||||
|
||||
The retention options are processed in the order given above. Each option
|
||||
only covers backups within its time period. The next option does not take care
|
||||
of already covered backups. It will only consider older backups.
|
||||
|
||||
Unfinished and incomplete backups will be removed by the prune command unless
|
||||
Unfinished and incomplete backups will be removed by the prune command, unless
|
||||
they are newer than the last successful backup. In this case, the last failed
|
||||
backup is retained.
|
||||
|
||||
@ -48,7 +48,7 @@ Prune Simulator
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
You can use the built-in `prune simulator <prune-simulator/index.html>`_
|
||||
to explore the effect of different retetion options with various backup
|
||||
to explore the effect of different retention options with various backup
|
||||
schedules.
|
||||
|
||||
Manual Pruning
|
||||
@ -59,10 +59,10 @@ Manual Pruning
|
||||
:align: right
|
||||
:alt: Prune and garbage collection options
|
||||
|
||||
To access pruning functionality for a specific backup group, you can use the
|
||||
prune command line option discussed in :ref:`backup-pruning`, or navigate to
|
||||
the **Content** tab of the datastore and click the scissors icon in the
|
||||
**Actions** column of the relevant backup group.
|
||||
To manually prune a specific backup group, you can use
|
||||
``proxmox-backup-client``'s ``prune`` subcommand, discussed in
|
||||
:ref:`backup-pruning`, or navigate to the **Content** tab of the datastore and
|
||||
click the scissors icon in the **Actions** column of the relevant backup group.
|
||||
|
||||
Prune Schedules
|
||||
^^^^^^^^^^^^^^^
|
||||
@ -81,7 +81,7 @@ Retention Settings Example
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The backup frequency and retention of old backups may depend on how often data
|
||||
changes, and how important an older state may be, in a specific work load.
|
||||
changes and how important an older state may be in a specific workload.
|
||||
When backups act as a company's document archive, there may also be legal
|
||||
requirements for how long backup snapshots must be kept.
|
||||
|
||||
@ -125,8 +125,8 @@ start garbage collection on an entire datastore and the ``status`` subcommand to
|
||||
see attributes relating to the :ref:`garbage collection <client_garbage-collection>`.
|
||||
|
||||
This functionality can also be accessed in the GUI, by navigating to **Prune &
|
||||
GC** from the top panel. From here, you can edit the schedule at which garbage
|
||||
collection runs and manually start the operation.
|
||||
GC** from the top panel of a datastore. From here, you can edit the schedule at
|
||||
which garbage collection runs and manually start the operation.
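On the command line, the equivalent operations might look like this sketch
(``store1`` is a placeholder datastore name):

.. code-block:: console

    # proxmox-backup-manager garbage-collection start store1
    # proxmox-backup-manager garbage-collection status store1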
|
||||
|
||||
|
||||
.. _maintenance_verification:
|
||||
@ -139,13 +139,13 @@ Verification
|
||||
:align: right
|
||||
:alt: Adding a verify job
|
||||
|
||||
Proxmox Backup offers various verification options to ensure that backup data is
|
||||
intact. Verification is generally carried out through the creation of verify
|
||||
jobs. These are scheduled tasks that run verification at a given interval (see
|
||||
:ref:`calendar-event-scheduling`). With these, you can set whether already verified
|
||||
snapshots are ignored, as well as set a time period, after which verified jobs
|
||||
are checked again. The interface for creating verify jobs can be found under the
|
||||
**Verify Jobs** tab of the datastore.
|
||||
Proxmox Backup Server offers various verification options to ensure that backup
|
||||
data is intact. Verification is generally carried out through the creation of
|
||||
verify jobs. These are scheduled tasks that run verification at a given interval
|
||||
(see :ref:`calendar-event-scheduling`). With these, you can also set whether
|
||||
already verified snapshots are ignored, as well as set a time period, after
|
||||
which snapshots are checked again. The interface for creating verify jobs can be
|
||||
found under the **Verify Jobs** tab of the datastore.
|
||||
|
||||
.. Note:: It is recommended that you reverify all backups at least monthly, even
|
||||
if a previous verification was successful. This is because physical drives
|
||||
@ -158,9 +158,9 @@ are checked again. The interface for creating verify jobs can be found under the
|
||||
data.
|
||||
|
||||
Aside from using verify jobs, you can also run verification manually on entire
|
||||
datastores, backup groups, or snapshots. To do this, navigate to the **Content**
|
||||
tab of the datastore and either click *Verify All*, or select the *V.* icon from
|
||||
the *Actions* column in the table.
|
||||
datastores, backup groups or snapshots. To do this, navigate to the **Content**
|
||||
tab of the datastore and either click *Verify All* or select the *V.* icon from
|
||||
the **Actions** column in the table.
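A manual verification of a whole datastore can also be started from the command
line; as a sketch (the datastore name is a placeholder):

.. code-block:: console

    # proxmox-backup-manager verify store1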
|
||||
|
||||
.. _maintenance_notification:
|
||||
|
||||
@ -170,8 +170,8 @@ Notifications
|
||||
Proxmox Backup Server can send you notification emails about automatically
|
||||
scheduled verification, garbage-collection and synchronization tasks results.
|
||||
|
||||
By default, notifications are send to the email address configured for the
|
||||
`root@pam` user. You can set that user for each datastore.
|
||||
By default, notifications are sent to the email address configured for the
|
||||
`root@pam` user. You can instead set this user for each datastore.
|
||||
|
||||
You can also change the level of notification received per task type, the
|
||||
following options are available:
|
||||
@ -179,6 +179,6 @@ following options are available:
|
||||
* Always: send a notification for any scheduled task, independent of the
|
||||
outcome
|
||||
|
||||
* Errors: send a notification for any scheduled task resulting in an error
|
||||
* Errors: send a notification for any scheduled task that results in an error
|
||||
|
||||
* Never: do not send any notification at all
|
||||
|
@ -17,8 +17,8 @@ configuration information for remotes is stored in the file
|
||||
:align: right
|
||||
:alt: Add a remote
|
||||
|
||||
To add a remote, you need its hostname or IP, a userid and password on the
|
||||
remote, and its certificate fingerprint. To get the fingerprint, use the
|
||||
To add a remote, you need its hostname or IP address, a userid and password on
|
||||
the remote, and its certificate fingerprint. To get the fingerprint, use the
|
||||
``proxmox-backup-manager cert info`` command on the remote, or navigate to
|
||||
**Dashboard** in the remote's web interface and select **Show Fingerprint**.
|
||||
|
||||
@ -60,12 +60,13 @@ Sync Jobs
|
||||
|
||||
Sync jobs are configured to pull the contents of a datastore on a **Remote** to
|
||||
a local datastore. You can manage sync jobs in the web interface, from the
|
||||
**Sync Jobs** tab of the datastore which you'd like to set one up for, or using
|
||||
the ``proxmox-backup-manager sync-job`` command. The configuration information
|
||||
for sync jobs is stored at ``/etc/proxmox-backup/sync.cfg``. To create a new
|
||||
sync job, click the add button in the GUI, or use the ``create`` subcommand.
|
||||
After creating a sync job, you can either start it manually from the GUI or
|
||||
provide it with a schedule (see :ref:`calendar-event-scheduling`) to run regularly.
|
||||
**Sync Jobs** tab of the **Datastore** panel or from that of the Datastore
|
||||
itself. Alternatively, you can manage them with the ``proxmox-backup-manager
|
||||
sync-job`` command. The configuration information for sync jobs is stored at
|
||||
``/etc/proxmox-backup/sync.cfg``. To create a new sync job, click the add button
|
||||
in the GUI, or use the ``create`` subcommand. After creating a sync job, you can
|
||||
either start it manually from the GUI or provide it with a schedule (see
|
||||
:ref:`calendar-event-scheduling`) to run regularly.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@ -79,17 +80,48 @@ provide it with a schedule (see :ref:`calendar-event-scheduling`) to run regular
|
||||
└────────────┴───────┴────────┴──────────────┴───────────┴─────────┘
|
||||
# proxmox-backup-manager sync-job remove pbs2-local
|
||||
|
||||
For setting up sync jobs, the configuring user needs the following permissions:
|
||||
To set up sync jobs, the configuring user needs the following permissions:
|
||||
|
||||
#. ``Remote.Read`` on the ``/remote/{remote}/{remote-store}`` path
|
||||
#. at least ``Datastore.Backup`` on the local target datastore (``/datastore/{store}``)
|
||||
|
||||
If the ``remove-vanished`` option is set, ``Datastore.Prune`` is required on
|
||||
the local datastore as well. If the ``owner`` option is not set (defaulting to
|
||||
``root@pam``) or set to something other than the configuring user,
|
||||
``Datastore.Modify`` is required as well.
|
||||
#. At least ``Datastore.Backup`` on the local target datastore (``/datastore/{store}``)
|
||||
|
||||
.. note:: A sync job can only sync backup groups that the configured remote's
|
||||
user/API token can read. If a remote is configured with a user/API token that
|
||||
only has ``Datastore.Backup`` privileges, only the limited set of accessible
|
||||
snapshots owned by that user/API token can be synced.
|
||||
|
||||
If the ``remove-vanished`` option is set, ``Datastore.Prune`` is required on
|
||||
the local datastore as well. If the ``owner`` option is not set (defaulting to
|
||||
``root@pam``) or is set to something other than the configuring user,
|
||||
``Datastore.Modify`` is required as well.
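A sketch of granting these permissions with built-in roles could look as follows
(the remote, datastore and auth-id are placeholders, and the role names are
assumptions; check ``proxmox-backup-manager acl`` for the roles available in
your version):

.. code-block:: console

    # proxmox-backup-manager acl update /remote/pbs2/store1 RemoteSyncOperator --auth-id sync@pam
    # proxmox-backup-manager acl update /datastore/store1 DatastoreBackup --auth-id sync@pam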
|
||||
|
||||
If the ``group-filter`` option is set, only backup groups matching at least one
|
||||
of the specified criteria are synced. The available criteria are:
|
||||
|
||||
* backup type, for example to only sync groups of the `ct` (Container) type:
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager sync-job update ID --group-filter type:ct
|
||||
* full group identifier
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager sync-job update ID --group-filter group:vm/100
|
||||
* regular expression matched against the full group identifier
|
||||
.. todo:: add example for regex
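For illustration, a regular expression filter might look like the following
sketch (the pattern and the ``regex:`` prefix are assumed examples; this one
would match the groups ``vm/100`` through ``vm/199``):

.. code-block:: console

    # proxmox-backup-manager sync-job update ID --group-filter 'regex:^vm/1[0-9]{2}$'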
|
||||
|
||||
The same filter is applied to local groups for handling of the
|
||||
``remove-vanished`` option.
|
||||
|
||||
.. note:: The ``protected`` flag of remote backup snapshots will not be synced.
|
||||
|
||||
Bandwidth Limit
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
Syncing datastores to an archive can produce lots of traffic and impact other
|
||||
users of the network. So, to avoid network or storage congestion, you can limit
|
||||
the bandwidth of the sync job by setting the ``rate-in`` option either in the
|
||||
web interface or using the ``proxmox-backup-manager`` command-line tool:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager sync-job update ID --rate-in 20MiB
|
||||
|
@ -82,9 +82,12 @@ is:
|
||||
.. note:: This command and corresponding GUI button rely on the ``ifreload``
|
||||
command, from the package ``ifupdown2``. This package is included within the
|
||||
Proxmox Backup Server installation, however, you may have to install it yourself,
|
||||
if you have installed Proxmox Backup Server on top of Debian or Proxmox VE.
|
||||
if you have installed Proxmox Backup Server on top of Debian or a Proxmox VE
|
||||
version prior to version 7.
|
||||
|
||||
You can also configure DNS settings, from the **DNS** section
|
||||
of **Configuration** or by using the ``dns`` subcommand of
|
||||
``proxmox-backup-manager``.
|
||||
|
||||
|
||||
.. include:: traffic-control.rst
|
||||
|
@ -1,5 +1,5 @@
|
||||
Most commands producing output supports the ``--output-format``
|
||||
parameter. It accepts the following values:
|
||||
Most commands that produce output support the ``--output-format``
|
||||
parameter. This accepts the following values:
|
||||
|
||||
:``text``: Text format (default). Structured data is rendered as a table.
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
This daemon exposes the whole Proxmox Backup Server API on TCP port
|
||||
8007 using HTTPS. It runs as user ``backup`` and has very limited
|
||||
permissions. Operation requiring more permissions are forwarded to
|
||||
permissions. Operations requiring more permissions are forwarded to
|
||||
the local ``proxmox-backup`` service.
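To check that the daemon is running and listening on that port, something like
the following sketch can be used (the service name follows the daemon name
above; ``ss`` and ``grep`` are standard tools):

.. code-block:: console

    # systemctl status proxmox-backup-proxy.service
    # ss -tlnp | grep 8007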
|
||||
|
||||
|
@ -3,8 +3,8 @@
|
||||
`Proxmox VE`_ Integration
|
||||
-------------------------
|
||||
|
||||
A Proxmox Backup Server can be integrated into a Proxmox VE setup by adding the
|
||||
former as a storage in a Proxmox VE standalone or cluster setup.
|
||||
Proxmox Backup Server can be integrated into a Proxmox VE standalone or cluster
|
||||
setup, by adding it as a storage in Proxmox VE.
|
||||
|
||||
See also the `Proxmox VE Storage - Proxmox Backup Server
|
||||
<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#storage_pbs>`_ section
|
||||
@ -14,8 +14,8 @@ of the Proxmox VE Administration Guide for Proxmox VE specific documentation.
|
||||
Using the Proxmox VE Web-Interface
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Proxmox VE has native API and web-interface integration of Proxmox Backup
|
||||
Server since the `Proxmox VE 6.3 release
|
||||
Proxmox VE has native API and web interface integration of Proxmox Backup
|
||||
Server as of `Proxmox VE 6.3
|
||||
<https://pve.proxmox.com/wiki/Roadmap#Proxmox_VE_6.3>`_.
|
||||
|
||||
A Proxmox Backup Server can be added under ``Datacenter -> Storage``.
|
||||
@ -24,8 +24,8 @@ Using the Proxmox VE Command-Line
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
You need to define a new storage with type 'pbs' on your `Proxmox VE`_
|
||||
node. The following example uses ``store2`` as storage name, and
|
||||
assumes the server address is ``localhost``, and you want to connect
|
||||
node. The following example uses ``store2`` as the storage's name, and
|
||||
assumes the server address is ``localhost`` and you want to connect
|
||||
as ``user1@pbs``.
|
||||
|
||||
.. code-block:: console
|
||||
@ -33,7 +33,7 @@ as ``user1@pbs``.
|
||||
# pvesm add pbs store2 --server localhost --datastore store2
|
||||
# pvesm set store2 --username user1@pbs --password <secret>
|
||||
|
||||
.. note:: If you would rather not pass your password as plain text, you can pass
|
||||
.. note:: If you would rather not enter your password as plain text, you can pass
|
||||
the ``--password`` parameter, without any arguments. This will cause the
|
||||
program to prompt you for a password upon entering the command.
|
||||
|
||||
@ -53,7 +53,7 @@ relationship:
|
||||
|
||||
# pvesm set store2 --fingerprint 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
|
||||
|
||||
After that you should be able to see storage status with:
|
||||
After that, you should be able to view storage status with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
|
@ -1,12 +1,12 @@
|
||||
``pxar`` is a command line utility to create and manipulate archives in the
|
||||
``pxar`` is a command line utility for creating and manipulating archives in the
|
||||
:ref:`pxar-format`.
|
||||
It is inspired by `casync file archive format
|
||||
<http://0pointer.net/blog/casync-a-tool-for-distributing-file-system-images.html>`_,
|
||||
which caters to a similar use-case.
|
||||
The ``.pxar`` format is adapted to fulfill the specific needs of the Proxmox
|
||||
Backup Server, for example, efficient storage of hard links.
|
||||
The format is designed to reduce storage space needed on the server by achieving
|
||||
a high level of deduplication.
|
||||
The format is designed to reduce the required storage on the server by
|
||||
achieving a high level of deduplication.
|
||||
|
||||
Creating an Archive
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
@ -26,8 +26,8 @@ This will create a new archive called ``archive.pxar`` with the contents of the
|
||||
|
||||
By default, ``pxar`` will skip certain mount points and will not follow device
|
||||
boundaries. This design decision is based on the primary use case of creating
|
||||
archives for backups. It makes sense to not back up the contents of certain
|
||||
temporary or system specific files.
|
||||
archives for backups. It makes sense to ignore the contents of certain
|
||||
temporary or system specific files in a backup.
|
||||
To alter this behavior and follow device boundaries, use the
|
||||
``--all-file-systems`` flag.
|
||||
|
||||
@ -41,40 +41,38 @@ by running:
|
||||
|
||||
# pxar create archive.pxar /path/to/source --exclude '**/*.txt'
|
||||
|
||||
Be aware that the shell itself will try to expand all of the glob patterns before
|
||||
invoking ``pxar``.
|
||||
In order to avoid this, all globs have to be quoted correctly.
|
||||
Be aware that the shell itself will try to expand glob patterns before invoking
|
||||
``pxar``. In order to avoid this, all globs have to be quoted correctly.
|
||||
|
||||
It is possible to pass the ``--exclude`` parameter multiple times, in order to
|
||||
match more than one pattern. This allows you to use more complex
|
||||
file exclusion/inclusion behavior. However, it is recommended to use
|
||||
file inclusion/exclusion behavior. However, it is recommended to use
|
||||
``.pxarexclude`` files instead for such cases.
|
||||
|
||||
For example you might want to exclude all ``.txt`` files except for a specific
|
||||
one from the archive. This is achieved via the negated match pattern, prefixed
|
||||
by ``!``.
|
||||
All the glob patterns are relative to the ``source`` directory.
|
||||
For example you might want to exclude all ``.txt`` files except a specific
|
||||
one from the archive. This would be achieved via the negated match pattern,
|
||||
prefixed by ``!``. All the glob patterns are relative to the ``source``
|
||||
directory.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# pxar create archive.pxar /path/to/source --exclude '**/*.txt' --exclude '!/folder/file.txt'
|
||||
|
||||
.. NOTE:: The order of the glob match patterns matters as later ones override
|
||||
previous ones. Permutations of the same patterns lead to different results.
|
||||
.. NOTE:: The order of the glob match patterns matters, as later ones override
|
||||
earlier ones. Permutations of the same patterns lead to different results.
|
||||
|
||||
``pxar`` will store the list of glob match patterns passed as parameters via the
|
||||
command line, in a file called ``.pxarexclude-cli`` at the root of
|
||||
the archive.
|
||||
command line, in a file called ``.pxarexclude-cli``, at the root of the archive.
|
||||
If a file with this name is already present in the source folder during archive
|
||||
creation, this file is not included in the archive and the file containing the
|
||||
new patterns is added to the archive instead, the original file is not altered.
|
||||
creation, this file is not included in the archive, and the file containing the
|
||||
new patterns is added to the archive instead. The original file is not altered.
|
||||
|
||||
A more convenient and persistent way to exclude files from the archive is by
|
||||
placing the glob match patterns in ``.pxarexclude`` files.
|
||||
It is possible to create and place these files in any directory of the filesystem
|
||||
tree.
|
||||
These files must contain one pattern per line, again later patterns win over
|
||||
previous ones.
|
||||
These files must contain one pattern per line, and later patterns override
|
||||
earlier ones.
|
||||
The patterns control file exclusions of files present within the given directory
|
||||
or further below it in the tree.
|
||||
The behavior is the same as described in :ref:`client_creating_backups`.
|
||||
@ -89,7 +87,7 @@ with the following command:
|
||||
|
||||
# pxar extract archive.pxar /path/to/target
|
||||
|
||||
If no target is provided, the content of the archive is extracted to the current
|
||||
If no target is provided, the contents of the archive is extracted to the current
|
||||
working directory.
|
||||
|
||||
In order to restore only parts of an archive, single files, and/or folders,
|
||||
@ -116,7 +114,7 @@ run the following command:
|
||||
# pxar list archive.pxar
|
||||
|
||||
This displays the full path of each file or directory with respect to the
|
||||
archives root.
|
||||
archive's root.
|
||||
|
||||
Mounting an Archive
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
|
@ -15,7 +15,7 @@ accessed using the ``disk`` subcommand. This subcommand allows you to initialize
|
||||
disks, create various filesystems, and get information about the disks.
|
||||
|
||||
To view the disks connected to the system, navigate to **Administration ->
|
||||
Disks** in the web interface or use the ``list`` subcommand of
|
||||
Storage/Disks** in the web interface or use the ``list`` subcommand of
|
||||
``disk``:
|
||||
|
||||
.. code-block:: console
|
||||
@ -42,9 +42,9 @@ To initialize a disk with a new GPT, use the ``initialize`` subcommand:
|
||||
:alt: Create a directory
|
||||
|
||||
You can create an ``ext4`` or ``xfs`` filesystem on a disk using ``fs
|
||||
create``, or by navigating to **Administration -> Disks -> Directory** in the
|
||||
web interface and creating one from there. The following command creates an
|
||||
``ext4`` filesystem and passes the ``--add-datastore`` parameter, in order to
|
||||
create``, or by navigating to **Administration -> Storage/Disks -> Directory**
|
||||
in the web interface and creating one from there. The following command creates
|
||||
an ``ext4`` filesystem and passes the ``--add-datastore`` parameter, in order to
|
||||
automatically create a datastore on the disk (in this case ``sdd``). This will
|
||||
create a datastore at the location ``/mnt/datastore/store1``:
|
||||
|
||||
@ -57,7 +57,7 @@ create a datastore at the location ``/mnt/datastore/store1``:
|
||||
:alt: Create ZFS
|
||||
|
||||
You can also create a ``zpool`` with various raid levels from **Administration
|
||||
-> Disks -> Zpool** in the web interface, or by using ``zpool create``. The command
|
||||
-> Storage/Disks -> ZFS** in the web interface, or by using ``zpool create``. The command
|
||||
below creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and
|
||||
mounts it under ``/mnt/datastore/zpool1``:
|
||||
|
||||
@ -102,7 +102,7 @@ is stored in the file ``/etc/proxmox-backup/datastore.cfg``.
|
||||
subdirectories per directory. That number comes from the 2\ :sup:`16`
|
||||
pre-created chunk namespace directories, and the ``.`` and ``..`` default
|
||||
directory entries. This requirement excludes certain filesystems and
|
||||
filesystem configuration from being supported for a datastore. For example,
|
||||
filesystem configurations from being supported for a datastore. For example,
|
||||
``ext3`` as a whole or ``ext4`` with the ``dir_nlink`` feature manually disabled.
|
||||
|
||||
|
||||
@ -113,14 +113,15 @@ Datastore Configuration
|
||||
:align: right
|
||||
:alt: Datastore Overview
|
||||
|
||||
You can configure multiple datastores. Minimum one datastore needs to be
|
||||
You can configure multiple datastores. A minimum of one datastore needs to be
|
||||
configured. The datastore is identified by a simple *name* and points to a
|
||||
directory on the filesystem. Each datastore also has associated retention
|
||||
settings of how many backup snapshots for each interval of ``hourly``,
|
||||
``daily``, ``weekly``, ``monthly``, ``yearly`` as well as a time-independent
|
||||
number of backups to keep in that store. :ref:`backup-pruning` and
|
||||
:ref:`garbage collection <client_garbage-collection>` can also be configured to run
|
||||
periodically based on a configured schedule (see :ref:`calendar-event-scheduling`) per datastore.
|
||||
:ref:`garbage collection <client_garbage-collection>` can also be configured to
|
||||
run periodically, based on a configured schedule (see
|
||||
:ref:`calendar-event-scheduling`) per datastore.
|
||||
|
||||
|
||||
.. _storage_datastore_create:
|
||||
@ -146,7 +147,8 @@ window:
|
||||
* *Comment* can be used to add some contextual information to the datastore.
|
||||
|
||||
Alternatively you can create a new datastore from the command line. The
|
||||
following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
|
||||
following command creates a new datastore called ``store1`` on
|
||||
:file:`/backup/disk1/store1`
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@ -156,7 +158,7 @@ following command creates a new datastore called ``store1`` on :file:`/backup/di
Managing Datastores
^^^^^^^^^^^^^^^^^^^

To list existing datastores from the command line run:
To list existing datastores from the command line, run:

.. code-block:: console
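Again, the command itself is outside the shown context; it presumably boils
down to a single subcommand:

.. code-block:: console

  # proxmox-backup-manager datastore list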
@ -216,8 +218,9 @@ After creating a datastore, the following default layout will appear:
|
||||
|
||||
`.lock` is an empty file used for process locking.
|
||||
|
||||
The `.chunks` directory contains folders, starting from `0000` and taking hexadecimal values until `ffff`. These
|
||||
directories will store the chunked data after a backup operation has been executed.
|
||||
The `.chunks` directory contains folders, starting from `0000` and increasing in
|
||||
hexadecimal values until `ffff`. These directories will store the chunked data,
|
||||
categorized by checksum, after a backup operation has been executed.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
|
@ -4,8 +4,8 @@ Host System Administration
|
||||
==========================
|
||||
|
||||
`Proxmox Backup`_ is based on the famous Debian_ Linux
|
||||
distribution. That means that you have access to the whole world of
|
||||
Debian packages, and the base system is well documented. The `Debian
|
||||
distribution. This means that you have access to the entire range of
|
||||
Debian packages, and that the base system is well documented. The `Debian
|
||||
Administrator's Handbook`_ is available online, and provides a
|
||||
comprehensive introduction to the Debian operating system.
|
||||
|
||||
@ -17,11 +17,11 @@ updates to some Debian packages when necessary.
|
||||
|
||||
We also deliver a specially optimized Linux kernel, where we enable
|
||||
all required virtualization and container features. That kernel
|
||||
includes drivers for ZFS_, and several hardware drivers. For example,
|
||||
includes drivers for ZFS_, as well as several hardware drivers. For example,
|
||||
we ship Intel network card drivers to support their newest hardware.
|
||||
|
||||
The following sections will concentrate on backup related topics. They
|
||||
either explain things which are different on `Proxmox Backup`_, or
|
||||
will explain things which are different on `Proxmox Backup`_, or
|
||||
tasks which are commonly used on `Proxmox Backup`_. For other topics,
|
||||
please refer to the standard Debian documentation.
|
||||
|
||||
|
@ -8,7 +8,7 @@ Datastores

A Datastore is the logical place where :ref:`Backup Snapshots
<term_backup_snapshot>` and their chunks are stored. Snapshots consist of a
manifest, blobs, dynamic- and fixed-indexes (see :ref:`terms`), and are
manifest, blobs, and dynamic- and fixed-indexes (see :ref:`terms`), and are
stored in the following directory structure:

 <datastore-root>/<type>/<id>/<time>/
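To make the placeholders concrete: a hypothetical snapshot of the virtual
machine with ID ``100`` would, for example, live under a path such as

 <datastore-root>/vm/100/2021-11-23T10:01:00Z/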
@ -32,8 +32,8 @@ The chunks of a datastore are found in

 <datastore-root>/.chunks/

This chunk directory is further subdivided by the first four byte of the chunks
checksum, so the chunk with the checksum
This chunk directory is further subdivided by the first four bytes of the
chunk's checksum, so a chunk with the checksum

 a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b
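Following that subdivision rule, this example chunk would end up below a prefix
directory named after the leading hex digits of its checksum, roughly

 <datastore-root>/.chunks/a342/a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b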
@ -47,7 +47,7 @@ per directory can be bad for file system performance.
|
||||
These chunk directories ('0000'-'ffff') will be preallocated when a datastore
|
||||
is created.
|
||||
|
||||
Fixed-sized Chunks
|
||||
Fixed-Sized Chunks
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
For block based backups (like VMs), fixed-sized chunks are used. The content
|
||||
@ -58,10 +58,10 @@ often tries to allocate files in contiguous pieces, so new files get new
|
||||
blocks, and changing existing files changes only their own blocks.
|
||||
|
||||
As an optimization, VMs in `Proxmox VE`_ can make use of 'dirty bitmaps', which
|
||||
can track the changed blocks of an image. Since these bitmap are also a
|
||||
can track the changed blocks of an image. Since these bitmaps are also a
|
||||
representation of the image split into chunks, there is a direct relation
|
||||
between dirty blocks of the image and chunks which need to get uploaded, so
|
||||
only modified chunks of the disk have to be uploaded for a backup.
|
||||
between the dirty blocks of the image and chunks which need to be uploaded.
|
||||
Thus, only modified chunks of the disk need to be uploaded to a backup.
|
||||
|
||||
Since the image is always split into chunks of the same size, unchanged blocks
|
||||
will result in identical checksums for those chunks, so such chunks do not need
|
||||
@ -71,13 +71,13 @@ changed blocks.
|
||||
For consistency, `Proxmox VE`_ uses a QEMU internal snapshot mechanism, that
|
||||
does not rely on storage snapshots either.
|
||||
|
||||
Dynamically sized Chunks
|
||||
Dynamically Sized Chunks
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If one does not want to backup block-based systems but rather file-based
|
||||
systems, using fixed-sized chunks is not a good idea, since every time a file
|
||||
would change in size, the remaining data gets shifted around and this would
|
||||
result in many chunks changing, reducing the amount of deduplication.
|
||||
When working with file-based systems rather than block-based systems,
|
||||
using fixed-sized chunks is not a good idea, since every time a file
|
||||
would change in size, the remaining data would be shifted around,
|
||||
resulting in many chunks changing and the amount of deduplication being reduced.
|
||||
|
||||
To improve this, `Proxmox Backup`_ Server uses dynamically sized chunks
|
||||
instead. Instead of splitting an image into fixed sizes, it first generates a
|
||||
@ -86,9 +86,9 @@ over this on-the-fly generated archive to calculate chunk boundaries.

We use a variant of Buzhash which is a cyclic polynomial algorithm. It works
by continuously calculating a checksum while iterating over the data, and on
certain conditions it triggers a hash boundary.
certain conditions, it triggers a hash boundary.

Assuming that most files of the system that is to be backed up have not
Assuming that most files on the system that is to be backed up have not
changed, eventually the algorithm triggers the boundary on the same data as a
previous backup, resulting in chunks that can be reused.
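As a rough illustration of the idea (a simplified toy, not the actual Buzhash
implementation used by Proxmox Backup), a content-defined chunker keeps a
rolling checksum over a sliding window and cuts a chunk whenever the low bits
of that checksum match a fixed pattern:

.. code-block:: rust

  // Toy sketch of content-defined chunking, NOT the real Buzhash code:
  // keep a rolling checksum over a sliding window and cut a chunk whenever
  // the low bits of the checksum are all zero.
  fn find_chunk_boundaries(data: &[u8], window: usize, mask: u32) -> Vec<usize> {
      let mut boundaries = Vec::new();
      let mut hash: u32 = 0;

      for (i, &byte) in data.iter().enumerate() {
          // rolling update: add the incoming byte, drop the byte leaving the window
          hash = hash.wrapping_add(u32::from(byte));
          if i >= window {
              hash = hash.wrapping_sub(u32::from(data[i - window]));
          }
          // if the data repeats between backups, the same boundaries are found
          // again, so the resulting chunks can be reused
          if i >= window && (hash & mask) == 0 {
              boundaries.push(i + 1);
          }
      }
      boundaries
  }

  fn main() {
      // 1 MiB of synthetic data; with mask 0xFFF a boundary would trigger on
      // average every ~4 KiB if the checksum were uniformly distributed
      let data: Vec<u8> = (0..(1 << 20)).map(|i| (i % 251) as u8).collect();
      let cuts = find_chunk_boundaries(&data, 64, 0xFFF);
      println!("found {} chunk boundaries", cuts.len());
  }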
@ -100,8 +100,8 @@ can be encrypted, and they are handled in a slightly different manner than
normal chunks.

The hashes of encrypted chunks are calculated not with the actual (encrypted)
chunk content, but with the plain-text content concatenated with the encryption
key. This way, two chunks of the same data encrypted with different keys
chunk content, but with the plain-text content, concatenated with the encryption
key. This way, two chunks with the same data but encrypted with different keys
generate two different checksums and no collisions occur for multiple
encryption keys.
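Conceptually (a sketch only, not the server's actual code; the use of the
``openssl`` crate here is just an assumption for the example), the digest
computation looks like this:

.. code-block:: rust

  // Sketch: the digest of an encrypted chunk is computed over the *plaintext*
  // data concatenated with the encryption key, so identical data encrypted
  // with different keys yields different chunk digests.
  use openssl::sha::sha256;

  fn encrypted_chunk_digest(plaintext: &[u8], key: &[u8]) -> [u8; 32] {
      let mut buf = Vec::with_capacity(plaintext.len() + key.len());
      buf.extend_from_slice(plaintext);
      buf.extend_from_slice(key);
      sha256(&buf)
  }

  fn main() {
      let key_a: [u8; 32] = [0xAA; 32];
      let key_b: [u8; 32] = [0xBB; 32];
      let data = b"same chunk content";
      // same data, different keys -> different digests, so no cross-key collisions
      assert_ne!(
          encrypted_chunk_digest(data, &key_a),
          encrypted_chunk_digest(data, &key_b)
      );
  }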
@ -112,14 +112,14 @@ the previous backup, do not need to be encrypted and uploaded.
Caveats and Limitations
-----------------------

Notes on hash collisions
Notes on Hash Collisions
^^^^^^^^^^^^^^^^^^^^^^^^

Every hashing algorithm has a chance to produce collisions, meaning two (or
more) inputs generate the same checksum. For SHA-256, this chance is
negligible. To calculate such a collision, one can use the ideas of the
'birthday problem' from probability theory. For big numbers, this is actually
infeasible to calculate with regular computers, but there is a good
negligible. To calculate the chances of such a collision, one can use the ideas
of the 'birthday problem' from probability theory. For big numbers, this is
actually unfeasible to calculate with regular computers, but there is a good
approximation:

.. math::
@ -127,7 +127,7 @@ approximation:
 p(n, d) = 1 - e^{-n^2/(2d)}

Where `n` is the number of tries, and `d` is the number of possibilities.
For a concrete example lets assume a large datastore of 1 PiB, and an average
For a concrete example, lets assume a large datastore of 1 PiB and an average
chunk size of 4 MiB. That means :math:`n = 268435456` tries, and :math:`d =
2^{256}` possibilities. Inserting those values in the formula from earlier you
will see that the probability of a collision in that scenario is:
@ -136,94 +136,96 @@ will see that the probability of a collision in that scenario is:

 3.1115 * 10^{-61}
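Spelling out the substitution: for such a tiny exponent,
:math:`1 - e^{-x} \approx x`, so

.. math::

 p \approx \frac{n^2}{2d} = \frac{(2^{28})^2}{2 \cdot 2^{256}} = 2^{-201} \approx 3.1115 \cdot 10^{-61}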
For context, in a lottery game of guessing 6 out of 45, the chance to correctly
|
||||
guess all 6 numbers is only :math:`1.2277 * 10^{-7}`, that means the chance of
|
||||
a collision is about the same as winning 13 such lotto games *in a row*.
|
||||
For context, in a lottery game of guessing 6 numbers out of 45, the chance to
|
||||
correctly guess all 6 numbers is only :math:`1.2277 * 10^{-7}`. This means the
|
||||
chance of a collision is about the same as winning 13 such lottery games *in a
|
||||
row*.
|
||||
|
||||
In conclusion, it is extremely unlikely that such a collision would occur by
|
||||
accident in a normal datastore.
|
||||
|
||||
Additionally, SHA-256 is prone to length extension attacks, but since there is
|
||||
an upper limit for how big the chunk are, this is not a problem, since a
|
||||
an upper limit for how big the chunks are, this is not a problem, because a
|
||||
potential attacker cannot arbitrarily add content to the data beyond that
|
||||
limit.
|
||||
|
||||
File-based Backup
|
||||
File-Based Backup
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
Since dynamically sized chunks (for file-based backups) are created on a custom
|
||||
archive format (pxar) and not over the files directly, there is no relation
|
||||
between files and the chunks. This means that the Proxmox Backup client has to
|
||||
between the files and chunks. This means that the Proxmox Backup Client has to
|
||||
read all files again for every backup, otherwise it would not be possible to
|
||||
generate a consistent independent pxar archive where the original chunks can be
|
||||
reused. Note that there will be still only new or change chunks be uploaded.
|
||||
generate a consistent, independent pxar archive where the original chunks can be
|
||||
reused. Note that in spite of this, only new or changed chunks will be uploaded.
|
||||
|
||||
Verification of encrypted chunks
|
||||
Verification of Encrypted Chunks
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
For encrypted chunks, only the checksum of the original (plaintext) data is
|
||||
available, making it impossible for the server (without the encryption key), to
|
||||
available, making it impossible for the server (without the encryption key) to
|
||||
verify its content against it. Instead only the CRC-32 checksum gets checked.
|
||||
|
||||
Troubleshooting
|
||||
---------------
|
||||
|
||||
Index files(.fidx, .didx) contain information about how to rebuild a file, more
|
||||
precisely, they contain an ordered list of references to the chunks the original
|
||||
file was split up in. If there is something wrong with a snapshot it might be
|
||||
useful to find out which chunks are referenced in this specific snapshot, and
|
||||
check wheather all of them are present and intact. The command for getting the
|
||||
list of referenced chunks could look something like this:
|
||||
Index files(*.fidx*, *.didx*) contain information about how to rebuild a file.
|
||||
More precisely, they contain an ordered list of references to the chunks that
|
||||
the original file was split into. If there is something wrong with a snapshot,
|
||||
it might be useful to find out which chunks are referenced in it, and check
|
||||
whether they are present and intact. The ``proxmox-backup-debug`` command line
|
||||
tool can be used to inspect such files and recover their contents. For example,
|
||||
to get a list of the referenced chunks of a *.fidx* index:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-debug inspect file drive-scsi0.img.fidx
|
||||
|
||||
The same command can be used to look at .blob file, without ``--decode`` just
|
||||
the size and the encryption type, if any, is printed. If ``--decode`` is set the
|
||||
blob file is decoded into the specified file('-' will decode it directly into
|
||||
stdout).
|
||||
The same command can be used to inspect *.blob* files. Without the ``--decode``
|
||||
parameter, just the size and the encryption type, if any, are printed. If
|
||||
``--decode`` is set, the blob file is decoded into the specified file ('-' will
|
||||
decode it directly to stdout).
|
||||
|
||||
The following example would print the decoded contents of
|
||||
`qemu-server.conf.blob`. If the file you're trying to inspect is encrypted, a
|
||||
path to the key file must be provided using ``--keyfile``.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-debug inspect file qemu-server.conf.blob --decode -
|
||||
|
||||
would print the decoded contents of `qemu-server.conf.blob`. If the file you're
|
||||
trying to inspect is encrypted, a path to the keyfile has to be provided using
|
||||
``--keyfile``.
|
||||
|
||||
Checking in which index files a specific chunk file is referenced can be done
|
||||
You can also check in which index files a specific chunk file is referenced
|
||||
with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-debug inspect chunk b531d3ffc9bd7c65748a61198c060678326a431db7eded874c327b7986e595e0 --reference-filter /path/in/a/datastore/directory
|
||||
|
||||
Here ``--reference-filter`` specifies where index files should be searched, this
|
||||
Here ``--reference-filter`` specifies where index files should be searched. This
|
||||
can be an arbitrary path. If, for some reason, the filename of the chunk was
|
||||
changed you can explicitly specify the digest using ``--digest``, by default the
|
||||
chunk filename is used as the digest to look for. Specifying no
|
||||
``--reference-filter`` will just print the CRC and encryption status of the
|
||||
chunk. You can also decode chunks, to do so ``--decode`` has to be set. If the
|
||||
chunk is encrypted a ``--keyfile`` has to be provided for decoding.
|
||||
changed, you can explicitly specify the digest using ``--digest``. By default, the
|
||||
chunk filename is used as the digest to look for. If no ``--reference-filter``
|
||||
is specified, it will only print the CRC and encryption status of the chunk. You
|
||||
can also decode chunks, by setting the ``--decode`` flag. If the chunk is
|
||||
encrypted, a ``--keyfile`` must be provided, in order to decode it.
|
||||
|
||||
Restore without a running PBS
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Restore without a Running Proxmox Backup Server
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
It is possible to restore spefiic files of snapshots without a running PBS using
|
||||
the `recover` sub-command, provided you have access to the intact index and
|
||||
chunk files. Note that you also need the corresponding key file if the backup
|
||||
was encrypted.
|
||||
It's possible to restore specific files from a snapshot, without a running
|
||||
Proxmox Backup Server instance, using the ``recover`` subcommand, provided you
|
||||
have access to the intact index and chunk files. Note that you also need the
|
||||
corresponding key file if the backup was encrypted.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-debug recover index drive-scsi0.img.fidx /path/to/.chunks
|
||||
|
||||
In above example the `/path/to/.chunks` argument is the path to the directory
that contains contains the chunks, and `drive-scsi0.img.fidx` is the index-file
of the file you'd lile to restore. Both paths can be absolute or relative. With
``--skip-crc`` it is possible to disable the crc checks of the chunks, this will
speed up the process slightly and allows for trying to restore (partially)
In the above example, the `/path/to/.chunks` argument is the path to the
directory that contains the chunks, and `drive-scsi0.img.fidx` is the index file
of the file you'd like to restore. Both paths can be absolute or relative. With
``--skip-crc``, it's possible to disable the CRC checks of the chunks. This
will speed up the process slightly and allow for trying to restore (partially)
corrupt chunks. It's recommended to always try without the skip-CRC option
first.
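Combining this with the flag discussed above, a last-resort attempt on a
partially damaged store might look like the following (illustrative
invocation):

.. code-block:: console

  # proxmox-backup-debug recover index drive-scsi0.img.fidx /path/to/.chunks --skip-crc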
@ -41,23 +41,23 @@ Binary Data (BLOBs)
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This type is used to store smaller (< 16MB) binary data such as
|
||||
configuration files. Larger files should be stored as image archive.
|
||||
configuration files. Larger files should be stored as image archives.
|
||||
|
||||
.. caution:: Please do not store all files as BLOBs. Instead, use the
|
||||
file archive to store whole directory trees.
|
||||
file archive to store entire directory trees.
|
||||
|
||||
|
||||
Catalog File: ``catalog.pcat1``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The catalog file is an index for file archives. It contains
|
||||
the list of files and is used to speed up search operations.
|
||||
the list of included files and is used to speed up search operations.
|
||||
|
||||
|
||||
The Manifest: ``index.json``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The manifest contains the list of all backup files, their
|
||||
The manifest contains a list of all backed up files, and their
|
||||
sizes and checksums. It is used to verify the consistency of a
|
||||
backup.
|
||||
|
||||
@ -68,18 +68,19 @@ Backup Type
|
||||
The backup server groups backups by *type*, where *type* is one of:
|
||||
|
||||
``vm``
|
||||
This type is used for :term:`virtual machine`\ s. Typically
|
||||
This type is used for :term:`virtual machine`\ s. It typically
|
||||
consists of the virtual machine's configuration file and an image archive
|
||||
for each disk.
|
||||
|
||||
``ct``
|
||||
This type is used for :term:`container`\ s. Consists of the container's
|
||||
configuration and a single file archive for the filesystem content.
|
||||
This type is used for :term:`container`\ s. It consists of the container's
|
||||
configuration and a single file archive for the filesystem's contents.
|
||||
|
||||
``host``
|
||||
This type is used for backups created from within the backed up machine.
|
||||
Typically this would be a physical host but could also be a virtual machine
|
||||
or container. Such backups may contain file and image archives, there are no restrictions in this regard.
|
||||
This type is used for file/directory backups created from within a machine.
|
||||
Typically this would be a physical host, but could also be a virtual machine
|
||||
or container. Such backups may contain file and image archives; there are no
|
||||
restrictions in this regard.
|
||||
|
||||
|
||||
Backup ID
|
||||
|
101
docs/traffic-control.rst
Normal file
@ -0,0 +1,101 @@
|
||||
.. _sysadmin_traffic_control:
|
||||
|
||||
Traffic Control
|
||||
---------------
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-traffic-control-add.png
|
||||
:align: right
|
||||
:alt: Add a traffic control limit
|
||||
|
||||
Creating and restoring backups can produce lots of traffic and impact other
|
||||
users of the network or shared storages.
|
||||
|
||||
Proxmox Backup Server allows you to limit network traffic for clients within
|
||||
specified networks using a token bucket filter (TBF).
|
||||
|
||||
This allows you to avoid network congestion or to prioritize traffic from
|
||||
certain hosts.
|
||||
|
||||
You can manage the traffic controls either over the web-interface or using the
|
||||
``traffic-control`` commands of the ``proxmox-backup-manager`` command-line
|
||||
tool.
|
||||
|
||||
.. note:: Sync jobs on the server are not affected by its rate-in limits. If
you want to limit the incoming traffic that a pull-based sync job
generates, you need to set up a job-specific rate-in limit. See
:ref:`syncjobs`.
|
||||
|
||||
The following command adds a traffic control rule to limit all IPv4 clients
|
||||
(network ``0.0.0.0/0``) to 100 MB/s:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager traffic-control create rule0 --network 0.0.0.0/0 \
|
||||
--rate-in 100MB --rate-out 100MB \
|
||||
--comment "Default rate limit (100MB/s) for all clients"
|
||||
|
||||
.. note:: To limit both IPv4 and IPv6 network spaces you need to pass two
|
||||
network parameters ``::/0`` and ``0.0.0.0/0``.
|
||||
|
||||
It is possible to restrict rules to certain time frames, for example the
|
||||
company office hours:
|
||||
|
||||
.. tip:: You can use SI (base 10: KB, MB, ...) or IEC (base 2: KiB, MiB, ...)
|
||||
units.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager traffic-control update rule0 \
|
||||
--timeframe "mon..fri 8-12" \
|
||||
--timeframe "mon..fri 14:30-18"
|
||||
|
||||
If there are more rules, the server uses the rule with the smaller network. For
|
||||
example, we can overwrite the setting for our private network (and the server
|
||||
itself) with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager traffic-control create rule1 \
|
||||
--network 192.168.2.0/24 \
|
||||
--network 127.0.0.0/8 \
|
||||
--rate-in 20GB --rate-out 20GB \
|
||||
--comment "Use 20GB/s for the local network"
|
||||
|
||||
.. note:: The behavior is undefined if there are several rules for the same network.
|
||||
|
||||
If there are multiple rules that match the same network, all of them will be
applied, which means that the smallest one wins, as its bucket fills up the
fastest.
|
||||
|
||||
To list the current rules use:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager traffic-control list
|
||||
┌───────┬─────────────┬─────────────┬─────────────────────────┬────────────...─┐
|
||||
│ name │ rate-in │ rate-out │ network │ timeframe ... │
|
||||
╞═══════╪═════════════╪═════════════╪═════════════════════════╪════════════...═╡
|
||||
│ rule0 │ 100 MB │ 100 MB │ ["0.0.0.0/0"] │ ["mon..fri ... │
|
||||
├───────┼─────────────┼─────────────┼─────────────────────────┼────────────...─┤
|
||||
│ rule1 │ 20 GB │ 20 GB │ ["192.168.2.0/24", ...] │ ... │
|
||||
└───────┴─────────────┴─────────────┴─────────────────────────┴────────────...─┘
|
||||
|
||||
Rules can also be removed:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager traffic-control remove rule1
|
||||
|
||||
|
||||
To show the state (current data rate) of all configured rules use:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager traffic-control traffic
|
||||
┌───────┬─────────────┬──────────────┐
|
||||
│ name │ cur-rate-in │ cur-rate-out │
|
||||
╞═══════╪═════════════╪══════════════╡
|
||||
│ rule0 │ 0 B │ 0 B │
|
||||
├───────┼─────────────┼──────────────┤
|
||||
│ rule1 │ 1.161 GiB │ 19.146 KiB │
|
||||
└───────┴─────────────┴──────────────┘
|
@ -15,17 +15,19 @@ Proxmox Backup Server supports several authentication realms, and you need to
|
||||
choose the realm when you add a new user. Possible realms are:
|
||||
|
||||
:pam: Linux PAM standard authentication. Use this if you want to
|
||||
authenticate as Linux system user (Users need to exist on the
|
||||
authenticate as a Linux system user (users need to exist on the
|
||||
system).
|
||||
|
||||
:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
|
||||
``/etc/proxmox-backup/shadow.json``.
|
||||
|
||||
After installation, there is a single user ``root@pam``, which
|
||||
corresponds to the Unix superuser. User configuration information is stored in the file
|
||||
``/etc/proxmox-backup/user.cfg``. You can use the
|
||||
``proxmox-backup-manager`` command line tool to list or manipulate
|
||||
users:
|
||||
:openid: OpenID Connect server. Users can authenticate against an external
|
||||
OpenID Connect server.
|
||||
|
||||
After installation, there is a single user, ``root@pam``, which corresponds to
|
||||
the Unix superuser. User configuration information is stored in the file
|
||||
``/etc/proxmox-backup/user.cfg``. You can use the ``proxmox-backup-manager``
|
||||
command line tool to list or manipulate users:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@ -40,13 +42,13 @@ users:
|
||||
:align: right
|
||||
:alt: Add a new user
|
||||
|
||||
The superuser has full administration rights on everything, so you
|
||||
normally want to add other users with less privileges. You can add a new
|
||||
The superuser has full administration rights on everything, so it's recommended
|
||||
to add other users with less privileges. You can add a new
|
||||
user with the ``user create`` subcommand or through the web
|
||||
interface, under the **User Management** tab of **Configuration -> Access
|
||||
Control**. The ``create`` subcommand lets you specify many options like
|
||||
``--email`` or ``--password``. You can update or change any user properties
|
||||
using the ``update`` subcommand later (**Edit** in the GUI):
|
||||
using the ``user update`` subcommand later (**Edit** in the GUI):
|
||||
|
||||
|
||||
.. code-block:: console
|
||||
@ -71,16 +73,16 @@ The resulting user list looks like this:
|
||||
│ root@pam │ 1 │ │ │ │ │ Superuser │
|
||||
└──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘
|
||||
|
||||
Newly created users do not have any permissions. Please read the Access Control
|
||||
Newly created users do not have any permissions. Please read the :ref:`user_acl`
|
||||
section to learn how to set access permissions.
|
||||
|
||||
If you want to disable a user account, you can do that by setting ``--enable`` to ``0``
|
||||
You can disable a user account by setting ``--enable`` to ``0``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager user update john@pbs --enable 0
|
||||
|
||||
Or completely remove the user with:
|
||||
Or completely remove a user with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@ -95,7 +97,7 @@ API Tokens
|
||||
:align: right
|
||||
:alt: API Token Overview
|
||||
|
||||
Any authenticated user can generate API tokens which can in turn be used to
|
||||
Any authenticated user can generate API tokens, which can in turn be used to
|
||||
configure various clients, instead of directly providing the username and
|
||||
password.
|
||||
|
||||
@ -117,7 +119,7 @@ The API token is passed from the client to the server by setting the
|
||||
``Authorization`` HTTP header with method ``PBSAPIToken`` to the value
|
||||
``TOKENID:TOKENSECRET``.
|
||||
|
||||
Generating new tokens can done using ``proxmox-backup-manager`` or the GUI:
You can generate tokens from the GUI or by using ``proxmox-backup-manager``:

.. code-block:: console
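The token-generation example itself lies outside this hunk; as a sketch
(assuming the ``user generate-token`` subcommand and a hypothetical token name
``client1``), it would be along the lines of:

.. code-block:: console

  # proxmox-backup-manager user generate-token john@pbs client1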
@ -154,9 +156,9 @@ section to learn how to set access permissions.
|
||||
Access Control
|
||||
--------------
|
||||
|
||||
By default new users and API tokens do not have any permission. Instead you
|
||||
By default, new users and API tokens do not have any permissions. Instead you
|
||||
need to specify what is allowed and what is not. You can do this by assigning
|
||||
roles to users/tokens on specific objects like datastores or remotes. The
|
||||
roles to users/tokens on specific objects, like datastores or remotes. The
|
||||
following roles exist:
|
||||
|
||||
**NoAccess**
|
||||
@ -176,7 +178,7 @@ following roles exist:
|
||||
is not allowed to read the actual data.
|
||||
|
||||
**DatastoreReader**
|
||||
Can Inspect datastore content and can do restores.
|
||||
Can Inspect datastore content and do restores.
|
||||
|
||||
**DatastoreBackup**
|
||||
Can backup and restore owned backups.
|
||||
@ -193,6 +195,18 @@ following roles exist:
|
||||
**RemoteSyncOperator**
|
||||
Is allowed to read data from a remote.
|
||||
|
||||
**TapeAudit**
|
||||
Can view tape related configuration and status
|
||||
|
||||
**TapeAdmin**
|
||||
Can do anything related to tape backup
|
||||
|
||||
**TapeOperator**
|
||||
Can do tape backup and restore (but no configuration changes)
|
||||
|
||||
**TapeReader**
|
||||
Can read and inspect tape configuration and media content
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-user-management-add-user.png
|
||||
:align: right
|
||||
:alt: Add permissions for user
|
||||
@ -236,7 +250,8 @@ You can list the ACLs of each user/token using the following command:
|
||||
│ john@pbs │ /datastore/store1 │ 1 │ DatastoreAdmin │
|
||||
└──────────┴───────────────────┴───────────┴────────────────┘
|
||||
|
||||
A single user/token can be assigned multiple permission sets for different datastores.
|
||||
A single user/token can be assigned multiple permission sets for different
|
||||
datastores.
|
||||
|
||||
.. Note::
|
||||
Naming convention is important here. For datastores on the host,
|
||||
@ -247,11 +262,11 @@ A single user/token can be assigned multiple permission sets for different datas
|
||||
remote (see `Remote` below) and ``{storename}`` is the name of the datastore on
|
||||
the remote.
|
||||
|
||||
API Token permissions
|
||||
API Token Permissions
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
API token permissions are calculated based on ACLs containing their ID
|
||||
independent of those of their corresponding user. The resulting permission set
|
||||
API token permissions are calculated based on ACLs containing their ID,
|
||||
independently of those of their corresponding user. The resulting permission set
|
||||
on a given path is then intersected with that of the corresponding user.
|
||||
|
||||
In practice this means:
|
||||
@ -259,10 +274,10 @@ In practice this means:
|
||||
#. API tokens require their own ACL entries
|
||||
#. API tokens can never do more than their corresponding user
|
||||
|
||||
Effective permissions
|
||||
Effective Permissions
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
To calculate and display the effective permission set of a user or API token
|
||||
To calculate and display the effective permission set of a user or API token,
|
||||
you can use the ``proxmox-backup-manager user permission`` command:
|
||||
|
||||
.. code-block:: console
|
||||
@ -287,7 +302,7 @@ you can use the ``proxmox-backup-manager user permission`` command:
|
||||
|
||||
.. _user_tfa:
|
||||
|
||||
Two-factor authentication
|
||||
Two-Factor Authentication
|
||||
-------------------------
|
||||
|
||||
Introduction
|
||||
@ -296,7 +311,7 @@ Introduction
|
||||
With simple authentication, only a password (single factor) is required to
|
||||
successfully claim an identity (authenticate), for example, to be able to log in
|
||||
as `root@pam` on a specific instance of Proxmox Backup Server. In this case, if
|
||||
the password gets stolen or leaked, anybody can use it to log in - even if they
|
||||
the password gets leaked or stolen, anybody can use it to log in - even if they
|
||||
should not be allowed to do so.
|
||||
|
||||
With two-factor authentication (TFA), a user is asked for an additional factor
|
||||
@ -359,16 +374,18 @@ WebAuthn
|
||||
|
||||
For WebAuthn to work, you need to have two things:
|
||||
|
||||
* a trusted HTTPS certificate (for example, by using `Let's Encrypt
|
||||
* A trusted HTTPS certificate (for example, by using `Let's Encrypt
|
||||
<https://pbs.proxmox.com/wiki/index.php/HTTPS_Certificate_Configuration>`_).
|
||||
While it probably works with an untrusted certificate, some browsers may warn
|
||||
or refuse WebAuthn operations if it is not trusted.
|
||||
|
||||
* setup the WebAuthn configuration (see *Configuration -> Authentication* in the
|
||||
Proxmox Backup Server web-interface). This can be auto-filled in most setups.
|
||||
* Setup the WebAuthn configuration (see **Configuration -> Authentication** in
|
||||
the Proxmox Backup Server web interface). This can be auto-filled in most
|
||||
setups.
|
||||
|
||||
Once you have fulfilled both of these requirements, you can add a WebAuthn
|
||||
configuration in the *Access Control* panel.
|
||||
configuration in the **Two Factor Authentication** tab of the **Access Control**
|
||||
panel.
|
||||
|
||||
.. _user_tfa_setup_recovery_keys:
|
||||
|
||||
@ -380,7 +397,8 @@ Recovery Keys
|
||||
:alt: Add a new user
|
||||
|
||||
Recovery key codes do not need any preparation; you can simply create a set of
|
||||
recovery keys in the *Access Control* panel.
|
||||
recovery keys in the **Two Factor Authentication** tab of the **Access Control**
|
||||
panel.
|
||||
|
||||
.. note:: There can only be one set of single-use recovery keys per user at any
|
||||
time.
|
||||
|
@ -1,4 +1,4 @@
|
||||
use anyhow::{Error};
|
||||
use anyhow::Error;
|
||||
|
||||
// chacha20-poly1305
|
||||
|
||||
|
@ -1,6 +1,7 @@
|
||||
use anyhow::{Error};
|
||||
|
||||
use proxmox::api::{*, cli::*};
|
||||
use proxmox_schema::*;
|
||||
use proxmox_router::cli::*;
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
|
@ -1,6 +1,6 @@
|
||||
use std::io::Write;
|
||||
|
||||
use anyhow::{Error};
|
||||
use anyhow::Error;
|
||||
|
||||
use pbs_api_types::Authid;
|
||||
use pbs_client::{HttpClient, HttpClientOptions, BackupReader};
|
||||
@ -34,7 +34,7 @@ async fn run() -> Result<(), Error> {
|
||||
|
||||
let client = HttpClient::new(host, 8007, auth_id, options)?;
|
||||
|
||||
let backup_time = proxmox::tools::time::parse_rfc3339("2019-06-28T10:49:48Z")?;
|
||||
let backup_time = proxmox_time::parse_rfc3339("2019-06-28T10:49:48Z")?;
|
||||
|
||||
let client = BackupReader::start(client, None, "store2", "host", "elsa", backup_time, true)
|
||||
.await?;
|
||||
@ -59,7 +59,7 @@ async fn run() -> Result<(), Error> {
|
||||
}
|
||||
|
||||
fn main() {
|
||||
if let Err(err) = pbs_runtime::main(run()) {
|
||||
if let Err(err) = proxmox_async::runtime::main(run()) {
|
||||
eprintln!("ERROR: {}", err);
|
||||
}
|
||||
println!("DONE");
|
||||
|
@ -1,9 +1,9 @@
|
||||
use anyhow::{bail, Error};
|
||||
|
||||
use std::thread;
|
||||
use std::path::PathBuf;
|
||||
use std::io::Write;
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
|
||||
// tar handle files that shrink during backup, by simply padding with zeros.
|
||||
//
|
||||
// this binary runs multiple threads which write some large files, then truncates
|
||||
|
@ -69,7 +69,7 @@ fn send_request(
|
||||
}
|
||||
|
||||
fn main() -> Result<(), Error> {
|
||||
pbs_runtime::main(run())
|
||||
proxmox_async::runtime::main(run())
|
||||
}
|
||||
|
||||
async fn run() -> Result<(), Error> {
|
||||
|
@ -69,7 +69,7 @@ fn send_request(
|
||||
}
|
||||
|
||||
fn main() -> Result<(), Error> {
|
||||
pbs_runtime::main(run())
|
||||
proxmox_async::runtime::main(run())
|
||||
}
|
||||
|
||||
async fn run() -> Result<(), Error> {
|
||||
|
@ -9,7 +9,7 @@ use tokio::net::{TcpListener, TcpStream};
|
||||
use pbs_buildcfg::configdir;
|
||||
|
||||
fn main() -> Result<(), Error> {
|
||||
pbs_runtime::main(run())
|
||||
proxmox_async::runtime::main(run())
|
||||
}
|
||||
|
||||
async fn run() -> Result<(), Error> {
|
||||
|
@ -5,7 +5,7 @@ use hyper::{Body, Request, Response};
|
||||
use tokio::net::{TcpListener, TcpStream};
|
||||
|
||||
fn main() -> Result<(), Error> {
|
||||
pbs_runtime::main(run())
|
||||
proxmox_async::runtime::main(run())
|
||||
}
|
||||
|
||||
async fn run() -> Result<(), Error> {
|
||||
|
@ -13,7 +13,7 @@ use pbs_client::ChunkStream;
|
||||
// Note: I can currently get about 830MB/s
|
||||
|
||||
fn main() {
|
||||
if let Err(err) = pbs_runtime::main(run()) {
|
||||
if let Err(err) = proxmox_async::runtime::main(run()) {
|
||||
panic!("ERROR: {}", err);
|
||||
}
|
||||
}
|
||||
|
@ -16,7 +16,7 @@ async fn upload_speed() -> Result<f64, Error> {
|
||||
|
||||
let client = HttpClient::new(host, 8007, auth_id, options)?;
|
||||
|
||||
let backup_time = proxmox::tools::time::epoch_i64();
|
||||
let backup_time = proxmox_time::epoch_i64();
|
||||
|
||||
let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false, true).await?;
|
||||
|
||||
@ -27,7 +27,7 @@ async fn upload_speed() -> Result<f64, Error> {
|
||||
}
|
||||
|
||||
fn main() {
|
||||
match pbs_runtime::main(upload_speed()) {
|
||||
match proxmox_async::runtime::main(upload_speed()) {
|
||||
Ok(mbs) => {
|
||||
println!("average upload speed: {} MB/s", mbs);
|
||||
}
|
||||
|
@ -7,6 +7,7 @@ description = "general API type helpers for PBS"
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0"
|
||||
hex = "0.4.3"
|
||||
lazy_static = "1.4"
|
||||
libc = "0.2"
|
||||
nix = "0.19.1"
|
||||
@ -14,7 +15,8 @@ openssl = "0.10"
|
||||
regex = "1.2"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
|
||||
proxmox = { version = "0.13.3", default-features = false, features = [ "api-macro" ] }
|
||||
|
||||
proxmox-systemd = { path = "../proxmox-systemd" }
|
||||
pbs-tools = { path = "../pbs-tools" }
|
||||
proxmox = "0.15.3"
|
||||
proxmox-lang = "1.0.0"
|
||||
proxmox-schema = { version = "1.0.1", features = [ "api-macro" ] }
|
||||
proxmox-time = "1.1"
|
||||
proxmox-uuid = { version = "1.0.0", features = [ "serde" ] }
|
||||
|
@ -1,13 +1,12 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde::de::{value, IntoDeserializer};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::api;
|
||||
use proxmox::api::schema::{
|
||||
ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema,
|
||||
use proxmox_lang::constnamedbitmap;
|
||||
use proxmox_schema::{
|
||||
api, const_regex, ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema,
|
||||
};
|
||||
use proxmox::{constnamedbitmap, const_regex};
|
||||
|
||||
const_regex! {
|
||||
pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
|
||||
@ -76,7 +75,7 @@ constnamedbitmap! {
|
||||
|
||||
/// Admin always has all privileges. It can do everything except a few actions
|
||||
/// which are limited to the 'root@pam` superuser
|
||||
pub const ROLE_ADMIN: u64 = std::u64::MAX;
|
||||
pub const ROLE_ADMIN: u64 = u64::MAX;
|
||||
|
||||
/// NoAccess can be used to remove privileges from specific (sub-)paths
|
||||
pub const ROLE_NO_ACCESS: u64 = 0;
|
||||
@ -222,7 +221,6 @@ pub enum Role {
|
||||
TapeReader = ROLE_TAPE_READER,
|
||||
}
|
||||
|
||||
|
||||
impl FromStr for Role {
|
||||
type Err = value::Error;
|
||||
|
||||
@ -231,26 +229,24 @@ impl FromStr for Role {
|
||||
}
|
||||
}
|
||||
|
||||
pub const ACL_PATH_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&ACL_PATH_REGEX);
|
||||
pub const ACL_PATH_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&ACL_PATH_REGEX);
|
||||
|
||||
pub const ACL_PATH_SCHEMA: Schema = StringSchema::new(
|
||||
"Access control path.")
|
||||
pub const ACL_PATH_SCHEMA: Schema = StringSchema::new("Access control path.")
|
||||
.format(&ACL_PATH_FORMAT)
|
||||
.min_length(1)
|
||||
.max_length(128)
|
||||
.schema();
|
||||
|
||||
pub const ACL_PROPAGATE_SCHEMA: Schema = BooleanSchema::new(
|
||||
"Allow to propagate (inherit) permissions.")
|
||||
pub const ACL_PROPAGATE_SCHEMA: Schema =
|
||||
BooleanSchema::new("Allow to propagate (inherit) permissions.")
|
||||
.default(true)
|
||||
.schema();
|
||||
|
||||
pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new(
|
||||
"Type of 'ugid' property.")
|
||||
pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new("Type of 'ugid' property.")
|
||||
.format(&ApiStringFormat::Enum(&[
|
||||
EnumEntry::new("user", "User"),
|
||||
EnumEntry::new("group", "Group")]))
|
||||
EnumEntry::new("group", "Group"),
|
||||
]))
|
||||
.schema();
|
||||
|
||||
#[api(
|
||||
|
@ -3,9 +3,7 @@ use std::fmt::{self, Display};
|
||||
use anyhow::Error;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::api;
|
||||
|
||||
use pbs_tools::format::{as_fingerprint, bytes_as_fingerprint};
|
||||
use proxmox_schema::api;
|
||||
|
||||
#[api(default: "encrypt")]
|
||||
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
|
||||
@ -35,6 +33,9 @@ impl Fingerprint {
|
||||
pub fn bytes(&self) -> &[u8; 32] {
|
||||
&self.bytes
|
||||
}
|
||||
pub fn signature(&self) -> String {
|
||||
as_fingerprint(&self.bytes)
|
||||
}
|
||||
}
|
||||
|
||||
/// Display as short key ID
|
||||
@ -55,3 +56,43 @@ impl std::str::FromStr for Fingerprint {
|
||||
}
|
||||
}
|
||||
|
||||
fn as_fingerprint(bytes: &[u8]) -> String {
|
||||
hex::encode(bytes)
|
||||
.as_bytes()
|
||||
.chunks(2)
|
||||
.map(|v| unsafe { std::str::from_utf8_unchecked(v) }) // it's a hex string
|
||||
.collect::<Vec<&str>>().join(":")
|
||||
}
|
||||
|
||||
pub mod bytes_as_fingerprint {
|
||||
use std::mem::MaybeUninit;
|
||||
|
||||
use serde::{Deserialize, Serializer, Deserializer};
|
||||
|
||||
pub fn serialize<S>(
|
||||
bytes: &[u8; 32],
|
||||
serializer: S,
|
||||
) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let s = super::as_fingerprint(bytes);
|
||||
serializer.serialize_str(&s)
|
||||
}
|
||||
|
||||
pub fn deserialize<'de, D>(
|
||||
deserializer: D,
|
||||
) -> Result<[u8; 32], D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
// TODO: more efficiently implement with a Visitor implementing visit_str using split() and
|
||||
// hex::decode by-byte
|
||||
let mut s = String::deserialize(deserializer)?;
|
||||
s.retain(|c| c != ':');
|
||||
let mut out = MaybeUninit::<[u8; 32]>::uninit();
|
||||
hex::decode_to_slice(s.as_bytes(), unsafe { &mut (*out.as_mut_ptr())[..] })
|
||||
.map_err(serde::de::Error::custom)?;
|
||||
Ok(unsafe { out.assume_init() })
|
||||
}
|
||||
}
|
||||
|
@ -1,13 +1,10 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::api;
|
||||
use proxmox::api::schema::{
|
||||
ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType, Schema,
|
||||
StringSchema, Updater,
|
||||
use proxmox_schema::{
|
||||
api, const_regex, ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType,
|
||||
Schema, StringSchema, Updater,
|
||||
};
|
||||
|
||||
use proxmox::const_regex;
|
||||
|
||||
use crate::{
|
||||
PROXMOX_SAFE_ID_FORMAT, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, CryptMode, UPID,
|
||||
Fingerprint, Userid, Authid,
|
||||
@ -43,6 +40,7 @@ pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema = StringSchema::new("Backup archive
|
||||
.schema();
|
||||
|
||||
pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX);
|
||||
pub const BACKUP_GROUP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GROUP_PATH_REGEX);
|
||||
|
||||
pub const BACKUP_ID_SCHEMA: Schema = StringSchema::new("Backup ID.")
|
||||
.format(&BACKUP_ID_FORMAT)
|
||||
@ -60,6 +58,10 @@ pub const BACKUP_TIME_SCHEMA: Schema = IntegerSchema::new("Backup time (Unix epo
|
||||
.minimum(1_547_797_308)
|
||||
.schema();
|
||||
|
||||
pub const BACKUP_GROUP_SCHEMA: Schema = StringSchema::new("Backup Group")
|
||||
.format(&BACKUP_GROUP_FORMAT)
|
||||
.schema();
|
||||
|
||||
pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
|
||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||
.min_length(3)
|
||||
@ -393,6 +395,9 @@ pub struct SnapshotListItem {
|
||||
/// The owner of the snapshots group
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub owner: Option<Authid>,
|
||||
/// Protection from prunes
|
||||
#[serde(default)]
|
||||
pub protected: bool,
|
||||
}
|
||||
|
||||
#[api(
|
||||
|
@ -1,6 +1,6 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::api;
|
||||
use proxmox_schema::api;
|
||||
|
||||
#[api]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
|
341
pbs-api-types/src/human_byte.rs
Normal file
@ -0,0 +1,341 @@
|
||||
use anyhow::{bail, Error};
|
||||
|
||||
use proxmox_schema::{ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType};
|
||||
|
||||
/// Size units for byte sizes
|
||||
#[derive(Debug, Copy, Clone, PartialEq)]
|
||||
pub enum SizeUnit {
|
||||
Byte,
|
||||
// SI (base 10)
|
||||
KByte,
|
||||
MByte,
|
||||
GByte,
|
||||
TByte,
|
||||
PByte,
|
||||
// IEC (base 2)
|
||||
Kibi,
|
||||
Mebi,
|
||||
Gibi,
|
||||
Tebi,
|
||||
Pebi,
|
||||
}
|
||||
|
||||
impl SizeUnit {
|
||||
/// Returns the scaling factor
|
||||
pub fn factor(&self) -> f64 {
|
||||
match self {
|
||||
SizeUnit::Byte => 1.0,
|
||||
// SI (base 10)
|
||||
SizeUnit::KByte => 1_000.0,
|
||||
SizeUnit::MByte => 1_000_000.0,
|
||||
SizeUnit::GByte => 1_000_000_000.0,
|
||||
SizeUnit::TByte => 1_000_000_000_000.0,
|
||||
SizeUnit::PByte => 1_000_000_000_000_000.0,
|
||||
// IEC (base 2)
|
||||
SizeUnit::Kibi => 1024.0,
|
||||
SizeUnit::Mebi => 1024.0 * 1024.0,
|
||||
SizeUnit::Gibi => 1024.0 * 1024.0 * 1024.0,
|
||||
SizeUnit::Tebi => 1024.0 * 1024.0 * 1024.0 * 1024.0,
|
||||
SizeUnit::Pebi => 1024.0 * 1024.0 * 1024.0 * 1024.0 * 1024.0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the biggest possible unit still having a value greater than zero before the decimal point
|
||||
/// 'binary' specifies if IEC (base 2) units should be used or SI (base 10) ones
|
||||
pub fn auto_scale(size: f64, binary: bool) -> SizeUnit {
|
||||
if binary {
|
||||
let bits = 64 - (size as u64).leading_zeros();
|
||||
match bits {
|
||||
51.. => SizeUnit::Pebi,
|
||||
41..=50 => SizeUnit::Tebi,
|
||||
31..=40 => SizeUnit::Gibi,
|
||||
21..=30 => SizeUnit::Mebi,
|
||||
11..=20 => SizeUnit::Kibi,
|
||||
_ => SizeUnit::Byte,
|
||||
}
|
||||
} else {
|
||||
if size >= 1_000_000_000_000_000.0 {
|
||||
SizeUnit::PByte
|
||||
} else if size >= 1_000_000_000_000.0 {
|
||||
SizeUnit::TByte
|
||||
} else if size >= 1_000_000_000.0 {
|
||||
SizeUnit::GByte
|
||||
} else if size >= 1_000_000.0 {
|
||||
SizeUnit::MByte
|
||||
} else if size >= 1_000.0 {
|
||||
SizeUnit::KByte
|
||||
} else {
|
||||
SizeUnit::Byte
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the string representation
|
||||
impl std::fmt::Display for SizeUnit {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
SizeUnit::Byte => write!(f, "B"),
|
||||
// SI (base 10)
|
||||
SizeUnit::KByte => write!(f, "KB"),
|
||||
SizeUnit::MByte => write!(f, "MB"),
|
||||
SizeUnit::GByte => write!(f, "GB"),
|
||||
SizeUnit::TByte => write!(f, "TB"),
|
||||
SizeUnit::PByte => write!(f, "PB"),
|
||||
// IEC (base 2)
|
||||
SizeUnit::Kibi => write!(f, "KiB"),
|
||||
SizeUnit::Mebi => write!(f, "MiB"),
|
||||
SizeUnit::Gibi => write!(f, "GiB"),
|
||||
SizeUnit::Tebi => write!(f, "TiB"),
|
||||
SizeUnit::Pebi => write!(f, "PiB"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Strips a trailing SizeUnit, including any trailing whitespace.
|
||||
/// Supports both IEC and SI based scales, the B/b byte symbol is optional.
|
||||
fn strip_unit(v: &str) -> (&str, SizeUnit) {
|
||||
let v = v.strip_suffix(&['b', 'B'][..]).unwrap_or(v); // byte is implied anyway
|
||||
|
||||
let (v, binary) = match v.strip_suffix('i') {
|
||||
Some(n) => (n, true),
|
||||
None => (v, false),
|
||||
};
|
||||
|
||||
let mut unit = SizeUnit::Byte;
|
||||
(v.strip_suffix(|c: char| match c {
|
||||
'k' | 'K' if !binary => { unit = SizeUnit::KByte; true }
|
||||
'm' | 'M' if !binary => { unit = SizeUnit::MByte; true }
|
||||
'g' | 'G' if !binary => { unit = SizeUnit::GByte; true }
|
||||
't' | 'T' if !binary => { unit = SizeUnit::TByte; true }
|
||||
'p' | 'P' if !binary => { unit = SizeUnit::PByte; true }
|
||||
// binary (IEC recommended) variants
|
||||
'k' | 'K' if binary => { unit = SizeUnit::Kibi; true }
|
||||
'm' | 'M' if binary => { unit = SizeUnit::Mebi; true }
|
||||
'g' | 'G' if binary => { unit = SizeUnit::Gibi; true }
|
||||
't' | 'T' if binary => { unit = SizeUnit::Tebi; true }
|
||||
'p' | 'P' if binary => { unit = SizeUnit::Pebi; true }
|
||||
_ => false
|
||||
}).unwrap_or(v).trim_end(), unit)
|
||||
}
|
||||
|
||||
/// Byte size which can be displayed in a human friendly way
|
||||
#[derive(Debug, Copy, Clone, UpdaterType)]
|
||||
pub struct HumanByte {
|
||||
/// The significant value; it does not include any factor of the `unit`
|
||||
size: f64,
|
||||
/// The scale/unit of the value
|
||||
unit: SizeUnit,
|
||||
}
|
||||
|
||||
fn verify_human_byte(s: &str) -> Result<(), Error> {
|
||||
match s.parse::<HumanByte>() {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => bail!("byte-size parse error for '{}': {}", s, err),
|
||||
}
|
||||
}
|
||||
impl ApiType for HumanByte {
|
||||
const API_SCHEMA: Schema = StringSchema::new(
|
||||
"Byte size with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).",
|
||||
)
|
||||
.format(&ApiStringFormat::VerifyFn(verify_human_byte))
|
||||
.min_length(1)
|
||||
.max_length(64)
|
||||
.schema();
|
||||
}
|
||||
|
||||
impl HumanByte {
|
||||
/// Create instance with size and unit (size must be positive)
|
||||
pub fn with_unit(size: f64, unit: SizeUnit) -> Result<Self, Error> {
|
||||
if size < 0.0 {
|
||||
bail!("byte size may not be negative");
|
||||
}
|
||||
Ok(HumanByte { size, unit })
|
||||
}
|
||||
|
||||
/// Create a new instance with optimal binary unit computed
|
||||
pub fn new_binary(size: f64) -> Self {
|
||||
let unit = SizeUnit::auto_scale(size, true);
|
||||
HumanByte { size: size / unit.factor(), unit }
|
||||
}
|
||||
|
||||
/// Create a new instance with optimal decimal unit computed
|
||||
pub fn new_decimal(size: f64) -> Self {
|
||||
let unit = SizeUnit::auto_scale(size, false);
|
||||
HumanByte { size: size / unit.factor(), unit }
|
||||
}
|
||||
|
||||
/// Returns the size as u64 number of bytes
|
||||
pub fn as_u64(&self) -> u64 {
|
||||
self.as_f64() as u64
|
||||
}
|
||||
|
||||
/// Returns the size as f64 number of bytes
|
||||
pub fn as_f64(&self) -> f64 {
|
||||
self.size * self.unit.factor()
|
||||
}
|
||||
|
||||
/// Returns a copy with optimal binary unit computed
|
||||
pub fn auto_scale_binary(self) -> Self {
|
||||
HumanByte::new_binary(self.as_f64())
|
||||
}
|
||||
|
||||
/// Returns a copy with optimal decimal unit computed
|
||||
pub fn auto_scale_decimal(self) -> Self {
|
||||
HumanByte::new_decimal(self.as_f64())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<u64> for HumanByte {
|
||||
fn from(v: u64) -> Self {
|
||||
HumanByte::new_binary(v as f64)
|
||||
}
|
||||
}
|
||||
impl From<usize> for HumanByte {
|
||||
fn from(v: usize) -> Self {
|
||||
HumanByte::new_binary(v as f64)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for HumanByte {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let precision = f.precision().unwrap_or(3) as f64;
|
||||
let precision_factor = 1.0 * 10.0_f64.powf(precision);
|
||||
// this could cause loss of information, rust has sadly no shortest-max-X flt2dec fmt yet
|
||||
let size = ((self.size * precision_factor).round()) / precision_factor;
|
||||
write!(f, "{} {}", size, self.unit)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::str::FromStr for HumanByte {
|
||||
type Err = Error;
|
||||
|
||||
fn from_str(v: &str) -> Result<Self, Error> {
|
||||
let (v, unit) = strip_unit(v);
|
||||
HumanByte::with_unit(v.parse()?, unit)
|
||||
}
|
||||
}
|
||||
|
||||
proxmox::forward_deserialize_to_from_str!(HumanByte);
|
||||
proxmox::forward_serialize_to_display!(HumanByte);
|
||||
|
||||
#[test]
|
||||
fn test_human_byte_parser() -> Result<(), Error> {
|
||||
assert!("-10".parse::<HumanByte>().is_err()); // negative size
|
||||
|
||||
fn do_test(v: &str, size: f64, unit: SizeUnit, as_str: &str) -> Result<(), Error> {
|
||||
let h: HumanByte = v.parse()?;
|
||||
|
||||
if h.size != size {
|
||||
bail!("got unexpected size for '{}' ({} != {})", v, h.size, size);
|
||||
}
|
||||
if h.unit != unit {
|
||||
bail!("got unexpected unit for '{}' ({:?} != {:?})", v, h.unit, unit);
|
||||
}
|
||||
|
||||
let new = h.to_string();
|
||||
if &new != as_str {
|
||||
bail!("to_string failed for '{}' ({:?} != {:?})", v, new, as_str);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
fn test(v: &str, size: f64, unit: SizeUnit, as_str: &str) -> bool {
|
||||
match do_test(v, size, unit, as_str) {
|
||||
Ok(_) => true,
|
||||
Err(err) => {
|
||||
eprintln!("{}", err); // makes debugging easier
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
assert!(test("14", 14.0, SizeUnit::Byte, "14 B"));
|
||||
assert!(test("14.4", 14.4, SizeUnit::Byte, "14.4 B"));
|
||||
assert!(test("14.45", 14.45, SizeUnit::Byte, "14.45 B"));
|
||||
assert!(test("14.456", 14.456, SizeUnit::Byte, "14.456 B"));
|
||||
assert!(test("14.4567", 14.4567, SizeUnit::Byte, "14.457 B"));
|
||||
|
||||
let h: HumanByte = "1.2345678".parse()?;
|
||||
assert_eq!(&format!("{:.0}", h), "1 B");
|
||||
assert_eq!(&format!("{:.0}", h.as_f64()), "1"); // use as_f64 to get raw bytes without unit
|
||||
assert_eq!(&format!("{:.1}", h), "1.2 B");
|
||||
assert_eq!(&format!("{:.2}", h), "1.23 B");
|
||||
assert_eq!(&format!("{:.3}", h), "1.235 B");
|
||||
assert_eq!(&format!("{:.4}", h), "1.2346 B");
|
||||
assert_eq!(&format!("{:.5}", h), "1.23457 B");
|
||||
assert_eq!(&format!("{:.6}", h), "1.234568 B");
|
||||
assert_eq!(&format!("{:.7}", h), "1.2345678 B");
|
||||
assert_eq!(&format!("{:.8}", h), "1.2345678 B");
|
||||
|
||||
assert!(test("987654321", 987654321.0, SizeUnit::Byte, "987654321 B"));
|
||||
|
||||
assert!(test("1300b", 1300.0, SizeUnit::Byte, "1300 B"));
|
||||
assert!(test("1300B", 1300.0, SizeUnit::Byte, "1300 B"));
|
||||
assert!(test("1300 B", 1300.0, SizeUnit::Byte, "1300 B"));
|
||||
assert!(test("1300 b", 1300.0, SizeUnit::Byte, "1300 B"));
|
||||
|
||||
assert!(test("1.5KB", 1.5, SizeUnit::KByte, "1.5 KB"));
|
||||
assert!(test("1.5kb", 1.5, SizeUnit::KByte, "1.5 KB"));
|
||||
assert!(test("1.654321MB", 1.654_321, SizeUnit::MByte, "1.654 MB"));
|
||||
|
||||
assert!(test("2.0GB", 2.0, SizeUnit::GByte, "2 GB"));
|
||||
|
||||
assert!(test("1.4TB", 1.4, SizeUnit::TByte, "1.4 TB"));
|
||||
assert!(test("1.4tb", 1.4, SizeUnit::TByte, "1.4 TB"));
|
||||
|
||||
assert!(test("2KiB", 2.0, SizeUnit::Kibi, "2 KiB"));
|
||||
assert!(test("2Ki", 2.0, SizeUnit::Kibi, "2 KiB"));
|
||||
assert!(test("2kib", 2.0, SizeUnit::Kibi, "2 KiB"));
|
||||
|
||||
assert!(test("2.3454MiB", 2.3454, SizeUnit::Mebi, "2.345 MiB"));
|
||||
assert!(test("2.3456MiB", 2.3456, SizeUnit::Mebi, "2.346 MiB"));
|
||||
|
||||
assert!(test("4gib", 4.0, SizeUnit::Gibi, "4 GiB"));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_human_byte_auto_unit_decimal() {
|
||||
fn convert(b: u64) -> String {
|
||||
HumanByte::new_decimal(b as f64).to_string()
|
||||
}
|
||||
assert_eq!(convert(987), "987 B");
|
||||
assert_eq!(convert(1022), "1.022 KB");
|
||||
assert_eq!(convert(9_000), "9 KB");
|
||||
assert_eq!(convert(1_000), "1 KB");
|
||||
assert_eq!(convert(1_000_000), "1 MB");
|
||||
assert_eq!(convert(1_000_000_000), "1 GB");
|
||||
assert_eq!(convert(1_000_000_000_000), "1 TB");
|
||||
assert_eq!(convert(1_000_000_000_000_000), "1 PB");
|
||||
|
||||
assert_eq!(convert((1 << 30) + 103 * (1 << 20)), "1.182 GB");
|
||||
assert_eq!(convert((1 << 30) + 128 * (1 << 20)), "1.208 GB");
|
||||
assert_eq!(convert((2 << 50) + 500 * (1 << 40)), "2.802 PB");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_human_byte_auto_unit_binary() {
|
||||
fn convert(b: u64) -> String {
|
||||
HumanByte::from(b).to_string()
|
||||
}
|
||||
assert_eq!(convert(0), "0 B");
|
||||
assert_eq!(convert(987), "987 B");
|
||||
assert_eq!(convert(1022), "1022 B");
|
||||
assert_eq!(convert(9_000), "8.789 KiB");
|
||||
assert_eq!(convert(10_000_000), "9.537 MiB");
|
||||
assert_eq!(convert(10_000_000_000), "9.313 GiB");
|
||||
assert_eq!(convert(10_000_000_000_000), "9.095 TiB");
|
||||
|
||||
assert_eq!(convert(1 << 10), "1 KiB");
|
||||
assert_eq!(convert((1 << 10) * 10), "10 KiB");
|
||||
assert_eq!(convert(1 << 20), "1 MiB");
|
||||
assert_eq!(convert(1 << 30), "1 GiB");
|
||||
assert_eq!(convert(1 << 40), "1 TiB");
|
||||
assert_eq!(convert(1 << 50), "1 PiB");
|
||||
|
||||
assert_eq!(convert((1 << 30) + 103 * (1 << 20)), "1.101 GiB");
|
||||
assert_eq!(convert((1 << 30) + 128 * (1 << 20)), "1.125 GiB");
|
||||
assert_eq!(convert((1 << 40) + 128 * (1 << 30)), "1.125 TiB");
|
||||
assert_eq!(convert((2 << 50) + 512 * (1 << 40)), "2.5 PiB");
|
||||
}
|
@ -1,12 +1,16 @@
|
||||
use anyhow::format_err;
|
||||
use std::str::FromStr;
|
||||
|
||||
use regex::Regex;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::const_regex;
|
||||
|
||||
use proxmox::api::{api, schema::*};
|
||||
use proxmox_schema::*;
|
||||
|
||||
use crate::{
|
||||
Userid, Authid, REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
|
||||
Userid, Authid, RateLimitConfig,
|
||||
REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
|
||||
SINGLE_LINE_COMMENT_SCHEMA, PROXMOX_SAFE_ID_FORMAT, DATASTORE_SCHEMA,
|
||||
BACKUP_GROUP_SCHEMA, BACKUP_TYPE_SCHEMA,
|
||||
};
|
||||
|
||||
const_regex!{
|
||||
@ -25,31 +29,31 @@ pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
|
||||
|
||||
pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||
"Run sync job at specified schedule.")
|
||||
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
|
||||
.format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
|
||||
.type_text("<calendar-event>")
|
||||
.schema();
|
||||
|
||||
pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||
"Run garbage collection job at specified schedule.")
|
||||
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
|
||||
.format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
|
||||
.type_text("<calendar-event>")
|
||||
.schema();
|
||||
|
||||
pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||
"Run prune job at specified schedule.")
|
||||
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
|
||||
.format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
|
||||
.type_text("<calendar-event>")
|
||||
.schema();
|
||||
|
||||
pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||
"Run verify job at specified schedule.")
|
||||
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
|
||||
.format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
|
||||
.type_text("<calendar-event>")
|
||||
.schema();
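All four schedule schemas now validate through proxmox_time instead of proxmox_systemd. A minimal sketch of what the VerifyFn does; the event strings are illustrative systemd-style calendar events, not values taken from this series:

```rust
// Same check the schedule schemas above apply to user input.
fn check_schedule(event: &str) -> Result<(), anyhow::Error> {
    proxmox_time::verify_calendar_event(event)?;
    Ok(())
}

// e.g. check_schedule("daily") or check_schedule("mon..fri 02:30")
```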
|
||||
|
||||
pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
|
||||
"Delete vanished backups. This remove the local copy if the remote backup was deleted.")
|
||||
.default(true)
|
||||
.default(false)
|
||||
.schema();
|
||||
|
||||
#[api(
|
||||
@ -244,6 +248,10 @@ pub struct VerificationJobStatus {
|
||||
optional: true,
|
||||
type: Userid,
|
||||
},
|
||||
"group-filter": {
|
||||
schema: GROUP_FILTER_LIST_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[derive(Serialize,Deserialize,Clone,Updater)]
|
||||
@ -262,6 +270,8 @@ pub struct TapeBackupJobSetup {
|
||||
/// Send job email notification to this user
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub notify_user: Option<Userid>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub group_filter: Option<Vec<GroupFilter>>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
@ -319,6 +329,57 @@ pub struct TapeBackupJobStatus {
|
||||
pub next_media_label: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
/// Filter for matching `BackupGroup`s, for use with `BackupGroup::filter`.
|
||||
pub enum GroupFilter {
|
||||
/// BackupGroup type - either `vm`, `ct`, or `host`.
|
||||
BackupType(String),
|
||||
/// Full identifier of BackupGroup, including type
|
||||
Group(String),
|
||||
/// A regular expression matched against the full identifier of the BackupGroup
|
||||
Regex(Regex),
|
||||
}
|
||||
|
||||
impl std::str::FromStr for GroupFilter {
|
||||
type Err = anyhow::Error;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
match s.split_once(":") {
|
||||
Some(("group", value)) => parse_simple_value(value, &BACKUP_GROUP_SCHEMA).map(|_| GroupFilter::Group(value.to_string())),
|
||||
Some(("type", value)) => parse_simple_value(value, &BACKUP_TYPE_SCHEMA).map(|_| GroupFilter::BackupType(value.to_string())),
|
||||
Some(("regex", value)) => Ok(GroupFilter::Regex(Regex::new(value)?)),
|
||||
Some((ty, _value)) => Err(format_err!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty)),
|
||||
None => Err(format_err!("input doesn't match expected format '<group:GROUP|type:<vm|ct|host>|regex:REGEX>'")),
|
||||
}.map_err(|err| format_err!("'{}' - {}", s, err))
|
||||
}
|
||||
}
|
||||
|
||||
// used for serializing below, caution!
|
||||
impl std::fmt::Display for GroupFilter {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
GroupFilter::BackupType(backup_type) => write!(f, "type:{}", backup_type),
|
||||
GroupFilter::Group(backup_group) => write!(f, "group:{}", backup_group),
|
||||
GroupFilter::Regex(regex) => write!(f, "regex:{}", regex.as_str()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
proxmox::forward_deserialize_to_from_str!(GroupFilter);
|
||||
proxmox::forward_serialize_to_display!(GroupFilter);
|
||||
|
||||
fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> {
|
||||
GroupFilter::from_str(input).map(|_| ())
|
||||
}
|
||||
|
||||
pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new(
|
||||
"Group filter based on group identifier ('group:GROUP'), group type ('type:<vm|ct|host>'), or regex ('regex:RE').")
|
||||
.format(&ApiStringFormat::VerifyFn(verify_group_filter))
|
||||
.type_text("<type:<vm|ct|host>|group:GROUP|regex:RE>")
|
||||
.schema();
|
||||
|
||||
pub const GROUP_FILTER_LIST_SCHEMA: Schema = ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema();
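An illustrative parse of the three accepted prefixes, based on the FromStr impl above ("vm/100" and the regex are placeholder values; errors are wrapped with the offending input):

```rust
use std::str::FromStr;
use pbs_api_types::GroupFilter;

fn parse_filters() -> Result<Vec<GroupFilter>, anyhow::Error> {
    Ok(vec![
        GroupFilter::from_str("type:vm")?,        // GroupFilter::BackupType
        GroupFilter::from_str("group:vm/100")?,   // GroupFilter::Group
        GroupFilter::from_str("regex:^vm/1.*$")?, // GroupFilter::Regex
    ])
}
```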
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
id: {
|
||||
@ -345,10 +406,17 @@ pub struct TapeBackupJobStatus {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
limit: {
|
||||
type: RateLimitConfig,
|
||||
},
|
||||
schedule: {
|
||||
optional: true,
|
||||
schema: SYNC_SCHEDULE_SCHEMA,
|
||||
},
|
||||
"group-filter": {
|
||||
schema: GROUP_FILTER_LIST_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[derive(Serialize,Deserialize,Clone,Updater)]
|
||||
@ -368,6 +436,10 @@ pub struct SyncJobConfig {
|
||||
pub comment: Option<String>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub schedule: Option<String>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub group_filter: Option<Vec<GroupFilter>>,
|
||||
#[serde(flatten)]
|
||||
pub limit: RateLimitConfig,
|
||||
}
|
||||
|
||||
#[api(
|
||||
|
@ -1,6 +1,6 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::api;
|
||||
use proxmox_schema::api;
|
||||
|
||||
use crate::CERT_FINGERPRINT_SHA256_SCHEMA;
|
||||
|
||||
|
@ -3,10 +3,11 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use anyhow::bail;
|
||||
|
||||
use proxmox::api::api;
|
||||
use proxmox::api::schema::{ApiStringFormat, ArraySchema, Schema, StringSchema};
|
||||
use proxmox::const_regex;
|
||||
use proxmox_schema::{
|
||||
api, const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, ReturnType,
|
||||
};
|
||||
use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE};
|
||||
use proxmox_time::parse_daily_duration;
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[macro_export]
|
||||
@ -38,6 +39,9 @@ pub use acl::*;
|
||||
mod datastore;
|
||||
pub use datastore::*;
|
||||
|
||||
mod human_byte;
|
||||
pub use human_byte::HumanByte;
|
||||
|
||||
mod jobs;
|
||||
pub use jobs::*;
|
||||
|
||||
@ -60,20 +64,25 @@ pub use userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN
|
||||
mod user;
|
||||
pub use user::*;
|
||||
|
||||
pub mod upid;
|
||||
pub use upid::*;
|
||||
pub use proxmox_schema::upid::*;
|
||||
|
||||
mod crypto;
|
||||
pub use crypto::{CryptMode, Fingerprint};
|
||||
pub use crypto::{CryptMode, Fingerprint, bytes_as_fingerprint};
|
||||
|
||||
pub mod file_restore;
|
||||
|
||||
mod openid;
|
||||
pub use openid::*;
|
||||
|
||||
mod remote;
|
||||
pub use remote::*;
|
||||
|
||||
mod tape;
|
||||
pub use tape::*;
|
||||
|
||||
mod traffic_control;
|
||||
pub use traffic_control::*;
|
||||
|
||||
mod zfs;
|
||||
pub use zfs::*;
|
||||
|
||||
@ -153,6 +162,9 @@ pub const HOSTNAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOSTNAME_
|
||||
pub const DNS_ALIAS_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&DNS_ALIAS_REGEX);
|
||||
|
||||
pub const DAILY_DURATION_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::VerifyFn(|s| parse_daily_duration(s).map(drop));
|
||||
|
||||
pub const SEARCH_DOMAIN_SCHEMA: Schema =
|
||||
StringSchema::new("Search domain for host-name lookup.").schema();
|
||||
|
||||
@ -359,33 +371,6 @@ pub struct APTUpdateInfo {
|
||||
pub extra_info: Option<String>,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Copy, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "UPPERCASE")]
|
||||
pub enum RRDMode {
|
||||
/// Maximum
|
||||
Max,
|
||||
/// Average
|
||||
Average,
|
||||
}
|
||||
|
||||
|
||||
#[api()]
|
||||
#[repr(u64)]
|
||||
#[derive(Copy, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum RRDTimeFrameResolution {
|
||||
/// 1 min => last 70 minutes
|
||||
Hour = 60,
|
||||
/// 30 min => last 35 hours
|
||||
Day = 60*30,
|
||||
/// 3 hours => about 8 days
|
||||
Week = 60*180,
|
||||
/// 12 hours => last 35 days
|
||||
Month = 60*720,
|
||||
/// 1 week => last 490 days
|
||||
Year = 60*10080,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||
@ -397,3 +382,87 @@ pub enum NodePowerCommand {
|
||||
/// Shutdown the server
|
||||
Shutdown,
|
||||
}
|
||||
|
||||
|
||||
#[api()]
|
||||
#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum TaskStateType {
|
||||
/// Ok
|
||||
OK,
|
||||
/// Warning
|
||||
Warning,
|
||||
/// Error
|
||||
Error,
|
||||
/// Unknown
|
||||
Unknown,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
upid: { schema: UPID::API_SCHEMA },
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
/// Task properties.
|
||||
pub struct TaskListItem {
|
||||
pub upid: String,
|
||||
/// The node name where the task is running.
|
||||
pub node: String,
|
||||
/// The Unix PID
|
||||
pub pid: i64,
|
||||
/// The Unix process start time from `/proc/pid/stat`
|
||||
pub pstart: u64,
|
||||
/// The task start time (Epoch)
|
||||
pub starttime: i64,
|
||||
/// Worker type (arbitrary ASCII string)
|
||||
pub worker_type: String,
|
||||
/// Worker ID (arbitrary ASCII string)
|
||||
pub worker_id: Option<String>,
|
||||
/// The authenticated entity who started the task
|
||||
pub user: String,
|
||||
/// The task end time (Epoch)
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub endtime: Option<i64>,
|
||||
/// Task end status
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub status: Option<String>,
|
||||
}
|
||||
|
||||
pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType {
|
||||
optional: false,
|
||||
schema: &ArraySchema::new(
|
||||
"A list of tasks.",
|
||||
&TaskListItem::API_SCHEMA,
|
||||
).schema(),
|
||||
};
|
||||
|
||||
#[api()]
|
||||
#[derive(Copy, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "UPPERCASE")]
|
||||
/// RRD consolidation mode
|
||||
pub enum RRDMode {
|
||||
/// Maximum
|
||||
Max,
|
||||
/// Average
|
||||
Average,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Copy, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
/// RRD time frame
|
||||
pub enum RRDTimeFrame {
|
||||
/// Hour
|
||||
Hour,
|
||||
/// Day
|
||||
Day,
|
||||
/// Week
|
||||
Week,
|
||||
/// Month
|
||||
Month,
|
||||
/// Year
|
||||
Year,
|
||||
/// Decade (10 years)
|
||||
Decade,
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::{api, schema::*};
|
||||
use proxmox_schema::*;
|
||||
|
||||
use crate::{
|
||||
PROXMOX_SAFE_ID_REGEX,
|
||||
|
121
pbs-api-types/src/openid.rs
Normal file
@ -0,0 +1,121 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::{
|
||||
api, ApiStringFormat, ArraySchema, Schema, StringSchema, Updater,
|
||||
};
|
||||
|
||||
use super::{
|
||||
PROXMOX_SAFE_ID_REGEX, PROXMOX_SAFE_ID_FORMAT, REALM_ID_SCHEMA,
|
||||
SINGLE_LINE_COMMENT_SCHEMA,
|
||||
};
|
||||
|
||||
pub const OPENID_SCOPE_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
|
||||
|
||||
pub const OPENID_SCOPE_SCHEMA: Schema = StringSchema::new("OpenID Scope Name.")
|
||||
.format(&OPENID_SCOPE_FORMAT)
|
||||
.schema();
|
||||
|
||||
pub const OPENID_SCOPE_ARRAY_SCHEMA: Schema = ArraySchema::new(
|
||||
"Array of OpenId Scopes.", &OPENID_SCOPE_SCHEMA).schema();
|
||||
|
||||
pub const OPENID_SCOPE_LIST_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::PropertyString(&OPENID_SCOPE_ARRAY_SCHEMA);
|
||||
|
||||
pub const OPENID_DEFAILT_SCOPE_LIST: &'static str = "email profile";
|
||||
pub const OPENID_SCOPE_LIST_SCHEMA: Schema = StringSchema::new("OpenID Scope List")
|
||||
.format(&OPENID_SCOPE_LIST_FORMAT)
|
||||
.default(OPENID_DEFAILT_SCOPE_LIST)
|
||||
.schema();
|
||||
|
||||
pub const OPENID_ACR_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
|
||||
|
||||
pub const OPENID_ACR_SCHEMA: Schema = StringSchema::new("OpenID Authentication Context Class Reference.")
|
||||
.format(&OPENID_ACR_FORMAT)
|
||||
.schema();
|
||||
|
||||
pub const OPENID_ACR_ARRAY_SCHEMA: Schema = ArraySchema::new(
|
||||
"Array of OpenId ACRs.", &OPENID_ACR_SCHEMA).schema();
|
||||
|
||||
pub const OPENID_ACR_LIST_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::PropertyString(&OPENID_ACR_ARRAY_SCHEMA);
|
||||
|
||||
pub const OPENID_ACR_LIST_SCHEMA: Schema = StringSchema::new("OpenID ACR List")
|
||||
.format(&OPENID_ACR_LIST_FORMAT)
|
||||
.schema();
|
||||
|
||||
pub const OPENID_USERNAME_CLAIM_SCHEMA: Schema = StringSchema::new(
|
||||
"Use the value of this attribute/claim as unique user name. It \
|
||||
is up to the identity provider to guarantee the uniqueness. The \
|
||||
OpenID specification only guarantees that Subject ('sub') is \
|
||||
unique. Also make sure that the user is not allowed to change that \
|
||||
attribute by himself!")
|
||||
.max_length(64)
|
||||
.min_length(1)
|
||||
.format(&PROXMOX_SAFE_ID_FORMAT)
    .schema();
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
realm: {
|
||||
schema: REALM_ID_SCHEMA,
|
||||
},
|
||||
"client-key": {
|
||||
optional: true,
|
||||
},
|
||||
"scopes": {
|
||||
schema: OPENID_SCOPE_LIST_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"acr-values": {
|
||||
schema: OPENID_ACR_LIST_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
prompt: {
|
||||
description: "OpenID Prompt",
|
||||
type: String,
|
||||
format: &PROXMOX_SAFE_ID_FORMAT,
|
||||
optional: true,
|
||||
},
|
||||
comment: {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
autocreate: {
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"username-claim": {
|
||||
schema: OPENID_USERNAME_CLAIM_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Updater)]
|
||||
#[serde(rename_all="kebab-case")]
|
||||
/// OpenID configuration properties.
|
||||
pub struct OpenIdRealmConfig {
|
||||
#[updater(skip)]
|
||||
pub realm: String,
|
||||
/// OpenID Issuer Url
|
||||
pub issuer_url: String,
|
||||
/// OpenID Client ID
|
||||
pub client_id: String,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub scopes: Option<String>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub acr_values: Option<String>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub prompt: Option<String>,
|
||||
/// OpenID Client Key
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub client_key: Option<String>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub comment: Option<String>,
|
||||
/// Automatically create users if they do not exist.
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub autocreate: Option<bool>,
|
||||
#[updater(skip)]
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub username_claim: Option<String>,
|
||||
}
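A hypothetical realm entry using the struct above; the realm name, issuer URL and client id are placeholders, and the remaining options fall back to their schema defaults:

```rust
use pbs_api_types::OpenIdRealmConfig;

fn example_realm() -> OpenIdRealmConfig {
    OpenIdRealmConfig {
        realm: "company-sso".to_string(),
        issuer_url: "https://sso.example.com/realms/main".to_string(),
        client_id: "proxmox-backup".to_string(),
        client_key: None,
        scopes: None,       // schema default: "email profile"
        acr_values: None,
        prompt: None,
        comment: Some("example OpenID realm".to_string()),
        autocreate: Some(false),
        username_claim: None,
    }
}
```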
|
@ -1,7 +1,7 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::*;
|
||||
use proxmox::api::{api, schema::*};
|
||||
use proxmox_schema::*;
|
||||
|
||||
pub const REMOTE_PASSWORD_SCHEMA: Schema = StringSchema::new("Password or auth token for remote host.")
|
||||
.format(&PASSWORD_FORMAT)
|
||||
|
@ -2,22 +2,11 @@
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::{
|
||||
api,
|
||||
schema::{
|
||||
Schema,
|
||||
ApiStringFormat,
|
||||
ArraySchema,
|
||||
IntegerSchema,
|
||||
StringSchema,
|
||||
Updater,
|
||||
},
|
||||
use proxmox_schema::{
|
||||
api, ApiStringFormat, ArraySchema, IntegerSchema, Schema, StringSchema, Updater,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
PROXMOX_SAFE_ID_FORMAT,
|
||||
OptionalDeviceIdentification,
|
||||
};
|
||||
use crate::{OptionalDeviceIdentification, PROXMOX_SAFE_ID_FORMAT};
|
||||
|
||||
pub const CHANGER_NAME_SCHEMA: Schema = StringSchema::new("Tape Changer Identifier.")
|
||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||
@ -25,9 +14,8 @@ pub const CHANGER_NAME_SCHEMA: Schema = StringSchema::new("Tape Changer Identifi
|
||||
.max_length(32)
|
||||
.schema();
|
||||
|
||||
pub const SCSI_CHANGER_PATH_SCHEMA: Schema = StringSchema::new(
|
||||
"Path to Linux generic SCSI device (e.g. '/dev/sg4')")
|
||||
.schema();
|
||||
pub const SCSI_CHANGER_PATH_SCHEMA: Schema =
|
||||
StringSchema::new("Path to Linux generic SCSI device (e.g. '/dev/sg4')").schema();
|
||||
|
||||
pub const MEDIA_LABEL_SCHEMA: Schema = StringSchema::new("Media Label/Barcode.")
|
||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||
@ -36,16 +24,18 @@ pub const MEDIA_LABEL_SCHEMA: Schema = StringSchema::new("Media Label/Barcode.")
|
||||
.schema();
|
||||
|
||||
pub const SLOT_ARRAY_SCHEMA: Schema = ArraySchema::new(
|
||||
"Slot list.", &IntegerSchema::new("Slot number")
|
||||
.minimum(1)
|
||||
.schema())
|
||||
"Slot list.",
|
||||
&IntegerSchema::new("Slot number").minimum(1).schema(),
|
||||
)
|
||||
.schema();
|
||||
|
||||
pub const EXPORT_SLOT_LIST_SCHEMA: Schema = StringSchema::new("\
|
||||
pub const EXPORT_SLOT_LIST_SCHEMA: Schema = StringSchema::new(
|
||||
"\
|
||||
A list of slot numbers, comma separated. Those slots are reserved for
|
||||
Import/Export, i.e. any media in those slots are considered to be
|
||||
'offline'.
|
||||
")
|
||||
",
|
||||
)
|
||||
.format(&ApiStringFormat::PropertyString(&SLOT_ARRAY_SCHEMA))
|
||||
.schema();
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
use ::serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::api;
|
||||
use proxmox_schema::api;
|
||||
|
||||
#[api()]
|
||||
#[derive(Serialize,Deserialize)]
|
||||
|
@ -4,10 +4,7 @@ use std::convert::TryFrom;
|
||||
use anyhow::{bail, Error};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::{
|
||||
api,
|
||||
schema::{Schema, IntegerSchema, StringSchema, Updater},
|
||||
};
|
||||
use proxmox_schema::{api, Schema, IntegerSchema, StringSchema, Updater};
|
||||
|
||||
use crate::{
|
||||
PROXMOX_SAFE_ID_FORMAT,
|
||||
|
@ -1,9 +1,7 @@
|
||||
use ::serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::{
|
||||
api::{api, schema::*},
|
||||
tools::Uuid,
|
||||
};
|
||||
use proxmox_schema::*;
|
||||
use proxmox_uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
UUID_FORMAT,
|
||||
|
@ -1,18 +1,8 @@
|
||||
use anyhow::{bail, Error};
|
||||
|
||||
use proxmox::api::{
|
||||
schema::{
|
||||
Schema,
|
||||
StringSchema,
|
||||
ApiStringFormat,
|
||||
parse_simple_value,
|
||||
},
|
||||
};
|
||||
use proxmox_schema::{parse_simple_value, ApiStringFormat, Schema, StringSchema};
|
||||
|
||||
use crate::{
|
||||
PROXMOX_SAFE_ID_FORMAT,
|
||||
CHANGER_NAME_SCHEMA,
|
||||
};
|
||||
use crate::{CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT};
|
||||
|
||||
pub const VAULT_NAME_SCHEMA: Schema = StringSchema::new("Vault name.")
|
||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||
@ -35,9 +25,10 @@ pub enum MediaLocation {
|
||||
proxmox::forward_deserialize_to_from_str!(MediaLocation);
|
||||
proxmox::forward_serialize_to_display!(MediaLocation);
|
||||
|
||||
impl proxmox::api::schema::ApiType for MediaLocation {
|
||||
impl proxmox_schema::ApiType for MediaLocation {
|
||||
const API_SCHEMA: Schema = StringSchema::new(
|
||||
"Media location (e.g. 'offline', 'online-<changer_name>', 'vault-<vault_name>')")
|
||||
"Media location (e.g. 'offline', 'online-<changer_name>', 'vault-<vault_name>')",
|
||||
)
|
||||
.format(&ApiStringFormat::VerifyFn(|text| {
|
||||
let location: MediaLocation = text.parse()?;
|
||||
match location {
|
||||
@ -54,9 +45,7 @@ impl proxmox::api::schema::ApiType for MediaLocation {
|
||||
.schema();
|
||||
}
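Illustrative parses of the three string forms the schema above accepts ("changer0" and "vault1" are placeholder names):

```rust
use pbs_api_types::MediaLocation;

fn parse_locations() -> Result<(), anyhow::Error> {
    let _offline: MediaLocation = "offline".parse()?;
    let _online: MediaLocation = "online-changer0".parse()?;
    let _vault: MediaLocation = "vault-vault1".parse()?;
    Ok(())
}
```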
|
||||
|
||||
|
||||
impl std::fmt::Display for MediaLocation {
|
||||
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
MediaLocation::Offline => {
|
||||
|
@ -9,12 +9,9 @@ use std::str::FromStr;
|
||||
use anyhow::Error;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::{
|
||||
api,
|
||||
schema::{Schema, StringSchema, ApiStringFormat, Updater},
|
||||
};
|
||||
use proxmox_schema::{api, Schema, StringSchema, ApiStringFormat, Updater};
|
||||
|
||||
use proxmox_systemd::time::{parse_calendar_event, parse_time_span, CalendarEvent, TimeSpan};
|
||||
use proxmox_time::{parse_calendar_event, parse_time_span, CalendarEvent, TimeSpan};
|
||||
|
||||
use crate::{
|
||||
PROXMOX_SAFE_ID_FORMAT,
|
||||
|
@ -1,6 +1,6 @@
|
||||
use ::serde::{Deserialize, Serialize};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::api;
|
||||
use proxmox_schema::api;
|
||||
|
||||
#[api()]
|
||||
/// Media status
|
||||
|
@ -22,13 +22,10 @@ pub use media_location::*;
|
||||
mod media;
|
||||
pub use media::*;
|
||||
|
||||
use ::serde::{Deserialize, Serialize};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::api;
|
||||
use proxmox::api::schema::{Schema, StringSchema, ApiStringFormat};
|
||||
use proxmox::tools::Uuid;
|
||||
|
||||
use proxmox::const_regex;
|
||||
use proxmox_schema::{api, const_regex, Schema, StringSchema, ApiStringFormat};
|
||||
use proxmox_uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
FINGERPRINT_SHA256_FORMAT, BACKUP_ID_SCHEMA, BACKUP_TYPE_SCHEMA,
|
||||
|
122
pbs-api-types/src/traffic_control.rs
Normal file
@ -0,0 +1,122 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::{api, Schema, IntegerSchema, StringSchema, Updater};
|
||||
|
||||
use crate::{
|
||||
HumanByte, CIDR_SCHEMA, DAILY_DURATION_FORMAT,
|
||||
PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
|
||||
};
|
||||
|
||||
pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema = StringSchema::new(
|
||||
"Timeframe to specify when the rule is actice.")
|
||||
.format(&DAILY_DURATION_FORMAT)
|
||||
.schema();
|
||||
|
||||
pub const TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.")
|
||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||
.min_length(3)
|
||||
.max_length(32)
|
||||
.schema();
|
||||
|
||||
pub const TRAFFIC_CONTROL_RATE_SCHEMA: Schema = IntegerSchema::new(
|
||||
"Rate limit (for Token bucket filter) in bytes/second.")
|
||||
.minimum(100_000)
|
||||
.schema();
|
||||
|
||||
pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema = IntegerSchema::new(
|
||||
"Size of the token bucket (for Token bucket filter) in bytes.")
|
||||
.minimum(1000)
|
||||
.schema();
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
"rate-in": {
|
||||
type: HumanByte,
|
||||
optional: true,
|
||||
},
|
||||
"burst-in": {
|
||||
type: HumanByte,
|
||||
optional: true,
|
||||
},
|
||||
"rate-out": {
|
||||
type: HumanByte,
|
||||
optional: true,
|
||||
},
|
||||
"burst-out": {
|
||||
type: HumanByte,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize,Deserialize,Default,Clone,Updater)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Rate Limit Configuration
|
||||
pub struct RateLimitConfig {
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub rate_in: Option<HumanByte>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub burst_in: Option<HumanByte>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub rate_out: Option<HumanByte>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub burst_out: Option<HumanByte>,
|
||||
}
|
||||
|
||||
impl RateLimitConfig {
|
||||
pub fn with_same_inout(rate: Option<HumanByte>, burst: Option<HumanByte>) -> Self {
|
||||
Self {
|
||||
rate_in: rate,
|
||||
burst_in: burst,
|
||||
rate_out: rate,
|
||||
burst_out: burst,
|
||||
}
|
||||
}
|
||||
}
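A sketch of a symmetric in/out limit built with the helper above (the concrete sizes are just examples):

```rust
use pbs_api_types::{HumanByte, RateLimitConfig};

fn example_limit() -> RateLimitConfig {
    RateLimitConfig::with_same_inout(
        Some(HumanByte::from(10 * 1024 * 1024u64)), // ~10 MiB/s rate
        Some(HumanByte::from(20 * 1024 * 1024u64)), // 20 MiB token bucket
    )
}
```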
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
name: {
|
||||
schema: TRAFFIC_CONTROL_ID_SCHEMA,
|
||||
},
|
||||
comment: {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
limit: {
|
||||
type: RateLimitConfig,
|
||||
},
|
||||
network: {
|
||||
type: Array,
|
||||
items: {
|
||||
schema: CIDR_SCHEMA,
|
||||
},
|
||||
},
|
||||
timeframe: {
|
||||
type: Array,
|
||||
items: {
|
||||
schema: TRAFFIC_CONTROL_TIMEFRAME_SCHEMA,
|
||||
},
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize,Deserialize, Updater)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Traffic control rule
|
||||
pub struct TrafficControlRule {
|
||||
#[updater(skip)]
|
||||
pub name: String,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub comment: Option<String>,
|
||||
/// Rule applies to source IPs within these networks
|
||||
pub network: Vec<String>,
|
||||
#[serde(flatten)]
|
||||
pub limit: RateLimitConfig,
|
||||
// fixme: expose this?
|
||||
// /// Bandwidth is shared across all connections
|
||||
// #[serde(skip_serializing_if="Option::is_none")]
|
||||
// pub shared: Option<bool>,
|
||||
/// Enable the rule at specific times
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub timeframe: Option<Vec<String>>,
|
||||
}
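A hypothetical rule combining the pieces above; the network and timeframe values are placeholders (the timeframe strings use the daily-duration format referenced by TRAFFIC_CONTROL_TIMEFRAME_SCHEMA):

```rust
use pbs_api_types::{HumanByte, RateLimitConfig, TrafficControlRule};

fn office_rule() -> TrafficControlRule {
    TrafficControlRule {
        name: "office".to_string(),
        comment: Some("throttle the office subnet during work hours".to_string()),
        network: vec!["192.168.2.0/24".to_string()],
        limit: RateLimitConfig::with_same_inout(
            Some(HumanByte::from(1024 * 1024u64)), // ~1 MiB/s
            None,
        ),
        timeframe: Some(vec!["mon..fri 8:00-18:00".to_string()]),
    }
}
```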
|
@ -1,203 +0,0 @@
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::api;
|
||||
use proxmox::api::schema::{ApiStringFormat, ApiType, Schema, StringSchema, ArraySchema, ReturnType};
|
||||
use proxmox::const_regex;
|
||||
use proxmox::sys::linux::procfs;
|
||||
|
||||
use crate::Authid;
|
||||
|
||||
/// Unique Process/Task Identifier
|
||||
///
|
||||
/// We use this to uniquely identify worker tasks. UPIDs have a short
|
||||
/// string representation, which gives additional information about the
|
||||
/// type of the task. For example:
|
||||
/// ```text
|
||||
/// UPID:{node}:{pid}:{pstart}:{task_id}:{starttime}:{worker_type}:{worker_id}:{userid}:
|
||||
/// UPID:elsa:00004F37:0039E469:00000000:5CA78B83:garbage_collection::root@pam:
|
||||
/// ```
|
||||
/// Please note that we use tokio, so a single thread can run multiple
|
||||
/// tasks.
|
||||
// #[api] - manually implemented API type
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct UPID {
|
||||
/// The Unix PID
|
||||
pub pid: libc::pid_t,
|
||||
/// The Unix process start time from `/proc/pid/stat`
|
||||
pub pstart: u64,
|
||||
/// The task start time (Epoch)
|
||||
pub starttime: i64,
|
||||
/// The task ID (inside the process/thread)
|
||||
pub task_id: usize,
|
||||
/// Worker type (arbitrary ASCII string)
|
||||
pub worker_type: String,
|
||||
/// Worker ID (arbitrary ASCII string)
|
||||
pub worker_id: Option<String>,
|
||||
/// The authenticated entity who started the task
|
||||
pub auth_id: Authid,
|
||||
/// The node name.
|
||||
pub node: String,
|
||||
}
|
||||
|
||||
proxmox::forward_serialize_to_display!(UPID);
|
||||
proxmox::forward_deserialize_to_from_str!(UPID);
|
||||
|
||||
const_regex! {
|
||||
pub PROXMOX_UPID_REGEX = concat!(
|
||||
r"^UPID:(?P<node>[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?):(?P<pid>[0-9A-Fa-f]{8}):",
|
||||
r"(?P<pstart>[0-9A-Fa-f]{8,9}):(?P<task_id>[0-9A-Fa-f]{8,16}):(?P<starttime>[0-9A-Fa-f]{8}):",
|
||||
r"(?P<wtype>[^:\s]+):(?P<wid>[^:\s]*):(?P<authid>[^:\s]+):$"
|
||||
);
|
||||
}
|
||||
|
||||
pub const PROXMOX_UPID_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&PROXMOX_UPID_REGEX);
|
||||
|
||||
pub const UPID_SCHEMA: Schema = StringSchema::new("Unique Process/Task Identifier")
|
||||
.min_length("UPID:N:12345678:12345678:12345678:::".len())
|
||||
.max_length(128) // arbitrary
|
||||
.format(&PROXMOX_UPID_FORMAT)
|
||||
.schema();
|
||||
|
||||
impl ApiType for UPID {
|
||||
const API_SCHEMA: Schema = UPID_SCHEMA;
|
||||
}
|
||||
|
||||
impl UPID {
|
||||
/// Create a new UPID
|
||||
pub fn new(
|
||||
worker_type: &str,
|
||||
worker_id: Option<String>,
|
||||
auth_id: Authid,
|
||||
) -> Result<Self, Error> {
|
||||
|
||||
let pid = unsafe { libc::getpid() };
|
||||
|
||||
let bad: &[_] = &['/', ':', ' '];
|
||||
|
||||
if worker_type.contains(bad) {
|
||||
bail!("illegal characters in worker type '{}'", worker_type);
|
||||
}
|
||||
|
||||
static WORKER_TASK_NEXT_ID: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
let task_id = WORKER_TASK_NEXT_ID.fetch_add(1, Ordering::SeqCst);
|
||||
|
||||
Ok(UPID {
|
||||
pid,
|
||||
pstart: procfs::PidStat::read_from_pid(nix::unistd::Pid::from_raw(pid))?.starttime,
|
||||
starttime: proxmox::tools::time::epoch_i64(),
|
||||
task_id,
|
||||
worker_type: worker_type.to_owned(),
|
||||
worker_id,
|
||||
auth_id,
|
||||
node: proxmox::tools::nodename().to_owned(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl std::str::FromStr for UPID {
|
||||
type Err = Error;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
if let Some(cap) = PROXMOX_UPID_REGEX.captures(s) {
|
||||
|
||||
let worker_id = if cap["wid"].is_empty() {
|
||||
None
|
||||
} else {
|
||||
let wid = proxmox_systemd::unescape_unit(&cap["wid"])?;
|
||||
Some(wid)
|
||||
};
|
||||
|
||||
Ok(UPID {
|
||||
pid: i32::from_str_radix(&cap["pid"], 16).unwrap(),
|
||||
pstart: u64::from_str_radix(&cap["pstart"], 16).unwrap(),
|
||||
starttime: i64::from_str_radix(&cap["starttime"], 16).unwrap(),
|
||||
task_id: usize::from_str_radix(&cap["task_id"], 16).unwrap(),
|
||||
worker_type: cap["wtype"].to_string(),
|
||||
worker_id,
|
||||
auth_id: cap["authid"].parse()?,
|
||||
node: cap["node"].to_string(),
|
||||
})
|
||||
} else {
|
||||
bail!("unable to parse UPID '{}'", s);
|
||||
}
|
||||
|
||||
}
|
||||
}
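A round-trip sketch using the example UPID from the doc comment above, assuming the type moved into proxmox_schema keeps the same fields and string format:

```rust
use std::str::FromStr;
use pbs_api_types::UPID;

fn main() -> Result<(), anyhow::Error> {
    let s = "UPID:elsa:00004F37:0039E469:00000000:5CA78B83:garbage_collection::root@pam:";
    let upid = UPID::from_str(s)?;
    assert_eq!(upid.node, "elsa");
    assert_eq!(upid.worker_type, "garbage_collection");
    assert_eq!(upid.to_string(), s); // Display restores the original form
    Ok(())
}
```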
|
||||
|
||||
impl std::fmt::Display for UPID {
|
||||
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
|
||||
let wid = if let Some(ref id) = self.worker_id {
|
||||
proxmox_systemd::escape_unit(id, false)
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
|
||||
// Note: pstart can be > 32bit if uptime > 497 days, so this can result in
|
||||
// more than 8 characters for pstart
|
||||
|
||||
write!(f, "UPID:{}:{:08X}:{:08X}:{:08X}:{:08X}:{}:{}:{}:",
|
||||
self.node, self.pid, self.pstart, self.task_id, self.starttime, self.worker_type, wid, self.auth_id)
|
||||
}
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum TaskStateType {
|
||||
/// Ok
|
||||
OK,
|
||||
/// Warning
|
||||
Warning,
|
||||
/// Error
|
||||
Error,
|
||||
/// Unknown
|
||||
Unknown,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
upid: { schema: UPID::API_SCHEMA },
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
/// Task properties.
|
||||
pub struct TaskListItem {
|
||||
pub upid: String,
|
||||
/// The node name where the task is running.
|
||||
pub node: String,
|
||||
/// The Unix PID
|
||||
pub pid: i64,
|
||||
/// The Unix process start time from `/proc/pid/stat`
|
||||
pub pstart: u64,
|
||||
/// The task start time (Epoch)
|
||||
pub starttime: i64,
|
||||
/// Worker type (arbitrary ASCII string)
|
||||
pub worker_type: String,
|
||||
/// Worker ID (arbitrary ASCII string)
|
||||
pub worker_id: Option<String>,
|
||||
/// The authenticated entity who started the task
|
||||
pub user: Authid,
|
||||
/// The task end time (Epoch)
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub endtime: Option<i64>,
|
||||
/// Task end status
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub status: Option<String>,
|
||||
}
|
||||
|
||||
pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType {
|
||||
optional: false,
|
||||
schema: &ArraySchema::new(
|
||||
"A list of tasks.",
|
||||
&TaskListItem::API_SCHEMA,
|
||||
).schema(),
|
||||
};
|
||||
|
@ -1,8 +1,7 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::api;
|
||||
use proxmox::api::schema::{
|
||||
BooleanSchema, IntegerSchema, Schema, StringSchema, Updater,
|
||||
use proxmox_schema::{
|
||||
api, BooleanSchema, IntegerSchema, Schema, StringSchema, Updater,
|
||||
};
|
||||
|
||||
use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA};
|
||||
@ -133,7 +132,7 @@ impl ApiToken {
|
||||
return false;
|
||||
}
|
||||
if let Some(expire) = self.expire {
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
let now = proxmox_time::epoch_i64();
|
||||
if expire > 0 && expire <= now {
|
||||
return false;
|
||||
}
|
||||
@ -198,7 +197,7 @@ impl User {
|
||||
return false;
|
||||
}
|
||||
if let Some(expire) = self.expire {
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
let now = proxmox_time::epoch_i64();
|
||||
if expire > 0 && expire <= now {
|
||||
return false;
|
||||
}
|
||||
|
@ -29,9 +29,9 @@ use anyhow::{bail, format_err, Error};
|
||||
use lazy_static::lazy_static;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::api;
|
||||
use proxmox::api::schema::{ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType};
|
||||
use proxmox::const_regex;
|
||||
use proxmox_schema::{
|
||||
api, const_regex, ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType,
|
||||
};
|
||||
|
||||
// we only allow a limited set of characters
|
||||
// colon is not allowed, because we store usernames in
|
||||
|
@ -1,8 +1,6 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::{api, schema::*};
|
||||
|
||||
use proxmox::const_regex;
|
||||
use proxmox_schema::*;
|
||||
|
||||
const_regex! {
|
||||
pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "pbs-buildcfg"
|
||||
version = "2.0.10"
|
||||
version = "2.1.2"
|
||||
authors = ["Proxmox Support Team <support@proxmox.com>"]
|
||||
edition = "2018"
|
||||
description = "macros used for pbs related paths such as configdir and rundir"
|
||||
|
@ -22,6 +22,9 @@ pub const BACKUP_GROUP_NAME: &str = "backup";
|
||||
#[macro_export]
|
||||
macro_rules! PROXMOX_BACKUP_RUN_DIR_M { () => ("/run/proxmox-backup") }
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! PROXMOX_BACKUP_STATE_DIR_M { () => ("/var/lib/proxmox-backup") }
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! PROXMOX_BACKUP_LOG_DIR_M { () => ("/var/log/proxmox-backup") }
|
||||
|
||||
@ -36,6 +39,9 @@ macro_rules! PROXMOX_BACKUP_FILE_RESTORE_BIN_DIR_M {
|
||||
/// namespaced directory for in-memory (tmpfs) run state
|
||||
pub const PROXMOX_BACKUP_RUN_DIR: &str = PROXMOX_BACKUP_RUN_DIR_M!();
|
||||
|
||||
/// namespaced directory for persistent state
|
||||
pub const PROXMOX_BACKUP_STATE_DIR: &str = PROXMOX_BACKUP_STATE_DIR_M!();
|
||||
|
||||
/// namespaced directory for persistent logging
|
||||
pub const PROXMOX_BACKUP_LOG_DIR: &str = PROXMOX_BACKUP_LOG_DIR_M!();
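The `*_M!` macros expand to string literals (so they can be used in `concat!()`), while the plain consts are the runtime view of the same paths; a trivial sketch of the latter:

```rust
fn log_dir() -> &'static str {
    pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR // "/var/log/proxmox-backup"
}
```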
|
||||
|
||||
|
@ -28,13 +28,18 @@ tower-service = "0.3.0"
|
||||
xdg = "2.2"
|
||||
|
||||
pathpatterns = "0.1.2"
|
||||
proxmox = { version = "0.13.3", default-features = false, features = [ "cli" ] }
|
||||
proxmox = "0.15.3"
|
||||
proxmox-async = "0.2"
|
||||
proxmox-fuse = "0.1.1"
|
||||
proxmox-http = { version = "0.4.0", features = [ "client", "http-helpers", "websocket" ] }
|
||||
proxmox-http = { version = "0.5.4", features = [ "client", "http-helpers", "websocket" ] }
|
||||
proxmox-io = { version = "1", features = [ "tokio" ] }
|
||||
proxmox-lang = "1"
|
||||
proxmox-router = { version = "1.1", features = [ "cli" ] }
|
||||
proxmox-schema = "1"
|
||||
proxmox-time = "1"
|
||||
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
|
||||
|
||||
pbs-api-types = { path = "../pbs-api-types" }
|
||||
pbs-buildcfg = { path = "../pbs-buildcfg" }
|
||||
pbs-datastore = { path = "../pbs-datastore" }
|
||||
pbs-runtime = { path = "../pbs-runtime" }
|
||||
pbs-tools = { path = "../pbs-tools" }
|
||||
|
@ -1,8 +1,8 @@
|
||||
use anyhow::{bail, Error};
|
||||
|
||||
use proxmox::api::schema::*;
|
||||
use proxmox_schema::*;
|
||||
|
||||
proxmox::const_regex! {
|
||||
const_regex! {
|
||||
BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(pxar|img|conf|log)):(.+)$";
|
||||
}
|
||||
|
||||
|
@ -14,8 +14,8 @@ use tokio_stream::wrappers::ReceiverStream;
|
||||
|
||||
use proxmox::tools::digest_to_hex;
|
||||
|
||||
use pbs_api_types::HumanByte;
|
||||
use pbs_tools::crypt_config::CryptConfig;
|
||||
use pbs_tools::format::HumanByte;
|
||||
use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1};
|
||||
use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
|
||||
use pbs_datastore::dynamic_index::DynamicIndexReader;
|
||||
@ -338,7 +338,7 @@ impl BackupWriter {
|
||||
let size_dirty = upload_stats.size - upload_stats.size_reused;
|
||||
let size: HumanByte = upload_stats.size.into();
|
||||
let archive = if self.verbose {
|
||||
archive_name.to_string()
|
||||
archive_name
|
||||
} else {
|
||||
pbs_tools::format::strip_server_file_extension(archive_name)
|
||||
};
|
||||
|
@ -3,6 +3,7 @@ use std::ffi::{CStr, CString, OsStr, OsString};
|
||||
use std::future::Future;
|
||||
use std::io::Write;
|
||||
use std::mem;
|
||||
use std::ops::ControlFlow;
|
||||
use std::os::unix::ffi::{OsStrExt, OsStringExt};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::pin::Pin;
|
||||
@ -13,14 +14,13 @@ use nix::fcntl::OFlag;
|
||||
use nix::sys::stat::Mode;
|
||||
|
||||
use pathpatterns::{MatchEntry, MatchList, MatchPattern, MatchType, PatternFlag};
|
||||
use proxmox::api::api;
|
||||
use proxmox::api::cli::{self, CliCommand, CliCommandMap, CliHelper, CommandLineInterface};
|
||||
use proxmox::tools::fs::{create_path, CreateOptions};
|
||||
use proxmox_router::cli::{self, CliCommand, CliCommandMap, CliHelper, CommandLineInterface};
|
||||
use proxmox_schema::api;
|
||||
use pxar::{EntryKind, Metadata};
|
||||
|
||||
use pbs_runtime::block_in_place;
|
||||
use proxmox_async::runtime::block_in_place;
|
||||
use pbs_datastore::catalog::{self, DirEntryAttribute};
|
||||
use pbs_tools::ops::ControlFlow;
|
||||
|
||||
use crate::pxar::Flags;
|
||||
use crate::pxar::fuse::{Accessor, FileEntry};
|
||||
@ -79,13 +79,13 @@ pub fn catalog_shell_cli() -> CommandLineInterface {
|
||||
"restore-selected",
|
||||
CliCommand::new(&API_METHOD_RESTORE_SELECTED_COMMAND)
|
||||
.arg_param(&["target"])
|
||||
.completion_cb("target", pbs_tools::fs::complete_file_name),
|
||||
.completion_cb("target", cli::complete_file_name),
|
||||
)
|
||||
.insert(
|
||||
"restore",
|
||||
CliCommand::new(&API_METHOD_RESTORE_COMMAND)
|
||||
.arg_param(&["target"])
|
||||
.completion_cb("target", pbs_tools::fs::complete_file_name),
|
||||
.completion_cb("target", cli::complete_file_name),
|
||||
)
|
||||
.insert(
|
||||
"find",
|
||||
@ -1100,7 +1100,7 @@ impl<'a> ExtractorState<'a> {
|
||||
|
||||
self.extractor.leave_directory()?;
|
||||
|
||||
Ok(ControlFlow::CONTINUE)
|
||||
Ok(ControlFlow::Continue(()))
|
||||
}
|
||||
|
||||
async fn handle_new_directory(
|
||||
|
@ -1,230 +0,0 @@
|
||||
use std::io::{self, Seek, SeekFrom};
|
||||
use std::ops::Range;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::task::Context;
|
||||
use std::pin::Pin;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
|
||||
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
|
||||
|
||||
use pbs_datastore::dynamic_index::DynamicIndexReader;
|
||||
use pbs_datastore::read_chunk::ReadChunk;
|
||||
use pbs_datastore::index::IndexFile;
|
||||
use pbs_tools::lru_cache::LruCache;
|
||||
|
||||
struct CachedChunk {
|
||||
range: Range<u64>,
|
||||
data: Vec<u8>,
|
||||
}
|
||||
|
||||
impl CachedChunk {
|
||||
/// Perform sanity checks on the range and data size:
|
||||
pub fn new(range: Range<u64>, data: Vec<u8>) -> Result<Self, Error> {
|
||||
if data.len() as u64 != range.end - range.start {
|
||||
bail!(
|
||||
"read chunk with wrong size ({} != {})",
|
||||
data.len(),
|
||||
range.end - range.start,
|
||||
);
|
||||
}
|
||||
Ok(Self { range, data })
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BufferedDynamicReader<S> {
|
||||
store: S,
|
||||
index: DynamicIndexReader,
|
||||
archive_size: u64,
|
||||
read_buffer: Vec<u8>,
|
||||
buffered_chunk_idx: usize,
|
||||
buffered_chunk_start: u64,
|
||||
read_offset: u64,
|
||||
lru_cache: LruCache<usize, CachedChunk>,
|
||||
}
|
||||
|
||||
struct ChunkCacher<'a, S> {
|
||||
store: &'a mut S,
|
||||
index: &'a DynamicIndexReader,
|
||||
}
|
||||
|
||||
impl<'a, S: ReadChunk> pbs_tools::lru_cache::Cacher<usize, CachedChunk> for ChunkCacher<'a, S> {
|
||||
fn fetch(&mut self, index: usize) -> Result<Option<CachedChunk>, Error> {
|
||||
let info = match self.index.chunk_info(index) {
|
||||
Some(info) => info,
|
||||
None => bail!("chunk index out of range"),
|
||||
};
|
||||
let range = info.range;
|
||||
let data = self.store.read_chunk(&info.digest)?;
|
||||
CachedChunk::new(range, data).map(Some)
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: ReadChunk> BufferedDynamicReader<S> {
|
||||
pub fn new(index: DynamicIndexReader, store: S) -> Self {
|
||||
let archive_size = index.index_bytes();
|
||||
Self {
|
||||
store,
|
||||
index,
|
||||
archive_size,
|
||||
read_buffer: Vec::with_capacity(1024 * 1024),
|
||||
buffered_chunk_idx: 0,
|
||||
buffered_chunk_start: 0,
|
||||
read_offset: 0,
|
||||
lru_cache: LruCache::new(32),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn archive_size(&self) -> u64 {
|
||||
self.archive_size
|
||||
}
|
||||
|
||||
fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
|
||||
//let (start, end, data) = self.lru_cache.access(
|
||||
let cached_chunk = self.lru_cache.access(
|
||||
idx,
|
||||
&mut ChunkCacher {
|
||||
store: &mut self.store,
|
||||
index: &self.index,
|
||||
},
|
||||
)?.ok_or_else(|| format_err!("chunk not found by cacher"))?;
|
||||
|
||||
// fixme: avoid copy
|
||||
self.read_buffer.clear();
|
||||
self.read_buffer.extend_from_slice(&cached_chunk.data);
|
||||
|
||||
self.buffered_chunk_idx = idx;
|
||||
|
||||
self.buffered_chunk_start = cached_chunk.range.start;
|
||||
//println!("BUFFER {} {}", self.buffered_chunk_start, end);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: ReadChunk> pbs_tools::io::BufferedRead for BufferedDynamicReader<S> {
|
||||
fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error> {
|
||||
if offset == self.archive_size {
|
||||
return Ok(&self.read_buffer[0..0]);
|
||||
}
|
||||
|
||||
let buffer_len = self.read_buffer.len();
|
||||
let index = &self.index;
|
||||
|
||||
// optimization for sequential read
|
||||
if buffer_len > 0
|
||||
&& ((self.buffered_chunk_idx + 1) < index.index().len())
|
||||
&& (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
|
||||
{
|
||||
let next_idx = self.buffered_chunk_idx + 1;
|
||||
let next_end = index.chunk_end(next_idx);
|
||||
if offset < next_end {
|
||||
self.buffer_chunk(next_idx)?;
|
||||
let buffer_offset = (offset - self.buffered_chunk_start) as usize;
|
||||
return Ok(&self.read_buffer[buffer_offset..]);
|
||||
}
|
||||
}
|
||||
|
||||
if (buffer_len == 0)
|
||||
|| (offset < self.buffered_chunk_start)
|
||||
|| (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
|
||||
{
|
||||
let end_idx = index.index().len() - 1;
|
||||
let end = index.chunk_end(end_idx);
|
||||
let idx = index.binary_search(0, 0, end_idx, end, offset)?;
|
||||
self.buffer_chunk(idx)?;
|
||||
}
|
||||
|
||||
let buffer_offset = (offset - self.buffered_chunk_start) as usize;
|
||||
Ok(&self.read_buffer[buffer_offset..])
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: ReadChunk> std::io::Read for BufferedDynamicReader<S> {
|
||||
fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
|
||||
use pbs_tools::io::BufferedRead;
|
||||
use std::io::{Error, ErrorKind};
|
||||
|
||||
let data = match self.buffered_read(self.read_offset) {
|
||||
Ok(v) => v,
|
||||
Err(err) => return Err(Error::new(ErrorKind::Other, err.to_string())),
|
||||
};
|
||||
|
||||
let n = if data.len() > buf.len() {
|
||||
buf.len()
|
||||
} else {
|
||||
data.len()
|
||||
};
|
||||
|
||||
buf[0..n].copy_from_slice(&data[0..n]);
|
||||
|
||||
self.read_offset += n as u64;
|
||||
|
||||
Ok(n)
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: ReadChunk> std::io::Seek for BufferedDynamicReader<S> {
|
||||
fn seek(&mut self, pos: SeekFrom) -> Result<u64, std::io::Error> {
|
||||
let new_offset = match pos {
|
||||
SeekFrom::Start(start_offset) => start_offset as i64,
|
||||
SeekFrom::End(end_offset) => (self.archive_size as i64) + end_offset,
|
||||
SeekFrom::Current(offset) => (self.read_offset as i64) + offset,
|
||||
};
|
||||
|
||||
use std::io::{Error, ErrorKind};
|
||||
if (new_offset < 0) || (new_offset > (self.archive_size as i64)) {
|
||||
return Err(Error::new(
|
||||
ErrorKind::Other,
|
||||
format!(
|
||||
"seek is out of range {} ([0..{}])",
|
||||
new_offset, self.archive_size
|
||||
),
|
||||
));
|
||||
}
|
||||
self.read_offset = new_offset as u64;
|
||||
|
||||
Ok(self.read_offset)
|
||||
}
|
||||
}
|
||||
|
||||
/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
|
||||
/// async use!
|
||||
///
|
||||
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
|
||||
/// so that we can properly access it from multiple threads simultaneously while not issuing
|
||||
/// duplicate simultaneous reads over http.
|
||||
#[derive(Clone)]
|
||||
pub struct LocalDynamicReadAt<R: ReadChunk> {
|
||||
inner: Arc<Mutex<BufferedDynamicReader<R>>>,
|
||||
}
|
||||
|
||||
impl<R: ReadChunk> LocalDynamicReadAt<R> {
|
||||
pub fn new(inner: BufferedDynamicReader<R>) -> Self {
|
||||
Self {
|
||||
inner: Arc::new(Mutex::new(inner)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<R: ReadChunk> ReadAt for LocalDynamicReadAt<R> {
|
||||
fn start_read_at<'a>(
|
||||
self: Pin<&'a Self>,
|
||||
_cx: &mut Context,
|
||||
buf: &'a mut [u8],
|
||||
offset: u64,
|
||||
) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
|
||||
use std::io::Read;
|
||||
MaybeReady::Ready(tokio::task::block_in_place(move || {
|
||||
let mut reader = self.inner.lock().unwrap();
|
||||
reader.seek(SeekFrom::Start(offset))?;
|
||||
Ok(reader.read(buf)?)
|
||||
}))
|
||||
}
|
||||
|
||||
fn poll_complete<'a>(
|
||||
self: Pin<&'a Self>,
|
||||
_op: ReadAtOperation<'a>,
|
||||
) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
|
||||
panic!("LocalDynamicReadAt::start_read_at returned Pending");
|
||||
}
|
||||
}
|
@ -15,16 +15,16 @@ use percent_encoding::percent_encode;
|
||||
use xdg::BaseDirectories;
|
||||
|
||||
use proxmox::{
|
||||
api::error::HttpError,
|
||||
sys::linux::tty,
|
||||
tools::fs::{file_get_json, replace_file, CreateOptions},
|
||||
};
|
||||
use proxmox_router::HttpError;
|
||||
|
||||
use proxmox_http::client::HttpsConnector;
|
||||
use proxmox_http::client::{HttpsConnector, RateLimiter};
|
||||
use proxmox_http::uri::build_authority;
|
||||
use proxmox_async::broadcast_future::BroadcastFuture;
|
||||
|
||||
use pbs_api_types::{Authid, Userid};
|
||||
use pbs_tools::broadcast_future::BroadcastFuture;
|
||||
use pbs_api_types::{Authid, Userid, RateLimitConfig};
|
||||
use pbs_tools::json::json_object_to_query;
|
||||
use pbs_tools::ticket;
|
||||
use pbs_tools::percent_encoding::DEFAULT_ENCODE_SET;
|
||||
@ -51,6 +51,7 @@ pub struct HttpClientOptions {
|
||||
ticket_cache: bool,
|
||||
fingerprint_cache: bool,
|
||||
verify_cert: bool,
|
||||
limit: RateLimitConfig,
|
||||
}
|
||||
|
||||
impl HttpClientOptions {
|
||||
@ -109,6 +110,11 @@ impl HttpClientOptions {
|
||||
self.verify_cert = verify_cert;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn rate_limit(mut self, rate_limit: RateLimitConfig) -> Self {
|
||||
self.limit = rate_limit;
|
||||
self
|
||||
}
|
||||
}
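A hedged sketch of the new client-side rate limiting wired up through this builder; the localhost address and interactive options mirror the existing helper in this crate, and the limit values are examples only (when no burst is given, the burst falls back to the rate):

```rust
use pbs_api_types::{Authid, HumanByte, RateLimitConfig};
use pbs_client::{HttpClient, HttpClientOptions};

fn connect_limited() -> Result<HttpClient, anyhow::Error> {
    let limit = RateLimitConfig::with_same_inout(
        Some(HumanByte::from(5 * 1024 * 1024u64)), // ~5 MiB/s in both directions
        None,
    );
    let options = HttpClientOptions::new_interactive(None, None).rate_limit(limit);
    HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)
}
```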
|
||||
|
||||
impl Default for HttpClientOptions {
|
||||
@ -121,6 +127,7 @@ impl Default for HttpClientOptions {
|
||||
ticket_cache: false,
|
||||
fingerprint_cache: false,
|
||||
verify_cert: true,
|
||||
limit: RateLimitConfig::default(), // unlimited
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -153,7 +160,7 @@ pub fn delete_ticket_info(prefix: &str, server: &str, username: &Userid) -> Resu
|
||||
map.remove(username.as_str());
|
||||
}
|
||||
|
||||
replace_file(path, data.to_string().as_bytes(), CreateOptions::new().perm(mode))?;
|
||||
replace_file(path, data.to_string().as_bytes(), CreateOptions::new().perm(mode), false)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@ -195,7 +202,7 @@ fn store_fingerprint(prefix: &str, server: &str, fingerprint: &str) -> Result<()
|
||||
result.push_str(fingerprint);
|
||||
result.push('\n');
|
||||
|
||||
replace_file(path, result.as_bytes(), CreateOptions::new())?;
|
||||
replace_file(path, result.as_bytes(), CreateOptions::new(), false)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@ -230,7 +237,7 @@ fn store_ticket_info(prefix: &str, server: &str, username: &str, ticket: &str, t
|
||||
|
||||
let mut data = file_get_json(&path, Some(json!({})))?;
|
||||
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
let now = proxmox_time::epoch_i64();
|
||||
|
||||
data[server][username] = json!({ "timestamp": now, "ticket": ticket, "token": token});
|
||||
|
||||
@ -250,7 +257,7 @@ fn store_ticket_info(prefix: &str, server: &str, username: &str, ticket: &str, t
|
||||
}
|
||||
}
|
||||
|
||||
replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new().perm(mode))?;
|
||||
replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new().perm(mode), false)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@ -261,7 +268,7 @@ fn load_ticket_info(prefix: &str, server: &str, userid: &Userid) -> Option<(Stri
|
||||
// usually /run/user/<uid>/...
|
||||
let path = base.place_runtime_file("tickets").ok()?;
|
||||
let data = file_get_json(&path, None).ok()?;
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
let now = proxmox_time::epoch_i64();
|
||||
let ticket_lifetime = ticket::TICKET_LIFETIME - 60;
|
||||
let uinfo = data[server][userid.as_str()].as_object()?;
|
||||
let timestamp = uinfo["timestamp"].as_i64()?;
|
||||
@ -343,7 +350,21 @@ impl HttpClient {
|
||||
httpc.enforce_http(false); // we want https...
|
||||
|
||||
httpc.set_connect_timeout(Some(std::time::Duration::new(10, 0)));
|
||||
let https = HttpsConnector::with_connector(httpc, ssl_connector_builder.build(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
|
||||
let mut https = HttpsConnector::with_connector(httpc, ssl_connector_builder.build(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
|
||||
|
||||
if let Some(rate_in) = options.limit.rate_in {
|
||||
let burst_in = options.limit.burst_in.unwrap_or_else(|| rate_in).as_u64();
|
||||
https.set_read_limiter(Some(Arc::new(Mutex::new(
|
||||
RateLimiter::new(rate_in.as_u64(), burst_in)
|
||||
))));
|
||||
}
|
||||
|
||||
if let Some(rate_out) = options.limit.rate_out {
|
||||
let burst_out = options.limit.burst_out.unwrap_or_else(|| rate_out).as_u64();
|
||||
https.set_write_limiter(Some(Arc::new(Mutex::new(
|
||||
RateLimiter::new(rate_out.as_u64(), burst_out)
|
||||
))));
|
||||
}
|
||||
|
||||
let client = Client::builder()
|
||||
//.http2_initial_stream_window_size( (1 << 31) - 2)
|
||||
|
@ -3,15 +3,7 @@
|
||||
//! This library implements the client side to access the backups
|
||||
//! server using https.
|
||||
|
||||
use anyhow::Error;
|
||||
|
||||
use pbs_api_types::{Authid, Userid};
|
||||
use pbs_tools::ticket::Ticket;
|
||||
use pbs_tools::cert::CertInfo;
|
||||
use pbs_tools::auth::private_auth_key;
|
||||
|
||||
pub mod catalog_shell;
|
||||
pub mod dynamic_index;
|
||||
pub mod pxar;
|
||||
pub mod tools;
|
||||
|
||||
@ -49,26 +41,3 @@ mod chunk_stream;
|
||||
pub use chunk_stream::{ChunkStream, FixedChunkStream};
|
||||
|
||||
pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120;
|
||||
|
||||
/// Connect to localhost:8007 as root@pam
|
||||
///
|
||||
/// This automatically creates a ticket if run as 'root' user.
|
||||
pub fn connect_to_localhost() -> Result<HttpClient, Error> {
|
||||
|
||||
let uid = nix::unistd::Uid::current();
|
||||
|
||||
let client = if uid.is_root() {
|
||||
let ticket = Ticket::new("PBS", Userid::root_userid())?
|
||||
.sign(private_auth_key(), None)?;
|
||||
let fingerprint = CertInfo::new()?.fingerprint()?;
|
||||
let options = HttpClientOptions::new_non_interactive(ticket, Some(fingerprint));
|
||||
|
||||
HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)?
|
||||
} else {
|
||||
let options = HttpClientOptions::new_interactive(None, None);
|
||||
|
||||
HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)?
|
||||
};
|
||||
|
||||
Ok(client)
|
||||
}
|
||||
|
@ -19,11 +19,11 @@ use pathpatterns::{MatchEntry, MatchFlag, MatchList, MatchType, PatternFlag};
|
||||
use pxar::Metadata;
|
||||
use pxar::encoder::{SeqWrite, LinkOffset};
|
||||
|
||||
use proxmox::c_str;
|
||||
use proxmox::sys::error::SysError;
|
||||
use proxmox::tools::fd::RawFdNum;
|
||||
use proxmox::tools::vec;
|
||||
use proxmox::tools::fd::Fd;
|
||||
use proxmox_io::vec;
|
||||
use proxmox_lang::c_str;
|
||||
|
||||
use pbs_datastore::catalog::BackupCatalogWriter;
|
||||
use pbs_tools::{acl, fs, xattr};
|
||||
|
@ -22,12 +22,10 @@ use pxar::format::Device;
|
||||
use pxar::{Entry, EntryKind, Metadata};
|
||||
|
||||
use proxmox::c_result;
|
||||
use proxmox::tools::{
|
||||
fs::{create_path, CreateOptions},
|
||||
io::{sparse_copy, sparse_copy_async},
|
||||
};
|
||||
use proxmox::tools::fs::{create_path, CreateOptions};
|
||||
use proxmox_io::{sparse_copy, sparse_copy_async};
|
||||
|
||||
use pbs_tools::zip::{ZipEncoder, ZipEntry};
|
||||
use proxmox_async::zip::{ZipEncoder, ZipEntry};
|
||||
|
||||
use crate::pxar::dir_stack::PxarDirStack;
|
||||
use crate::pxar::metadata;
|
||||
|
@ -20,7 +20,7 @@ use futures::select;
|
||||
use futures::sink::SinkExt;
|
||||
use futures::stream::{StreamExt, TryStreamExt};
|
||||
|
||||
use proxmox::tools::vec;
|
||||
use proxmox_io::vec;
|
||||
use pxar::accessor::{self, EntryRangeInfo, ReadAt};
|
||||
|
||||
use proxmox_fuse::requests::{self, FuseRequest};
|
||||
@ -344,7 +344,7 @@ impl SessionImpl {
|
||||
Err(err) => return self.handle_err(request, err, err_sender).await,
|
||||
},
|
||||
Request::Getattr(request) => match self.getattr(request.inode).await {
|
||||
Ok(stat) => request.reply(&stat, std::f64::MAX).map_err(Error::from),
|
||||
Ok(stat) => request.reply(&stat, f64::MAX).map_err(Error::from),
|
||||
Err(err) => return self.handle_err(request, err, err_sender).await,
|
||||
},
|
||||
Request::ReaddirPlus(mut request) => match self.readdirplus(&mut request).await {
|
||||
@ -539,7 +539,7 @@ impl SessionImpl {
|
||||
let file = file?.decode_entry().await?;
|
||||
let stat = to_stat(to_inode(&file), &file)?;
|
||||
let name = file.file_name();
|
||||
match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
|
||||
match request.add_entry(name, &stat, next, 1, f64::MAX, f64::MAX)? {
|
||||
ReplyBufState::Ok => (),
|
||||
ReplyBufState::Full => return Ok(lookups),
|
||||
}
|
||||
@ -551,7 +551,7 @@ impl SessionImpl {
|
||||
let file = dir.lookup_self().await?;
|
||||
let stat = to_stat(to_inode(&file), &file)?;
|
||||
let name = OsStr::new(".");
|
||||
match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
|
||||
match request.add_entry(name, &stat, next, 1, f64::MAX, f64::MAX)? {
|
||||
ReplyBufState::Ok => (),
|
||||
ReplyBufState::Full => return Ok(lookups),
|
||||
}
|
||||
@ -565,7 +565,7 @@ impl SessionImpl {
|
||||
let file = parent_dir.lookup_self().await?;
|
||||
let stat = to_stat(to_inode(&file), &file)?;
|
||||
let name = OsStr::new("..");
|
||||
match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
|
||||
match request.add_entry(name, &stat, next, 1, f64::MAX, f64::MAX)? {
|
||||
ReplyBufState::Ok => (),
|
||||
ReplyBufState::Full => return Ok(lookups),
|
||||
}
|
||||
|
@@ -115,7 +115,7 @@ fn mode_string(entry: &Entry) -> String {
 }

 fn format_mtime(mtime: &StatxTimestamp) -> String {
-    if let Ok(s) = proxmox::tools::time::strftime_local("%Y-%m-%d %H:%M:%S", mtime.secs) {
+    if let Ok(s) = proxmox_time::strftime_local("%Y-%m-%d %H:%M:%S", mtime.secs) {
         return s;
     }
     format!("{}.{}", mtime.secs, mtime.nanos)
@@ -12,9 +12,10 @@ use nix::dir::Dir;
 use nix::fcntl::OFlag;
 use nix::sys::stat::Mode;

+use proxmox_async::blocking::TokioWriterAdapter;
+
 use pbs_datastore::catalog::CatalogWriter;
 use pbs_tools::sync::StdChannelWriter;
-use pbs_tools::tokio::TokioWriterAdapter;

 /// Stream implementation to encode and upload .pxar archives.
 ///
@@ -111,7 +112,7 @@ impl Stream for PxarBackupStream {
             }
         }

-        match pbs_runtime::block_in_place(|| self.rx.as_ref().unwrap().recv()) {
+        match proxmox_async::runtime::block_in_place(|| self.rx.as_ref().unwrap().recv()) {
             Ok(data) => Poll::Ready(Some(data)),
             Err(_) => {
                 let error = self.error.lock().unwrap();
@@ -5,12 +5,13 @@ use std::sync::{Arc, Mutex};

 use anyhow::{bail, Error};

+use proxmox_async::runtime::block_on;
+
 use pbs_tools::crypt_config::CryptConfig;
 use pbs_api_types::CryptMode;
 use pbs_datastore::data_blob::DataBlob;
 use pbs_datastore::read_chunk::ReadChunk;
 use pbs_datastore::read_chunk::AsyncReadChunk;
-use pbs_runtime::block_on;

 use super::BackupReader;

@@ -5,7 +5,7 @@ use serde_json::{json, Value};
 use tokio::signal::unix::{signal, SignalKind};
 use futures::*;

-use proxmox::api::cli::format_and_print_result;
+use proxmox_router::cli::format_and_print_result;

 use pbs_tools::percent_encoding::percent_encode_component;

@@ -6,9 +6,9 @@ use std::io::Read;
 use anyhow::{bail, format_err, Error};
 use serde_json::Value;

-use proxmox::api::schema::*;
 use proxmox::sys::linux::tty;
 use proxmox::tools::fs::file_get_contents;
+use proxmox_schema::*;

 use pbs_api_types::CryptMode;

@@ -440,8 +440,8 @@ fn test_crypto_parameters_handling() -> Result<(), Error> {
         mode: CryptMode::SignOnly,
     };

-    replace_file(&keypath, &some_key, CreateOptions::default())?;
-    replace_file(&master_keypath, &some_master_key, CreateOptions::default())?;
+    replace_file(&keypath, &some_key, CreateOptions::default(), false)?;
+    replace_file(&master_keypath, &some_master_key, CreateOptions::default(), false)?;

     // no params, no default key == no key
     let res = crypto_parameters(&json!({}));
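The extra `false` argument in the hunk above suggests that proxmox's replace_file() gained a fourth parameter (presumably an fsync flag) in the crate versions this series targets; the call sites here keep the old no-sync behaviour. A minimal sketch of the updated call, assuming that signature:

    use anyhow::Error;
    use proxmox::tools::fs::{replace_file, CreateOptions};

    fn write_key_file(path: &std::path::Path, contents: &[u8]) -> Result<(), Error> {
        // The last argument is assumed to be the new fsync flag; `false` matches the
        // behaviour of the previous three-argument replace_file().
        replace_file(path, contents, CreateOptions::default(), false)?;
        Ok(())
    }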
@@ -10,13 +10,11 @@ use anyhow::{bail, format_err, Context, Error};
 use serde_json::{json, Value};
 use xdg::BaseDirectories;

-use proxmox::{
-    api::schema::*,
-    api::cli::shellword_split,
-    tools::fs::file_get_json,
-};
+use proxmox_schema::*;
+use proxmox_router::cli::{complete_file_name, shellword_split};
+use proxmox::tools::fs::file_get_json;

-use pbs_api_types::{BACKUP_REPO_URL, Authid, UserWithTokens};
+use pbs_api_types::{BACKUP_REPO_URL, Authid, RateLimitConfig, UserWithTokens};
 use pbs_datastore::BackupDir;
 use pbs_tools::json::json_object_to_query;

@@ -137,15 +135,30 @@ pub fn extract_repository_from_map(param: &HashMap<String, String>) -> Option<Ba
 }

 pub fn connect(repo: &BackupRepository) -> Result<HttpClient, Error> {
-    connect_do(repo.host(), repo.port(), repo.auth_id())
+    let rate_limit = RateLimitConfig::default(); // unlimited
+    connect_do(repo.host(), repo.port(), repo.auth_id(), rate_limit)
         .map_err(|err| format_err!("error building client for repository {} - {}", repo, err))
 }

-fn connect_do(server: &str, port: u16, auth_id: &Authid) -> Result<HttpClient, Error> {
+pub fn connect_rate_limited(
+    repo: &BackupRepository,
+    rate_limit: RateLimitConfig,
+) -> Result<HttpClient, Error> {
+    connect_do(repo.host(), repo.port(), repo.auth_id(), rate_limit)
+        .map_err(|err| format_err!("error building client for repository {} - {}", repo, err))
+}
+
+fn connect_do(
+    server: &str,
+    port: u16,
+    auth_id: &Authid,
+    rate_limit: RateLimitConfig,
+) -> Result<HttpClient, Error> {
     let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();

     let password = get_secret_from_env(ENV_VAR_PBS_PASSWORD)?;
-    let options = HttpClientOptions::new_interactive(password, fingerprint);
+    let options = HttpClientOptions::new_interactive(password, fingerprint)
+        .rate_limit(rate_limit);

     HttpClient::new(server, port, auth_id, options)
 }
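Taken together, the hunk above keeps connect() as the unlimited default while adding connect_rate_limited() for callers that already parsed a RateLimitConfig. A rough usage sketch, assuming the same module context (BackupRepository in scope); the surrounding function is illustrative, not part of this series:

    use anyhow::Error;
    use pbs_api_types::RateLimitConfig;

    fn open_clients(repo: &BackupRepository) -> Result<(), Error> {
        // Default path: no traffic shaping, exactly what connect() did before.
        let _client = connect(repo)?;

        // Explicit path: pass a RateLimitConfig (here just the unlimited default;
        // a real caller would fill it from CLI or API parameters).
        let limit = RateLimitConfig::default();
        let _limited = connect_rate_limited(repo, limit)?;

        Ok(())
    }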
@@ -179,7 +192,7 @@ pub async fn try_get(repo: &BackupRepository, url: &str) -> Value {
 }

 pub fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
-    pbs_runtime::main(async { complete_backup_group_do(param).await })
+    proxmox_async::runtime::main(async { complete_backup_group_do(param).await })
 }

 pub async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {
@@ -209,7 +222,7 @@ pub async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<St
 }

 pub fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
-    pbs_runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
+    proxmox_async::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
 }

 pub async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
@@ -228,7 +241,7 @@ pub async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, St
 }

 pub fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
-    pbs_runtime::main(async { complete_backup_snapshot_do(param).await })
+    proxmox_async::runtime::main(async { complete_backup_snapshot_do(param).await })
 }

 pub async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {
@@ -260,7 +273,7 @@ pub async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec
 }

 pub fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
-    pbs_runtime::main(async { complete_server_file_name_do(param).await })
+    proxmox_async::runtime::main(async { complete_server_file_name_do(param).await })
 }

 pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {
@@ -306,7 +319,7 @@ pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Ve
 pub fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
     complete_server_file_name(arg, param)
         .iter()
-        .map(|v| pbs_tools::format::strip_server_file_extension(&v))
+        .map(|v| pbs_tools::format::strip_server_file_extension(&v).to_owned())
         .collect()
 }

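The added .to_owned() here and in the next two hunks indicates that pbs_tools::format::strip_server_file_extension() now returns a borrowed &str instead of an owned String, so completion helpers that collect into Vec<String> must convert explicitly. A small sketch of the pattern, assuming that return type (the helper function itself is hypothetical):

    // Hypothetical helper mirroring the completion code above.
    fn archive_names(server_files: &[String]) -> Vec<String> {
        server_files
            .iter()
            // strip_server_file_extension() is assumed to return &str, hence .to_owned().
            .map(|name| pbs_tools::format::strip_server_file_extension(name).to_owned())
            .collect()
    }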
@@ -315,7 +328,7 @@ pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) ->
         .iter()
         .filter_map(|name| {
             if name.ends_with(".pxar.didx") {
-                Some(pbs_tools::format::strip_server_file_extension(name))
+                Some(pbs_tools::format::strip_server_file_extension(name).to_owned())
             } else {
                 None
             }
@@ -328,7 +341,7 @@ pub fn complete_img_archive_name(arg: &str, param: &HashMap<String, String>) ->
         .iter()
         .filter_map(|name| {
             if name.ends_with(".img.fidx") {
-                Some(pbs_tools::format::strip_server_file_extension(name))
+                Some(pbs_tools::format::strip_server_file_extension(name).to_owned())
             } else {
                 None
             }
@@ -351,7 +364,7 @@ pub fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<
 }

 pub fn complete_auth_id(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
-    pbs_runtime::main(async { complete_auth_id_do(param).await })
+    proxmox_async::runtime::main(async { complete_auth_id_do(param).await })
 }

 pub async fn complete_auth_id_do(param: &HashMap<String, String>) -> Vec<String> {
@@ -413,7 +426,7 @@ pub fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec
         return result;
     }

-    let files = pbs_tools::fs::complete_file_name(data[1], param);
+    let files = complete_file_name(data[1], param);

     for file in files {
         result.push(format!("{}:{}", data[0], file));
@@ -13,7 +13,7 @@ use serde_json::Value;
 use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf};
 use tokio::net::UnixStream;

-use proxmox::api::error::HttpError;
+use proxmox_router::HttpError;

 pub const DEFAULT_VSOCK_PORT: u16 = 807;

@@ -6,17 +6,25 @@ edition = "2018"
 description = "Configuration file management for PBS"

 [dependencies]
-libc = "0.2"
 anyhow = "1.0"
+hex = "0.4.3"
 lazy_static = "1.4"
+libc = "0.2"
+nix = "0.19.1"
+once_cell = "1.3.1"
+openssl = "0.10"
+regex = "1.2"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
-openssl = "0.10"
-nix = "0.19.1"
-regex = "1.2"
-once_cell = "1.3.1"

-proxmox = { version = "0.13.3", default-features = false, features = [ "cli" ] }
+proxmox = "0.15.3"
+proxmox-lang = "1"
+proxmox-router = { version = "1.1", default-features = false }
+proxmox-schema = "1"
+proxmox-section-config = "1"
+proxmox-time = "1"
+proxmox-shared-memory = "0.1.1"
+proxmox-sys = "0.1.2"

 pbs-api-types = { path = "../pbs-api-types" }
 pbs-buildcfg = { path = "../pbs-buildcfg" }
@@ -8,7 +8,7 @@ use anyhow::{bail, Error};

 use lazy_static::lazy_static;

-use proxmox::api::schema::{Schema, StringSchema, ApiStringFormat, ApiType};
+use proxmox_schema::{ApiStringFormat, ApiType, Schema, StringSchema};

 use pbs_api_types::{Authid, Userid, Role, ROLE_NAME_NO_ACCESS};

@@ -3,16 +3,16 @@
 use std::sync::{RwLock, Arc};

 use anyhow::{Error, bail};
-
-use proxmox::api::section_config::SectionConfigData;
 use lazy_static::lazy_static;
-use proxmox::api::UserInformation;
-use proxmox::tools::time::epoch_i64;
+
+use proxmox_router::UserInformation;
+use proxmox_section_config::SectionConfigData;
+use proxmox_time::epoch_i64;

 use pbs_api_types::{Authid, Userid, User, ApiToken, ROLE_ADMIN};

 use crate::acl::{AclTree, ROLE_NAMES};
-use crate::memcom::Memcom;
+use crate::ConfigVersionCache;

 /// Cache User/Group/Token/Acl configuration data for fast permission tests
 pub struct CachedUserInfo {
@@ -38,8 +38,8 @@ impl CachedUserInfo {
     pub fn new() -> Result<Arc<Self>, Error> {
         let now = epoch_i64();

-        let memcom = Memcom::new()?;
-        let user_cache_generation = memcom.user_cache_generation();
+        let version_cache = ConfigVersionCache::new()?;
+        let user_cache_generation = version_cache.user_cache_generation();

         { // limit scope
             let cache = CACHED_CONFIG.read().unwrap();
pbs-config/src/config_version_cache.rs | 121 | Normal file
@@ -0,0 +1,121 @@
use std::path::Path;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::mem::MaybeUninit;

use anyhow::{bail, Error};
use once_cell::sync::OnceCell;
use nix::sys::stat::Mode;

use proxmox::tools::fs::{create_path, CreateOptions};

// openssl::sha::sha256(b"Proxmox Backup ConfigVersionCache v1.0")[0..8];
pub const PROXMOX_BACKUP_CONFIG_VERSION_CACHE_MAGIC_1_0: [u8; 8] = [25, 198, 168, 230, 154, 132, 143, 131];

const FILE_PATH: &str = pbs_buildcfg::rundir!("/shmem/config-versions");

use proxmox_shared_memory::*;

#[derive(Debug)]
#[repr(C)]
struct ConfigVersionCacheData {
    magic: [u8; 8],
    // User (user.cfg) cache generation/version.
    user_cache_generation: AtomicUsize,
    // Traffic control (traffic-control.cfg) generation/version.
    traffic_control_generation: AtomicUsize,

    // Add further atomics here (and reduce padding size)

    padding: [u8; 4096 - 3*8],
}


impl Init for ConfigVersionCacheData {
    fn initialize(this: &mut MaybeUninit<Self>) {
        unsafe {
            let me = &mut *this.as_mut_ptr();
            me.magic = PROXMOX_BACKUP_CONFIG_VERSION_CACHE_MAGIC_1_0;
        }
    }

    fn check_type_magic(this: &MaybeUninit<Self>) -> Result<(), Error> {
        unsafe {
            let me = &*this.as_ptr();
            if me.magic != PROXMOX_BACKUP_CONFIG_VERSION_CACHE_MAGIC_1_0 {
                bail!("ConfigVersionCache: wrong magic number");
            }
            Ok(())
        }
    }
}


pub struct ConfigVersionCache {
    shmem: SharedMemory<ConfigVersionCacheData>
}

static INSTANCE: OnceCell<Arc<ConfigVersionCache>> = OnceCell::new();

impl ConfigVersionCache {

    /// Open the memory based communication channel singleton.
    pub fn new() -> Result<Arc<Self>, Error> {
        INSTANCE.get_or_try_init(Self::open).map(Arc::clone)
    }

    // Actual work of `new`:
    fn open() -> Result<Arc<Self>, Error> {
        let user = crate::backup_user()?;

        let dir_opts = CreateOptions::new()
            .perm(Mode::from_bits_truncate(0o770))
            .owner(user.uid)
            .group(user.gid);

        let file_path = Path::new(FILE_PATH);
        let dir_path = file_path.parent().unwrap();

        create_path(
            dir_path,
            Some(dir_opts.clone()),
            Some(dir_opts))?;

        let file_opts = CreateOptions::new()
            .perm(Mode::from_bits_truncate(0o660))
            .owner(user.uid)
            .group(user.gid);

        let shmem: SharedMemory<ConfigVersionCacheData> =
            SharedMemory::open(file_path, file_opts)?;

        Ok(Arc::new(Self { shmem }))
    }

    /// Returns the user cache generation number.
    pub fn user_cache_generation(&self) -> usize {
        self.shmem.data()
            .user_cache_generation.load(Ordering::Acquire)
    }

    /// Increase the user cache generation number.
    pub fn increase_user_cache_generation(&self) {
        self.shmem.data()
            .user_cache_generation
            .fetch_add(1, Ordering::AcqRel);
    }

    /// Returns the traffic control generation number.
    pub fn traffic_control_generation(&self) -> usize {
        self.shmem.data()
            .traffic_control_generation.load(Ordering::Acquire)
    }

    /// Increase the traffic control generation number.
    pub fn increase_traffic_control_generation(&self) {
        self.shmem.data()
            .traffic_control_generation
            .fetch_add(1, Ordering::AcqRel);
    }

}
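To clarify how the new shared-memory cache is meant to be used (CachedUserInfo above is the real consumer), here is a hedged sketch that only exercises the methods defined in this file; the calling function itself is illustrative, and the crate-root re-export is assumed from the `use crate::ConfigVersionCache;` line earlier in this diff:

    use anyhow::Error;
    use pbs_config::ConfigVersionCache; // assumed re-export from the pbs-config crate root

    fn check_user_cfg_generation() -> Result<(), Error> {
        // Process-wide singleton backed by a 4 KiB shared-memory page under
        // <rundir>/shmem/config-versions.
        let cache = ConfigVersionCache::new()?;

        // Readers remember the generation at which they loaded user.cfg ...
        let seen = cache.user_cache_generation();

        // ... and writers bump it after changing user.cfg, so cached copies
        // (e.g. CachedUserInfo) can detect that they are stale.
        cache.increase_user_cache_generation();

        assert!(cache.user_cache_generation() > seen);
        Ok(())
    }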
Some files were not shown because too many files have changed in this diff.