Compare commits


1 Commit

Commit: 4d86df04a0, bump version to 1.1.10-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Date: 2021-06-16 09:55:47 +02:00
535 changed files with 22241 additions and 25650 deletions

Cargo.toml

@ -1,6 +1,6 @@
[package]
name = "proxmox-backup"
version = "2.1.2"
version = "1.1.10"
authors = [
"Dietmar Maurer <dietmar@proxmox.com>",
"Dominik Csapak <d.csapak@proxmox.com>",
@ -15,48 +15,29 @@ edition = "2018"
license = "AGPL-3"
description = "Proxmox Backup"
homepage = "https://www.proxmox.com"
build = "build.rs"
exclude = [ "build", "debian", "tests/catar_data/test_symlink/symlink1"]
[workspace]
members = [
"pbs-buildcfg",
"pbs-client",
"pbs-config",
"pbs-datastore",
"pbs-fuse-loop",
"proxmox-rest-server",
"proxmox-rrd",
"pbs-tape",
"pbs-tools",
"proxmox-backup-banner",
"proxmox-backup-client",
"proxmox-file-restore",
"proxmox-restore-daemon",
"pxar-bin",
]
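Since this manifest also defines a Cargo workspace, each member crate can be built on its own. A small illustration (any member from the list above works the same way):

    cargo build -p pbs-client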
[lib]
name = "proxmox_backup"
path = "src/lib.rs"
[dependencies]
apt-pkg-native = "0.3.2"
base64 = "0.13"
base64 = "0.12"
bitflags = "1.2.1"
bytes = "1.0"
cidr = "0.2.1"
crc32fast = "1"
endian_trait = { version = "0.6", features = ["arrays"] }
env_logger = "0.7"
flate2 = "1.0"
anyhow = "1.0"
foreign-types = "0.3"
thiserror = "1.0"
futures = "0.3"
h2 = { version = "0.3", features = [ "stream" ] }
handlebars = "3.0"
hex = "0.4.3"
http = "0.2"
hyper = { version = "0.14", features = [ "full" ] }
lazy_static = "1.4"
@ -69,6 +50,17 @@ openssl = "0.10"
pam = "0.7"
pam-sys = "0.5"
percent-encoding = "2.1"
pin-utils = "0.1.0"
pin-project = "1.0"
pathpatterns = "0.1.2"
proxmox = { version = "0.11.5", features = [ "sortable-macro", "api-macro" ] }
#proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro" ] }
proxmox-fuse = "0.1.1"
proxmox-http = { version = "0.2.1", features = [ "client", "http-helpers", "websocket" ] }
#proxmox-http = { version = "0.2.0", path = "../proxmox/proxmox-http", features = [ "client", "http-helpers", "websocket" ] }
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
#pxar = { path = "../pxar", features = [ "tokio-io" ] }
regex = "1.2"
rustyline = "7"
serde = { version = "1.0", features = ["derive"] }
@ -80,56 +72,17 @@ tokio-openssl = "0.6.1"
tokio-stream = "0.1.0"
tokio-util = { version = "0.6", features = [ "codec", "io" ] }
tower-service = "0.3.0"
udev = "0.4"
udev = ">= 0.3, <0.5"
url = "2.1"
#valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
walkdir = "2"
webauthn-rs = "0.2.5"
xdg = "2.2"
zstd = { version = "0.4", features = [ "bindgen" ] }
nom = "5.1"
crossbeam-channel = "0.5"
# Used only by examples currently:
zstd = { version = "0.6", features = [ "bindgen" ] }
pathpatterns = "0.1.2"
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
proxmox = { version = "0.15.3", features = [ "sortable-macro" ] }
proxmox-http = { version = "0.5.4", features = [ "client", "http-helpers", "websocket" ] }
proxmox-io = "1"
proxmox-lang = "1"
proxmox-router = { version = "1.1", features = [ "cli" ] }
proxmox-schema = { version = "1", features = [ "api-macro" ] }
proxmox-section-config = "1"
proxmox-tfa = { version = "1.3", features = [ "api", "api-types" ] }
proxmox-time = "1"
proxmox-uuid = "1"
proxmox-shared-memory = "0.1.1"
proxmox-sys = "0.1.2"
proxmox-acme-rs = "0.3"
proxmox-apt = "0.8.0"
proxmox-async = "0.2"
proxmox-openid = "0.9.0"
pbs-api-types = { path = "pbs-api-types" }
pbs-buildcfg = { path = "pbs-buildcfg" }
pbs-client = { path = "pbs-client" }
pbs-config = { path = "pbs-config" }
pbs-datastore = { path = "pbs-datastore" }
proxmox-rest-server = { path = "proxmox-rest-server" }
proxmox-rrd = { path = "proxmox-rrd" }
pbs-tools = { path = "pbs-tools" }
pbs-tape = { path = "pbs-tape" }
# Local path overrides
# NOTE: You must run `cargo update` after changing this for it to take effect!
[patch.crates-io]
#proxmox = { path = "../proxmox/proxmox" }
#proxmox-http = { path = "../proxmox/proxmox-http" }
#proxmox-tfa = { path = "../proxmox/proxmox-tfa" }
#proxmox-schema = { path = "../proxmox/proxmox-schema" }
#pxar = { path = "../pxar" }
proxmox-acme-rs = "0.2.1"
[features]
default = []
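As a minimal sketch of the local-override workflow described in the NOTE above (the crate name is taken from the commented examples and assumes the neighbouring checkout exists):

    # 1. uncomment the desired path override under [patch.crates-io]
    # 2. refresh the lock file so the override takes effect
    cargo update -p proxmox
    cargo build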

Makefile

@ -17,8 +17,7 @@ USR_BIN := \
# Binaries usable by admins
USR_SBIN := \
proxmox-backup-manager \
proxmox-backup-debug \
proxmox-backup-manager
# Binaries for services:
SERVICE_BIN := \
@ -31,23 +30,6 @@ SERVICE_BIN := \
RESTORE_BIN := \
proxmox-restore-daemon
SUBCRATES := \
pbs-api-types \
pbs-buildcfg \
pbs-client \
pbs-config \
pbs-datastore \
pbs-fuse-loop \
proxmox-rest-server \
proxmox-rrd \
pbs-tape \
pbs-tools \
proxmox-backup-banner \
proxmox-backup-client \
proxmox-file-restore \
proxmox-restore-daemon \
pxar-bin
ifeq ($(BUILD_MODE), release)
CARGO_BUILD_ARGS += --release
COMPILEDIR := target/release
@ -75,15 +57,13 @@ RESTORE_DBG_DEB=proxmox-backup-file-restore-dbgsym_${DEB_VERSION}_${ARCH}.deb
DOC_DEB=${PACKAGE}-docs_${DEB_VERSION}_all.deb
DEBS=${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB} \
${RESTORE_DEB} ${RESTORE_DBG_DEB} ${DEBUG_DEB} ${DEBUG_DBG_DEB}
${RESTORE_DEB} ${RESTORE_DBG_DEB}
DSC = rust-${PACKAGE}_${DEB_VERSION}.dsc
DESTDIR=
tests ?= --workspace
all: $(SUBDIRS)
all: cargo-build $(SUBDIRS)
.PHONY: $(SUBDIRS)
$(SUBDIRS):
@ -95,23 +75,25 @@ test:
$(CARGO) test $(tests) $(CARGO_BUILD_ARGS)
doc:
$(CARGO) doc --workspace --no-deps $(CARGO_BUILD_ARGS)
$(CARGO) doc --no-deps $(CARGO_BUILD_ARGS)
# always re-create this dir
.PHONY: build
build:
@echo "Setting pkg-buildcfg version to: $(DEB_VERSION_UPSTREAM)"
sed -i -e 's/^version =.*$$/version = "$(DEB_VERSION_UPSTREAM)"/' \
pbs-buildcfg/Cargo.toml
rm -rf build
mkdir build
cp -a debian \
Cargo.toml src \
$(SUBCRATES) \
docs etc examples tests www zsh-completions \
defines.mk Makefile \
./build/
rm -f build/Cargo.lock
rm -f debian/control
debcargo package \
--config debian/debcargo.toml \
--changelog-ready \
--no-overlay-write-back \
--directory build \
proxmox-backup \
$(shell dpkg-parsechangelog -l debian/changelog -SVersion | sed -e 's/-.*//')
sed -e '1,/^$$/ ! d' build/debian/control > build/debian/control.src
cat build/debian/control.src build/debian/control.in > build/debian/control
rm build/debian/control.in build/debian/control.src
cp build/debian/control debian/control
rm build/Cargo.lock
find build/debian -name "*.hint" -delete
$(foreach i,$(SUBDIRS), \
$(MAKE) -C build/$(i) clean ;)
@ -131,9 +113,7 @@ deb: build
lintian $(DEBS)
.PHONY: deb-all
deb-all: build
cd build; dpkg-buildpackage -b -us -uc --no-pre-clean
lintian $(DEBS) $(DOC_DEB)
deb-all: $(DOC_DEB) $(DEBS)
.PHONY: dsc
dsc: $(DSC)
@ -141,61 +121,27 @@ $(DSC): build
cd build; dpkg-buildpackage -S -us -uc -d -nc
lintian $(DSC)
.PHONY: clean distclean deb clean
distclean: clean
clean: clean-deb
clean:
$(foreach i,$(SUBDIRS), \
$(MAKE) -C $(i) clean ;)
$(CARGO) clean
rm -f .do-cargo-build
rm -rf *.deb *.dsc *.tar.gz *.buildinfo *.changes build
find . -name '*~' -exec rm {} ';'
# allows one to avoid running cargo clean when one just wants to tidy up after a package build
clean-deb:
rm -rf *.deb *.dsc *.tar.gz *.buildinfo *.changes build/
.PHONY: dinstall
dinstall: ${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB} \
${DEBUG_DEB} ${DEBUG_DBG_DEB}
dinstall: ${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB}
dpkg -i $^
# make sure we build binaries before docs
docs: $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen
docs: cargo-build
.PHONY: cargo-build
cargo-build:
rm -f .do-cargo-build
$(MAKE) $(COMPILED_BINS)
$(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-cargo-build
.do-cargo-build:
$(CARGO) build $(CARGO_BUILD_ARGS) \
--package proxmox-backup-banner \
--bin proxmox-backup-banner \
--package proxmox-backup-client \
--bin proxmox-backup-client \
--bin dump-catalog-shell-cli \
--bin proxmox-backup-debug \
--package proxmox-file-restore \
--bin proxmox-file-restore \
--package pxar-bin \
--bin pxar \
--package pbs-tape \
--bin pmt \
--bin pmtx \
--package proxmox-restore-daemon \
--bin proxmox-restore-daemon \
--package proxmox-backup \
--bin docgen \
--bin proxmox-backup-api \
--bin proxmox-backup-manager \
--bin proxmox-backup-proxy \
--bin proxmox-daily-update \
--bin proxmox-file-restore \
--bin proxmox-tape \
--bin sg-tape-cmd
touch "$@"
$(CARGO) build $(CARGO_BUILD_ARGS)
$(COMPILED_BINS): cargo-build
.PHONY: lint
lint:
@ -221,16 +167,12 @@ install: $(COMPILED_BINS)
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
$(MAKE) -C www install
$(MAKE) -C docs install
ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
$(MAKE) test # HACK, only test now to avoid clobbering build files with wrong config
endif
.PHONY: upload
upload: ${SERVER_DEB} ${CLIENT_DEB} ${RESTORE_DEB} ${DOC_DEB} ${DEBUG_DEB}
upload: ${SERVER_DEB} ${CLIENT_DEB} ${RESTORE_DEB} ${DOC_DEB}
# check if working directory is clean
git diff --exit-code --stat && git diff --exit-code --stat --staged
tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} ${CLIENT_DEB} \
${CLIENT_DBG_DEB} ${DEBUG_DEB} ${DEBUG_DBG_DEB} \
| ssh -X repoman@repo.proxmox.com upload --product pbs --dist bullseye
tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg,pbs-client" --dist bullseye
tar cf - ${RESTORE_DEB} ${RESTORE_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist bullseye
tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB} | \
ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg,pbs-client" --dist buster
tar cf - ${RESTORE_DEB} ${RESTORE_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist buster

debian/changelog

@ -1,352 +1,4 @@
rust-proxmox-backup (2.1.2-1) bullseye; urgency=medium
* docs: backup-client: fix wrong reference
* docs: remotes: note that protected flags will not be synced
* sync job: correctly apply rate limit
-- Proxmox Support Team <support@proxmox.com> Tue, 23 Nov 2021 13:56:15 +0100
rust-proxmox-backup (2.1.1-2) bullseye; urgency=medium
* docs: update and add traffic control related screenshots
* docs: mention traffic control (bandwidth limits) for sync jobs
-- Proxmox Support Team <support@proxmox.com> Mon, 22 Nov 2021 16:07:39 +0100
rust-proxmox-backup (2.1.1-1) bullseye; urgency=medium
* fix proxmox-backup-manager sync-job list
* ui, api: sync-job: allow one to configure a rate limit
* api: snapshot list: set default for 'protected' flag
* ui: datastore content: rework rendering protection state
* docs: update traffic control docs (use HumanBytes)
* ui: traffic-control: include ipv6 in 'all' networks
* ui: traffic-control edit: add spaces between networks for more
readability
* tape: fix passing-through key-fingerprint
* avoid a bogus error regarding logrotate-path due to a reversed check
-- Proxmox Support Team <support@proxmox.com> Mon, 22 Nov 2021 12:24:31 +0100
rust-proxmox-backup (2.1.0-1) bullseye; urgency=medium
* rest server: make successful-ticket auth log a debug one to avoid
syslog spam
* traffic-controls: add API/CLI to show current traffic
* docs: add traffic control section
* ui: use TFA widgets from widget toolkit
* sync: allow pulling groups selectively
* fix #3533: tape backup: filter groups according to config
* proxmox-tape: add missing notify-user option to backup command
* openid: allow arbitrary username-claims
* openid: support configuring the prompt, scopes and ACR values
* use human-byte for traffic-control rate-in/out and burst-in/out config
* ui: add traffic control view and editor
-- Proxmox Support Team <support@proxmox.com> Sat, 20 Nov 2021 22:44:07 +0100
rust-proxmox-backup (2.0.14-1) bullseye; urgency=medium
* fix directory permission problems
* add traffic control configuration config with API
* proxmox-backup-proxy: implement traffic control
* proxmox-backup-client: add rate/burst parameter to backup/restore CLI
* openid_login: verify that firstname, lastname and email fit our
schema definitions
* docs: add info about protection flag to client docs
* fix #3602: ui: datastore/Content: add action to set protection status
* ui: add protected icon to snapshot (if they are protected)
* ui: PruneInputPanel: add keepReason 'protected' for protected backups
* proxmox-backup-client: add 'protected' commands
* acme: interpret no TOS as accepted
* acme: new_account: prevent replacing existing accounts
-- Proxmox Support Team <support@proxmox.com> Fri, 12 Nov 2021 08:04:55 +0100
rust-proxmox-backup (2.0.13-1) bullseye; urgency=medium
* tape: simplify export_media_set for pool writer
* tape: improve export_media error message for not found tape
* rest-server: use hashmap for parameter errors
* proxmox-rrd: use new file format with higher resolution
* proxmox-rrd: use a journal to reduce amount of bytes written
* use new fsync parameter to replace_file and atomic_open_or_create
* docs: language and formatting fixup
* docs: Update for new features/functionality
-- Proxmox Support Team <support@proxmox.com> Thu, 21 Oct 2021 08:17:00 +0200
rust-proxmox-backup (2.0.12-1) bullseye; urgency=medium
* proxmox-backup-proxy: clean up old tasks when their reference was rotated
out of the task-log index
* api daemons: fix sending log-reopen command
-- Proxmox Support Team <support@proxmox.com> Tue, 19 Oct 2021 10:48:28 +0200
rust-proxmox-backup (2.0.11-1) bullseye; urgency=medium
* drop artificial limits for task-UPID length
* tools: smart: only throw error for the fatal usage errors of smartctl
* api: improve returning errors for extjs formatter
* proxmox-rest-server: improve logging
* subscription: switch verification domain over to shop.proxmox.com
* rest-server/daemon: use new sd_notify_barrier helper for handling
synchronization with systemd on service reloading
* ui: datastore/Content: add empty text for no snapshots
* ui: datastore/Content: move first store-load into activate listener to
ensure we've a proper loading mask for better UX
-- Proxmox Support Team <support@proxmox.com> Tue, 05 Oct 2021 16:34:14 +0200
rust-proxmox-backup (2.0.10-1) bullseye; urgency=medium
* ui: fix order of prune keep reasons
* server: add proxmox-backup-debug binary with chunk/file inspection, an API
shell with completion support
restructured code base to reduce linkage and library ABI version
constraints for all non-server binaries (client, pxar, file-restore)
* zsh: fix passing parameters in auto-completion scripts
* tape: also add 'force-media-set' to available CLI options
* api: nodes: add missing node list (index) api endpoint
* docs: proxmox-backup-debug: add info about the new 'api' subcommand
* docs/technical-overview: add troubleshooting section
-- Proxmox Support Team <support@proxmox.com> Tue, 21 Sep 2021 14:00:48 +0200
rust-proxmox-backup (2.0.9-2) bullseye; urgency=medium
* tape backup: mention groups that were empty
* tape: compute next-media-label for each tape backup job
* tape: lto: increase default timeout to 10 minutes
* ui: display next-media-label for tape backup jobs
* cli: proxmox-tape backup-job list: use status api and display next-run
and next-media-label
-- Proxmox Support Team <support@proxmox.com> Tue, 24 Aug 2021 14:44:12 +0200
rust-proxmox-backup (2.0.8-1) bullseye; urgency=medium
* use proxmox-apt to 0.6
* api: apt: adapt to proxmox-apt back-end changes
* api/ui: allow zstd compression for new zpools
* tape: media_catalog: add snapshot list cache for catalog
* api2: tape: media: use MediaCatalog::snapshot_list for content listing
* tape: lock media_catalog file to get a consistent view with load_catalog
* tape: changer: handle libraries that send a wrong amount of data
* tape: changer: remove unnecessary inquiry parameter
* api2: tape/restore: commit temporary catalog at the end
* docs: tape: add instructions on how to restore the catalog
* ui: tape/ChangerStatus: improve layout for large libraries
* tape: changer: handle invalid descriptor data from library in status page
* datastore config: cleanup code (use flatten attribute)
-- Proxmox Support Team <support@proxmox.com> Mon, 02 Aug 2021 10:34:55 +0200
rust-proxmox-backup (2.0.7-1) bullseye; urgency=medium
* tape changer: better cope with models that are not following spec
proposals when returning the status page
* tape changer: make DVCID information optional, not all devices return it
* restore daemon: setup the 'backup' system user and group in the minimal
restore environment, as we like to ensure that all state files are owned
by them.
-- Proxmox Support Team <support@proxmox.com> Fri, 23 Jul 2021 08:43:51 +0200
rust-proxmox-backup (2.0.6-1) bullseye; urgency=medium
* increase maximum drives per changer to 255
* allow one to pass a secret not only directly through the environment value,
but also indirectly through a file path, an open file descriptor or a
command that can write the secret to standard out.
* pull in new proxmox library version to improve the file system
compatibility on creation of atomic files, e.g., lock files.
-- Proxmox Support Team <support@proxmox.com> Thu, 22 Jul 2021 10:22:19 +0200
rust-proxmox-backup (2.0.5-2) bullseye; urgency=medium
* ui: tape: backup overview: increase timeout for media-set content
* tape: changer: always retry until timeout
* file-restore: increase lock timeout on QEMU map
* fix #3515: file-restore-daemon: allow LVs/PVs with dash in name
* fix #3526: correctly filter tasks with 'since' and 'until'
* tape: changer: make scsi request for DVCID a separate one, as some
libraries cannot handle requesting that combined with volume tags in one
go
* api, ui: datastore: add new 'prune-datastore' api call and expose it with
a 'Prune All' button
* make creating log files more robust so that they are always owned by the
less privileged `backup` user
-- Proxmox Support Team <support@proxmox.com> Wed, 21 Jul 2021 09:12:39 +0200
rust-proxmox-backup (2.0.4-1) bullseye; urgency=medium
* change tape drive lock path to avoid issues with sticky bit on tmpfs
mountpoint
* tape: changer: query transport-element types separately
* auth: improve thread safety of 'crypt' C-library
-- Proxmox Support Team <support@proxmox.com> Mon, 12 Jul 2021 18:51:21 +0200
rust-proxmox-backup (2.0.3-1) bullseye; urgency=medium
* api: apt: add repositories info and update calls
* ui: administration: add APT repositories status and update panel
* api: access domains: add get/create/update/delete endpoints for realms
* ui: access control: add 'Realm' tab for adding and editing OpenID Connect
identity provider
* fix #3447: ui: Dashboard: disallow selection of datastore statistics row
* ui: tapeRestore: make window non-resizable
* ui: dashboard: rework resource-load panel to a more detailed status panel,
showing, among other things, uptime, Kernel version, CPU info and
repository status.
* ui: administration/dashboard: auto-scale columns count and add
browser-local setting to override that to a fixed value of columns.
* fix #3212: api, ui: add support for notes on backup groups
-- Proxmox Support Team <support@proxmox.com> Mon, 12 Jul 2021 08:07:41 +0200
rust-proxmox-backup (2.0.2-1) bullseye; urgency=medium
* ui: use task list component from widget toolkit
* api: add keep-job-configs flag to datastore remove endpoint
* api: config: delete datastore: also remove tape backup jobs
* ui: tape restore: mark datastore selector as 'not a form field' to fix
compatibility with ExtJS 7.0
* ui: datastore removal: only navigate away when the user actually confirmed
the removal of that datastore
-- Proxmox Support Team <support@proxmox.com> Thu, 08 Jul 2021 14:44:12 +0200
rust-proxmox-backup (2.0.1-2) bullseye; urgency=medium
* file restore daemon: log basic startup steps
* REST-API: set error message extension for bad-request response log to
ensure the actual error is logged in any (access) log, making debugging
such issues easier
* restore daemon: create /run/proxmox-backup on startup as there's now some
runtime state saved there, which failed all API requests to the restore
daemon otherwise
* restore daemon: use millisecond log resolution
* fix #3496: acme: plugin: actually sleep after setting the TXT record,
ensuring DNS propagation of that record. This makes it catch up with the
docs/web-interface, where the option was already available.
* docs: initial update to repositories for bullseye
-- Proxmox Support Team <support@proxmox.com> Sat, 03 Jul 2021 23:14:49 +0200
rust-proxmox-backup (2.0.0-2) bullseye; urgency=medium
* file-restore-daemon/disk: add LVM (thin) support
-- Proxmox Support Team <support@proxmox.com> Sat, 03 Jul 2021 02:15:16 +0200
rust-proxmox-backup (2.0.0-1) bullseye; urgency=medium
* initial bump for Debian 11 Bullseye / Proxmox Backup Server 2.0
rust-proxmox-backup (1.1.10-1) buster; urgency=medium
* ui: datastore list summary: catch and show errors per datastore
@ -363,7 +15,7 @@ rust-proxmox-backup (2.0.0-1) bullseye; urgency=medium
* ui: datastore options: add remove button to drop a datastore from the
configuration, without removing any actual data
* ui: tape: drive selector: do not auto select the drive
* ui: tape: drive selector: do not autoselect the drive
* ui: tape: backup job: use correct default value for pbsUserSelector
@ -372,22 +24,7 @@ rust-proxmox-backup (2.0.0-1) bullseye; urgency=medium
* backup: add helpers for async last recently used (LRU) caches for chunk
and index reading of backup snapshot
* fix #3459: manager: add --ignore-verified and --outdated-after parameters
* proxmox-backup-manager: show task log on datastore create
* tape: snapshot reader: read chunks sorted by inode (per index) to improve
sequential reads when backing up data from slow spinning disks to tape.
* file-restore: support ZFS pools
* improve fix for #3393: pxar create: try to read xattrs/fcaps/acls by default
* fix compatibility with ExtJS 7.0
* docs: build api-viewer from widget-toolkit-dev
-- Proxmox Support Team <support@proxmox.com> Mon, 28 Jun 2021 19:35:40 +0200
-- Proxmox Support Team <support@proxmox.com> Wed, 16 Jun 2021 09:46:15 +0200
rust-proxmox-backup (1.1.9-1) stable; urgency=medium

debian/control

@ -1,17 +1,16 @@
Source: rust-proxmox-backup
Section: admin
Priority: optional
Build-Depends: debhelper (>= 12),
dh-cargo (>= 24),
Build-Depends: debhelper (>= 11),
dh-cargo (>= 18),
cargo:native,
rustc:native,
libstd-rust-dev,
librust-anyhow-1+default-dev,
librust-apt-pkg-native-0.3+default-dev (>= 0.3.2-~~),
librust-base64-0.13+default-dev,
librust-base64-0.12+default-dev,
librust-bitflags-1+default-dev (>= 1.2.1-~~),
librust-bytes-1+default-dev,
librust-cidr-0.2+default-dev (>= 0.2.1-~~),
librust-crc32fast-1+default-dev,
librust-crossbeam-channel-0.5+default-dev,
librust-endian-trait-0.6+arrays-dev,
@ -23,10 +22,9 @@ Build-Depends: debhelper (>= 12),
librust-h2-0.3+default-dev,
librust-h2-0.3+stream-dev,
librust-handlebars-3+default-dev,
librust-hex-0.4+default-dev (>= 0.4.3-~~),
librust-http-0.2+default-dev,
librust-hyper-0.14+default-dev (>= 0.14.5-~~),
librust-hyper-0.14+full-dev (>= 0.14.5-~~),
librust-hyper-0.14+default-dev,
librust-hyper-0.14+full-dev,
librust-lazy-static-1+default-dev (>= 1.4-~~),
librust-libc-0.2+default-dev,
librust-log-0.4+default-dev,
@ -39,44 +37,23 @@ Build-Depends: debhelper (>= 12),
librust-pam-sys-0.5+default-dev,
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
librust-percent-encoding-2+default-dev (>= 2.1-~~),
librust-pin-project-lite-0.2+default-dev,
librust-proxmox-0.15+default-dev (>= 0.15.3-~~),
librust-proxmox-0.15+sortable-macro-dev (>= 0.15.3-~~),
librust-proxmox-0.15+tokio-dev (>= 0.15.3-~~),
librust-proxmox-acme-rs-0.3+default-dev,
librust-proxmox-apt-0.8+default-dev,
librust-proxmox-async-0.2+default-dev,
librust-proxmox-borrow-1+default-dev,
librust-pin-project-1+default-dev,
librust-pin-utils-0.1+default-dev,
librust-proxmox-0.11+api-macro-dev (>= 0.11.5-~~),
librust-proxmox-0.11+default-dev (>= 0.11.5-~~),
librust-proxmox-0.11+sortable-macro-dev (>= 0.11.5-~~),
librust-proxmox-acme-rs-0.2+default-dev (>= 0.2.1-~~),
librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
librust-proxmox-http-0.5+client-dev (>= 0.5.4-~~),
librust-proxmox-http-0.5+default-dev (>= 0.5.4-~~),
librust-proxmox-http-0.5+http-helpers-dev (>= 0.5.4-~~),
librust-proxmox-http-0.5+websocket-dev (>= 0.5.4-~~),
librust-proxmox-io-1+default-dev,
librust-proxmox-io-1+tokio-dev,
librust-proxmox-lang-1+default-dev,
librust-proxmox-openid-0.9+default-dev,
librust-proxmox-router-1+cli-dev (>= 1.1-~~),
librust-proxmox-router-1+default-dev (>= 1.1-~~),
librust-proxmox-schema-1+api-macro-dev (>= 1.0.1-~~),
librust-proxmox-schema-1+default-dev (>= 1.0.1-~~),
librust-proxmox-schema-1+upid-api-impl-dev (>= 1.0.1-~~),
librust-proxmox-section-config-1+default-dev,
librust-proxmox-shared-memory-0.1+default-dev (>= 0.1.1-~~),
librust-proxmox-sys-0.1+default-dev (>= 0.1.2-~~),
librust-proxmox-tfa-1+api-dev (>= 1.3-~~),
librust-proxmox-tfa-1+api-types-dev (>= 1.3-~~),
librust-proxmox-tfa-1+default-dev (>= 1.3-~~),
librust-proxmox-time-1+default-dev (>= 1.1-~~),
librust-proxmox-uuid-1+default-dev,
librust-proxmox-uuid-1+serde-dev,
librust-proxmox-http-0.2+client-dev (>= 0.2.1-~~),
librust-proxmox-http-0.2+default-dev (>= 0.2.1-~~),
librust-proxmox-http-0.2+http-helpers-dev (>= 0.2.1-~~),
librust-proxmox-http-0.2+websocket-dev (>= 0.2.1-~~),
librust-pxar-0.10+default-dev (>= 0.10.1-~~),
librust-pxar-0.10+tokio-io-dev (>= 0.10.1-~~),
librust-regex-1+default-dev (>= 1.2-~~),
librust-rustyline-7+default-dev,
librust-serde-1+default-dev,
librust-serde-1+derive-dev,
librust-serde-cbor-0.11+default-dev (>= 0.11.1-~~),
librust-serde-json-1+default-dev,
librust-siphasher-0.3+default-dev,
librust-syslog-4+default-dev,
@ -92,7 +69,6 @@ Build-Depends: debhelper (>= 12),
librust-tokio-1+rt-dev (>= 1.6-~~),
librust-tokio-1+rt-multi-thread-dev (>= 1.6-~~),
librust-tokio-1+signal-dev (>= 1.6-~~),
librust-tokio-1+sync-dev (>= 1.6-~~),
librust-tokio-1+time-dev (>= 1.6-~~),
librust-tokio-openssl-0.6+default-dev (>= 0.6.1-~~),
librust-tokio-stream-0.1+default-dev,
@ -100,15 +76,16 @@ Build-Depends: debhelper (>= 12),
librust-tokio-util-0.6+default-dev,
librust-tokio-util-0.6+io-dev,
librust-tower-service-0.3+default-dev,
librust-udev-0.4+default-dev,
librust-udev-0.4+default-dev | librust-udev-0.3+default-dev,
librust-url-2+default-dev (>= 2.1-~~),
librust-walkdir-2+default-dev,
librust-webauthn-rs-0.2+default-dev (>= 0.2.5-~~),
librust-xdg-2+default-dev (>= 2.2-~~),
librust-zstd-0.6+bindgen-dev,
librust-zstd-0.6+default-dev,
librust-zstd-0.4+bindgen-dev,
librust-zstd-0.4+default-dev,
libacl1-dev,
libfuse3-dev,
libsystemd-dev (>= 246-~~),
libsystemd-dev,
uuid-dev,
libsgutils2-dev,
bash-completion,
@ -119,7 +96,6 @@ Build-Depends: debhelper (>= 12),
graphviz <!nodoc>,
latexmk <!nodoc>,
patchelf,
proxmox-widget-toolkit-dev <!nodoc>,
pve-eslint (>= 7.18.0-1),
python3-docutils,
python3-pygments,
@ -130,16 +106,15 @@ Build-Depends: debhelper (>= 12),
texlive-xetex <!nodoc>,
xindy <!nodoc>
Maintainer: Proxmox Support Team <support@proxmox.com>
Standards-Version: 4.5.1
Standards-Version: 4.4.1
Vcs-Git: git://git.proxmox.com/git/proxmox-backup.git
Vcs-Browser: https://git.proxmox.com/?p=proxmox-backup.git;a=summary
Homepage: https://www.proxmox.com
Rules-Requires-Root: binary-targets
Package: proxmox-backup-server
Architecture: any
Depends: fonts-font-awesome,
libjs-extjs (>= 7~),
libjs-extjs (>= 6.0.1),
libjs-qrcodejs (>= 1.20201119),
libproxmox-acme-plugins,
libsgutils2-2,
@ -150,7 +125,7 @@ Depends: fonts-font-awesome,
postfix | mail-transport-agent,
proxmox-backup-docs,
proxmox-mini-journalreader,
proxmox-widget-toolkit (>= 3.4-3),
proxmox-widget-toolkit (>= 2.5-6),
pve-xtermjs (>= 4.7.0-1),
sg3-utils,
smartmontools,
@ -174,8 +149,7 @@ Description: Proxmox Backup Client tools
Package: proxmox-backup-docs
Build-Profiles: <!nodoc>
Section: doc
Depends: fonts-font-awesome,
libjs-extjs,
Depends: libjs-extjs,
libjs-mathjax,
${misc:Depends},
Architecture: all
@ -188,7 +162,6 @@ Depends: ${misc:Depends},
${shlibs:Depends},
Recommends: pve-qemu-kvm (>= 5.0.0-9),
proxmox-backup-restore-image,
Breaks: proxmox-backup-restore-image (<< 0.3.1)
Description: Proxmox Backup single file restore tools for pxar and block device backups
This package contains the Proxmox Backup single file restore client for
restoring individual files and folders from both host/container and VM/block

debian/control.in

@ -0,0 +1,55 @@
Package: proxmox-backup-server
Architecture: any
Depends: fonts-font-awesome,
libjs-extjs (>= 6.0.1),
libjs-qrcodejs (>= 1.20201119),
libproxmox-acme-plugins,
libsgutils2-2,
libzstd1 (>= 1.3.8),
lvm2,
openssh-server,
pbs-i18n,
postfix | mail-transport-agent,
proxmox-backup-docs,
proxmox-mini-journalreader,
proxmox-widget-toolkit (>= 2.5-6),
pve-xtermjs (>= 4.7.0-1),
sg3-utils,
smartmontools,
${misc:Depends},
${shlibs:Depends},
Recommends: zfsutils-linux,
ifupdown2,
Description: Proxmox Backup Server daemon with tools and GUI
This package contains the Proxmox Backup Server daemons and related
tools. This includes a web-based graphical user interface.
Package: proxmox-backup-client
Architecture: any
Depends: qrencode,
${misc:Depends},
${shlibs:Depends},
Description: Proxmox Backup Client tools
This package contains the Proxmox Backup client, which provides a
simple command line tool to create and restore backups.
Package: proxmox-backup-docs
Build-Profiles: <!nodoc>
Section: doc
Depends: libjs-extjs,
libjs-mathjax,
${misc:Depends},
Architecture: all
Description: Proxmox Backup Documentation
This package contains the Proxmox Backup Documentation files.
Package: proxmox-backup-file-restore
Architecture: any
Depends: ${misc:Depends},
${shlibs:Depends},
Recommends: pve-qemu-kvm (>= 5.0.0-9),
proxmox-backup-restore-image,
Description: Proxmox Backup single file restore tools for pxar and block device backups
This package contains the Proxmox Backup single file restore client for
restoring individual files and folders from both host/container and VM/block
device backups. It includes a block device restore driver using QEMU.

debian/debcargo.toml

@ -0,0 +1,42 @@
overlay = "."
crate_src_path = ".."
whitelist = ["tests/*.c"]
maintainer = "Proxmox Support Team <support@proxmox.com>"
[source]
vcs_git = "git://git.proxmox.com/git/proxmox-backup.git"
vcs_browser = "https://git.proxmox.com/?p=proxmox-backup.git;a=summary"
section = "admin"
build_depends = [
"bash-completion",
"debhelper (>= 12~)",
"fonts-dejavu-core <!nodoc>",
"fonts-lato <!nodoc>",
"fonts-open-sans <!nodoc>",
"graphviz <!nodoc>",
"latexmk <!nodoc>",
"patchelf",
"pve-eslint (>= 7.18.0-1)",
"python3-docutils",
"python3-pygments",
"python3-sphinx <!nodoc>",
"rsync",
"texlive-fonts-extra <!nodoc>",
"texlive-fonts-recommended <!nodoc>",
"texlive-xetex <!nodoc>",
"xindy <!nodoc>",
]
build_depends_excludes = [
"debhelper (>=11)",
]
[packages.lib]
depends = [
"libacl1-dev",
"libfuse3-dev",
"libsystemd-dev",
"uuid-dev",
"libsgutils2-dev",
]

debian/postinst

@ -4,14 +4,6 @@ set -e
#DEBHELPER#
update_sync_job() {
job="$1"
echo "Updating sync job '$job' to make old 'remove-vanished' default explicit.."
proxmox-backup-manager sync-job update "$job" --remove-vanished true \
|| echo "Failed, please check sync.cfg manually!"
}
case "$1" in
configure)
# need to have user backup in the tape group
@ -34,35 +26,48 @@ case "$1" in
fi
deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
# FIXME: Remove with 1.1
if test -n "$2"; then
if dpkg --compare-versions "$2" 'lt' '0.9.4-1'; then
if grep -s -q -P -e '^\s+verify-schedule ' /etc/proxmox-backup/datastore.cfg; then
echo "NOTE: drop all verify schedules from datastore config."
echo "You can now add more flexible verify jobs"
flock -w 30 /etc/proxmox-backup/.datastore.lck \
sed -i '/^\s\+verify-schedule /d' /etc/proxmox-backup/datastore.cfg || true
fi
fi
if dpkg --compare-versions "$2" 'le' '0.9.5-1'; then
chown --quiet backup:backup /var/log/proxmox-backup/api/auth.log || true
fi
if dpkg --compare-versions "$2" 'le' '0.9.7-1'; then
if [ -e /etc/proxmox-backup/remote.cfg ]; then
echo "NOTE: Switching over remote.cfg to new field names.."
flock -w 30 /etc/proxmox-backup/.remote.lck \
sed -i \
-e 's/^\s\+userid /\tauth-id /g' \
/etc/proxmox-backup/remote.cfg || true
fi
fi
if dpkg --compare-versions "$2" 'le' '1.0.14-1'; then
# FIXME: Remove with 2.0
if grep -s -q -P -e '^linux:' /etc/proxmox-backup/tape.cfg; then
echo "========="
echo "= NOTE: You have now unsupported 'linux' tape drives configured."
echo "= * Execute 'udevadm control --reload-rules && udevadm trigger' to update /dev"
echo "= * Edit '/etc/proxmox-backup/tape.cfg', remove 'linux' entries and re-add over CLI/GUI"
echo "========="
fi
fi
# FIXME: remove with 2.0
if [ -d "/var/lib/proxmox-backup/tape" ] &&
[ "$(stat --printf '%a' '/var/lib/proxmox-backup/tape')" != "750" ]; then
chmod 0750 /var/lib/proxmox-backup/tape || true
fi
# FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
echo "Fixing up termproxy user id in task log..."
flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active || true
fi
if dpkg --compare-versions "$2" 'lt' '7.1-1' && test -e /etc/proxmox-backup/sync.cfg; then
prev_job=""
# read from HERE doc because POSIX sh limitations
while read -r key value; do
if test "$key" = "sync:"; then
if test -n "$prev_job"; then
# previous job doesn't have an explicit value
update_sync_job "$prev_job"
fi
prev_job=$value
else
prev_job=""
fi
done <<EOF
$(grep -e '^sync:' -e 'remove-vanished' /etc/proxmox-backup/sync.cfg)
EOF
if test -n "$prev_job"; then
# last job doesn't have an explicit value
update_sync_job "$prev_job"
fi
fi
fi
;;
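For context, a sketch of the grep output that the while-read loop above consumes (job names are illustrative): a "sync:" line followed directly by another "sync:" line, or by the end of input, marks a job without an explicit remove-vanished setting, and only those jobs get updated:

    sync: job-with-old-default
    sync: job-with-explicit-value
        remove-vanished false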

debian/proxmox-backup-debug.bc

@ -1,8 +0,0 @@
# proxmox-backup-debug bash completion
# see http://tiswww.case.edu/php/chet/bash/FAQ
# and __ltrim_colon_completions() in /usr/share/bash-completion/bash_completion
# this modifies global var, but I found no better way
COMP_WORDBREAKS=${COMP_WORDBREAKS//:}
complete -C 'proxmox-backup-debug bashcomplete' proxmox-backup-debug

debian/proxmox-backup-docs.links

@ -1,6 +1,5 @@
/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/html/proxmox-backup.pdf
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/prune-simulator/extjs
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/lto-barcode/extjs
/usr/share/fonts-font-awesome/ /usr/share/doc/proxmox-backup/html/lto-barcode/font-awesome
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/api-viewer/extjs
/usr/share/javascript/mathjax /usr/share/doc/proxmox-backup/html/_static/mathjax

debian/proxmox-backup-server.bash-completion

@ -1,5 +1,4 @@
debian/proxmox-backup-manager.bc proxmox-backup-manager
debian/proxmox-backup-debug.bc proxmox-backup-debug
debian/proxmox-tape.bc proxmox-tape
debian/pmtx.bc pmtx
debian/pmt.bc pmt

debian/proxmox-backup-server.install

@ -9,7 +9,6 @@ usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-daily-update
usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd
usr/sbin/proxmox-backup-debug
usr/sbin/proxmox-backup-manager
usr/bin/pmtx
usr/bin/pmt
@ -18,7 +17,6 @@ usr/share/javascript/proxmox-backup/index.hbs
usr/share/javascript/proxmox-backup/css/ext6-pbs.css
usr/share/javascript/proxmox-backup/images
usr/share/javascript/proxmox-backup/js/proxmox-backup-gui.js
usr/share/man/man1/proxmox-backup-debug.1
usr/share/man/man1/proxmox-backup-manager.1
usr/share/man/man1/proxmox-backup-proxy.1
usr/share/man/man1/proxmox-tape.1
@ -33,7 +31,6 @@ usr/share/man/man5/verification.cfg.5
usr/share/man/man5/media-pool.cfg.5
usr/share/man/man5/tape.cfg.5
usr/share/man/man5/tape-job.cfg.5
usr/share/zsh/vendor-completions/_proxmox-backup-debug
usr/share/zsh/vendor-completions/_proxmox-backup-manager
usr/share/zsh/vendor-completions/_proxmox-tape
usr/share/zsh/vendor-completions/_pmtx

debian/rules

@ -32,9 +32,6 @@ override_dh_auto_build:
override_dh_missing:
dh_missing --fail-missing
override_dh_auto_test:
# ignore here to avoid rebuilding the binaries with the wrong target
override_dh_auto_install:
dh_auto_install -- \
PROXY_USER=backup \
@ -48,6 +45,11 @@ override_dh_installsystemd:
override_dh_fixperms:
dh_fixperms --exclude sg-tape-cmd
# workaround https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=933541
# TODO: remove once available (Debian 11 ?)
override_dh_dwz:
dh_dwz --no-dwz-multifile
override_dh_strip:
dh_strip
for exe in $$(find \

docs/Makefile

@ -5,7 +5,6 @@ GENERATED_SYNOPSIS := \
proxmox-backup-client/synopsis.rst \
proxmox-backup-client/catalog-shell-synopsis.rst \
proxmox-backup-manager/synopsis.rst \
proxmox-backup-debug/synopsis.rst \
proxmox-file-restore/synopsis.rst \
pxar/synopsis.rst \
pmtx/synopsis.rst \
@ -28,8 +27,7 @@ MAN1_PAGES := \
proxmox-backup-proxy.1 \
proxmox-backup-client.1 \
proxmox-backup-manager.1 \
proxmox-file-restore.1 \
proxmox-backup-debug.1
proxmox-file-restore.1
MAN5_PAGES := \
media-pool.cfg.5 \
@ -48,35 +46,23 @@ PRUNE_SIMULATOR_FILES := \
prune-simulator/clear-trigger.png \
prune-simulator/prune-simulator.js
PRUNE_SIMULATOR_JS_SOURCE := \
/usr/share/javascript/proxmox-widget-toolkit-dev/Toolkit.js \
prune-simulator/prune-simulator_source.js
LTO_BARCODE_JS_SOURCE := \
/usr/share/javascript/proxmox-widget-toolkit-dev/Toolkit.js \
lto-barcode/code39.js \
lto-barcode/prefix-field.js \
lto-barcode/label-style.js \
lto-barcode/tape-type.js \
lto-barcode/paper-size.js \
lto-barcode/page-layout.js \
lto-barcode/page-calibration.js \
lto-barcode/label-list.js \
lto-barcode/label-setup.js \
lto-barcode/lto-barcode.js
LTO_BARCODE_FILES := \
lto-barcode/index.html \
lto-barcode/lto-barcode-generator.js
lto-barcode/code39.js \
lto-barcode/prefix-field.js \
lto-barcode/label-style.js \
lto-barcode/tape-type.js \
lto-barcode/paper-size.js \
lto-barcode/page-layout.js \
lto-barcode/page-calibration.js \
lto-barcode/label-list.js \
lto-barcode/label-setup.js \
lto-barcode/lto-barcode.js
API_VIEWER_SOURCES= \
api-viewer/index.html \
api-viewer/apidoc.js
API_VIEWER_FILES := \
api-viewer/apidata.js \
/usr/share/javascript/proxmox-widget-toolkit-dev/APIViewer.js \
# Sphinx documentation setup
SPHINXOPTS =
SPHINXBUILD = sphinx-build
@ -201,12 +187,6 @@ proxmox-file-restore/synopsis.rst: ${COMPILEDIR}/proxmox-file-restore
proxmox-file-restore.1: proxmox-file-restore/man1.rst proxmox-file-restore/description.rst proxmox-file-restore/synopsis.rst
rst2man $< >$@
proxmox-backup-debug/synopsis.rst: ${COMPILEDIR}/proxmox-backup-debug
${COMPILEDIR}/proxmox-backup-debug printdoc > proxmox-backup-debug/synopsis.rst
proxmox-backup-debug.1: proxmox-backup-debug/man1.rst proxmox-backup-debug/description.rst proxmox-backup-debug/synopsis.rst
rst2man $< >$@
.PHONY: onlinehelpinfo
onlinehelpinfo:
@echo "Generating OnlineHelpInfo.js..."
@ -216,17 +196,8 @@ onlinehelpinfo:
api-viewer/apidata.js: ${COMPILEDIR}/docgen
${COMPILEDIR}/docgen apidata.js >$@
api-viewer/apidoc.js: ${API_VIEWER_FILES}
cat ${API_VIEWER_FILES} >$@.tmp
mv $@.tmp $@
prune-simulator/prune-simulator.js: ${PRUNE_SIMULATOR_JS_SOURCE}
cat ${PRUNE_SIMULATOR_JS_SOURCE} >$@.tmp
mv $@.tmp $@
lto-barcode/lto-barcode-generator.js: ${LTO_BARCODE_JS_SOURCE}
cat ${LTO_BARCODE_JS_SOURCE} >$@.tmp
mv $@.tmp $@
api-viewer/apidoc.js: api-viewer/apidata.js api-viewer/PBSAPI.js
cat api-viewer/apidata.js api-viewer/PBSAPI.js >$@
.PHONY: html
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES} ${API_VIEWER_SOURCES}
@ -257,7 +228,6 @@ epub3: ${GENERATED_SYNOPSIS}
clean:
rm -r -f *~ *.1 ${BUILDDIR} ${GENERATED_SYNOPSIS} api-viewer/apidata.js
rm -f api-viewer/apidoc.js lto-barcode/lto-barcode-generator.js prune-simulator/prune-simulator.js
install_manual_pages: ${MAN1_PAGES} ${MAN5_PAGES}

docs/api-viewer/PBSAPI.js

@ -0,0 +1,526 @@
// avoid errors when running without development tools
if (!Ext.isDefined(Ext.global.console)) {
var console = {
dir: function() {},
log: function() {}
};
}
Ext.onReady(function() {
Ext.define('pve-param-schema', {
extend: 'Ext.data.Model',
fields: [
'name', 'type', 'typetext', 'description', 'verbose_description',
'enum', 'minimum', 'maximum', 'minLength', 'maxLength',
'pattern', 'title', 'requires', 'format', 'default',
'disallow', 'extends', 'links',
{
name: 'optional',
type: 'boolean'
}
]
});
var store = Ext.define('pve-updated-treestore', {
extend: 'Ext.data.TreeStore',
model: Ext.define('pve-api-doc', {
extend: 'Ext.data.Model',
fields: [
'path', 'info', 'text',
]
}),
proxy: {
type: 'memory',
data: pbsapi
},
sorters: [{
property: 'leaf',
direction: 'ASC'
}, {
property: 'text',
direction: 'ASC'
}],
filterer: 'bottomup',
doFilter: function(node) {
this.filterNodes(node, this.getFilters().getFilterFn(), true);
},
filterNodes: function(node, filterFn, parentVisible) {
var me = this,
bottomUpFiltering = me.filterer === 'bottomup',
match = filterFn(node) && parentVisible || (node.isRoot() && !me.getRootVisible()),
childNodes = node.childNodes,
len = childNodes && childNodes.length, i, matchingChildren;
if (len) {
for (i = 0; i < len; ++i) {
matchingChildren = me.filterNodes(childNodes[i], filterFn, match || bottomUpFiltering) || matchingChildren;
}
if (bottomUpFiltering) {
match = matchingChildren || match;
}
}
node.set("visible", match, me._silentOptions);
return match;
},
}).create();
var render_description = function(value, metaData, record) {
var pdef = record.data;
value = pdef.verbose_description || value;
// TODO: try to render asciidoc correctly
metaData.style = 'white-space:pre-wrap;'
return Ext.htmlEncode(value);
};
var render_type = function(value, metaData, record) {
var pdef = record.data;
return pdef['enum'] ? 'enum' : (pdef.type || 'string');
};
let render_simple_format = function(pdef, type_fallback) {
if (pdef.typetext)
return pdef.typetext;
if (pdef['enum'])
return pdef['enum'].join(' | ');
if (pdef.format)
return pdef.format;
if (pdef.pattern)
return pdef.pattern;
if (pdef.type === 'boolean')
return `<true|false>`;
if (type_fallback && pdef.type)
return `<${pdef.type}>`;
return;
};
let render_format = function(value, metaData, record) {
let pdef = record.data;
metaData.style = 'white-space:normal;'
if (pdef.type === 'array' && pdef.items) {
let format = render_simple_format(pdef.items, true);
return `[${Ext.htmlEncode(format)}, ...]`;
}
return Ext.htmlEncode(render_simple_format(pdef) || '');
};
var real_path = function(path) {
return path.replace(/^.*\/_upgrade_(\/)?/, "/");
};
var permission_text = function(permission) {
let permhtml = "";
if (permission.user) {
if (!permission.description) {
if (permission.user === 'world') {
permhtml += "Accessible without any authentication.";
} else if (permission.user === 'all') {
permhtml += "Accessible by all authenticated users.";
} else {
permhtml += 'Onyl accessible by user "' +
permission.user + '"';
}
}
} else if (permission.check) {
permhtml += "<pre>Check: " +
Ext.htmlEncode(Ext.JSON.encode(permission.check)) + "</pre>";
} else if (permission.userParam) {
permhtml += `<div>Check if user matches parameter '${permission.userParam}'`;
} else if (permission.or) {
permhtml += "<div>Or<div style='padding-left: 10px;'>";
Ext.Array.each(permission.or, function(sub_permission) {
permhtml += permission_text(sub_permission);
})
permhtml += "</div></div>";
} else if (permission.and) {
permhtml += "<div>And<div style='padding-left: 10px;'>";
Ext.Array.each(permission.and, function(sub_permission) {
permhtml += permission_text(sub_permission);
})
permhtml += "</div></div>";
} else {
//console.log(permission);
permhtml += "Unknown syntax!";
}
return permhtml;
};
var render_docu = function(data) {
var md = data.info;
// console.dir(data);
var items = [];
var clicmdhash = {
GET: 'get',
POST: 'create',
PUT: 'set',
DELETE: 'delete'
};
Ext.Array.each(['GET', 'POST', 'PUT', 'DELETE'], function(method) {
var info = md[method];
if (info) {
var usage = "";
usage += "<table><tr><td>HTTP:&nbsp;&nbsp;&nbsp;</td><td>"
+ method + " " + real_path("/api2/json" + data.path) + "</td></tr>";
var sections = [
{
title: 'Description',
html: Ext.htmlEncode(info.description),
bodyPadding: 10
},
{
title: 'Usage',
html: usage,
bodyPadding: 10
}
];
if (info.parameters && info.parameters.properties) {
var pstore = Ext.create('Ext.data.Store', {
model: 'pve-param-schema',
proxy: {
type: 'memory'
},
groupField: 'optional',
sorters: [
{
property: 'name',
direction: 'ASC'
}
]
});
Ext.Object.each(info.parameters.properties, function(name, pdef) {
pdef.name = name;
pstore.add(pdef);
});
pstore.sort();
var groupingFeature = Ext.create('Ext.grid.feature.Grouping',{
enableGroupingMenu: false,
groupHeaderTpl: '<tpl if="groupValue">Optional</tpl><tpl if="!groupValue">Required</tpl>'
});
sections.push({
xtype: 'gridpanel',
title: 'Parameters',
features: [groupingFeature],
store: pstore,
viewConfig: {
trackOver: false,
stripeRows: true
},
columns: [
{
header: 'Name',
dataIndex: 'name',
flex: 1
},
{
header: 'Type',
dataIndex: 'type',
renderer: render_type,
flex: 1
},
{
header: 'Default',
dataIndex: 'default',
flex: 1
},
{
header: 'Format',
dataIndex: 'type',
renderer: render_format,
flex: 2
},
{
header: 'Description',
dataIndex: 'description',
renderer: render_description,
flex: 6
}
]
});
}
if (info.returns) {
var retinf = info.returns;
var rtype = retinf.type;
if (!rtype && retinf.items)
rtype = 'array';
if (!rtype)
rtype = 'object';
var rpstore = Ext.create('Ext.data.Store', {
model: 'pve-param-schema',
proxy: {
type: 'memory'
},
groupField: 'optional',
sorters: [
{
property: 'name',
direction: 'ASC'
}
]
});
var properties;
if (rtype === 'array' && retinf.items.properties) {
properties = retinf.items.properties;
}
if (rtype === 'object' && retinf.properties) {
properties = retinf.properties;
}
Ext.Object.each(properties, function(name, pdef) {
pdef.name = name;
rpstore.add(pdef);
});
rpstore.sort();
var groupingFeature = Ext.create('Ext.grid.feature.Grouping',{
enableGroupingMenu: false,
groupHeaderTpl: '<tpl if="groupValue">Optional</tpl><tpl if="!groupValue">Obligatory</tpl>'
});
var returnhtml;
if (retinf.items) {
returnhtml = '<pre>items: ' + Ext.htmlEncode(JSON.stringify(retinf.items, null, 4)) + '</pre>';
}
if (retinf.properties) {
returnhtml = returnhtml || '';
returnhtml += '<pre>properties:' + Ext.htmlEncode(JSON.stringify(retinf.properties, null, 4)) + '</pre>';
}
var rawSection = Ext.create('Ext.panel.Panel', {
bodyPadding: '0px 10px 10px 10px',
html: returnhtml,
hidden: true
});
sections.push({
xtype: 'gridpanel',
title: 'Returns: ' + rtype,
features: [groupingFeature],
store: rpstore,
viewConfig: {
trackOver: false,
stripeRows: true
},
columns: [
{
header: 'Name',
dataIndex: 'name',
flex: 1
},
{
header: 'Type',
dataIndex: 'type',
renderer: render_type,
flex: 1
},
{
header: 'Default',
dataIndex: 'default',
flex: 1
},
{
header: 'Format',
dataIndex: 'type',
renderer: render_format,
flex: 2
},
{
header: 'Description',
dataIndex: 'description',
renderer: render_description,
flex: 6
}
],
bbar: [
{
xtype: 'button',
text: 'Show RAW',
handler: function(btn) {
rawSection.setVisible(!rawSection.isVisible());
btn.setText(rawSection.isVisible() ? 'Hide RAW' : 'Show RAW');
}}
]
});
sections.push(rawSection);
}
if (!data.path.match(/\/_upgrade_/)) {
var permhtml = '';
if (!info.permissions) {
permhtml = "Root only.";
} else {
if (info.permissions.description) {
permhtml += "<div style='white-space:pre-wrap;padding-bottom:10px;'>" +
Ext.htmlEncode(info.permissions.description) + "</div>";
}
permhtml += permission_text(info.permissions);
}
// we do not have this information for PBS api
//if (!info.allowtoken) {
// permhtml += "<br />This API endpoint is not available for API tokens."
//}
sections.push({
title: 'Required permissions',
bodyPadding: 10,
html: permhtml
});
}
items.push({
title: method,
autoScroll: true,
defaults: {
border: false
},
items: sections
});
}
});
var ct = Ext.getCmp('docview');
ct.setTitle("Path: " + real_path(data.path));
ct.removeAll(true);
ct.add(items);
ct.setActiveTab(0);
};
Ext.define('Ext.form.SearchField', {
extend: 'Ext.form.field.Text',
alias: 'widget.searchfield',
emptyText: 'Search...',
flex: 1,
inputType: 'search',
listeners: {
'change': function(){
var value = this.getValue();
if (!Ext.isEmpty(value)) {
store.filter({
property: 'path',
value: value,
anyMatch: true
});
} else {
store.clearFilter();
}
}
}
});
var tree = Ext.create('Ext.tree.Panel', {
title: 'Resource Tree',
tbar: [
{
xtype: 'searchfield',
}
],
tools: [
{
type: 'expand',
tooltip: 'Expand all',
tooltipType: 'title',
callback: (tree) => tree.expandAll(),
},
{
type: 'collapse',
tooltip: 'Collapse all',
tooltipType: 'title',
callback: (tree) => tree.collapseAll(),
},
],
store: store,
width: 200,
region: 'west',
split: true,
margins: '5 0 5 5',
rootVisible: false,
listeners: {
selectionchange: function(v, selections) {
if (!selections[0])
return;
var rec = selections[0];
render_docu(rec.data);
location.hash = '#' + rec.data.path;
}
}
});
Ext.create('Ext.container.Viewport', {
layout: 'border',
renderTo: Ext.getBody(),
items: [
tree,
{
xtype: 'tabpanel',
title: 'Documentation',
id: 'docview',
region: 'center',
margins: '5 5 5 0',
layout: 'fit',
items: []
}
]
});
var deepLink = function() {
var path = window.location.hash.substring(1).replace(/\/\s*$/, '')
var endpoint = store.findNode('path', path);
if (endpoint) {
tree.getSelectionModel().select(endpoint);
tree.expandPath(endpoint.getPath());
render_docu(endpoint.data);
}
}
window.onhashchange = deepLink;
deepLink();
});

docs/backup-client.rst

@ -1,33 +1,31 @@
Backup Client Usage
===================
The command line client for Proxmox Backup Server is called
:command:`proxmox-backup-client`.
The command line client is called :command:`proxmox-backup-client`.
.. _client_repository:
Backup Repository Locations
---------------------------
The client uses the following format to specify a datastore repository
on the backup server (where username is specified in the form of user@realm):
The client uses the following notation to specify a datastore repository
on the backup server.
[[username@]server[:port]:]datastore
The default value for ``username`` is ``root@pam``. If no server is specified,
the default is the local host (``localhost``).
You can specify a port if your backup server is only reachable on a non-default
port (for example, with NAT and port forwarding configurations).
You can specify a port if your backup server is only reachable on a different
port (e.g. with NAT and port forwarding).
Note that if the server uses an IPv6 address, you have to write it with square
Note that if the server is an IPv6 address, you have to write it with square
brackets (for example, `[fe80::01]`).
You can pass the repository with the ``--repository`` command line option, or
by setting the ``PBS_REPOSITORY`` environment variable.
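For illustration, a repository passed directly on the command line when logging in (host and datastore names here are placeholders):

.. code-block:: console

  # proxmox-backup-client login --repository user@pam@backup-server:store1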
Below are some examples of valid repositories and their corresponding real
values:
Here are some examples of valid repositories and the real values:
================================ ================== ================== ===========
Example User Host:Port Datastore
@ -48,31 +46,16 @@ Environment Variables
The default backup repository.
``PBS_PASSWORD``
When set, this value is used as the password for the backup server.
You can also set this to an API token secret.
``PBS_PASSWORD_FD``, ``PBS_PASSWORD_FILE``, ``PBS_PASSWORD_CMD``
Like ``PBS_PASSWORD``, but read data from an open file descriptor, a file
name or from the `stdout` of a command, respectively. The first defined
environment variable from the order above is preferred.
When set, this value is used for the password required for the backup server.
You can also set this to an API token secret.
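As an illustration of these password variants in a shell (the file path and the command are hypothetical):

.. code-block:: console

  # export PBS_PASSWORD_FILE=/root/.pbs-password
  # export PBS_PASSWORD_CMD="pass show backup/pbs"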
``PBS_ENCRYPTION_PASSWORD``
When set, this value is used to access the secret encryption key (if
protected by password).
``PBS_ENCRYPTION_PASSWORD_FD``, ``PBS_ENCRYPTION_PASSWORD_FILE``, ``PBS_ENCRYPTION_PASSWORD_CMD``
Like ``PBS_ENCRYPTION_PASSWORD``, but read data from an open file descriptor,
a file name or from the `stdout` of a command, respectively. The first
defined environment variable from the order above is preferred.
``PBS_FINGERPRINT``
When set, this value is used to verify the server certificate (only used if
the system CA certificates cannot validate the certificate).
.. Note:: Passwords must be valid UTF-8 and may not contain newlines. For your
convenience, Proxmox Backup Server only uses the first line as password, so
you can add arbitrary comments after the first newline.
``PBS_FINGERPRINT`` When set, this value is used to verify the server
certificate (only used if the system CA certificates cannot validate the
certificate).
Output Format
@ -87,15 +70,14 @@ Creating Backups
----------------
This section explains how to create a backup from within the machine. This can
be a physical host, a virtual machine, or a container. Such backups may contain
file and image archives. There are no restrictions in this case.
be a physical host, a virtual machine, or a container. Such backups may contain file
and image archives. There are no restrictions in this case.
.. Note:: If you want to backup virtual machines or containers on Proxmox VE,
see :ref:`pve-integration`.
.. note:: If you want to backup virtual machines or containers on Proxmox VE, see :ref:`pve-integration`.
For the following example, you need to have a backup server set up, have working
credentials, and know the repository name.
In the following examples, we use ``backup-server:store1``.
For the following example you need to have a backup server set up, working
credentials and need to know the repository name.
In the following examples we use ``backup-server:store1``.
.. code-block:: console
@ -109,12 +91,12 @@ In the following examples, we use ``backup-server:store1``.
Uploaded 12129 chunks in 87 seconds (564 MB/s).
End Time: 2019-12-03T10:36:29+01:00
This will prompt you for a password, then upload a file archive named
This will prompt you for a password and then uploads a file archive named
``root.pxar`` containing all the files in the ``/`` directory.
.. Caution:: Please note that proxmox-backup-client does not
.. Caution:: Please note that the proxmox-backup-client does not
automatically include mount points. Instead, you will see a short
``skip mount point`` message for each of them. The idea is to
``skip mount point`` notice for each of them. The idea is to
create a separate file archive for each mounted disk. You can
explicitly include them using the ``--include-dev`` option
(i.e. ``--include-dev /boot/efi``). You can use this option
@ -122,19 +104,19 @@ This will prompt you for a password, then upload a file archive named
The ``--repository`` option can get quite long and is used by all
commands. You can avoid having to enter this value by setting the
environment variable ``PBS_REPOSITORY``. Note that if you would like this to
remain set over multiple sessions, you should instead add the below line to your
environment variable ``PBS_REPOSITORY``. Note that if you would like this to remain set
over multiple sessions, you should instead add the below line to your
``.bashrc`` file.
.. code-block:: console
# export PBS_REPOSITORY=backup-server:store1
After this, you can execute all commands without having to specify the
``--repository`` option.
After this you can execute all commands without specifying the ``--repository``
option.
A single backup is allowed to contain more than one archive. For example, if
you want to back up two disks mounted at ``/mnt/disk1`` and ``/mnt/disk2``:
One single backup is allowed to contain more than one archive. For example, if
you want to backup two disks mounted at ``/mnt/disk1`` and ``/mnt/disk2``:
.. code-block:: console
@ -148,26 +130,26 @@ archive source at the client. The format is:
<archive-name>.<type>:<source-path>
Common types are ``.pxar`` for file archives and ``.img`` for block
device images. To create a backup of a block device, run the following command:
Common types are ``.pxar`` for file archives, and ``.img`` for block
device images. To create a backup of a block device run the following command:
.. code-block:: console
# proxmox-backup-client backup mydata.img:/dev/mylvm/mydata
Excluding Files/Directories from a Backup
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Excluding files/folders from a backup
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sometimes it is desired to exclude certain files or directories from a backup archive.
To tell the Proxmox Backup client when and how to ignore files and directories,
place a text file named ``.pxarexclude`` in the filesystem hierarchy.
Whenever the backup client encounters such a file in a directory, it interprets
each line as a glob match pattern for files and directories that are to be excluded
from the backup.
The file must contain a single glob pattern per line. Empty lines and lines
starting with ``#`` (indicating a comment) are ignored.
A ``!`` at the beginning of a line reverses the glob match pattern from an exclusion
to an explicit inclusion. This makes it possible to exclude all entries in a
directory except for a few single files/subdirectories.
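
For instance, a ``.pxarexclude`` file along these lines (the file names here
are made up for illustration) excludes all temporary files but keeps one of
them:

.. code-block:: console

  # exclude all files ending in .tmp, in this directory and below
  **/*.tmp
  # but explicitly include this one
  !/important/keep.tmp
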
``\`` is used to escape special glob characters.
``?`` matches any single character.
``*`` matches any character, including an empty string.
``**`` is used to match current directory and subdirectories. For example, with
the pattern ``**/*.tmp``, it would exclude all files ending in ``.tmp`` within
a directory and its subdirectories.
``[...]`` matches a single character from any of the provided characters within
the brackets. ``[!...]`` does the complementary and matches any single character
not contained within the brackets. It is also possible to specify ranges with two
characters separated by ``-``. For example, ``[a-z]`` matches any lowercase
alphabetic character, and ``[0-9]`` matches any single digit.
The order of the glob match patterns defines whether a file is included or
excluded, that is to say, later entries override earlier ones.
This is also true for match patterns encountered deeper down the directory tree,
which can override a previous exclusion.
.. Note:: Excluded directories will **not** be read by the backup client. Thus,
   a ``.pxarexclude`` file in an excluded subdirectory will have no effect.
   ``.pxarexclude`` files are treated as regular files and will be included in
   the backup archive.
For example, consider the following directory structure (a hypothetical layout
for illustration):
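
.. code-block:: console

  # ls -aR somedir
  somedir/:
  .  ..  .pxarexclude  data.db  notes.txt

  somedir/sub:
  .  ..  draft.txt  cache.db

If ``somedir/.pxarexclude`` contains the single pattern ``**/*.db``, then
``data.db`` and ``sub/cache.db`` are excluded from the archive, while the two
text files are still backed up.
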
You can avoid entering the passwords by setting the environment
variables ``PBS_PASSWORD`` and ``PBS_ENCRYPTION_PASSWORD``.
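
For example (a sketch; substitute your actual secrets):

.. code-block:: console

  # export PBS_PASSWORD=<password>
  # export PBS_ENCRYPTION_PASSWORD=<encryption-password>
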
Using a Master Key to Store and Recover Encryption Keys
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can also use ``proxmox-backup-client key`` to create an RSA public/private
key pair, which can be used to store an encrypted copy of your backup
encryption key.

In general, it is important to
keep keys ordered and in a place that is separate from the contents being
backed up. It can happen, for example, that you back up an entire system, using
a key on that system. If the system then becomes inaccessible for any reason
and needs to be restored, this will not be possible, as the encryption key will be
lost along with the broken system.
It is recommended that you keep your master key safe, but easily accessible, in
order to allow for quick disaster recovery.
Restoring Data
--------------
The regular creation of backups is a necessary step in avoiding data loss. More
important, however, is the restoration. It is good practice to perform
periodic recovery tests to ensure that you can access the data in case of
disaster.
First, you need to find the snapshot which you want to restore. The snapshot
list command provides a list of all the snapshots on the server:
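
For example (output shortened; the repository is taken from ``PBS_REPOSITORY``
here):

.. code-block:: console

  # proxmox-backup-client snapshot list
  ...
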
The interactive recovery shell is a minimal command line interface that
utilizes the metadata stored in the catalog to quickly list, navigate and
search for files in a file archive.
To restore files, you can select them individually or match them with a glob
pattern.
Using the catalog for navigation reduces the overhead considerably because only
the catalog needs to be downloaded and, optionally, decrypted.
The actual chunks are only accessed if the metadata in the catalog is
insufficient or for the actual restore.
Similar to common UNIX shells, ``cd`` and ``ls`` are the commands used to change
working directory and list directory contents in the archive.
``pwd`` shows the full path of the current working directory with respect to the
archive root.
The ability to quickly search the contents of the archive is a commonly required
feature. That's where the catalog is most valuable. For example:
.. code-block:: console

  ...
  pxar:/ > restore-selected /target/path
  ...
This will find and print all files ending in ``.txt`` located in ``etc/`` or its
subdirectories, and add the corresponding pattern to the list for subsequent restores.
``list-selected`` shows these patterns and ``restore-selected`` finally restores
all files in the archive matching the patterns to ``/target/path`` on the local
host. This will scan the whole archive.
Mounting of Archives via FUSE
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The :term:`FUSE` implementation for the pxar archive allows you to mount a
file archive as a read-only filesystem to a mount point on your host.
.. code-block:: console

  # proxmox-backup-client mount <snapshot> <archive-name> <target>

This allows you to access the full contents of the archive in a seamless manner.
Note that this can cause some additional
load on your host, depending on the operations you perform on the mounted
filesystem.
To unmount the filesystem, use the ``umount`` command on the mount point:
.. code-block:: console

  # umount <mountpoint>
Login and Logout
----------------
The client tool prompts you to enter the login password as soon as you
want to access the backup server. The server checks your credentials
and responds with a ticket that is valid for two hours. The client
tool automatically stores that ticket and uses it for further requests to this
server.
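
You can also log in and out manually; a short sketch:

.. code-block:: console

  # proxmox-backup-client login
  Password: **********
  # proxmox-backup-client logout
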
Changing the Owner of a Backup Group
------------------------------------
By default, the owner of a backup group is the user which was used to originally
create that backup group (or in the case of sync jobs, ``root@pam``). This
means that if a user ``mike@pbs`` created a backup, another user ``john@pbs``
cannot be used to create backups in that same backup group. In case you want
to change the owner of a backup, you can do so with the below command, using a
user that has ``Datastore.Modify`` privileges on the datastore.
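
For example (a sketch; the backup group ``host/mike-pc`` is made up):

.. code-block:: console

  # proxmox-backup-client change-owner host/mike-pc john@pbs
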
Pruning removes only the snapshot index files, not the data
in the chunk-store. The chunk-store still contains the data blocks. To free
space you need to perform :ref:`client_garbage-collection`.
It is also possible to protect single snapshots from being pruned or deleted:
.. code-block:: console
# proxmox-backup-client snapshot protected update <snapshot> true
This will set the protected flag on the snapshot and prevent pruning or manual
deletion of this snapshot until the flag is removed again with:
.. code-block:: console
# proxmox-backup-client snapshot protected update <snapshot> false
When a group with a protected snapshot is deleted, only the non-protected
ones are removed and the group will remain.
.. note:: This flag will not be synced when using pull or sync jobs. If you
   want to protect a synced snapshot, you have to set the flag manually on
   the target backup server.
.. _client_garbage-collection:
Garbage collection marks chunks that are still in use by updating their ``atime``
(access time) property. Filesystems are mounted with the ``relatime`` option
by default. This results in a better performance by only updating the
``atime`` property if the last access has been at least 24 hours ago. The
downside is that touching a chunk within these 24 hours will not always
update its ``atime`` property.
Chunks in the grace period will be logged at the end of the garbage
collection task.
.. code-block:: console

  ...
  Average chunk size: 2486565
  TASK OK
Garbage collection can also be scheduled using ``proxmox-backup-manager`` or
from the Proxmox Backup Server's web interface.
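
For example, to start garbage collection for a datastore from the command line
(``store1`` is just the example datastore name used earlier):

.. code-block:: console

  # proxmox-backup-manager garbage-collection start store1
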
Benchmarking
------------
Backup Protocol
===============
Proxmox Backup Server uses a REST-based API. While the management
interface uses normal HTTP, the actual backup and restore interface uses
HTTP/2 for improved performance. Both HTTP and HTTP/2 are well known
standards, so the following section assumes that you are familiar with
how to use them.
Backup Protocol API
~~~~~~~~~~~~~~~~~~~
To start a new backup, the API call ``GET /api2/json/backup`` needs to
be upgraded to a HTTP/2 connection using
``proxmox-backup-protocol-v1`` as the protocol name::
GET /api2/json/backup HTTP/1.1
UPGRADE: proxmox-backup-protocol-v1
The server replies with the ``HTTP 101 Switching Protocol`` status code,
and you can then issue REST commands on the updated HTTP/2 connection.
The backup protocol allows you to upload three different kinds of files:
- Chunks and blobs (binary data)
- Fixed indexes (list of chunks with fixed size)
- Dynamic indexes (list of chunks with variable size)
The following section provides a short introduction on how to upload such
files. Please use the `API Viewer <api-viewer/index.html>`_ for
details about the available REST commands.
Upload Blobs
~~~~~~~~~~~~
Blobs are uploaded using ``POST /blob``. The HTTP body contains the
data encoded as :ref:`Data Blob <data-blob-format>`.
The file name must end with ``.blob``, and is automatically added
to the backup manifest, following the call to ``POST /finish``.
Upload Chunks
~~~~~~~~~~~~~

Chunks are uploaded using ``POST /fixed_chunk`` or ``POST /dynamic_chunk``
(see the index sections below). The HTTP body contains the chunk data,
encoded as :ref:`Data Blob <data-blob-format>`.
Upload Fixed Indexes
~~~~~~~~~~~~~~~~~~~~
Fixed indexes are used to store VM image data. The VM image is split
into equally sized chunks, which are uploaded individually. The index
file simply contains a list of chunk digests.
You create a fixed index with ``POST /fixed_index``. Then, upload
chunks with ``POST /fixed_chunk``, and append them to the index with
``PUT /fixed_index``. When finished, you need to close the index using
``POST /fixed_close``.
The file name needs to end with ``.fidx``, and is automatically added
to the backup manifest, following the call to ``POST /finish``.
Upload Dynamic Indexes
~~~~~~~~~~~~~~~~~~~~~~
Dynamic indexes are used to store file archive data. The archive data
is split into dynamically sized chunks, which are uploaded
individually. The index file simply contains a list of chunk digests
and offsets.
You can create a dynamically sized index with ``POST /dynamic_index``. Then,
upload chunks with ``POST /dynamic_chunk``, and append them to the index with
``PUT /dynamic_index``. When finished, you need to close the index using
``POST /dynamic_close``.
The file name needs to end with ``.didx``, and is automatically added
to the backup manifest, following the call to ``POST /finish``.
Finish Backup
~~~~~~~~~~~~~
Once you have uploaded all data, you need to call ``POST /finish``. This
commits all data and ends the backup protocol.
Restore/Reader Protocol API
~~~~~~~~~~~~~~~~~~~~~~~~~~~

To start a reader connection, the API call ``GET /api2/json/reader`` needs to
be upgraded to a HTTP/2 connection using
``proxmox-backup-reader-protocol-v1`` as the protocol name::

  GET /api2/json/reader HTTP/1.1
  UPGRADE: proxmox-backup-reader-protocol-v1
The server replies with the ``HTTP 101 Switching Protocol`` status code,
and you can then issue REST commands on that updated HTTP/2 connection.
The reader protocol allows you to download three different kinds of files:
- Chunks and blobs (binary data)
- Fixed indexes (list of chunks with fixed size)
- Dynamic indexes (list of chunks with variable size)
The following section provides a short introduction on how to download such
files. Please use the `API Viewer <api-viewer/index.html>`_ for details about
the available REST commands.
Download Blobs
~~~~~~~~~~~~~~
Blobs are downloaded using ``GET /download``. The HTTP body contains the
data encoded as :ref:`Data Blob <data-blob-format>`.
Download Chunks
~~~~~~~~~~~~~~~
Chunks are downloaded using ``GET /chunk``. The HTTP body contains the
data encoded as :ref:`Data Blob <data-blob-format>`.
Download Index Files
~~~~~~~~~~~~~~~~~~~~
Index files are downloaded using ``GET /download``. The HTTP body
contains the data encoded as :ref:`Fixed Index <fixed-index-format>`
or :ref:`Dynamic Index <dynamic-index-format>`.
Calendar Events
===============

Each field can contain multiple values in the following formats:
* and a combination of the above: e.g., 01,05..10,12/02
* or a `*` for every possible value: e.g., \*:00
There are some special values that have a specific meaning, such as `hourly`,
`daily`, `weekly`, `monthly` or `yearly`.

Not all features of systemd calendar events are implemented:
* no Unix timestamps (e.g. `@12345`): instead use date and time to specify
a specific point in time
* no timezone: all schedules use the timezone of the server
* no sub-second resolution
* no reverse day syntax (e.g. 2020-03~01)
* no repetition of ranges (e.g. 1..10/2)
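
As an illustration, a few valid calendar events built from the syntax above
(the trailing comments are not part of the event):

.. code-block:: console

  daily            # every day at 00:00
  12:05            # every day at 12:05
  mon..fri 02:30   # every Monday to Friday at 02:30
  *:0/30           # every 30 minutes
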
Notes on Scheduling
-------------------
In `Proxmox Backup`_, scheduling for most tasks is done in the
`proxmox-backup-proxy`. This daemon checks all job schedules
every minute, to see if any are due. This means that even though
`calendar events` can contain seconds, they will only be checked
once per minute.
Also, all schedules will be checked against the timezone set
in the `Proxmox Backup`_ server.
Command Line Tools
==================
.. include:: pxar/description.rst
``proxmox-backup-debug``
~~~~~~~~~~~~~~~~~~~~~~~~
.. include:: proxmox-backup-debug/description.rst
Command Syntax
==============
Catalog Shell Commands
~~~~~~~~~~~~~~~~~~~~~~
The following commands are available in an interactive restore shell:
.. code-block:: console
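
  # (sketch) navigation works like a regular UNIX shell:
  pxar:/ > pwd
  /
  pxar:/ > ls
  ...
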
This file contains the access control list for the Proxmox Backup
Server API.
Each line starts with ``acl:``, followed by 4 additional values
separated by colon.
:propagate: Propagate permissions down the hierarchy
:path: The object path
:User/Token: List of users and tokens
:Role: List of assigned roles
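
A complete (made-up) entry, granting ``john@pbs`` the ``DatastoreAdmin`` role
on one datastore with propagation enabled, could look like this:

::

  acl:1:/datastore/store1:john@pbs:DatastoreAdmin
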
This file contains a list of datastore configuration sections. Each
section starts with the header ``datastore: <name>``, followed by the
datastore configuration options.
::
  datastore: <name1>
      path <path1>
      <option1> <value1>
Each entry starts with the header ``pool: <name>``, followed by the
media pool configuration options.
::

  pool: <name>
      ...
      retention overwrite

  pool: ...
You can use the ``proxmox-tape pool`` command to manipulate this file.
This file contains information used to access remote servers.
Each entry starts with the header ``remote: <name>``, followed by the
remote configuration options.
::

  remote: <name>
      ...

  remote: ...
You can use the ``proxmox-backup-manager remote`` command to manipulate
this file.
Each entry starts with the header ``sync: <name>``, followed by the
job configuration options.
::

  sync: <name>
      ...
      remote lina

  sync: ...
You can use the ``proxmox-backup-manager sync-job`` command to manipulate
this file.
Each entry starts with the header ``backup: <name>``, followed by the
job configuration options.
Each LTO drive configuration section starts with the header ``lto: <name>``,
followed by the drive configuration options.
Tape changer configurations start with the header ``changer: <name>``,
followed by the changer configuration options.
You can use the ``proxmox-tape drive`` and ``proxmox-tape changer``
commands to manipulate this file.
.. NOTE:: The ``virtual:`` drive type is experimental and should only be used
   for debugging.
This file contains the list of API users and API tokens.
Each user configuration section starts with the header ``user: <name>``,
followed by the user configuration options.
API token configuration starts with the header ``token:
<userid!token_name>``, followed by the token configuration. The data
used to authenticate tokens is stored in a separate file
(``token.shadow``).
Each entry starts with the header ``verification: <name>``, followed by the
job configuration options.
Configuration Files
===================
All Proxmox Backup Server configuration files reside in the directory
``/etc/proxmox-backup/``.
.. Epilog (included at top of each file)
We use this file to define external links and common replacement
patterns.
.. _Proxmox: https://www.proxmox.com
.. _Proxmox Community Forum: https://forum.proxmox.com
.. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
.. FIXME
.. _Proxmox Backup: https://pbs.proxmox.com/wiki/index.php/Main_Page
.. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
.. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
.. _Virtual machine: https://en.wikipedia.org/wiki/Virtual_machine
.. _APT: http://en.wikipedia.org/wiki/Advanced_Packaging_Tool
.. _QEMU: https://www.qemu.org/
.. _LXC: https://linuxcontainers.org/lxc/introduction/
.. _Client-server model: https://en.wikipedia.org/wiki/Client-server_model
.. _AE: https://en.wikipedia.org/wiki/Authenticated_encryption
How long will my Proxmox Backup Server version be supported?
------------------------------------------------------------
+-----------------------+----------------------+---------------+------------+--------------------+
|Proxmox Backup Version | Debian Version       | First Release | Debian EOL | Proxmox Backup EOL |
+=======================+======================+===============+============+====================+
|Proxmox Backup 2.x     | Debian 11 (Bullseye) | 2021-07       | tba        | tba                |
+-----------------------+----------------------+---------------+------------+--------------------+
|Proxmox Backup 1.x     | Debian 10 (Buster)   | 2020-11       | ~Q2/2022   | Q2-Q3/2022         |
+-----------------------+----------------------+---------------+------------+--------------------+
Can I copy or synchronize my datastore to another location?
-----------------------------------------------------------
Is the backup incremental/deduplicated?
---------------------------------------
With Proxmox Backup Server, backups are sent incrementally to the server, and
data is then deduplicated on the server. This minimizes both the storage
consumed and the impact on the network.
Proxmox File Archive Format (``.pxar``)
---------------------------------------
Data Blob Format (``.blob``)
----------------------------
The data blob format is used to store small binary data. The magic number
decides the exact format (plain, compressed, encrypted, or both encrypted and
compressed).
The compression algorithm used is ``zstd``. The encryption cipher is
``AES_256_GCM``.
Unencrypted blobs use the following format:
.. list-table::

   * - ``MAGIC: [u8; 8]``
   * - ``CRC32: [u8; 4]``
   * - ``Data: (max 16MiB)``
Encrypted blobs additionally contain a 16 byte initialization vector (IV),
followed by a 16 byte authenticated encryption (AE) tag, followed by the
encrypted data:
.. list-table::

   * - ``MAGIC: [u8; 8]``
   * - ``CRC32: [u8; 4]``
   * - ``IV: [u8; 16]``
   * - ``TAG: [u8; 16]``
   * - ``Data: (max 16MiB)``
.. _fixed-index-format:

Fixed Index Format (``.fidx``)
------------------------------

All numbers are stored as little-endian.
.. list-table::
   :widths: auto

   * - ``ctime: i64``,
     - Creation Time (epoch)
   * - ``index_csum: [u8; 32]``,
     - SHA-256 over the index (without header) ``SHA256(digest1||digest2||...)``
   * - ``size: u64``,
     - Image size
   * - ``chunk_size: u64``,
     - Chunk size
   * - ``reserved: [u8; 4016]``,
     - Overall header size is one page (4096 bytes)
   * - ``digest1: [u8; 32]``
     - First chunk digest
   * - ``digest2: [u8; 32]``
     - Second chunk digest
   * - ...
     - Next chunk digest ...
.. _dynamic-index-format:
Dynamic Index Format (``.didx``)
--------------------------------

All numbers are stored as little-endian.
.. list-table::
   :widths: auto

   * - ``ctime: i64``,
     - Creation Time (epoch)
   * - ``index_csum: [u8; 32]``,
     - SHA-256 over the index (without header) ``SHA256(offset1||digest1||offset2||digest2||...)``
   * - ``reserved: [u8; 4032]``,
     - Overall header size is one page (4096 bytes)
   * - ``offset1: u64``
     - End of first chunk
   * - ``digest1: [u8; 32]``
     - First chunk digest
   * - ``offset2: u64``
     - End of second chunk
   * - ``digest2: [u8; 32]``
     - Second chunk digest
   * - ...
     - Next chunk offset/digest
Glossary
========
`Container`_
   A container is an isolated user space. Programs run directly on
   the host's kernel, but with limited access to the host's resources.
`Rust`_
   Rust is a new, fast and memory-efficient system programming
   language. It has no runtime or garbage collector. Rust's rich type
   system and ownership model guarantee memory-safety and
   thread-safety. This can eliminate many classes of bugs
   at compile-time.
`Sphinx`_
   Is a tool that makes it easy to create intelligent and nicely formatted
   documentation. It was originally created for the documentation of the
   Python programming language. It has excellent facilities for the
   documentation of software projects in a range of languages.
`reStructuredText`_
   Is an easy-to-read, what-you-see-is-what-you-get, plaintext
   markup syntax and parser system.
`FUSE`
The web interface also provides a built-in console, so if you prefer the
command line or need some extra control, you have this option.
The web interface can be accessed via https://youripaddress:8007. The default
login is `root`, and the password is either the one specified during the
installation process or the password of the root user, in case of installation
on top of Debian.
GUI Overview
------------
The Proxmox Backup Server web interface consists of 3 main sections:
* **Header**: At the top. This shows version information and contains buttons
  to view documentation, monitor running tasks, set the language, configure
  various display settings, and logout.
* **Sidebar**: On the left. This contains the administration options for
  the server.
* **Configuration Panel**: In the center. This contains the respective control
  interfaces for the administration options in the *Sidebar*.
Sidebar
~~~~~~~
Configuration
^^^^^^^^^^^^^
The Configuration section contains some system options, such as time, network,
WebAuthn, and HTTP proxy configuration. It also contains the following
subsections:
* **Access Control**: Add and manage users, API tokens, and the permissions
  associated with these items
* **Remotes**: Add, edit and remove remotes (see :term:`Remote`)
* **Certificates**: Manage ACME accounts and create SSL certificates.
* **Subscription**: Upload a subscription key, view subscription status and
  access a text-based system report.
Administration
^^^^^^^^^^^^^^

The Administration section contains various administration
tasks and information. These are:
* **Services**: Manage and monitor system services
* **Updates**: An interface for upgrading packages
* **Repositories**: An interface for configuring APT repositories
* **Syslog**: View log messages from the server
* **Tasks**: Task history with multiple filter options
The administration menu item also contains a disk management subsection:
* **Disks**: View information on available disks
* **Directory**: Create and view information on *ext4* and *xfs* disks
* **ZFS**: Create and view information on *ZFS* disks
Tape Backup
^^^^^^^^^^^
The `Tape Backup`_ section contains a top panel, with options for managing tape
media sets, inventories, drives, changers, encryption keys, and the tape backup
jobs themselves. The tabs are as follows:
* **Content**: Information on the contents of the tape backup
* **Inventory**: Manage the tapes attached to the system
* **Changers**: Manage tape loading devices
* **Drives**: Manage drives used for reading and writing to tapes
* **Media Pools**: Manage logical pools of tapes
* **Encryption Keys**: Manage tape backup encryption keys
* **Backup Jobs**: Manage tape backup jobs
The section also contains a subsection per standalone drive and per changer,
with a status and management view for those devices.
Datastore
^^^^^^^^^
The Datastore section contains interfaces for creating and managing
datastores. It also contains a button for creating a new datastore on the
server, as well as a subsection for each datastore on the system, in which you
can use the top panel to view:
* **Summary**: Access a range of datastore usage statistics
* **Content**: Information on the datastore's backup groups and their
  respective snapshots
* **Prune & GC**: Schedule :ref:`pruning <backup-pruning>` and :ref:`garbage
  collection <client_garbage-collection>` operations, and run garbage collection
  manually
* **Sync Jobs**: Create, manage and run :ref:`syncjobs` from remote servers
* **Verify Jobs**: Create, manage and run :ref:`maintenance_verification` jobs
  on the datastore
* **Options**: Configure notification and verification settings
* **Permissions**: Manage permissions on the datastore
Installation
============
The disk image (ISO file) provided by Proxmox includes a complete Debian system
as well as all necessary packages for the `Proxmox Backup`_ Server.
The installer will guide you through the setup process and allow
you to partition the local disk(s), apply basic system configuration
(for example timezone, language, network), and install all required packages.
The provided ISO will get you started in just a few minutes, and is the
recommended method for new and existing users.
Alternatively, `Proxmox Backup`_ Server can be installed on top of an
existing Debian system.
Install `Proxmox Backup`_ Server using the Installer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Download the ISO from |DOWNLOADS|.
It includes the following:
* The `Proxmox Backup`_ Server installer, which partitions the local
  disk(s) with ext4, xfs or ZFS, and installs the operating system
* Complete operating system (Debian Linux, 64-bit)
After configuring the
package repositories on a standard Debian installation, you need to run:

.. code-block:: console

  # apt-get update
  # apt-get install proxmox-backup-server
The above commands keep the current (Debian) kernel and install a minimal
set of required packages.
If you want to install the same set of packages as the installer does, you can
install the ``proxmox-backup`` meta-package instead.
Introduction
============
What is Proxmox Backup Server?
------------------------------
Proxmox Backup Server is an enterprise-class, client-server backup solution that
is capable of backing up :term:`virtual machine`\ s, :term:`container`\ s, and
physical hosts. It is specially optimized for the `Proxmox Virtual Environment`_
platform and allows you to back up your data securely, even between remote
sites, providing easy management through a web-based user interface.
It supports deduplication, compression, and authenticated
encryption (AE_). Using :term:`Rust` as the implementation language guarantees
high performance, low resource usage, and a safe, high-quality codebase.
Proxmox Backup uses state of the art cryptography for both client-server
communication and backup content :ref:`encryption <client_encryption>`.
Proxmox Backup Server uses a `client-server model`_. The server stores the
backup data and provides an API to create and manage datastores. With the
API, it's also possible to manage disks and other server-side resources.
The backup client uses this API to access the backed up data. You can use the
``proxmox-backup-client`` command line tool to create and restore file backups.
For QEMU_ and LXC_ within `Proxmox Virtual Environment`_, we deliver an
integrated client.
A single backup is allowed to contain several archives. For example, when you
back up a :term:`virtual machine`, each disk is stored as a separate archive
inside that backup. The VM configuration itself is stored as an extra file.
This way, it's easy to access and restore only the important parts of the
backup, without the need to scan the whole backup.
Main Features
-------------
:Support for Proxmox VE: The `Proxmox Virtual Environment`_ is fully
   supported, and you can easily back up :term:`virtual machine`\ s and
   :term:`container`\ s.
:Performance: The whole software stack is written in :term:`Rust`,
   in order to provide high performance and low resource usage.
:Encryption: Backups can be encrypted on the client side, using AES-256 in
   GCM mode, which is very fast on
   modern hardware. In addition to client-side encryption, all data is
   transferred via a secure TLS connection.
:Tape backup: For long-term archiving of data, Proxmox Backup Server also
   provides extensive support for backing up to tape and managing tape
   libraries.

:Web interface: Manage the Proxmox Backup Server with the integrated, web-based
   user interface.
:Enterprise Support: Proxmox Server Solutions GmbH offers enterprise support in
   the form of `Proxmox Backup Server Subscription Plans
   <https://www.proxmox.com/en/proxmox-backup-server/pricing>`_. Users at every
   subscription level get access to the Proxmox Backup :ref:`Enterprise
   Repository <sysadmin_package_repos_enterprise>`.
Bug Tracker
~~~~~~~~~~~
Proxmox runs a public bug tracker at `<https://bugzilla.proxmox.com>`_. If an
issue appears, file your report there. An issue can be a bug, as well as a
request for a new feature or enhancement. The bug tracker helps to keep track
of the issue and will send a notification once it has been solved.
In July 2020, we released the first beta version of Proxmox Backup
Server, followed by the first stable version in November 2020. With support for
encryption and incremental, fully deduplicated backups, Proxmox Backup offers a
secure environment, which significantly reduces network load and saves valuable
storage space.
ZFS on Linux
------------
ZFS is a combined file system and logical volume manager, designed by
Sun Microsystems. There is no need to manually compile ZFS modules - all
packages are included.
By using ZFS, it's possible to achieve maximum enterprise features with
low budget hardware, and also high performance systems by leveraging
SSD caching or even SSD only setups. ZFS can replace expensive
hardware raid cards with moderate CPU and memory load, combined with easy
management.
General advantages of ZFS:
* Easy configuration and management with GUI and CLI.
* Reliable
Hardware
~~~~~~~~~
ZFS depends heavily on memory, so it's recommended to have at least 8GB to
start. In practice, use as much as you can get for your hardware/budget. To
prevent
data corruption, we recommend the use of high quality ECC RAM.
If you use a dedicated cache and/or log disk, you should use an
enterprise class SSD (for example, Intel SSD DC S3700 Series). This can
increase the overall performance significantly.
IMPORTANT: Do not use ZFS on top of a hardware controller which has its
own cache management. ZFS needs to directly communicate with disks. An
HBA adapter or something like an LSI controller flashed in ``IT`` mode is
recommended.
ZFS Administration
~~~~~~~~~~~~~~~~~~
This section gives you some usage examples for common tasks. ZFS
itself is really powerful and provides many options. The main commands
to manage ZFS are `zfs` and `zpool`. Both commands come with extensive
manual pages, which can be read with:
.. code-block:: console
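
  # man zpool
  # man zfs
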
Create a new pool with cache (L2ARC)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It is possible to use a dedicated cache drive partition to increase
the performance (use SSD).
For `<device>`, you can use multiple devices, as is shown in
"Create a new pool with RAID*".
.. code-block:: console

  # zpool create -f -o ashift=12 <pool> <device> cache <cache_device>

Create a new pool with log (ZIL)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It is possible to use a dedicated cache drive partition to increase
the performance (SSD).
For `<device>`, you can use multiple devices, as is shown in
"Create a new pool with RAID*".
.. code-block:: console

  # zpool create -f -o ashift=12 <pool> <device> log <log_device>
Add cache and log to an existing pool
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can add cache and log devices to a pool after its creation. In this example,
we will use a single drive for both cache and log. First, you need to create
2 partitions on the SSD with `parted` or `gdisk`
.. important:: Always use GPT partition tables.
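
Once both partitions are prepared, you can add them to a pool in one step (a
sketch; the partition names are placeholders):

.. code-block:: console

  # zpool add -f <pool> log <device-part1> cache <device-part2>
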
Changing a failed bootable device
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Depending on how Proxmox Backup was installed, it is either using `grub` or
`systemd-boot` as a bootloader.
In either case, the first steps of copying the partition table, reissuing GUIDs
and replacing the ZFS partition are the same. To make the system bootable from
the new disk, different steps are needed which depend on the bootloader in use.
Usually `grub.cfg` is located in `/boot/grub/grub.cfg`:

.. code-block:: console

  # grub-mkconfig -o /path/to/grub.cfg
Activate e-mail notification
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
ZFS comes with an event daemon, which monitors events generated by the
ZFS kernel module, and you can install it using `apt-get`:

.. code-block:: console

  # apt-get install zfs-zed
To activate the daemon, it is necessary to uncomment the `ZED_EMAIL_ADDR`
setting in the file `/etc/zfs/zed.d/zed.rc`:
.. code-block:: console
ZED_EMAIL_ADDR="root"
Please note that Proxmox Backup forwards mails to `root` to the email address
configured for the root user.
IMPORTANT: The only setting that is required is `ZED_EMAIL_ADDR`. All
other settings are optional.
Limit ZFS memory usage
^^^^^^^^^^^^^^^^^^^^^^
It is good to use at most 50 percent (which is the default) of the
system memory for ZFS ARC, to prevent performance degradation of the
host. Use your preferred editor to change the configuration in
`/etc/modprobe.d/zfs.conf` and insert:
.. code-block:: console

  options zfs zfs_arc_max=8589934592
The above example limits the usage to 8 GiB ('8 * 2^30^').
.. IMPORTANT:: In case your desired `zfs_arc_max` value is lower than or equal
   to `zfs_arc_min` (which defaults to 1/32 of the system memory), `zfs_arc_max`
   will be ignored. Thus, for it to work in this case, you must set
   `zfs_arc_min` to at most `zfs_arc_max - 1`. This would require updating the
   configuration in `/etc/modprobe.d/zfs.conf`, with:

   .. code-block:: console

     options zfs zfs_arc_min=8589934591
     options zfs zfs_arc_max=8589934592

   This example setting limits the usage to 8 GiB ('8 * 2^30^') on
   systems with more than 256 GiB of total memory, where simply setting
   `zfs_arc_max` alone would not work.
.. IMPORTANT:: If your root file system is ZFS, you must update your initramfs
   every time this value changes:
   .. code-block:: console

     # update-initramfs -u
Swap on ZFS
^^^^^^^^^^^
Swap-space created on a zvol may cause some issues, such as blocking the
server or generating a high IO load, often seen when starting a backup
to an external storage.
We strongly recommend using enough memory, so that you normally do not
run into low memory situations. Should you need or want to add swap, it is
preferred to create a partition on a physical disk and use it as a swap device.
You can leave some space free for this purpose in the advanced options of the
installer. Additionally, you can lower the `swappiness` value.
A good value for servers is 10:
.. code-block:: console
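
  # sysctl -w vm.swappiness=10
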
To make the change persistent, open `/etc/sysctl.conf` with
an editor of your choice and add the following line:

::

  vm.swappiness = 10

At the other end of the scale, ``vm.swappiness = 100`` makes the kernel swap
aggressively.
ZFS compression
^^^^^^^^^^^^^^^
To activate compression:

.. code-block:: console

  # zpool set compression=lz4 <pool>
We recommend using the `lz4` algorithm, since it adds very little CPU overhead.
Other algorithms such as `lzjb` and `gzip-N` (where `N` is an integer from `1-9`
representing the compression ratio, where 1 is fastest and 9 is best
compression) are also available. Depending on the algorithm and how
compressible the data is, having compression enabled can even increase I/O
performance.
You can disable compression at any time with:
.. code-block:: console

  # zfs set compression=off <dataset>

Only new blocks will be affected by this change.
.. _local_zfs_special_device:
ZFS special device
^^^^^^^^^^^^^^^^^^
Since version 0.8.0, ZFS supports `special` devices. A `special` device in a
pool is used to store metadata, deduplication tables, and optionally small
file blocks.
A `special` device can improve the speed of a pool consisting of slow spinning
hard disks with a lot of metadata changes. For example, workloads that involve
creating, updating or deleting a large number of files will benefit from the
presence of a `special` device. ZFS datasets can also be configured to store
small files on the `special` device, which can further improve the
performance. Use fast SSDs for the `special` device.
.. IMPORTANT:: The redundancy of the `special` device should match the one of the
   pool, since the `special` device is a point of failure for the entire pool.
.. WARNING:: Adding a `special` device to a pool cannot be undone!
To create a pool with `special` device and RAID-1:
.. code-block:: console

  # zpool create -f <pool> mirror <device1> <device2> special mirror <device3> <device4>

Adding a `special` device to an existing pool with RAID-1:
.. code-block:: console

  # zpool add <pool> special mirror <device1> <device2>
ZFS datasets expose the `special_small_blocks=<size>` property. `size` can be
`0` to disable storing small file blocks on the `special` device, or a power of
two in the range between `512B` to `128K`. After setting this property, new file
blocks smaller than `size` will be allocated on the `special` device.
.. IMPORTANT:: If the value for `special_small_blocks` is greater than or equal
   to the record size of the dataset, *all* data will be written to
   the `special` device, so be careful!
Setting the `special_small_blocks` property on a pool will change the default
value of that property for all child ZFS datasets (for example, all containers
in the pool will opt in for small file blocks).
Opt in for all files smaller than 4K-blocks pool-wide:
.. code-block:: console

  # zfs set special_small_blocks=4K <pool>

Opt out from small file blocks for a single dataset:

.. code-block:: console

  # zfs set special_small_blocks=0 <pool>/<filesystem>
Troubleshooting
^^^^^^^^^^^^^^^
Corrupt cache file
""""""""""""""""""
`zfs-import-cache.service` imports ZFS pools using the ZFS cache file. If this
file becomes corrupted, the service won't be able to import the pools that it's
unable to read from it.
As a result, in case of a corrupted ZFS cache file, some volumes may not be
mounted during boot and must be mounted manually later.
For each pool, run:
.. code-block:: console

  # zpool set cachefile=/etc/zfs/zpool.cache POOLNAME
then, update the `initramfs` by running:
.. code-block:: console
# update-initramfs -u -k all
and finally, reboot the node.
Another workaround to this problem is enabling the `zfs-import-scan.service`,
which searches and imports pools via device scanning (usually slower).
</style>
<link rel="stylesheet" type="text/css" href="font-awesome/css/font-awesome.css"/>
<script type="text/javascript" src="extjs/ext-all.js"></script>
<script type="text/javascript" src="lto-barcode-generator.js"></script>
</head>
<body>
</body>
// for toolkit.js
function gettext(val) { return val; };
// FIXME: HACK! Makes scrolling in number spinner work again. fixed in ExtJS >= 6.1
if (Ext.isFirefox) {
    Ext.$eventNameMap.DOMMouseScroll = 'DOMMouseScroll';
}

function draw_labels(target_id, label_list, page_layout, calibration) {
    let max_labels = compute_max_labels(page_layout);
The following retention options are available:
``keep-hourly <N>``
  Keep backups for the last ``<N>`` hours. If there is more than one
  backup for a single hour, only the latest is retained.
``keep-daily <N>``
  Keep backups for the last ``<N>`` days. If there is more than one
  backup for a single day, only the latest is retained.
``keep-weekly <N>``
  Keep backups for the last ``<N>`` weeks. If there is more than one
  backup for a single week, only the latest is retained.
.. note:: Weeks start on Monday and end on Sunday. The software
   uses the `ISO week date`_ system and handles weeks at
   the end of the year correctly.
``keep-monthly <N>``
  Keep backups for the last ``<N>`` months. If there is more than one
  backup for a single month, only the latest is retained.
``keep-yearly <N>``
  Keep backups for the last ``<N>`` years. If there is more than one
  backup for a single year, only the latest is retained.
The retention options are processed in the order given above. Each option
only covers backups within its time period. The next option does not take care
of already covered backups. It will only consider older backups.
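
As an illustration (the group name is made up), keeping only the last three
daily and two weekly backups of a group could look like this:

.. code-block:: console

  # proxmox-backup-client prune host/mybackups --keep-daily 3 --keep-weekly 2
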
Unfinished and incomplete backups will be removed by the prune command, unless
they are newer than the last successful backup. In this case, the last failed
backup is retained.
Prune Simulator
^^^^^^^^^^^^^^^
You can use the built-in `prune simulator <prune-simulator/index.html>`_
to explore the effect of different retention options with various backup
schedules.
Manual Pruning
^^^^^^^^^^^^^^
To manually prune a specific backup group, you can use
``proxmox-backup-client``'s ``prune`` subcommand, discussed in
:ref:`backup-pruning`, or navigate to the **Content** tab of the datastore and
click the scissors icon in the **Actions** column of the relevant backup group.
Prune Schedules
^^^^^^^^^^^^^^^
Retention Settings Example
^^^^^^^^^^^^^^^^^^^^^^^^^^
The backup frequency and retention of old backups may depend on how often data
changes and how important an older state may be in a specific workload.
changes, and how important an older state may be, in a specific work load.
When backups act as a company's document archive, there may also be legal
requirements for how long backup snapshots must be kept.
@ -125,8 +125,8 @@ start garbage collection on an entire datastore and the ``status`` subcommand to
see attributes relating to the :ref:`garbage collection <client_garbage-collection>`.
This functionality can also be accessed in the GUI, by navigating to **Prune &
GC** from the top panel of a datastore. From here, you can edit the schedule at
which garbage collection runs and manually start the operation.
GC** from the top panel. From here, you can edit the schedule at which garbage
collection runs and manually start the operation.
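For example, assuming a datastore named ``store1``:

.. code-block:: console

# proxmox-backup-manager garbage-collection start store1
# proxmox-backup-manager garbage-collection status store1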
.. _maintenance_verification:
@ -139,13 +139,13 @@ Verification
:align: right
:alt: Adding a verify job
Proxmox Backup Server offers various verification options to ensure that backup
data is intact. Verification is generally carried out through the creation of
verify jobs. These are scheduled tasks that run verification at a given interval
(see :ref:`calendar-event-scheduling`). With these, you can also set whether
already verified snapshots are ignored, as well as set a time period, after
which snapshots are checked again. The interface for creating verify jobs can be
found under the **Verify Jobs** tab of the datastore.
Proxmox Backup offers various verification options to ensure that backup data is
intact. Verification is generally carried out through the creation of verify
jobs. These are scheduled tasks that run verification at a given interval (see
:ref:`calendar-event-scheduling`). With these, you can set whether already verified
snapshots are ignored, as well as set a time period, after which verified jobs
are checked again. The interface for creating verify jobs can be found under the
**Verify Jobs** tab of the datastore.
.. Note:: It is recommended that you reverify all backups at least monthly, even
if a previous verification was successful. This is because physical drives
@ -158,9 +158,9 @@ found under the **Verify Jobs** tab of the datastore.
data.
Aside from using verify jobs, you can also run verification manually on entire
datastores, backup groups or snapshots. To do this, navigate to the **Content**
tab of the datastore and either click *Verify All* or select the *V.* icon from
the **Actions** column in the table.
datastores, backup groups, or snapshots. To do this, navigate to the **Content**
tab of the datastore and either click *Verify All*, or select the *V.* icon from
the *Actions* column in the table.
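From the command line, verification of an entire datastore (the name
``store1`` is a placeholder) can be started with:

.. code-block:: console

# proxmox-backup-manager verify store1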
.. _maintenance_notification:
@ -170,8 +170,8 @@ Notifications
Proxmox Backup Server can send you notification emails about automatically
scheduled verification, garbage-collection and synchronization task results.
By default, notifications are sent to the email address configured for the
`root@pam` user. You can instead set this user for each datastore.
By default, notifications are send to the email address configured for the
`root@pam` user. You can set that user for each datastore.
You can also change the level of notification received per task type; the
following options are available (see the example after the list):
@ -179,6 +179,6 @@ following options are available:
* Always: send a notification for any scheduled task, independent of the
outcome
* Errors: send a notification for any scheduled task that results in an error
* Errors: send a notification for any scheduled task resulting in an error
* Never: do not send any notification at all
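As a sketch, and assuming the datastore ``notify`` property takes per-task
keys ``gc``, ``verify`` and ``sync`` with these levels as values, such a
setting might be applied like this:

.. code-block:: console

# proxmox-backup-manager datastore update store1 --notify gc=error,verify=always,sync=error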


@ -17,8 +17,8 @@ configuration information for remotes is stored in the file
:align: right
:alt: Add a remote
To add a remote, you need its hostname or IP address, a userid and password on
the remote, and its certificate fingerprint. To get the fingerprint, use the
To add a remote, you need its hostname or IP, a userid and password on the
remote, and its certificate fingerprint. To get the fingerprint, use the
``proxmox-backup-manager cert info`` command on the remote, or navigate to
**Dashboard** in the remote's web interface and select **Show Fingerprint**.
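A hypothetical command-line setup (the remote name, host, auth-id, password
and fingerprint below are placeholders) could look like:

.. code-block:: console

# proxmox-backup-manager remote create pbs2 --host pbs2.mydomain.example \
--auth-id sync@pam --password 'SECRET' --fingerprint <fingerprint-of-remote>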
@ -60,13 +60,12 @@ Sync Jobs
Sync jobs are configured to pull the contents of a datastore on a **Remote** to
a local datastore. You can manage sync jobs in the web interface, from the
**Sync Jobs** tab of the **Datastore** panel or from that of the Datastore
itself. Alternatively, you can manage them with the ``proxmox-backup-manager
sync-job`` command. The configuration information for sync jobs is stored at
``/etc/proxmox-backup/sync.cfg``. To create a new sync job, click the add button
in the GUI, or use the ``create`` subcommand. After creating a sync job, you can
either start it manually from the GUI or provide it with a schedule (see
:ref:`calendar-event-scheduling`) to run regularly.
**Sync Jobs** tab of the datastore which you'd like to set one up for, or using
the ``proxmox-backup-manager sync-job`` command. The configuration information
for sync jobs is stored at ``/etc/proxmox-backup/sync.cfg``. To create a new
sync job, click the add button in the GUI, or use the ``create`` subcommand.
After creating a sync job, you can either start it manually from the GUI or
provide it with a schedule (see :ref:`calendar-event-scheduling`) to run regularly.
.. code-block:: console
@ -80,48 +79,17 @@ either start it manually from the GUI or provide it with a schedule (see
└────────────┴───────┴────────┴──────────────┴───────────┴─────────┘
# proxmox-backup-manager sync-job remove pbs2-local
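For reference, creating such a job from the command line might look like this
(remote, datastore names and schedule are placeholders):

.. code-block:: console

# proxmox-backup-manager sync-job create pbs2-local --remote pbs2 \
--remote-store local --store local --schedule 'Tue 04:18'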
To set up sync jobs, the configuring user needs the following permissions:
For setting up sync jobs, the configuring user needs the following permissions:
#. ``Remote.Read`` on the ``/remote/{remote}/{remote-store}`` path
#. At least ``Datastore.Backup`` on the local target datastore (``/datastore/{store}``)
#. at least ``Datastore.Backup`` on the local target datastore (``/datastore/{store}``)
If the ``remove-vanished`` option is set, ``Datastore.Prune`` is required on
the local datastore as well. If the ``owner`` option is not set (defaulting to
``root@pam``) or set to something other than the configuring user,
``Datastore.Modify`` is required as well.
.. note:: A sync job can only sync backup groups that the configured remote's
user/API token can read. If a remote is configured with a user/API token that
only has ``Datastore.Backup`` privileges, only the limited set of accessible
snapshots owned by that user/API token can be synced.
If the ``remove-vanished`` option is set, ``Datastore.Prune`` is required on
the local datastore as well. If the ``owner`` option is not set (defaulting to
``root@pam``) or is set to something other than the configuring user,
``Datastore.Modify`` is required as well.
If the ``group-filter`` option is set, only backup groups matching at least one
of the specified criteria are synced. The available criteria are:
* backup type, for example to only sync groups of the `ct` (Container) type:
.. code-block:: console
# proxmox-backup-manager sync-job update ID --group-filter type:ct
* full group identifier
.. code-block:: console
# proxmox-backup-manager sync-job update ID --group-filter group:vm/100
* regular expression matched against the full group identifier
.. todo:: add example for regex
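For instance, assuming a ``regex:`` prefix that is matched against the full
group identifier, a hypothetical filter for all ``vm`` groups with a
three-digit ID starting with 1 could be:

.. code-block:: console

# proxmox-backup-manager sync-job update ID --group-filter 'regex:^vm/1[0-9]{2}$'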
The same filter is applied to local groups for handling of the
``remove-vanished`` option.
.. note:: The ``protected`` flag of remote backup snapshots will not be synced.
Bandwidth Limit
^^^^^^^^^^^^^^^
Syncing datastores to an archive can produce lots of traffic and impact other
users of the network. To avoid network or storage congestion, you can limit
the bandwidth of the sync job by setting the ``rate-in`` option either in the
web interface or using the ``proxmox-backup-manager`` command-line tool:
.. code-block:: console
# proxmox-backup-manager sync-job update ID --rate-in 20MiB


@ -82,12 +82,9 @@ is:
.. note:: This command and corresponding GUI button rely on the ``ifreload``
command from the package ``ifupdown2``. This package is included within the
Proxmox Backup Server installation; however, you may have to install it yourself,
if you have installed Proxmox Backup Server on top of Debian or a Proxmox VE
version prior to version 7.
if you have installed Proxmox Backup Server on top of Debian or Proxmox VE.
You can also configure DNS settings, from the **DNS** section
of **Configuration** or by using the ``dns`` subcommand of
``proxmox-backup-manager``.
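For example, to show the current DNS configuration (assuming a ``get``
subcommand):

.. code-block:: console

# proxmox-backup-manager dns get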
.. include:: traffic-control.rst


@ -1,5 +1,5 @@
Most commands that produce output support the ``--output-format``
parameter. This accepts the following values:
Most commands producing output supports the ``--output-format``
parameter. It accepts the following values:
:``text``: Text format (default). Structured data is rendered as a table.


@ -17,13 +17,15 @@ update``.
.. code-block:: sources.list
:caption: File: ``/etc/apt/sources.list``
deb http://ftp.debian.org/debian bullseye main contrib
deb http://ftp.debian.org/debian bullseye-updates main contrib
deb http://ftp.debian.org/debian buster main contrib
deb http://ftp.debian.org/debian buster-updates main contrib
# security updates
deb http://security.debian.org/debian-security bullseye-security main contrib
deb http://security.debian.org/debian-security buster/updates main contrib
.. FIXME for 7.0: change security update suite to bullseye-security
In addition, you need a package repository from Proxmox to get Proxmox Backup
updates.
@ -43,21 +45,31 @@ key with the following commands:
.. code-block:: console
# wget https://enterprise.proxmox.com/debian/proxmox-release-bullseye.gpg -O /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
# wget http://download.proxmox.com/debian/proxmox-ve-release-6.x.gpg -O /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
Verify the SHA512 checksum afterwards with the expected output below:
Verify the SHA512 checksum afterwards with:
.. code-block:: console
# sha512sum /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
7fb03ec8a1675723d2853b84aa4fdb49a46a3bb72b9951361488bfd19b29aab0a789a4f8c7406e71a69aabbc727c936d3549731c4659ffa1a08f44db8fdcebfa /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
# sha512sum /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
and the md5sum, with the expected output below:
The output should be:
.. code-block:: console
# md5sum /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
bcc35c7173e0845c0d6ad6470b70f50e /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
acca6f416917e8e11490a08a1e2842d500b3a5d9f322c6319db0927b2901c3eae23cfb5cd5df6facf2b57399d3cfa52ad7769ebdd75d9b204549ca147da52626 /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
and the md5sum:
.. code-block:: console
# md5sum /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
Here, the output should be:
.. code-block:: console
f3f6c5a3a67baf38ad178e5ff1ee270c /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
.. _sysadmin_package_repos_enterprise:
@ -72,7 +84,7 @@ enabled by default:
.. code-block:: sources.list
:caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
deb https://enterprise.proxmox.com/debian/pbs bullseye pbs-enterprise
deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
To never miss important security fixes, the superuser (``root@pam`` user) is
@ -102,15 +114,15 @@ We recommend to configure this repository in ``/etc/apt/sources.list``.
.. code-block:: sources.list
:caption: File: ``/etc/apt/sources.list``
deb http://ftp.debian.org/debian bullseye main contrib
deb http://ftp.debian.org/debian bullseye-updates main contrib
deb http://ftp.debian.org/debian buster main contrib
deb http://ftp.debian.org/debian buster-updates main contrib
# PBS pbs-no-subscription repository provided by proxmox.com,
# NOT recommended for production use
deb http://download.proxmox.com/debian/pbs bullseye pbs-no-subscription
deb http://download.proxmox.com/debian/pbs buster pbs-no-subscription
# security updates
deb http://security.debian.org/debian-security bullseye-security main contrib
deb http://security.debian.org/debian-security buster/updates main contrib
`Proxmox Backup`_ Test Repository
@ -128,7 +140,7 @@ You can access this repository by adding the following line to
.. code-block:: sources.list
:caption: sources.list entry for ``pbstest``
deb http://download.proxmox.com/debian/pbs bullseye pbstest
deb http://download.proxmox.com/debian/pbs buster pbstest
.. _package_repositories_client_only:
@ -149,26 +161,6 @@ APT-based Proxmox Backup Client Repository
For modern Linux distributions that use `apt` as their package manager, as all
Debian and Ubuntu derivatives do, you may be able to use the APT-based repository.
In order to configure this repository you need to first :ref:`setup the Proxmox
release key <package_repos_secure_apt>`. After that, add the repository URL to
the APT sources lists.
**Repositories for Debian 11 (Bullseye) based releases**
This repository is tested with:
- Debian Bullseye
Edit the file ``/etc/apt/sources.list.d/pbs-client.list`` and add the following
snippet:
.. code-block:: sources.list
:caption: File: ``/etc/apt/sources.list.d/pbs-client.list``
deb http://download.proxmox.com/debian/pbs-client bullseye main
**Repositories for Debian 10 (Buster) based releases**
This repository is tested with:
- Debian Buster
@ -176,6 +168,9 @@ This repository is tested with:
It may work with older versions, and should work with more recently released versions.
In order to configure this repository you need to first :ref:`setup the Proxmox
release key <package_repos_secure_apt>`. After that, add the repository URL to
the APT sources lists.
Edit the file ``/etc/apt/sources.list.d/pbs-client.list`` and add the following
snippet:


@ -1,14 +0,0 @@
Implements debugging functionality to inspect Proxmox Backup datastore
files and verify the integrity of chunks.
Also contains an 'api' subcommand that can call arbitrary API paths
(get/create/set/delete), as well as display their parameters (usage) and
their child links (ls).
By default, it connects to the proxmox-backup-proxy on localhost via https,
but by setting the environment variable `PROXMOX_DEBUG_API_CODE` to `1` the
tool directly calls the corresponding code.
.. WARNING:: Using `PROXMOX_DEBUG_API_CODE` can be dangerous and is only intended
for debugging purposes. It is not intended for use on a production system.


@ -1,33 +0,0 @@
==========================
proxmox-backup-debug
==========================
.. include:: ../epilog.rst
-------------------------------------------------------------
Debugging command line tool for Backup and Restore
-------------------------------------------------------------
:Author: |AUTHOR|
:Version: Version |VERSION|
:Manual section: 1
Synopsis
==========
.. include:: synopsis.rst
Common Options
==============
.. include:: ../output-format.rst
Description
============
.. include:: description.rst
.. include:: ../pbs-copyright.rst


@ -1,5 +1,5 @@
This daemon exposes the whole Proxmox Backup Server API on TCP port
8007 using HTTPS. It runs as user ``backup`` and has very limited
permissions. Operations requiring more permissions are forwarded to
permissions. Operation requiring more permissions are forwarded to
the local ``proxmox-backup`` service.


@ -1,5 +1,7 @@
// for Toolkit.js
function gettext(val) { return val; };
// FIXME: HACK! Makes scrolling in number spinner work again. fixed in ExtJS >= 6.1
if (Ext.isFirefox) {
Ext.$eventNameMap.DOMMouseScroll = 'DOMMouseScroll';
}
Ext.onReady(function() {
const NOW = new Date();
@ -35,6 +37,7 @@ Ext.onReady(function() {
editable: true,
displayField: 'text',
valueField: 'value',
queryMode: 'local',


@ -3,8 +3,8 @@
`Proxmox VE`_ Integration
-------------------------
Proxmox Backup Server can be integrated into a Proxmox VE standalone or cluster
setup, by adding it as a storage in Proxmox VE.
A Proxmox Backup Server can be integrated into a Proxmox VE setup by adding the
former as a storage in a Proxmox VE standalone or cluster setup.
See also the `Proxmox VE Storage - Proxmox Backup Server
<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#storage_pbs>`_ section
@ -14,8 +14,8 @@ of the Proxmox VE Administration Guide for Proxmox VE specific documentation.
Using the Proxmox VE Web-Interface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox VE has native API and web interface integration of Proxmox Backup
Server as of `Proxmox VE 6.3
Proxmox VE has native API and web-interface integration of Proxmox Backup
Server since the `Proxmox VE 6.3 release
<https://pve.proxmox.com/wiki/Roadmap#Proxmox_VE_6.3>`_.
A Proxmox Backup Server can be added under ``Datacenter -> Storage``.
@ -24,8 +24,8 @@ Using the Proxmox VE Command-Line
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You need to define a new storage with type 'pbs' on your `Proxmox VE`_
node. The following example uses ``store2`` as the storage's name, and
assumes the server address is ``localhost`` and you want to connect
node. The following example uses ``store2`` as storage name, and
assumes the server address is ``localhost``, and you want to connect
as ``user1@pbs``.
.. code-block:: console
@ -33,7 +33,7 @@ as ``user1@pbs``.
# pvesm add pbs store2 --server localhost --datastore store2
# pvesm set store2 --username user1@pbs --password <secret>
.. note:: If you would rather not enter your password as plain text, you can pass
.. note:: If you would rather not pass your password as plain text, you can pass
the ``--password`` parameter, without any arguments. This will cause the
program to prompt you for a password upon entering the command.
@ -53,7 +53,7 @@ relationship:
# pvesm set store2 --fingerprint 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
After that, you should be able to view storage status with:
After that you should be able to see storage status with:
.. code-block:: console


@ -1,12 +1,12 @@
``pxar`` is a command line utility for creating and manipulating archives in the
``pxar`` is a command line utility to create and manipulate archives in the
:ref:`pxar-format`.
It is inspired by `casync file archive format
<http://0pointer.net/blog/casync-a-tool-for-distributing-file-system-images.html>`_,
which caters to a similar use-case.
The ``.pxar`` format is adapted to fulfill the specific needs of the Proxmox
Backup Server, for example, efficient storage of hard links.
The format is designed to reduce the required storage on the server by
achieving a high level of deduplication.
Backup Server, for example, efficient storage of hardlinks.
The format is designed to reduce storage space needed on the server by achieving
a high level of deduplication.
Creating an Archive
^^^^^^^^^^^^^^^^^^^
@ -24,10 +24,10 @@ This will create a new archive called ``archive.pxar`` with the contents of the
the same name is already present in the target folder, the creation will
fail.
By default, ``pxar`` will skip certain mount points and will not follow device
By default, ``pxar`` will skip certain mountpoints and will not follow device
boundaries. This design decision is based on the primary use case of creating
archives for backups. It makes sense to ignore the contents of certain
temporary or system specific files in a backup.
archives for backups. It makes sense to not back up the contents of certain
temporary or system specific files.
To alter this behavior and follow device boundaries, use the
``--all-file-systems`` flag.
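For example:

.. code-block:: console

# pxar create archive.pxar /path/to/source --all-file-systems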
@ -41,38 +41,40 @@ by running:
# pxar create archive.pxar /path/to/source --exclude '**/*.txt'
Be aware that the shell itself will try to expand glob patterns before invoking
``pxar``. In order to avoid this, all globs have to be quoted correctly.
Be aware that the shell itself will try to expand all of the glob patterns before
invoking ``pxar``.
In order to avoid this, all globs have to be quoted correctly.
It is possible to pass the ``--exclude`` parameter multiple times, in order to
match more than one pattern. This allows you to use more complex
file inclusion/exclusion behavior. However, it is recommended to use
file exclusion/inclusion behavior. However, it is recommended to use
``.pxarexclude`` files instead for such cases.
For example you might want to exclude all ``.txt`` files except a specific
one from the archive. This would be achieved via the negated match pattern,
prefixed by ``!``. All the glob patterns are relative to the ``source``
directory.
For example you might want to exclude all ``.txt`` files except for a specific
one from the archive. This is achieved via the negated match pattern, prefixed
by ``!``.
All the glob patterns are relative to the ``source`` directory.
.. code-block:: console
# pxar create archive.pxar /path/to/source --exclude '**/*.txt' --exclude '!/folder/file.txt'
.. NOTE:: The order of the glob match patterns matters, as later ones override
earlier ones. Permutations of the same patterns lead to different results.
.. NOTE:: The order of the glob match patterns matters as later ones override
previous ones. Permutations of the same patterns lead to different results.
``pxar`` will store the list of glob match patterns passed as parameters via the
command line, in a file called ``.pxarexclude-cli``, at the root of the archive.
command line, in a file called ``.pxarexclude-cli`` at the root of
the archive.
If a file with this name is already present in the source folder during archive
creation, this file is not included in the archive, and the file containing the
new patterns is added to the archive instead. The original file is not altered.
creation, this file is not included in the archive and the file containing the
new patterns is added to the archive instead, the original file is not altered.
A more convenient and persistent way to exclude files from the archive is by
placing the glob match patterns in ``.pxarexclude`` files.
It is possible to create and place these files in any directory of the filesystem
tree.
These files must contain one pattern per line, and later patterns override
earlier ones.
These files must contain one pattern per line, again later patterns win over
previous ones.
The patterns control the exclusion of files present within the given directory
or further below it in the tree.
The behavior is the same as described in :ref:`client_creating_backups`.
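As an illustration, a hypothetical ``.pxarexclude`` file mirroring the
command-line examples above could contain:

.. code-block:: console

**/*.txt
!/folder/file.txt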
@ -87,7 +89,7 @@ with the following command:
# pxar extract archive.pxar /path/to/target
If no target is provided, the contents of the archive is extracted to the current
If no target is provided, the content of the archive is extracted to the current
working directory.
In order to restore only parts of an archive, such as single files and/or folders,
@ -114,13 +116,13 @@ run the following command:
# pxar list archive.pxar
This displays the full path of each file or directory with respect to the
archive's root.
archives root.
Mounting an Archive
^^^^^^^^^^^^^^^^^^^
``pxar`` allows you to mount and inspect the contents of an archive via _`FUSE`.
In order to mount an archive named ``archive.pxar`` to the mount point ``/mnt``,
In order to mount an archive named ``archive.pxar`` to the mountpoint ``/mnt``,
run the command:
.. code-block:: console
@ -128,7 +130,7 @@ run the command:
# pxar mount archive.pxar /mnt
Once the archive is mounted, you can access its content under the given
mount point.
mountpoint.
.. code-block:: console


@ -15,7 +15,7 @@ accessed using the ``disk`` subcommand. This subcommand allows you to initialize
disks, create various filesystems, and get information about the disks.
To view the disks connected to the system, navigate to **Administration ->
Storage/Disks** in the web interface or use the ``list`` subcommand of
Disks** in the web interface or use the ``list`` subcommand of
``disk``:
.. code-block:: console
@ -42,9 +42,9 @@ To initialize a disk with a new GPT, use the ``initialize`` subcommand:
:alt: Create a directory
You can create an ``ext4`` or ``xfs`` filesystem on a disk using ``fs
create``, or by navigating to **Administration -> Storage/Disks -> Directory**
in the web interface and creating one from there. The following command creates
an ``ext4`` filesystem and passes the ``--add-datastore`` parameter, in order to
create``, or by navigating to **Administration -> Disks -> Directory** in the
web interface and creating one from there. The following command creates an
``ext4`` filesystem and passes the ``--add-datastore`` parameter, in order to
automatically create a datastore on the disk (in this case ``sdd``). This will
create a datastore at the location ``/mnt/datastore/store1``:
@ -57,7 +57,7 @@ create a datastore at the location ``/mnt/datastore/store1``:
:alt: Create ZFS
You can also create a ``zpool`` with various raid levels from **Administration
-> Storage/Disks -> ZFS** in the web interface, or by using ``zpool create``. The command
-> Disks -> Zpool** in the web interface, or by using ``zpool create``. The command
below creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and
mounts it under ``/mnt/datastore/zpool1``:
@ -102,7 +102,7 @@ is stored in the file ``/etc/proxmox-backup/datastore.cfg``.
subdirectories per directory. That number comes from the 2\ :sup:`16`
pre-created chunk namespace directories, and the ``.`` and ``..`` default
directory entries. This requirement excludes certain filesystems and
filesystem configurations from being supported for a datastore. For example,
filesystem configuration from being supported for a datastore. For example,
``ext3`` as a whole or ``ext4`` with the ``dir_nlink`` feature manually disabled.
@ -113,15 +113,14 @@ Datastore Configuration
:align: right
:alt: Datastore Overview
You can configure multiple datastores. A minimum of one datastore needs to be
You can configure multiple datastores. Minimum one datastore needs to be
configured. The datastore is identified by a simple *name* and points to a
directory on the filesystem. Each datastore also has associated retention
settings of how many backup snapshots for each interval of ``hourly``,
``daily``, ``weekly``, ``monthly``, ``yearly`` as well as a time-independent
number of backups to keep in that store. :ref:`backup-pruning` and
:ref:`garbage collection <client_garbage-collection>` can also be configured to
run periodically, based on a configured schedule (see
:ref:`calendar-event-scheduling`) per datastore.
:ref:`garbage collection <client_garbage-collection>` can also be configured to run
periodically based on a configured schedule (see :ref:`calendar-event-scheduling`) per datastore.
.. _storage_datastore_create:
@ -147,8 +146,7 @@ window:
* *Comment* can be used to add some contextual information to the datastore.
Alternatively you can create a new datastore from the command line. The
following command creates a new datastore called ``store1`` on
:file:`/backup/disk1/store1`
following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
.. code-block:: console
@ -158,7 +156,7 @@ following command creates a new datastore called ``store1`` on
Managing Datastores
^^^^^^^^^^^^^^^^^^^
To list existing datastores from the command line, run:
To list existing datastores from the command line run:
.. code-block:: console
@ -218,9 +216,8 @@ After creating a datastore, the following default layout will appear:
`.lock` is an empty file used for process locking.
The `.chunks` directory contains folders, starting from `0000` and increasing in
hexadecimal values until `ffff`. These directories will store the chunked data,
categorized by checksum, after a backup operation has been executed.
The `.chunks` directory contains folders, starting from `0000` and taking hexadecimal values until `ffff`. These
directories will store the chunked data after a backup operation has been executed.
.. code-block:: console


@ -4,8 +4,8 @@ Host System Administration
==========================
`Proxmox Backup`_ is based on the famous Debian_ Linux
distribution. This means that you have access to the entire range of
Debian packages, and that the base system is well documented. The `Debian
distribution. That means that you have access to the whole world of
Debian packages, and the base system is well documented. The `Debian
Administrator's Handbook`_ is available online, and provides a
comprehensive introduction to the Debian operating system.
@ -17,11 +17,11 @@ updates to some Debian packages when necessary.
We also deliver a specially optimized Linux kernel, where we enable
all required virtualization and container features. That kernel
includes drivers for ZFS_, as well as several hardware drivers. For example,
includes drivers for ZFS_, and several hardware drivers. For example,
we ship Intel network card drivers to support their newest hardware.
The following sections will concentrate on backup-related topics. They
will explain things which are different on `Proxmox Backup`_, or
either explain things which are different on `Proxmox Backup`_, or
tasks which are commonly used on `Proxmox Backup`_. For other topics,
please refer to the standard Debian documentation.


@ -3,6 +3,9 @@
Tape Backup
===========
.. CAUTION:: Tape Backup is a technical preview feature, not meant for
production use.
.. image:: images/screenshots/pbs-gui-tape-changer-overview.png
:align: right
:alt: Tape Backup: Tape changer overview
@ -845,17 +848,6 @@ Update Inventory
Restore Catalog
~~~~~~~~~~~~~~~
To restore a catalog from an existing tape, just insert the tape into the drive
and execute:
.. code-block:: console
# proxmox-tape catalog
You can restore from a tape even without an existing catalog, but only the
whole media set. If you do this, the catalog will be automatically created.
Encryption Key Management
~~~~~~~~~~~~~~~~~~~~~~~~~


@ -8,7 +8,7 @@ Datastores
A Datastore is the logical place where :ref:`Backup Snapshots
<term_backup_snapshot>` and their chunks are stored. Snapshots consist of a
manifest, blobs, and dynamic- and fixed-indexes (see :ref:`terms`), and are
manifest, blobs, dynamic- and fixed-indexes (see :ref:`terms`), and are
stored in the following directory structure:
<datastore-root>/<type>/<id>/<time>/
@ -32,8 +32,8 @@ The chunks of a datastore are found in
<datastore-root>/.chunks/
This chunk directory is further subdivided by the first four bytes of the
chunk's checksum, so a chunk with the checksum
This chunk directory is further subdivided by the first four byte of the chunks
checksum, so the chunk with the checksum
a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b
@ -47,7 +47,7 @@ per directory can be bad for file system performance.
These chunk directories ('0000'-'ffff') will be preallocated when a datastore
is created.
Fixed-Sized Chunks
Fixed-sized Chunks
^^^^^^^^^^^^^^^^^^
For block based backups (like VMs), fixed-sized chunks are used. The content
@ -58,10 +58,10 @@ often tries to allocate files in contiguous pieces, so new files get new
blocks, and changing existing files changes only their own blocks.
As an optimization, VMs in `Proxmox VE`_ can make use of 'dirty bitmaps', which
can track the changed blocks of an image. Since these bitmaps are also a
can track the changed blocks of an image. Since these bitmap are also a
representation of the image split into chunks, there is a direct relation
between the dirty blocks of the image and chunks which need to be uploaded.
Thus, only modified chunks of the disk need to be uploaded to a backup.
between dirty blocks of the image and chunks which need to get uploaded, so
only modified chunks of the disk have to be uploaded for a backup.
Since the image is always split into chunks of the same size, unchanged blocks
will result in identical checksums for those chunks, so such chunks do not need
@ -71,24 +71,24 @@ changed blocks.
For consistency, `Proxmox VE`_ uses a QEMU internal snapshot mechanism that
does not rely on storage snapshots either.
Dynamically Sized Chunks
Dynamically sized Chunks
^^^^^^^^^^^^^^^^^^^^^^^^
When working with file-based systems rather than block-based systems,
using fixed-sized chunks is not a good idea, since every time a file
would change in size, the remaining data would be shifted around,
resulting in many chunks changing and the amount of deduplication being reduced.
If one does not want to backup block-based systems but rather file-based
systems, using fixed-sized chunks is not a good idea, since every time a file
would change in size, the remaining data gets shifted around and this would
result in many chunks changing, reducing the amount of deduplication.
To improve this, `Proxmox Backup`_ Server uses dynamically sized chunks
instead. Instead of splitting an image into fixed sizes, it first generates a
consistent file archive (:ref:`pxar <pxar-format>`) and uses a rolling hash
over this on-the-fly generated archive to calculate chunk boundaries.
We use a variant of Buzhash which is a cyclic polynomial algorithm. It works
by continuously calculating a checksum while iterating over the data, and on
certain conditions, it triggers a hash boundary.
certain conditions it triggers a hash boundary.
Assuming that most files on the system that is to be backed up have not
Assuming that most files of the system that is to be backed up have not
changed, eventually the algorithm triggers the boundary on the same data as a
previous backup, resulting in chunks that can be reused.
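To make the idea concrete, here is a minimal, self-contained sketch of such a
rolling-hash chunker in Rust. This is not the actual Buzhash variant used by
Proxmox Backup; the substitution table, window size and boundary mask are
made-up values for illustration only:

.. code-block:: rust

// Toy content-defined chunking with a Buzhash-style rolling hash.
fn chunk_boundaries(data: &[u8], window: usize, mask: u32) -> Vec<usize> {
    // a real implementation uses a fixed table of 256 random u32 values
    let table: Vec<u32> = (0u32..256).map(|b| b.wrapping_mul(0x9E37_79B9)).collect();
    let mut hash: u32 = 0;
    let mut cuts = Vec::new();
    for i in 0..data.len() {
        hash = hash.rotate_left(1);
        if i >= window {
            // drop the byte leaving the window; it has been rotated
            // `window` times since it entered
            hash ^= table[data[i - window] as usize].rotate_left((window % 32) as u32);
        }
        hash ^= table[data[i] as usize];
        // cut a chunk whenever the low bits of the checksum are all zero
        if i + 1 >= window && hash & mask == 0 {
            cuts.push(i + 1);
        }
    }
    cuts
}

fn main() {
    let data: Vec<u8> = (0..1_000_000u32).map(|i| (i.wrapping_mul(2654435761) >> 24) as u8).collect();
    let cuts = chunk_boundaries(&data, 48, 0x1FFF); // mask targets roughly 8 KiB average chunks
    println!("found {} chunk boundaries", cuts.len());
}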
@ -100,8 +100,8 @@ can be encrypted, and they are handled in a slightly different manner than
normal chunks.
The hashes of encrypted chunks are calculated not with the actual (encrypted)
chunk content, but with the plain-text content, concatenated with the encryption
key. This way, two chunks with the same data but encrypted with different keys
chunk content, but with the plain-text content concatenated with the encryption
key. This way, two chunks of the same data encrypted with different keys
generate two different checksums and no collisions occur for multiple
encryption keys.
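The following Rust sketch illustrates the idea only; it is not the exact
construction used by Proxmox Backup, and the ``sha2`` crate is used here just
for demonstration:

.. code-block:: rust

use sha2::{Digest, Sha256};

// The digest covers the plaintext plus the key, so the same data
// encrypted with different keys yields different chunk IDs.
fn encrypted_chunk_id(plaintext: &[u8], key: &[u8]) -> [u8; 32] {
    let mut hasher = Sha256::new();
    hasher.update(plaintext);
    hasher.update(key);
    hasher.finalize().into()
}

fn main() {
    let id_a = encrypted_chunk_id(b"same data", b"key-a");
    let id_b = encrypted_chunk_id(b"same data", b"key-b");
    assert_ne!(id_a, id_b); // different keys, different chunk IDs
}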
@ -112,14 +112,14 @@ the previous backup, do not need to be encrypted and uploaded.
Caveats and Limitations
-----------------------
Notes on Hash Collisions
Notes on hash collisions
^^^^^^^^^^^^^^^^^^^^^^^^
Every hashing algorithm has a chance to produce collisions, meaning two (or
more) inputs generate the same checksum. For SHA-256, this chance is
negligible. To calculate the chances of such a collision, one can use the ideas
of the 'birthday problem' from probability theory. For big numbers, this is
actually unfeasible to calculate with regular computers, but there is a good
negligible. To calculate such a collision, one can use the ideas of the
'birthday problem' from probability theory. For big numbers, this is actually
infeasible to calculate with regular computers, but there is a good
approximation:
.. math::
@ -127,7 +127,7 @@ approximation:
p(n, d) = 1 - e^{-n^2/(2d)}
Where `n` is the number of tries, and `d` is the number of possibilities.
For a concrete example, lets assume a large datastore of 1 PiB and an average
For a concrete example lets assume a large datastore of 1 PiB, and an average
chunk size of 4 MiB. That means :math:`n = 268435456` tries, and :math:`d =
2^{256}` possibilities. Inserting those values in the formula from earlier you
will see that the probability of a collision in that scenario is:
@ -136,96 +136,31 @@ will see that the probability of a collision in that scenario is:
3.1115 * 10^{-61}
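This value can be checked directly: with :math:`n = 2^{28}` and
:math:`d = 2^{256}`, the exponent is

.. math::

\frac{n^2}{2d} = \frac{2^{56}}{2^{257}} = 2^{-201} \approx 3.1115 \cdot 10^{-61},

and since :math:`1 - e^{-x} \approx x` for such a small exponent, this is also
the collision probability itself.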
For context, in a lottery game of guessing 6 numbers out of 45, the chance to
correctly guess all 6 numbers is only :math:`1.2277 * 10^{-7}`. This means the
chance of a collision is about the same as winning 13 such lottery games *in a
row*.
For context, in a lottery game of guessing 6 out of 45, the chance to correctly
guess all 6 numbers is only :math:`1.2277 * 10^{-7}`, that means the chance of
a collision is about the same as winning 13 such lotto games *in a row*.
In conclusion, it is extremely unlikely that such a collision would occur by
accident in a normal datastore.
Additionally, SHA-256 is prone to length extension attacks, but since there is
an upper limit for how big the chunks are, this is not a problem, because a
an upper limit for how big the chunk are, this is not a problem, since a
potential attacker cannot arbitrarily add content to the data beyond that
limit.
File-Based Backup
File-based Backup
^^^^^^^^^^^^^^^^^
Since dynamically sized chunks (for file-based backups) are created on a custom
archive format (pxar) and not over the files directly, there is no relation
between the files and chunks. This means that the Proxmox Backup Client has to
between files and the chunks. This means that the Proxmox Backup client has to
read all files again for every backup, otherwise it would not be possible to
generate a consistent, independent pxar archive where the original chunks can be
reused. Note that in spite of this, only new or changed chunks will be uploaded.
generate a consistent independent pxar archive where the original chunks can be
reused. Note that there will be still only new or change chunks be uploaded.
Verification of Encrypted Chunks
Verification of encrypted chunks
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
For encrypted chunks, only the checksum of the original (plaintext) data is
available, making it impossible for the server (without the encryption key) to
available, making it impossible for the server (without the encryption key), to
verify its content against it. Instead only the CRC-32 checksum gets checked.
Troubleshooting
---------------
Index files (*.fidx*, *.didx*) contain information about how to rebuild a file.
More precisely, they contain an ordered list of references to the chunks that
the original file was split into. If there is something wrong with a snapshot,
it might be useful to find out which chunks are referenced in it, and check
whether they are present and intact. The ``proxmox-backup-debug`` command line
tool can be used to inspect such files and recover their contents. For example,
to get a list of the referenced chunks of a *.fidx* index:
.. code-block:: console
# proxmox-backup-debug inspect file drive-scsi0.img.fidx
The same command can be used to inspect *.blob* files. Without the ``--decode``
parameter, just the size and the encryption type, if any, are printed. If
``--decode`` is set, the blob file is decoded into the specified file ('-' will
decode it directly to stdout).
The following example would print the decoded contents of
`qemu-server.conf.blob`. If the file you're trying to inspect is encrypted, a
path to the key file must be provided using ``--keyfile``.
.. code-block:: console
# proxmox-backup-debug inspect file qemu-server.conf.blob --decode -
You can also check in which index files a specific chunk file is referenced
with:
.. code-block:: console
# proxmox-backup-debug inspect chunk b531d3ffc9bd7c65748a61198c060678326a431db7eded874c327b7986e595e0 --reference-filter /path/in/a/datastore/directory
Here ``--reference-filter`` specifies where index files should be searched. This
can be an arbitrary path. If, for some reason, the filename of the chunk was
changed, you can explicitly specify the digest using ``--digest``. By default, the
chunk filename is used as the digest to look for. If no ``--reference-filter``
is specified, it will only print the CRC and encryption status of the chunk. You
can also decode chunks by setting the ``--decode`` flag. If the chunk is
encrypted, a ``--keyfile`` must be provided in order to decode it.
Restore without a Running Proxmox Backup Server
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It's possible to restore specific files from a snapshot, without a running
Proxmox Backup Server instance, using the ``recover`` subcommand, provided you
have access to the intact index and chunk files. Note that you also need the
corresponding key file if the backup was encrypted.
.. code-block:: console
# proxmox-backup-debug recover index drive-scsi0.img.fidx /path/to/.chunks
In the above example, the `/path/to/.chunks` argument is the path to the
directory that contains the chunks, and `drive-scsi0.img.fidx` is the index file
of the file you'd like to restore. Both paths can be absolute or relative. With
``--skip-crc``, it's possible to disable the CRC checks of the chunks. This
will speed up the process slightly and allow for trying to restore (partially)
corrupt chunks. It's recommended to always try without the skip-CRC option
first.


@ -41,23 +41,23 @@ Binary Data (BLOBs)
~~~~~~~~~~~~~~~~~~~
This type is used to store smaller (< 16MB) binary data such as
configuration files. Larger files should be stored as image archives.
configuration files. Larger files should be stored as image archive.
.. caution:: Please do not store all files as BLOBs. Instead, use the
file archive to store entire directory trees.
file archive to store whole directory trees.
Catalog File: ``catalog.pcat1``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The catalog file is an index for file archives. It contains
the list of included files and is used to speed up search operations.
the list of files and is used to speed up search operations.
The Manifest: ``index.json``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The manifest contains a list of all backed up files, and their
The manifest contains the list of all backup files, their
sizes and checksums. It is used to verify the consistency of a
backup.
@ -68,19 +68,18 @@ Backup Type
The backup server groups backups by *type*, where *type* is one of:
``vm``
This type is used for :term:`virtual machine`\ s. It typically
This type is used for :term:`virtual machine`\ s. Typically
consists of the virtual machine's configuration file and an image archive
for each disk.
``ct``
This type is used for :term:`container`\ s. It consists of the container's
configuration and a single file archive for the filesystem's contents.
This type is used for :term:`container`\ s. Consists of the container's
configuration and a single file archive for the filesystem content.
``host``
This type is used for file/directory backups created from within a machine.
Typically this would be a physical host, but could also be a virtual machine
or container. Such backups may contain file and image archives; there are no
restrictions in this regard.
This type is used for backups created from within the backed up machine.
Typically this would be a physical host but could also be a virtual machine
or container. Such backups may contain file and image archives, there are no restrictions in this regard.
Backup ID


@ -1,101 +0,0 @@
.. _sysadmin_traffic_control:
Traffic Control
---------------
.. image:: images/screenshots/pbs-gui-traffic-control-add.png
:align: right
:alt: Add a traffic control limit
Creating and restoring backups can produce lots of traffic and impact other
users of the network or shared storages.
Proxmox Backup Server allows you to limit network traffic for clients within
specified networks using a token bucket filter (TBF).
This allows you to avoid network congestion or to prioritize traffic from
certain hosts.
You can manage the traffic controls either via the web interface or using the
``traffic-control`` commands of the ``proxmox-backup-manager`` command-line
tool.
.. note:: Sync jobs on the server are not affected by its rate-in limits. If
you want to limit the incoming traffic that a pull-based sync job
generates, you need to set up a job-specific rate-in limit. See
:ref:`syncjobs`.
The following command adds a traffic control rule to limit all IPv4 clients
(network ``0.0.0.0/0``) to 100 MB/s:
.. code-block:: console
# proxmox-backup-manager traffic-control create rule0 --network 0.0.0.0/0 \
--rate-in 100MB --rate-out 100MB \
--comment "Default rate limit (100MB/s) for all clients"
.. note:: To limit both IPv4 and IPv6 network spaces you need to pass two
network parameters ``::/0`` and ``0.0.0.0/0``.
It is possible to restrict rules to certain time frames, for example the
company office hours:
.. tip:: You can use SI (base 10: KB, MB, ...) or IEC (base 2: KiB, MiB, ...)
units.
.. code-block:: console
# proxmox-backup-manager traffic-control update rule0 \
--timeframe "mon..fri 8-12" \
--timeframe "mon..fri 14:30-18"
If multiple rules match a client, the server uses the rule with the smallest
network. For example, we can override the setting for our private network (and
the server itself) with:
.. code-block:: console
# proxmox-backup-manager traffic-control create rule1 \
--network 192.168.2.0/24 \
--network 127.0.0.0/8 \
--rate-in 20GB --rate-out 20GB \
--comment "Use 20GB/s for the local network"
.. note:: The behavior is undefined if there are several rules for the same network.
If multiple rules match the same network, all of them will be
applied, which means that the smallest one wins, as its bucket fills up the
fastest.
To list the current rules, use:
.. code-block:: console
# proxmox-backup-manager traffic-control list
┌───────┬─────────────┬─────────────┬─────────────────────────┬────────────...─┐
│ name │ rate-in │ rate-out │ network │ timeframe ... │
╞═══════╪═════════════╪═════════════╪═════════════════════════╪════════════...═╡
│ rule0 │ 100 MB │ 100 MB │ ["0.0.0.0/0"] │ ["mon..fri ... │
├───────┼─────────────┼─────────────┼─────────────────────────┼────────────...─┤
│ rule1 │ 20 GB │ 20 GB │ ["192.168.2.0/24", ...] │ ... │
└───────┴─────────────┴─────────────┴─────────────────────────┴────────────...─┘
Rules can also be removed:
.. code-block:: console
# proxmox-backup-manager traffic-control remove rule1
To show the state (current data rate) of all configured rules, use:
.. code-block:: console
# proxmox-backup-manager traffic-control traffic
┌───────┬─────────────┬──────────────┐
│ name │ cur-rate-in │ cur-rate-out │
╞═══════╪═════════════╪══════════════╡
│ rule0 │ 0 B │ 0 B │
├───────┼─────────────┼──────────────┤
│ rule1 │ 1.161 GiB │ 19.146 KiB │
└───────┴─────────────┴──────────────┘


@ -15,19 +15,17 @@ Proxmox Backup Server supports several authentication realms, and you need to
choose the realm when you add a new user. Possible realms are:
:pam: Linux PAM standard authentication. Use this if you want to
authenticate as a Linux system user (users need to exist on the
authenticate as Linux system user (Users need to exist on the
system).
:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
``/etc/proxmox-backup/shadow.json``.
:openid: OpenID Connect server. Users can authenticate against an external
OpenID Connect server.
After installation, there is a single user, ``root@pam``, which corresponds to
the Unix superuser. User configuration information is stored in the file
``/etc/proxmox-backup/user.cfg``. You can use the ``proxmox-backup-manager``
command line tool to list or manipulate users:
After installation, there is a single user ``root@pam``, which
corresponds to the Unix superuser. User configuration information is stored in the file
``/etc/proxmox-backup/user.cfg``. You can use the
``proxmox-backup-manager`` command line tool to list or manipulate
users:
.. code-block:: console
@ -42,13 +40,13 @@ command line tool to list or manipulate users:
:align: right
:alt: Add a new user
The superuser has full administration rights on everything, so it's recommended
to add other users with less privileges. You can add a new
The superuser has full administration rights on everything, so you
normally want to add other users with less privileges. You can add a new
user with the ``user create`` subcommand or through the web
interface, under the **User Management** tab of **Configuration -> Access
Control**. The ``create`` subcommand lets you specify many options like
``--email`` or ``--password``. You can update or change any user properties
using the ``user update`` subcommand later (**Edit** in the GUI):
using the ``update`` subcommand later (**Edit** in the GUI):
.. code-block:: console
@ -73,16 +71,16 @@ The resulting user list looks like this:
│ root@pam │ 1 │ │ │ │ │ Superuser │
└──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘
Newly created users do not have any permissions. Please read the :ref:`user_acl`
Newly created users do not have any permissions. Please read the Access Control
section to learn how to set access permissions.
You can disable a user account by setting ``--enable`` to ``0``:
If you want to disable a user account, you can do that by setting ``--enable`` to ``0``
.. code-block:: console
# proxmox-backup-manager user update john@pbs --enable 0
Or completely remove a user with:
Or completely remove the user with:
.. code-block:: console
@ -97,7 +95,7 @@ API Tokens
:align: right
:alt: API Token Overview
Any authenticated user can generate API tokens, which can in turn be used to
Any authenticated user can generate API tokens which can in turn be used to
configure various clients, instead of directly providing the username and
password.
@ -119,7 +117,7 @@ The API token is passed from the client to the server by setting the
``Authorization`` HTTP header with method ``PBSAPIToken`` to the value
``TOKENID:TOKENSECRET``.
You can generate tokens from the GUI or by using ``proxmox-backup-manager``:
Generating new tokens can done using ``proxmox-backup-manager`` or the GUI:
.. code-block:: console
@ -156,9 +154,9 @@ section to learn how to set access permissions.
Access Control
--------------
By default, new users and API tokens do not have any permissions. Instead you
By default new users and API tokens do not have any permission. Instead you
need to specify what is allowed and what is not. You can do this by assigning
roles to users/tokens on specific objects, like datastores or remotes. The
roles to users/tokens on specific objects like datastores or remotes. The
following roles exist:
**NoAccess**
@ -178,7 +176,7 @@ following roles exist:
is not allowed to read the actual data.
**DatastoreReader**
Can Inspect datastore content and do restores.
Can Inspect datastore content and can do restores.
**DatastoreBackup**
Can backup and restore owned backups.
@ -195,18 +193,6 @@ following roles exist:
**RemoteSyncOperator**
Is allowed to read data from a remote.
**TapeAudit**
Can view tape-related configuration and status
**TapeAdmin**
Can do anything related to tape backup
**TapeOperator**
Can do tape backup and restore (but no configuration changes)
**TapeReader**
Can read and inspect tape configuration and media content
.. image:: images/screenshots/pbs-gui-user-management-add-user.png
:align: right
:alt: Add permissions for user
@ -250,8 +236,7 @@ You can list the ACLs of each user/token using the following command:
│ john@pbs │ /datastore/store1 │ 1 │ DatastoreAdmin │
└──────────┴───────────────────┴───────────┴────────────────┘
A single user/token can be assigned multiple permission sets for different
datastores.
A single user/token can be assigned multiple permission sets for different datastores.
.. Note::
Naming convention is important here. For datastores on the host,
@ -262,11 +247,11 @@ datastores.
remote (see `Remote` below) and ``{storename}`` is the name of the datastore on
the remote.
API Token Permissions
API Token permissions
~~~~~~~~~~~~~~~~~~~~~
API token permissions are calculated based on ACLs containing their ID,
independently of those of their corresponding user. The resulting permission set
API token permissions are calculated based on ACLs containing their ID
independent of those of their corresponding user. The resulting permission set
on a given path is then intersected with that of the corresponding user.
In practice this means:
@ -274,17 +259,17 @@ In practice this means:
#. API tokens require their own ACL entries
#. API tokens can never do more than their corresponding user
Effective Permissions
Effective permissions
~~~~~~~~~~~~~~~~~~~~~
To calculate and display the effective permission set of a user or API token,
To calculate and display the effective permission set of a user or API token
you can use the ``proxmox-backup-manager user permission`` command:
.. code-block:: console
# proxmox-backup-manager user permissions john@pbs --path /datastore/store1
Privileges with (*) have the propagate flag set
Path: /datastore/store1
- Datastore.Audit (*)
- Datastore.Backup (*)
@ -292,17 +277,17 @@ you can use the ``proxmox-backup-manager user permission`` command:
- Datastore.Prune (*)
- Datastore.Read (*)
- Datastore.Verify (*)
# proxmox-backup-manager acl update /datastore/store1 DatastoreBackup --auth-id 'john@pbs!client1'
# proxmox-backup-manager user permissions 'john@pbs!client1' --path /datastore/store1
Privileges with (*) have the propagate flag set
Path: /datastore/store1
- Datastore.Backup (*)
.. _user_tfa:
Two-Factor Authentication
Two-factor authentication
-------------------------
Introduction
@ -311,7 +296,7 @@ Introduction
With simple authentication, only a password (single factor) is required to
successfully claim an identity (authenticate), for example, to be able to log in
as `root@pam` on a specific instance of Proxmox Backup Server. In this case, if
the password gets leaked or stolen, anybody can use it to log in - even if they
the password gets stolen or leaked, anybody can use it to log in - even if they
should not be allowed to do so.
With two-factor authentication (TFA), a user is asked for an additional factor
@ -374,18 +359,16 @@ WebAuthn
For WebAuthn to work, you need to have two things:
* A trusted HTTPS certificate (for example, by using `Let's Encrypt
* a trusted HTTPS certificate (for example, by using `Let's Encrypt
<https://pbs.proxmox.com/wiki/index.php/HTTPS_Certificate_Configuration>`_).
While it probably works with an untrusted certificate, some browsers may warn
or refuse WebAuthn operations if it is not trusted.
* Setup the WebAuthn configuration (see **Configuration -> Authentication** in
the Proxmox Backup Server web interface). This can be auto-filled in most
setups.
* setup the WebAuthn configuration (see *Configuration -> Authentication* in the
Proxmox Backup Server web-interface). This can be auto-filled in most setups.
Once you have fulfilled both of these requirements, you can add a WebAuthn
configuration in the **Two Factor Authentication** tab of the **Access Control**
panel.
configuration in the *Access Control* panel.
.. _user_tfa_setup_recovery_keys:
@ -397,8 +380,7 @@ Recovery Keys
:alt: Add a new user
Recovery key codes do not need any preparation; you can simply create a set of
recovery keys in the **Two Factor Authentication** tab of the **Access Control**
panel.
recovery keys in the *Access Control* panel.
.. note:: There can only be one set of single-use recovery keys per user at any
time.


@ -1 +1 @@
deb https://enterprise.proxmox.com/debian/pbs bullseye pbs-enterprise
deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise


@ -1,4 +1,4 @@
use anyhow::Error;
use anyhow::{Error};
// chacha20-poly1305


@ -1,7 +1,6 @@
use anyhow::{Error};
use proxmox_schema::*;
use proxmox_router::cli::*;
use proxmox::api::{*, cli::*};
#[api(
input: {


@ -1,9 +1,9 @@
use std::io::Write;
use anyhow::Error;
use anyhow::{Error};
use pbs_api_types::Authid;
use pbs_client::{HttpClient, HttpClientOptions, BackupReader};
use proxmox_backup::api2::types::Authid;
use proxmox_backup::client::{HttpClient, HttpClientOptions, BackupReader};
pub struct DummyWriter {
bytes: usize,
@ -34,7 +34,7 @@ async fn run() -> Result<(), Error> {
let client = HttpClient::new(host, 8007, auth_id, options)?;
let backup_time = proxmox_time::parse_rfc3339("2019-06-28T10:49:48Z")?;
let backup_time = proxmox::tools::time::parse_rfc3339("2019-06-28T10:49:48Z")?;
let client = BackupReader::start(client, None, "store2", "host", "elsa", backup_time, true)
.await?;
@ -59,7 +59,7 @@ async fn run() -> Result<(), Error> {
}
fn main() {
if let Err(err) = proxmox_async::runtime::main(run()) {
if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
eprintln!("ERROR: {}", err);
}
println!("DONE");


@ -1,9 +1,9 @@
use anyhow::{bail, Error};
use std::thread;
use std::path::PathBuf;
use std::io::Write;
use anyhow::{bail, Error};
// tar handles files that shrink during backup by simply padding them with zeros.
//
// this binary runs multiple threads which write some large files, then truncates


@ -69,7 +69,7 @@ fn send_request(
}
fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run())
proxmox_backup::tools::runtime::main(run())
}
async fn run() -> Result<(), Error> {


@ -69,7 +69,7 @@ fn send_request(
}
fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run())
proxmox_backup::tools::runtime::main(run())
}
async fn run() -> Result<(), Error> {


@ -6,10 +6,10 @@ use hyper::{Body, Request, Response};
use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
use tokio::net::{TcpListener, TcpStream};
use pbs_buildcfg::configdir;
use proxmox_backup::configdir;
fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run())
proxmox_backup::tools::runtime::main(run())
}
async fn run() -> Result<(), Error> {

View File

@ -5,7 +5,7 @@ use hyper::{Body, Request, Response};
use tokio::net::{TcpListener, TcpStream};
fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run())
proxmox_backup::tools::runtime::main(run())
}
async fn run() -> Result<(), Error> {

View File

@ -5,7 +5,7 @@ extern crate proxmox_backup;
use anyhow::{Error};
use std::io::{Read, Write};
use pbs_datastore::Chunker;
use proxmox_backup::backup::*;
struct ChunkWriter {
chunker: Chunker,

View File

@ -1,6 +1,7 @@
extern crate proxmox_backup;
use pbs_datastore::Chunker;
//use proxmox_backup::backup::chunker::*;
use proxmox_backup::backup::*;
fn main() {

View File

@ -3,7 +3,7 @@ use futures::*;
extern crate proxmox_backup;
use pbs_client::ChunkStream;
use proxmox_backup::backup::*;
// Test Chunker with real data read from a file.
//
@ -13,7 +13,7 @@ use pbs_client::ChunkStream;
// Note: I can currently get about 830MB/s
fn main() {
if let Err(err) = proxmox_async::runtime::main(run()) {
if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
panic!("ERROR: {}", err);
}
}

View File

@ -1,7 +1,7 @@
use anyhow::{Error};
use pbs_client::{HttpClient, HttpClientOptions, BackupWriter};
use pbs_api_types::Authid;
use proxmox_backup::api2::types::Authid;
use proxmox_backup::client::*;
async fn upload_speed() -> Result<f64, Error> {
@ -16,7 +16,7 @@ async fn upload_speed() -> Result<f64, Error> {
let client = HttpClient::new(host, 8007, auth_id, options)?;
let backup_time = proxmox_time::epoch_i64();
let backup_time = proxmox::tools::time::epoch_i64();
let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false, true).await?;
@ -27,7 +27,7 @@ async fn upload_speed() -> Result<f64, Error> {
}
fn main() {
match proxmox_async::runtime::main(upload_speed()) {
match proxmox_backup::tools::runtime::main(upload_speed()) {
Ok(mbs) => {
println!("average upload speed: {} MB/s", mbs);
}

View File

@ -1,22 +0,0 @@
[package]
name = "pbs-api-types"
version = "0.1.0"
authors = ["Proxmox Support Team <support@proxmox.com>"]
edition = "2018"
description = "general API type helpers for PBS"
[dependencies]
anyhow = "1.0"
hex = "0.4.3"
lazy_static = "1.4"
libc = "0.2"
nix = "0.19.1"
openssl = "0.10"
regex = "1.2"
serde = { version = "1.0", features = ["derive"] }
proxmox = "0.15.3"
proxmox-lang = "1.0.0"
proxmox-schema = { version = "1.0.1", features = [ "api-macro" ] }
proxmox-time = "1.1"
proxmox-uuid = { version = "1.0.0", features = [ "serde" ] }

View File

@ -1,280 +0,0 @@
use std::str::FromStr;
use serde::de::{value, IntoDeserializer};
use serde::{Deserialize, Serialize};
use proxmox_lang::constnamedbitmap;
use proxmox_schema::{
api, const_regex, ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema,
};
const_regex! {
pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
}
// define Privilege bitfield
constnamedbitmap! {
/// Contains a list of privilege name to privilege value mappings.
///
/// The names are used when displaying/persisting privileges anywhere; the values are used to
/// allow easy matching of privileges as bitflags.
PRIVILEGES: u64 => {
/// Sys.Audit allows knowing about the system and its status
PRIV_SYS_AUDIT("Sys.Audit");
/// Sys.Modify allows modifying system-level configuration
PRIV_SYS_MODIFY("Sys.Modify");
/// Sys.PowerManagement allows powering off/rebooting/.. the system
PRIV_SYS_POWER_MANAGEMENT("Sys.PowerManagement");
/// Datastore.Audit allows knowing about a datastore,
/// including reading the configuration entry and listing its contents
PRIV_DATASTORE_AUDIT("Datastore.Audit");
/// Datastore.Allocate allows creating or deleting datastores
PRIV_DATASTORE_ALLOCATE("Datastore.Allocate");
/// Datastore.Modify allows modifying a datastore and its contents
PRIV_DATASTORE_MODIFY("Datastore.Modify");
/// Datastore.Read allows reading arbitrary backup contents
PRIV_DATASTORE_READ("Datastore.Read");
/// Allows verifying a datastore
PRIV_DATASTORE_VERIFY("Datastore.Verify");
/// Datastore.Backup allows Datastore.Read|Verify and creating new snapshots,
/// but also requires backup ownership
PRIV_DATASTORE_BACKUP("Datastore.Backup");
/// Datastore.Prune allows deleting snapshots,
/// but also requires backup ownership
PRIV_DATASTORE_PRUNE("Datastore.Prune");
/// Permissions.Modify allows modifying ACLs
PRIV_PERMISSIONS_MODIFY("Permissions.Modify");
/// Remote.Audit allows reading remote.cfg and sync.cfg entries
PRIV_REMOTE_AUDIT("Remote.Audit");
/// Remote.Modify allows modifying remote.cfg
PRIV_REMOTE_MODIFY("Remote.Modify");
/// Remote.Read allows reading data from a configured `Remote`
PRIV_REMOTE_READ("Remote.Read");
/// Sys.Console allows access to the system's console
PRIV_SYS_CONSOLE("Sys.Console");
/// Tape.Audit allows reading tape backup configuration and status
PRIV_TAPE_AUDIT("Tape.Audit");
/// Tape.Modify allows modifying tape backup configuration
PRIV_TAPE_MODIFY("Tape.Modify");
/// Tape.Write allows writing tape media
PRIV_TAPE_WRITE("Tape.Write");
/// Tape.Read allows reading tape backup configuration and media contents
PRIV_TAPE_READ("Tape.Read");
/// Realm.Allocate allows viewing, creating, modifying and deleting realms
PRIV_REALM_ALLOCATE("Realm.Allocate");
}
}
/// Admin always has all privileges. It can do everything except a few actions
/// which are limited to the `root@pam` superuser.
pub const ROLE_ADMIN: u64 = u64::MAX;
/// NoAccess can be used to remove privileges from specific (sub-)paths
pub const ROLE_NO_ACCESS: u64 = 0;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Audit can view configuration and status information, but not modify it.
pub const ROLE_AUDIT: u64 = 0
| PRIV_SYS_AUDIT
| PRIV_DATASTORE_AUDIT;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Admin can do anything on the datastore.
pub const ROLE_DATASTORE_ADMIN: u64 = 0
| PRIV_DATASTORE_AUDIT
| PRIV_DATASTORE_MODIFY
| PRIV_DATASTORE_READ
| PRIV_DATASTORE_VERIFY
| PRIV_DATASTORE_BACKUP
| PRIV_DATASTORE_PRUNE;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Reader can read/verify datastore content and do restore
pub const ROLE_DATASTORE_READER: u64 = 0
| PRIV_DATASTORE_AUDIT
| PRIV_DATASTORE_VERIFY
| PRIV_DATASTORE_READ;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Backup can do backup and restore, but no prune.
pub const ROLE_DATASTORE_BACKUP: u64 = 0
| PRIV_DATASTORE_BACKUP;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.PowerUser can do backup, restore, and prune.
pub const ROLE_DATASTORE_POWERUSER: u64 = 0
| PRIV_DATASTORE_PRUNE
| PRIV_DATASTORE_BACKUP;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Audit can audit the datastore.
pub const ROLE_DATASTORE_AUDIT: u64 = 0
| PRIV_DATASTORE_AUDIT;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.Audit can audit the remote
pub const ROLE_REMOTE_AUDIT: u64 = 0
| PRIV_REMOTE_AUDIT;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.Admin can do anything on the remote.
pub const ROLE_REMOTE_ADMIN: u64 = 0
| PRIV_REMOTE_AUDIT
| PRIV_REMOTE_MODIFY
| PRIV_REMOTE_READ;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.SyncOperator can do read and prune on the remote.
pub const ROLE_REMOTE_SYNC_OPERATOR: u64 = 0
| PRIV_REMOTE_AUDIT
| PRIV_REMOTE_READ;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Audit can audit the tape backup configuration and media content
pub const ROLE_TAPE_AUDIT: u64 = 0
| PRIV_TAPE_AUDIT;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Admin can do anything on the tape backup
pub const ROLE_TAPE_ADMIN: u64 = 0
| PRIV_TAPE_AUDIT
| PRIV_TAPE_MODIFY
| PRIV_TAPE_READ
| PRIV_TAPE_WRITE;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Operator can do tape backup and restore (but no configuration changes)
pub const ROLE_TAPE_OPERATOR: u64 = 0
| PRIV_TAPE_AUDIT
| PRIV_TAPE_READ
| PRIV_TAPE_WRITE;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Reader can do read and inspect tape content
pub const ROLE_TAPE_READER: u64 = 0
| PRIV_TAPE_AUDIT
| PRIV_TAPE_READ;
/// NoAccess can be used to remove privileges from specific (sub-)paths
pub const ROLE_NAME_NO_ACCESS: &str = "NoAccess";
#[api(
type_text: "<role>",
)]
#[repr(u64)]
#[derive(Serialize, Deserialize)]
/// Enum representing roles via their [PRIVILEGES] combination.
///
/// Since privileges are implemented as bitflags, each unique combination of privileges maps to a
/// single, unique `u64` value that is used in this enum definition.
pub enum Role {
/// Administrator
Admin = ROLE_ADMIN,
/// Auditor
Audit = ROLE_AUDIT,
/// Disable Access
NoAccess = ROLE_NO_ACCESS,
/// Datastore Administrator
DatastoreAdmin = ROLE_DATASTORE_ADMIN,
/// Datastore Reader (inspect datastore content and do restores)
DatastoreReader = ROLE_DATASTORE_READER,
/// Datastore Backup (backup and restore owned backups)
DatastoreBackup = ROLE_DATASTORE_BACKUP,
/// Datastore PowerUser (backup, restore and prune owned backup)
DatastorePowerUser = ROLE_DATASTORE_POWERUSER,
/// Datastore Auditor
DatastoreAudit = ROLE_DATASTORE_AUDIT,
/// Remote Auditor
RemoteAudit = ROLE_REMOTE_AUDIT,
/// Remote Administrator
RemoteAdmin = ROLE_REMOTE_ADMIN,
/// Synchronisation Operator
RemoteSyncOperator = ROLE_REMOTE_SYNC_OPERATOR,
/// Tape Auditor
TapeAudit = ROLE_TAPE_AUDIT,
/// Tape Administrator
TapeAdmin = ROLE_TAPE_ADMIN,
/// Tape Operator
TapeOperator = ROLE_TAPE_OPERATOR,
/// Tape Reader
TapeReader = ROLE_TAPE_READER,
}
impl FromStr for Role {
type Err = value::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Self::deserialize(s.into_deserializer())
}
}
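Since each role is just a `u64` bitmask of the privileges above, a permission check is a single bitwise AND. A minimal standalone sketch of the scheme (the literal bit values and the `has_privilege` helper are illustrative only, not what `constnamedbitmap!` actually generates):

    // Illustrative stand-ins for the generated privilege constants.
    const PRIV_SYS_AUDIT: u64 = 1 << 0;
    const PRIV_DATASTORE_AUDIT: u64 = 1 << 1;
    const PRIV_DATASTORE_BACKUP: u64 = 1 << 2;

    const ROLE_AUDIT: u64 = PRIV_SYS_AUDIT | PRIV_DATASTORE_AUDIT;

    // A role grants a privilege iff the corresponding bit is set.
    fn has_privilege(role: u64, privilege: u64) -> bool {
        role & privilege != 0
    }

    fn main() {
        assert!(has_privilege(ROLE_AUDIT, PRIV_DATASTORE_AUDIT));
        assert!(!has_privilege(ROLE_AUDIT, PRIV_DATASTORE_BACKUP));
    }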
pub const ACL_PATH_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&ACL_PATH_REGEX);
pub const ACL_PATH_SCHEMA: Schema = StringSchema::new("Access control path.")
.format(&ACL_PATH_FORMAT)
.min_length(1)
.max_length(128)
.schema();
pub const ACL_PROPAGATE_SCHEMA: Schema =
BooleanSchema::new("Allow to propagate (inherit) permissions.")
.default(true)
.schema();
pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new("Type of 'ugid' property.")
.format(&ApiStringFormat::Enum(&[
EnumEntry::new("user", "User"),
EnumEntry::new("group", "Group"),
]))
.schema();
#[api(
properties: {
propagate: {
schema: ACL_PROPAGATE_SCHEMA,
},
path: {
schema: ACL_PATH_SCHEMA,
},
ugid_type: {
schema: ACL_UGID_TYPE_SCHEMA,
},
ugid: {
type: String,
description: "User or Group ID.",
},
roleid: {
type: Role,
}
}
)]
#[derive(Serialize, Deserialize)]
/// ACL list entry.
pub struct AclListItem {
pub path: String,
pub ugid: String,
pub ugid_type: String,
pub propagate: bool,
pub roleid: String,
}
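An entry deserializes straight from the JSON shape the API returns. A sketch with invented values, assuming `serde_json` is available in the crate (the `demo` wrapper is hypothetical):

    // Sketch: all field values are invented for illustration.
    fn demo() -> Result<(), serde_json::Error> {
        let item: AclListItem = serde_json::from_value(serde_json::json!({
            "path": "/datastore/store2",
            "ugid": "backup@pbs",
            "ugid_type": "user",
            "propagate": true,
            "roleid": "DatastoreBackup"
        }))?;
        assert_eq!(item.ugid_type, "user");
        Ok(())
    }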

View File

@ -1,98 +0,0 @@
use std::fmt::{self, Display};
use anyhow::Error;
use serde::{Deserialize, Serialize};
use proxmox_schema::api;
#[api(default: "encrypt")]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
/// Defines whether data is encrypted (using an AEAD cipher), only signed, or neither.
pub enum CryptMode {
/// Don't encrypt.
None,
/// Encrypt.
Encrypt,
/// Only sign.
SignOnly,
}
#[derive(Debug, Eq, PartialEq, Hash, Clone, Deserialize, Serialize)]
#[serde(transparent)]
/// 32-byte fingerprint, usually calculated with SHA256.
pub struct Fingerprint {
#[serde(with = "bytes_as_fingerprint")]
bytes: [u8; 32],
}
impl Fingerprint {
pub fn new(bytes: [u8; 32]) -> Self {
Self { bytes }
}
pub fn bytes(&self) -> &[u8; 32] {
&self.bytes
}
pub fn signature(&self) -> String {
as_fingerprint(&self.bytes)
}
}
/// Display as short key ID
impl Display for Fingerprint {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", as_fingerprint(&self.bytes[0..8]))
}
}
impl std::str::FromStr for Fingerprint {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Error> {
let mut tmp = s.to_string();
tmp.retain(|c| c != ':');
let bytes = proxmox::tools::hex_to_digest(&tmp)?;
Ok(Fingerprint::new(bytes))
}
}
fn as_fingerprint(bytes: &[u8]) -> String {
hex::encode(bytes)
.as_bytes()
.chunks(2)
.map(|v| unsafe { std::str::from_utf8_unchecked(v) }) // it's a hex string
.collect::<Vec<&str>>().join(":")
}
pub mod bytes_as_fingerprint {
use std::mem::MaybeUninit;
use serde::{Deserialize, Serializer, Deserializer};
pub fn serialize<S>(
bytes: &[u8; 32],
serializer: S,
) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let s = super::as_fingerprint(bytes);
serializer.serialize_str(&s)
}
pub fn deserialize<'de, D>(
deserializer: D,
) -> Result<[u8; 32], D::Error>
where
D: Deserializer<'de>,
{
// TODO: more efficiently implement with a Visitor implementing visit_str using split() and
// hex::decode by-byte
let mut s = String::deserialize(deserializer)?;
s.retain(|c| c != ':');
let mut out = MaybeUninit::<[u8; 32]>::uninit();
hex::decode_to_slice(s.as_bytes(), unsafe { &mut (*out.as_mut_ptr())[..] })
.map_err(serde::de::Error::custom)?;
Ok(unsafe { out.assume_init() })
}
}
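As a usage sketch, the `Display`/`FromStr` pair round-trips the colon-separated hex form; note that `Display` prints only the first eight bytes as a short key ID, while `signature()` yields the full fingerprint (the `demo` wrapper is hypothetical):

    fn demo() -> Result<(), anyhow::Error> {
        let fp = Fingerprint::new([0xab; 32]);
        let full = fp.signature();               // "ab:ab:...:ab", all 32 bytes
        let parsed: Fingerprint = full.parse()?; // FromStr strips the colons
        assert_eq!(parsed.bytes(), fp.bytes());
        println!("{}", fp);                      // short key ID: first 8 bytes only
        Ok(())
    }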

View File

@ -1,627 +0,0 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::{
api, const_regex, ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType,
Schema, StringSchema, Updater,
};
use crate::{
PROXMOX_SAFE_ID_FORMAT, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, CryptMode, UPID,
Fingerprint, Userid, Authid,
GC_SCHEDULE_SCHEMA, DATASTORE_NOTIFY_STRING_SCHEMA, PRUNE_SCHEDULE_SCHEMA,
};
const_regex!{
pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");
pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$");
pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$");
pub GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$");
pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";
pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$");
pub DATASTORE_MAP_REGEX = concat!(r"(?:", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!());
}
pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name")
.min_length(1)
.max_length(4096)
.schema();
pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema = StringSchema::new("Backup archive name.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.schema();
pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX);
pub const BACKUP_GROUP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GROUP_PATH_REGEX);
pub const BACKUP_ID_SCHEMA: Schema = StringSchema::new("Backup ID.")
.format(&BACKUP_ID_FORMAT)
.schema();
pub const BACKUP_TYPE_SCHEMA: Schema = StringSchema::new("Backup type.")
.format(&ApiStringFormat::Enum(&[
EnumEntry::new("vm", "Virtual Machine Backup"),
EnumEntry::new("ct", "Container Backup"),
EnumEntry::new("host", "Host Backup"),
]))
.schema();
pub const BACKUP_TIME_SCHEMA: Schema = IntegerSchema::new("Backup time (Unix epoch).")
.minimum(1_547_797_308)
.schema();
pub const BACKUP_GROUP_SCHEMA: Schema = StringSchema::new("Backup Group")
.format(&BACKUP_GROUP_FORMAT)
.schema();
pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
pub const CHUNK_DIGEST_SCHEMA: Schema = StringSchema::new("Chunk digest (SHA256).")
.format(&CHUNK_DIGEST_FORMAT)
.schema();
pub const DATASTORE_MAP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DATASTORE_MAP_REGEX);
pub const DATASTORE_MAP_SCHEMA: Schema = StringSchema::new("Datastore mapping.")
.format(&DATASTORE_MAP_FORMAT)
.min_length(3)
.max_length(65)
.type_text("(<source>=)?<target>")
.schema();
pub const DATASTORE_MAP_ARRAY_SCHEMA: Schema = ArraySchema::new(
"Datastore mapping list.", &DATASTORE_MAP_SCHEMA)
.schema();
pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new(
"A list of Datastore mappings (or single datastore), comma separated. \
For example 'a=b,e' maps the source datastore 'a' to target 'b and \
all other sources to the default 'e'. If no default is given, only the \
specified sources are mapped.")
.format(&ApiStringFormat::PropertyString(&DATASTORE_MAP_ARRAY_SCHEMA))
.schema();
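The mapping syntax described above is simple enough to illustrate with a standalone parser sketch (not the crate's actual implementation; `parse_map` is a hypothetical helper):

    use std::collections::HashMap;

    // Sketch: split "a=b,e" into explicit source=>target pairs plus an
    // optional default target for all other sources.
    fn parse_map(input: &str) -> (HashMap<&str, &str>, Option<&str>) {
        let mut map = HashMap::new();
        let mut default = None;
        for part in input.split(',') {
            match part.split_once('=') {
                Some((source, target)) => { map.insert(source, target); }
                None => default = Some(part),
            }
        }
        (map, default)
    }

    fn main() {
        let (map, default) = parse_map("a=b,e");
        assert_eq!(map["a"], "b");
        assert_eq!(default, Some("e"));
    }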
pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new("Number of daily backups to keep.")
.minimum(1)
.schema();
pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema =
IntegerSchema::new("Number of hourly backups to keep.")
.minimum(1)
.schema();
pub const PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new("Number of backups to keep.")
.minimum(1)
.schema();
pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema =
IntegerSchema::new("Number of monthly backups to keep.")
.minimum(1)
.schema();
pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema =
IntegerSchema::new("Number of weekly backups to keep.")
.minimum(1)
.schema();
pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema =
IntegerSchema::new("Number of yearly backups to keep.")
.minimum(1)
.schema();
#[api(
properties: {
"keep-last": {
schema: PRUNE_SCHEMA_KEEP_LAST,
optional: true,
},
"keep-hourly": {
schema: PRUNE_SCHEMA_KEEP_HOURLY,
optional: true,
},
"keep-daily": {
schema: PRUNE_SCHEMA_KEEP_DAILY,
optional: true,
},
"keep-weekly": {
schema: PRUNE_SCHEMA_KEEP_WEEKLY,
optional: true,
},
"keep-monthly": {
schema: PRUNE_SCHEMA_KEEP_MONTHLY,
optional: true,
},
"keep-yearly": {
schema: PRUNE_SCHEMA_KEEP_YEARLY,
optional: true,
},
}
)]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Common pruning options
pub struct PruneOptions {
#[serde(skip_serializing_if="Option::is_none")]
pub keep_last: Option<u64>,
#[serde(skip_serializing_if="Option::is_none")]
pub keep_hourly: Option<u64>,
#[serde(skip_serializing_if="Option::is_none")]
pub keep_daily: Option<u64>,
#[serde(skip_serializing_if="Option::is_none")]
pub keep_weekly: Option<u64>,
#[serde(skip_serializing_if="Option::is_none")]
pub keep_monthly: Option<u64>,
#[serde(skip_serializing_if="Option::is_none")]
pub keep_yearly: Option<u64>,
}
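Because every field is optional and tagged with `skip_serializing_if`, only the options actually set end up on the wire. A sketch, assuming `serde_json` is available (the `demo` wrapper is hypothetical):

    // Sketch: only keep-last and keep-daily are emitted, the rest are skipped.
    fn demo() -> Result<(), serde_json::Error> {
        let opts = PruneOptions {
            keep_last: Some(3),
            keep_daily: Some(7),
            ..Default::default()
        };
        assert_eq!(
            serde_json::to_string(&opts)?,
            r#"{"keep-last":3,"keep-daily":7}"#
        );
        Ok(())
    }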
#[api(
properties: {
name: {
schema: DATASTORE_SCHEMA,
},
path: {
schema: DIR_NAME_SCHEMA,
},
"notify-user": {
optional: true,
type: Userid,
},
"notify": {
optional: true,
schema: DATASTORE_NOTIFY_STRING_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
"gc-schedule": {
optional: true,
schema: GC_SCHEDULE_SCHEMA,
},
"prune-schedule": {
optional: true,
schema: PRUNE_SCHEDULE_SCHEMA,
},
"keep-last": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_LAST,
},
"keep-hourly": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_HOURLY,
},
"keep-daily": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_DAILY,
},
"keep-weekly": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_WEEKLY,
},
"keep-monthly": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_MONTHLY,
},
"keep-yearly": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_YEARLY,
},
"verify-new": {
description: "If enabled, all new backups will be verified right after completion.",
optional: true,
type: bool,
},
}
)]
#[derive(Serialize,Deserialize,Updater)]
#[serde(rename_all="kebab-case")]
/// Datastore configuration properties.
pub struct DataStoreConfig {
#[updater(skip)]
pub name: String,
#[updater(skip)]
pub path: String,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub gc_schedule: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub prune_schedule: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub keep_last: Option<u64>,
#[serde(skip_serializing_if="Option::is_none")]
pub keep_hourly: Option<u64>,
#[serde(skip_serializing_if="Option::is_none")]
pub keep_daily: Option<u64>,
#[serde(skip_serializing_if="Option::is_none")]
pub keep_weekly: Option<u64>,
#[serde(skip_serializing_if="Option::is_none")]
pub keep_monthly: Option<u64>,
#[serde(skip_serializing_if="Option::is_none")]
pub keep_yearly: Option<u64>,
/// If enabled, all backups will be verified right after completion.
#[serde(skip_serializing_if="Option::is_none")]
pub verify_new: Option<bool>,
/// Send job email notification to this user
#[serde(skip_serializing_if="Option::is_none")]
pub notify_user: Option<Userid>,
/// Send notification only for job errors
#[serde(skip_serializing_if="Option::is_none")]
pub notify: Option<String>,
}
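Only `name` and `path` lack an `Option` wrapper, so a minimal configuration needs just those two. A deserialization sketch with invented values (the `demo` wrapper is hypothetical):

    // Sketch: unset optional fields simply deserialize to None.
    fn demo() -> Result<(), serde_json::Error> {
        let config: DataStoreConfig = serde_json::from_value(serde_json::json!({
            "name": "store2",
            "path": "/mnt/datastore/store2",
            "verify-new": true
        }))?;
        assert_eq!(config.keep_last, None);
        assert_eq!(config.verify_new, Some(true));
        Ok(())
    }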
#[api(
properties: {
store: {
schema: DATASTORE_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a datastore.
pub struct DataStoreListItem {
pub store: String,
pub comment: Option<String>,
}
#[api(
properties: {
"filename": {
schema: BACKUP_ARCHIVE_NAME_SCHEMA,
},
"crypt-mode": {
type: CryptMode,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Basic information about archive files inside a backup snapshot.
pub struct BackupContent {
pub filename: String,
/// Info if file is encrypted, signed, or neither.
#[serde(skip_serializing_if = "Option::is_none")]
pub crypt_mode: Option<CryptMode>,
/// Archive size (from backup manifest).
#[serde(skip_serializing_if = "Option::is_none")]
pub size: Option<u64>,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Result of a verify operation.
pub enum VerifyState {
/// Verification was successful
Ok,
/// Verification reported one or more errors
Failed,
}
#[api(
properties: {
upid: {
type: UPID,
},
state: {
type: VerifyState,
},
},
)]
#[derive(Serialize, Deserialize)]
/// Task properties.
pub struct SnapshotVerifyState {
/// UPID of the verify task
pub upid: UPID,
/// State of the verification. Enum.
pub state: VerifyState,
}
#[api(
properties: {
"backup-type": {
schema: BACKUP_TYPE_SCHEMA,
},
"backup-id": {
schema: BACKUP_ID_SCHEMA,
},
"backup-time": {
schema: BACKUP_TIME_SCHEMA,
},
comment: {
schema: SINGLE_LINE_COMMENT_SCHEMA,
optional: true,
},
verification: {
type: SnapshotVerifyState,
optional: true,
},
fingerprint: {
type: String,
optional: true,
},
files: {
items: {
schema: BACKUP_ARCHIVE_NAME_SCHEMA
},
},
owner: {
type: Authid,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Basic information about backup snapshot.
pub struct SnapshotListItem {
pub backup_type: String, // enum
pub backup_id: String,
pub backup_time: i64,
/// The first line from manifest "notes"
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
/// The result of the last run verify task
#[serde(skip_serializing_if = "Option::is_none")]
pub verification: Option<SnapshotVerifyState>,
/// Fingerprint of encryption key
#[serde(skip_serializing_if = "Option::is_none")]
pub fingerprint: Option<Fingerprint>,
/// List of contained archive files.
pub files: Vec<BackupContent>,
/// Overall snapshot size (sum of all archive sizes).
#[serde(skip_serializing_if = "Option::is_none")]
pub size: Option<u64>,
/// The owner of the snapshots group
#[serde(skip_serializing_if = "Option::is_none")]
pub owner: Option<Authid>,
/// Protection from prunes
#[serde(default)]
pub protected: bool,
}
#[api(
properties: {
"backup-type": {
schema: BACKUP_TYPE_SCHEMA,
},
"backup-id": {
schema: BACKUP_ID_SCHEMA,
},
"last-backup": {
schema: BACKUP_TIME_SCHEMA,
},
"backup-count": {
type: Integer,
},
files: {
items: {
schema: BACKUP_ARCHIVE_NAME_SCHEMA
},
},
owner: {
type: Authid,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a backup group.
pub struct GroupListItem {
pub backup_type: String, // enum
pub backup_id: String,
pub last_backup: i64,
/// Number of contained snapshots
pub backup_count: u64,
/// List of contained archive files.
pub files: Vec<String>,
/// The owner of the group
#[serde(skip_serializing_if = "Option::is_none")]
pub owner: Option<Authid>,
/// The first line from group "notes"
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
}
#[api(
properties: {
"backup-type": {
schema: BACKUP_TYPE_SCHEMA,
},
"backup-id": {
schema: BACKUP_ID_SCHEMA,
},
"backup-time": {
schema: BACKUP_TIME_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Prune result.
pub struct PruneListItem {
pub backup_type: String, // enum
pub backup_id: String,
pub backup_time: i64,
/// Keep snapshot
pub keep: bool,
}
#[api(
properties: {
ct: {
type: TypeCounts,
optional: true,
},
host: {
type: TypeCounts,
optional: true,
},
vm: {
type: TypeCounts,
optional: true,
},
other: {
type: TypeCounts,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize, Default)]
/// Counts of groups/snapshots per BackupType.
pub struct Counts {
/// The counts for CT backups
pub ct: Option<TypeCounts>,
/// The counts for Host backups
pub host: Option<TypeCounts>,
/// The counts for VM backups
pub vm: Option<TypeCounts>,
/// The counts for other backup types
pub other: Option<TypeCounts>,
}
#[api()]
#[derive(Serialize, Deserialize, Default)]
/// Backup Type group/snapshot counts.
pub struct TypeCounts {
/// The number of groups of the type.
pub groups: u64,
/// The number of snapshots of the type.
pub snapshots: u64,
}
#[api(
properties: {
"upid": {
optional: true,
type: UPID,
},
},
)]
#[derive(Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Garbage collection status.
pub struct GarbageCollectionStatus {
pub upid: Option<String>,
/// Number of processed index files.
pub index_file_count: usize,
/// Sum of bytes referred by index files.
pub index_data_bytes: u64,
/// Bytes used on disk.
pub disk_bytes: u64,
/// Chunks used on disk.
pub disk_chunks: usize,
/// Sum of removed bytes.
pub removed_bytes: u64,
/// Number of removed chunks.
pub removed_chunks: usize,
/// Sum of pending bytes (pending removal - kept for safety).
pub pending_bytes: u64,
/// Number of pending chunks (pending removal - kept for safety).
pub pending_chunks: usize,
/// Number of chunks marked as .bad by verify that have been removed by GC.
pub removed_bad: usize,
/// Number of chunks still marked as .bad after garbage collection.
pub still_bad: usize,
}
impl Default for GarbageCollectionStatus {
fn default() -> Self {
GarbageCollectionStatus {
upid: None,
index_file_count: 0,
index_data_bytes: 0,
disk_bytes: 0,
disk_chunks: 0,
removed_bytes: 0,
removed_chunks: 0,
pending_bytes: 0,
pending_chunks: 0,
removed_bad: 0,
still_bad: 0,
}
}
}
#[api(
properties: {
"gc-status": {
type: GarbageCollectionStatus,
optional: true,
},
counts: {
type: Counts,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
/// Overall Datastore status and useful information.
pub struct DataStoreStatus {
/// Total space (bytes).
pub total: u64,
/// Used space (bytes).
pub used: u64,
/// Available space (bytes).
pub avail: u64,
/// Status of last GC
#[serde(skip_serializing_if="Option::is_none")]
pub gc_status: Option<GarbageCollectionStatus>,
/// Group/Snapshot counts
#[serde(skip_serializing_if="Option::is_none")]
pub counts: Option<Counts>,
}
pub const ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE: ReturnType = ReturnType {
optional: false,
schema: &ArraySchema::new(
"Returns the list of snapshots.",
&SnapshotListItem::API_SCHEMA,
).schema(),
};
pub const ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE: ReturnType = ReturnType {
optional: false,
schema: &ArraySchema::new(
"Returns the list of archive files inside a backup snapshots.",
&BackupContent::API_SCHEMA,
).schema(),
};
pub const ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE: ReturnType = ReturnType {
optional: false,
schema: &ArraySchema::new(
"Returns the list of backup groups.",
&GroupListItem::API_SCHEMA,
).schema(),
};
pub const ADMIN_DATASTORE_PRUNE_RETURN_TYPE: ReturnType = ReturnType {
optional: false,
schema: &ArraySchema::new(
"Returns the list of snapshots and a flag indicating if there are kept or removed.",
&PruneListItem::API_SCHEMA,
).schema(),
};

View File

@ -1,341 +0,0 @@
use anyhow::{bail, Error};
use proxmox_schema::{ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType};
/// Size units for byte sizes
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum SizeUnit {
Byte,
// SI (base 10)
KByte,
MByte,
GByte,
TByte,
PByte,
// IEC (base 2)
Kibi,
Mebi,
Gibi,
Tebi,
Pebi,
}
impl SizeUnit {
/// Returns the scaling factor
pub fn factor(&self) -> f64 {
match self {
SizeUnit::Byte => 1.0,
// SI (base 10)
SizeUnit::KByte => 1_000.0,
SizeUnit::MByte => 1_000_000.0,
SizeUnit::GByte => 1_000_000_000.0,
SizeUnit::TByte => 1_000_000_000_000.0,
SizeUnit::PByte => 1_000_000_000_000_000.0,
// IEC (base 2)
SizeUnit::Kibi => 1024.0,
SizeUnit::Mebi => 1024.0 * 1024.0,
SizeUnit::Gibi => 1024.0 * 1024.0 * 1024.0,
SizeUnit::Tebi => 1024.0 * 1024.0 * 1024.0 * 1024.0,
SizeUnit::Pebi => 1024.0 * 1024.0 * 1024.0 * 1024.0 * 1024.0,
}
}
/// Gets the biggest possible unit still having a value greater than zero before the decimal point
/// 'binary' specifies if IEC (base 2) units should be used or SI (base 10) ones
pub fn auto_scale(size: f64, binary: bool) -> SizeUnit {
if binary {
let bits = 64 - (size as u64).leading_zeros();
match bits {
51.. => SizeUnit::Pebi,
41..=50 => SizeUnit::Tebi,
31..=40 => SizeUnit::Gibi,
21..=30 => SizeUnit::Mebi,
11..=20 => SizeUnit::Kibi,
_ => SizeUnit::Byte,
}
} else if size >= 1_000_000_000_000_000.0 {
SizeUnit::PByte
} else if size >= 1_000_000_000_000.0 {
SizeUnit::TByte
} else if size >= 1_000_000_000.0 {
SizeUnit::GByte
} else if size >= 1_000_000.0 {
SizeUnit::MByte
} else if size >= 1_000.0 {
SizeUnit::KByte
} else {
SizeUnit::Byte
}
}
}
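A quick sketch of `auto_scale` picking the unit purely from magnitude:

    // Sketch: 2 MiB worth of bytes scales to Mebi in binary mode and to
    // MByte in SI mode.
    fn main() {
        let size = 2.0 * 1024.0 * 1024.0;
        assert_eq!(SizeUnit::auto_scale(size, true), SizeUnit::Mebi);
        assert_eq!(SizeUnit::auto_scale(size, false), SizeUnit::MByte);
    }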
/// Returns the string representation
impl std::fmt::Display for SizeUnit {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
SizeUnit::Byte => write!(f, "B"),
// SI (base 10)
SizeUnit::KByte => write!(f, "KB"),
SizeUnit::MByte => write!(f, "MB"),
SizeUnit::GByte => write!(f, "GB"),
SizeUnit::TByte => write!(f, "TB"),
SizeUnit::PByte => write!(f, "PB"),
// IEC (base 2)
SizeUnit::Kibi => write!(f, "KiB"),
SizeUnit::Mebi => write!(f, "MiB"),
SizeUnit::Gibi => write!(f, "GiB"),
SizeUnit::Tebi => write!(f, "TiB"),
SizeUnit::Pebi => write!(f, "PiB"),
}
}
}
/// Strips a trailing SizeUnit, including trailing whitespace.
/// Supports both IEC and SI based scales; the B/b byte symbol is optional.
fn strip_unit(v: &str) -> (&str, SizeUnit) {
let v = v.strip_suffix(&['b', 'B'][..]).unwrap_or(v); // byte is implied anyway
let (v, binary) = match v.strip_suffix('i') {
Some(n) => (n, true),
None => (v, false),
};
let mut unit = SizeUnit::Byte;
(v.strip_suffix(|c: char| match c {
'k' | 'K' if !binary => { unit = SizeUnit::KByte; true }
'm' | 'M' if !binary => { unit = SizeUnit::MByte; true }
'g' | 'G' if !binary => { unit = SizeUnit::GByte; true }
't' | 'T' if !binary => { unit = SizeUnit::TByte; true }
'p' | 'P' if !binary => { unit = SizeUnit::PByte; true }
// binary (IEC recommended) variants
'k' | 'K' if binary => { unit = SizeUnit::Kibi; true }
'm' | 'M' if binary => { unit = SizeUnit::Mebi; true }
'g' | 'G' if binary => { unit = SizeUnit::Gibi; true }
't' | 'T' if binary => { unit = SizeUnit::Tebi; true }
'p' | 'P' if binary => { unit = SizeUnit::Pebi; true }
_ => false
}).unwrap_or(v).trim_end(), unit)
}
/// Byte size which can be displayed in a human friendly way
#[derive(Debug, Copy, Clone, UpdaterType)]
pub struct HumanByte {
/// The significant value; it does not include any factor of the `unit`
size: f64,
/// The scale/unit of the value
unit: SizeUnit,
}
fn verify_human_byte(s: &str) -> Result<(), Error> {
match s.parse::<HumanByte>() {
Ok(_) => Ok(()),
Err(err) => bail!("byte-size parse error for '{}': {}", s, err),
}
}
impl ApiType for HumanByte {
const API_SCHEMA: Schema = StringSchema::new(
"Byte size with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).",
)
.format(&ApiStringFormat::VerifyFn(verify_human_byte))
.min_length(1)
.max_length(64)
.schema();
}
impl HumanByte {
/// Create instance with size and unit (size must be positive)
pub fn with_unit(size: f64, unit: SizeUnit) -> Result<Self, Error> {
if size < 0.0 {
bail!("byte size may not be negative");
}
Ok(HumanByte { size, unit })
}
/// Create a new instance with optimal binary unit computed
pub fn new_binary(size: f64) -> Self {
let unit = SizeUnit::auto_scale(size, true);
HumanByte { size: size / unit.factor(), unit }
}
/// Create a new instance with optimal decimal unit computed
pub fn new_decimal(size: f64) -> Self {
let unit = SizeUnit::auto_scale(size, false);
HumanByte { size: size / unit.factor(), unit }
}
/// Returns the size as u64 number of bytes
pub fn as_u64(&self) -> u64 {
self.as_f64() as u64
}
/// Returns the size as f64 number of bytes
pub fn as_f64(&self) -> f64 {
self.size * self.unit.factor()
}
/// Returns a copy with optimal binary unit computed
pub fn auto_scale_binary(self) -> Self {
HumanByte::new_binary(self.as_f64())
}
/// Returns a copy with optimal decimal unit computed
pub fn auto_scale_decimal(self) -> Self {
HumanByte::new_decimal(self.as_f64())
}
}
impl From<u64> for HumanByte {
fn from(v: u64) -> Self {
HumanByte::new_binary(v as f64)
}
}
impl From<usize> for HumanByte {
fn from(v: usize) -> Self {
HumanByte::new_binary(v as f64)
}
}
impl std::fmt::Display for HumanByte {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let precision = f.precision().unwrap_or(3) as f64;
let precision_factor = 1.0 * 10.0_f64.powf(precision);
// this could cause loss of information, rust has sadly no shortest-max-X flt2dec fmt yet
let size = ((self.size * precision_factor).round()) / precision_factor;
write!(f, "{} {}", size, self.unit)
}
}
impl std::str::FromStr for HumanByte {
type Err = Error;
fn from_str(v: &str) -> Result<Self, Error> {
let (v, unit) = strip_unit(v);
HumanByte::with_unit(v.parse()?, unit)
}
}
proxmox::forward_deserialize_to_from_str!(HumanByte);
proxmox::forward_serialize_to_display!(HumanByte);
#[test]
fn test_human_byte_parser() -> Result<(), Error> {
assert!("-10".parse::<HumanByte>().is_err()); // negative size
fn do_test(v: &str, size: f64, unit: SizeUnit, as_str: &str) -> Result<(), Error> {
let h: HumanByte = v.parse()?;
if h.size != size {
bail!("got unexpected size for '{}' ({} != {})", v, h.size, size);
}
if h.unit != unit {
bail!("got unexpected unit for '{}' ({:?} != {:?})", v, h.unit, unit);
}
let new = h.to_string();
if &new != as_str {
bail!("to_string failed for '{}' ({:?} != {:?})", v, new, as_str);
}
Ok(())
}
fn test(v: &str, size: f64, unit: SizeUnit, as_str: &str) -> bool {
match do_test(v, size, unit, as_str) {
Ok(_) => true,
Err(err) => {
eprintln!("{}", err); // makes debugging easier
false
}
}
}
assert!(test("14", 14.0, SizeUnit::Byte, "14 B"));
assert!(test("14.4", 14.4, SizeUnit::Byte, "14.4 B"));
assert!(test("14.45", 14.45, SizeUnit::Byte, "14.45 B"));
assert!(test("14.456", 14.456, SizeUnit::Byte, "14.456 B"));
assert!(test("14.4567", 14.4567, SizeUnit::Byte, "14.457 B"));
let h: HumanByte = "1.2345678".parse()?;
assert_eq!(&format!("{:.0}", h), "1 B");
assert_eq!(&format!("{:.0}", h.as_f64()), "1"); // use as_f64 to get raw bytes without unit
assert_eq!(&format!("{:.1}", h), "1.2 B");
assert_eq!(&format!("{:.2}", h), "1.23 B");
assert_eq!(&format!("{:.3}", h), "1.235 B");
assert_eq!(&format!("{:.4}", h), "1.2346 B");
assert_eq!(&format!("{:.5}", h), "1.23457 B");
assert_eq!(&format!("{:.6}", h), "1.234568 B");
assert_eq!(&format!("{:.7}", h), "1.2345678 B");
assert_eq!(&format!("{:.8}", h), "1.2345678 B");
assert!(test("987654321", 987654321.0, SizeUnit::Byte, "987654321 B"));
assert!(test("1300b", 1300.0, SizeUnit::Byte, "1300 B"));
assert!(test("1300B", 1300.0, SizeUnit::Byte, "1300 B"));
assert!(test("1300 B", 1300.0, SizeUnit::Byte, "1300 B"));
assert!(test("1300 b", 1300.0, SizeUnit::Byte, "1300 B"));
assert!(test("1.5KB", 1.5, SizeUnit::KByte, "1.5 KB"));
assert!(test("1.5kb", 1.5, SizeUnit::KByte, "1.5 KB"));
assert!(test("1.654321MB", 1.654_321, SizeUnit::MByte, "1.654 MB"));
assert!(test("2.0GB", 2.0, SizeUnit::GByte, "2 GB"));
assert!(test("1.4TB", 1.4, SizeUnit::TByte, "1.4 TB"));
assert!(test("1.4tb", 1.4, SizeUnit::TByte, "1.4 TB"));
assert!(test("2KiB", 2.0, SizeUnit::Kibi, "2 KiB"));
assert!(test("2Ki", 2.0, SizeUnit::Kibi, "2 KiB"));
assert!(test("2kib", 2.0, SizeUnit::Kibi, "2 KiB"));
assert!(test("2.3454MiB", 2.3454, SizeUnit::Mebi, "2.345 MiB"));
assert!(test("2.3456MiB", 2.3456, SizeUnit::Mebi, "2.346 MiB"));
assert!(test("4gib", 4.0, SizeUnit::Gibi, "4 GiB"));
Ok(())
}
#[test]
fn test_human_byte_auto_unit_decimal() {
fn convert(b: u64) -> String {
HumanByte::new_decimal(b as f64).to_string()
}
assert_eq!(convert(987), "987 B");
assert_eq!(convert(1022), "1.022 KB");
assert_eq!(convert(9_000), "9 KB");
assert_eq!(convert(1_000), "1 KB");
assert_eq!(convert(1_000_000), "1 MB");
assert_eq!(convert(1_000_000_000), "1 GB");
assert_eq!(convert(1_000_000_000_000), "1 TB");
assert_eq!(convert(1_000_000_000_000_000), "1 PB");
assert_eq!(convert((1 << 30) + 103 * (1 << 20)), "1.182 GB");
assert_eq!(convert((1 << 30) + 128 * (1 << 20)), "1.208 GB");
assert_eq!(convert((2 << 50) + 500 * (1 << 40)), "2.802 PB");
}
#[test]
fn test_human_byte_auto_unit_binary() {
fn convert(b: u64) -> String {
HumanByte::from(b).to_string()
}
assert_eq!(convert(0), "0 B");
assert_eq!(convert(987), "987 B");
assert_eq!(convert(1022), "1022 B");
assert_eq!(convert(9_000), "8.789 KiB");
assert_eq!(convert(10_000_000), "9.537 MiB");
assert_eq!(convert(10_000_000_000), "9.313 GiB");
assert_eq!(convert(10_000_000_000_000), "9.095 TiB");
assert_eq!(convert(1 << 10), "1 KiB");
assert_eq!(convert((1 << 10) * 10), "10 KiB");
assert_eq!(convert(1 << 20), "1 MiB");
assert_eq!(convert(1 << 30), "1 GiB");
assert_eq!(convert(1 << 40), "1 TiB");
assert_eq!(convert(1 << 50), "1 PiB");
assert_eq!(convert((1 << 30) + 103 * (1 << 20)), "1.101 GiB");
assert_eq!(convert((1 << 30) + 128 * (1 << 20)), "1.125 GiB");
assert_eq!(convert((1 << 40) + 128 * (1 << 30)), "1.125 TiB");
assert_eq!(convert((2 << 50) + 512 * (1 << 40)), "2.5 PiB");
}

View File

@ -1,464 +0,0 @@
use anyhow::format_err;
use std::str::FromStr;
use regex::Regex;
use serde::{Deserialize, Serialize};
use proxmox_schema::*;
use crate::{
Userid, Authid, RateLimitConfig,
REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
SINGLE_LINE_COMMENT_SCHEMA, PROXMOX_SAFE_ID_FORMAT, DATASTORE_SCHEMA,
BACKUP_GROUP_SCHEMA, BACKUP_TYPE_SCHEMA,
};
const_regex!{
/// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
/// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:ACTUAL_JOB_ID'
pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
}
pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
"Run sync job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
.type_text("<calendar-event>")
.schema();
pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
"Run garbage collection job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
.type_text("<calendar-event>")
.schema();
pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
"Run prune job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
.type_text("<calendar-event>")
.schema();
pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new(
"Run verify job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
.type_text("<calendar-event>")
.schema();
pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
"Delete vanished backups. This remove the local copy if the remote backup was deleted.")
.default(false)
.schema();
#[api(
properties: {
"next-run": {
description: "Estimated time of the next run (UNIX epoch).",
optional: true,
type: Integer,
},
"last-run-state": {
description: "Result of the last run.",
optional: true,
type: String,
},
"last-run-upid": {
description: "Task UPID of the last run.",
optional: true,
type: String,
},
"last-run-endtime": {
description: "Endtime of the last run.",
optional: true,
type: Integer,
},
}
)]
#[derive(Serialize,Deserialize,Default)]
#[serde(rename_all="kebab-case")]
/// Job Scheduling Status
pub struct JobScheduleStatus {
#[serde(skip_serializing_if="Option::is_none")]
pub next_run: Option<i64>,
#[serde(skip_serializing_if="Option::is_none")]
pub last_run_state: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub last_run_upid: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub last_run_endtime: Option<i64>,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// When to send notifications
pub enum Notify {
/// Never send notification
Never,
/// Send notifications for failed and successful jobs
Always,
/// Send notifications for failed jobs only
Error,
}
#[api(
properties: {
gc: {
type: Notify,
optional: true,
},
verify: {
type: Notify,
optional: true,
},
sync: {
type: Notify,
optional: true,
},
},
)]
#[derive(Debug, Serialize, Deserialize)]
/// Datastore notify settings
pub struct DatastoreNotify {
/// Garbage collection settings
pub gc: Option<Notify>,
/// Verify job setting
pub verify: Option<Notify>,
/// Sync job setting
pub sync: Option<Notify>,
}
pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
"Datastore notification setting")
.format(&ApiStringFormat::PropertyString(&DatastoreNotify::API_SCHEMA))
.schema();
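On the wire this travels as a property string such as `gc=never,verify=always`; the typed equivalent is sketched here via plain JSON for brevity (the `demo` wrapper is hypothetical, assuming `serde_json` is available):

    // Sketch: unset members (sync) fall back to None, i.e. the default policy.
    fn demo() -> Result<(), serde_json::Error> {
        let notify: DatastoreNotify = serde_json::from_value(serde_json::json!({
            "gc": "never",
            "verify": "always"
        }))?;
        assert!(notify.sync.is_none());
        Ok(())
    }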
pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
"Do not verify backups that are already verified if their verification is not outdated.")
.default(true)
.schema();
pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new(
"Days after that a verification becomes outdated")
.minimum(1)
.schema();
#[api(
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
store: {
schema: DATASTORE_SCHEMA,
},
"ignore-verified": {
optional: true,
schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
},
"outdated-after": {
optional: true,
schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
schedule: {
optional: true,
schema: VERIFICATION_SCHEDULE_SCHEMA,
},
}
)]
#[derive(Serialize,Deserialize,Updater)]
#[serde(rename_all="kebab-case")]
/// Verification Job
pub struct VerificationJobConfig {
/// unique ID to address this job
#[updater(skip)]
pub id: String,
/// the datastore ID this verification job affects
pub store: String,
#[serde(skip_serializing_if="Option::is_none")]
/// if not set to false, check the age of the last snapshot verification to filter
/// out recent ones, depending on 'outdated_after' configuration.
pub ignore_verified: Option<bool>,
#[serde(skip_serializing_if="Option::is_none")]
/// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false.
pub outdated_after: Option<i64>,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
/// when to schedule this job in calendar event notation
pub schedule: Option<String>,
}
#[api(
properties: {
config: {
type: VerificationJobConfig,
},
status: {
type: JobScheduleStatus,
},
},
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all="kebab-case")]
/// Status of Verification Job
pub struct VerificationJobStatus {
#[serde(flatten)]
pub config: VerificationJobConfig,
#[serde(flatten)]
pub status: JobScheduleStatus,
}
#[api(
properties: {
store: {
schema: DATASTORE_SCHEMA,
},
pool: {
schema: MEDIA_POOL_NAME_SCHEMA,
},
drive: {
schema: DRIVE_NAME_SCHEMA,
},
"eject-media": {
description: "Eject media upon job completion.",
type: bool,
optional: true,
},
"export-media-set": {
description: "Export media set upon job completion.",
type: bool,
optional: true,
},
"latest-only": {
description: "Backup latest snapshots only.",
type: bool,
optional: true,
},
"notify-user": {
optional: true,
type: Userid,
},
"group-filter": {
schema: GROUP_FILTER_LIST_SCHEMA,
optional: true,
},
}
)]
#[derive(Serialize,Deserialize,Clone,Updater)]
#[serde(rename_all="kebab-case")]
/// Tape Backup Job Setup
pub struct TapeBackupJobSetup {
pub store: String,
pub pool: String,
pub drive: String,
#[serde(skip_serializing_if="Option::is_none")]
pub eject_media: Option<bool>,
#[serde(skip_serializing_if="Option::is_none")]
pub export_media_set: Option<bool>,
#[serde(skip_serializing_if="Option::is_none")]
pub latest_only: Option<bool>,
/// Send job email notification to this user
#[serde(skip_serializing_if="Option::is_none")]
pub notify_user: Option<Userid>,
#[serde(skip_serializing_if="Option::is_none")]
pub group_filter: Option<Vec<GroupFilter>>,
}
#[api(
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
setup: {
type: TapeBackupJobSetup,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
schedule: {
optional: true,
schema: SYNC_SCHEDULE_SCHEMA,
},
}
)]
#[derive(Serialize,Deserialize,Clone,Updater)]
#[serde(rename_all="kebab-case")]
/// Tape Backup Job
pub struct TapeBackupJobConfig {
#[updater(skip)]
pub id: String,
#[serde(flatten)]
pub setup: TapeBackupJobSetup,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub schedule: Option<String>,
}
#[api(
properties: {
config: {
type: TapeBackupJobConfig,
},
status: {
type: JobScheduleStatus,
},
},
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all="kebab-case")]
/// Status of Tape Backup Job
pub struct TapeBackupJobStatus {
#[serde(flatten)]
pub config: TapeBackupJobConfig,
#[serde(flatten)]
pub status: JobScheduleStatus,
/// Next tape used (best guess)
#[serde(skip_serializing_if="Option::is_none")]
pub next_media_label: Option<String>,
}
#[derive(Clone, Debug)]
/// Filter for matching `BackupGroup`s, for use with `BackupGroup::filter`.
pub enum GroupFilter {
/// BackupGroup type - either `vm`, `ct`, or `host`.
BackupType(String),
/// Full identifier of BackupGroup, including type
Group(String),
/// A regular expression matched against the full identifier of the BackupGroup
Regex(Regex),
}
impl std::str::FromStr for GroupFilter {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.split_once(":") {
Some(("group", value)) => parse_simple_value(value, &BACKUP_GROUP_SCHEMA).map(|_| GroupFilter::Group(value.to_string())),
Some(("type", value)) => parse_simple_value(value, &BACKUP_TYPE_SCHEMA).map(|_| GroupFilter::BackupType(value.to_string())),
Some(("regex", value)) => Ok(GroupFilter::Regex(Regex::new(value)?)),
Some((ty, _value)) => Err(format_err!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty)),
None => Err(format_err!("input doesn't match expected format '<group:GROUP||type:<vm|ct|host>|regex:REGEX>'")),
}.map_err(|err| format_err!("'{}' - {}", s, err))
}
}
// used for serializing below, caution!
impl std::fmt::Display for GroupFilter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
GroupFilter::BackupType(backup_type) => write!(f, "type:{}", backup_type),
GroupFilter::Group(backup_group) => write!(f, "group:{}", backup_group),
GroupFilter::Regex(regex) => write!(f, "regex:{}", regex.as_str()),
}
}
}
proxmox::forward_deserialize_to_from_str!(GroupFilter);
proxmox::forward_serialize_to_display!(GroupFilter);
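A usage sketch covering the three accepted prefixes and the error path (the `demo` wrapper is hypothetical):

    use std::str::FromStr;

    fn demo() -> Result<(), anyhow::Error> {
        let _by_type = GroupFilter::from_str("type:vm")?;
        let _by_group = GroupFilter::from_str("group:vm/100")?;
        let _by_regex = GroupFilter::from_str("regex:^vm/1[0-9][0-9]$")?;
        assert!(GroupFilter::from_str("foo:bar").is_err());
        Ok(())
    }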
fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> {
GroupFilter::from_str(input).map(|_| ())
}
pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new(
"Group filter based on group identifier ('group:GROUP'), group type ('type:<vm|ct|host>'), or regex ('regex:RE').")
.format(&ApiStringFormat::VerifyFn(verify_group_filter))
.type_text("<type:<vm|ct|host>|group:GROUP|regex:RE>")
.schema();
pub const GROUP_FILTER_LIST_SCHEMA: Schema = ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema();
#[api(
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
store: {
schema: DATASTORE_SCHEMA,
},
"owner": {
type: Authid,
optional: true,
},
remote: {
schema: REMOTE_ID_SCHEMA,
},
"remote-store": {
schema: DATASTORE_SCHEMA,
},
"remove-vanished": {
schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
optional: true,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
limit: {
type: RateLimitConfig,
},
schedule: {
optional: true,
schema: SYNC_SCHEDULE_SCHEMA,
},
"group-filter": {
schema: GROUP_FILTER_LIST_SCHEMA,
optional: true,
},
}
)]
#[derive(Serialize,Deserialize,Clone,Updater)]
#[serde(rename_all="kebab-case")]
/// Sync Job
pub struct SyncJobConfig {
#[updater(skip)]
pub id: String,
pub store: String,
#[serde(skip_serializing_if="Option::is_none")]
pub owner: Option<Authid>,
pub remote: String,
pub remote_store: String,
#[serde(skip_serializing_if="Option::is_none")]
pub remove_vanished: Option<bool>,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub schedule: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub group_filter: Option<Vec<GroupFilter>>,
#[serde(flatten)]
pub limit: RateLimitConfig,
}
#[api(
properties: {
config: {
type: SyncJobConfig,
},
status: {
type: JobScheduleStatus,
},
},
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all="kebab-case")]
/// Status of Sync Job
pub struct SyncJobStatus {
#[serde(flatten)]
pub config: SyncJobConfig,
#[serde(flatten)]
pub status: JobScheduleStatus,
}

View File

@ -1,56 +0,0 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::api;
use crate::CERT_FINGERPRINT_SHA256_SCHEMA;
#[api(default: "scrypt")]
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
/// Key derivation function for password protected encryption keys.
pub enum Kdf {
/// Do not encrypt the key.
None,
/// Encrypt the key with a password using SCrypt.
Scrypt,
/// Encrypt the key with a password using PBKDF2.
PBKDF2,
}
impl Default for Kdf {
#[inline]
fn default() -> Self {
Kdf::Scrypt
}
}
#[api(
properties: {
kdf: {
type: Kdf,
},
fingerprint: {
schema: CERT_FINGERPRINT_SHA256_SCHEMA,
optional: true,
},
},
)]
#[derive(Deserialize, Serialize)]
/// Encryption Key Information
pub struct KeyInfo {
/// Path to key (if stored in a file)
#[serde(skip_serializing_if="Option::is_none")]
pub path: Option<String>,
pub kdf: Kdf,
/// Key creation time
pub created: i64,
/// Key modification time
pub modified: i64,
/// Key fingerprint
#[serde(skip_serializing_if="Option::is_none")]
pub fingerprint: Option<String>,
/// Password hint
#[serde(skip_serializing_if="Option::is_none")]
pub hint: Option<String>,
}

View File

@ -1,468 +0,0 @@
//! Basic API types used by most of the PBS code.
use serde::{Deserialize, Serialize};
use anyhow::bail;
use proxmox_schema::{
api, const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, ReturnType,
};
use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE};
use proxmox_time::parse_daily_duration;
#[rustfmt::skip]
#[macro_export]
macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => { r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)" }; }
#[rustfmt::skip]
#[macro_export]
macro_rules! BACKUP_ID_RE { () => (r"[A-Za-z0-9_][A-Za-z0-9._\-]*") }
#[rustfmt::skip]
#[macro_export]
macro_rules! BACKUP_TYPE_RE { () => (r"(?:host|vm|ct)") }
#[rustfmt::skip]
#[macro_export]
macro_rules! BACKUP_TIME_RE { () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z") }
#[rustfmt::skip]
#[macro_export]
macro_rules! SNAPSHOT_PATH_REGEX_STR {
() => (
concat!(r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")")
);
}
mod acl;
pub use acl::*;
mod datastore;
pub use datastore::*;
mod human_byte;
pub use human_byte::HumanByte;
mod jobs;
pub use jobs::*;
mod key_derivation;
pub use key_derivation::{Kdf, KeyInfo};
mod network;
pub use network::*;
#[macro_use]
mod userid;
pub use userid::Authid;
pub use userid::Userid;
pub use userid::{Realm, RealmRef};
pub use userid::{Tokenname, TokennameRef};
pub use userid::{Username, UsernameRef};
pub use userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA};
#[macro_use]
mod user;
pub use user::*;
pub use proxmox_schema::upid::*;
mod crypto;
pub use crypto::{CryptMode, Fingerprint, bytes_as_fingerprint};
pub mod file_restore;
mod openid;
pub use openid::*;
mod remote;
pub use remote::*;
mod tape;
pub use tape::*;
mod traffic_control;
pub use traffic_control::*;
mod zfs;
pub use zfs::*;
#[rustfmt::skip]
#[macro_use]
mod local_macros {
macro_rules! DNS_LABEL { () => (r"(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
macro_rules! DNS_NAME { () => (concat!(r"(?:(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!(), ")")) }
macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) }
macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) }
macro_rules! DNS_ALIAS_LABEL { () => (r"(?:[a-zA-Z0-9_](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
macro_rules! DNS_ALIAS_NAME {
() => (concat!(r"(?:(?:", DNS_ALIAS_LABEL!() , r"\.)*", DNS_ALIAS_LABEL!(), ")"))
}
}
const_regex! {
pub IP_V4_REGEX = concat!(r"^", IPV4RE!(), r"$");
pub IP_V6_REGEX = concat!(r"^", IPV6RE!(), r"$");
pub IP_REGEX = concat!(r"^", IPRE!(), r"$");
pub CIDR_V4_REGEX = concat!(r"^", CIDR_V4_REGEX_STR!(), r"$");
pub CIDR_V6_REGEX = concat!(r"^", CIDR_V6_REGEX_STR!(), r"$");
pub CIDR_REGEX = concat!(r"^(?:", CIDR_V4_REGEX_STR!(), "|", CIDR_V6_REGEX_STR!(), r")$");
pub HOSTNAME_REGEX = r"^(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)$";
pub DNS_NAME_REGEX = concat!(r"^", DNS_NAME!(), r"$");
pub DNS_ALIAS_REGEX = concat!(r"^", DNS_ALIAS_NAME!(), r"$");
pub DNS_NAME_OR_IP_REGEX = concat!(r"^(?:", DNS_NAME!(), "|", IPRE!(), r")$");
pub SHA256_HEX_REGEX = r"^[a-f0-9]{64}$"; // fixme: define in common_regex ?
pub PASSWORD_REGEX = r"^[[:^cntrl:]]*$"; // everything but control characters
pub UUID_REGEX = r"^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}$";
pub SYSTEMD_DATETIME_REGEX = r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$"; // fixme: define in common_regex ?
pub FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";
/// Regex for safe identifiers.
///
/// This
/// [article](https://dwheeler.com/essays/fixing-unix-linux-filenames.html)
/// contains further information on why it is reasonable to restrict
/// names this way. This is not only useful for filenames, but for
/// any identifier command line tools work with.
pub PROXMOX_SAFE_ID_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r"$");
pub SINGLE_LINE_COMMENT_REGEX = r"^[[:^cntrl:]]*$";
pub BACKUP_REPO_URL_REGEX = concat!(
r"^^(?:(?:(",
USER_ID_REGEX_STR!(), "|", APITOKEN_ID_REGEX_STR!(),
")@)?(",
DNS_NAME!(), "|", IPRE_BRACKET!(),
"):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$"
);
pub BLOCKDEVICE_NAME_REGEX = r"^(?:(?:h|s|x?v)d[a-z]+)|(?:nvme\d+n\d+)$";
pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$");
}
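To illustrate what these patterns accept, a standalone sketch compiling one of them with the `regex` crate directly (the `const_regex!` macro wraps the same pattern strings for lazy compilation):

    use regex::Regex;

    fn main() {
        // Same pattern as PROXMOX_SAFE_ID_REGEX above.
        let safe_id = Regex::new(r"^(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)$").unwrap();
        assert!(safe_id.is_match("store_2"));
        assert!(!safe_id.is_match("../etc")); // may not start with '.'
    }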
pub const IP_V4_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_V4_REGEX);
pub const IP_V6_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_V6_REGEX);
pub const IP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_REGEX);
pub const CIDR_V4_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_V4_REGEX);
pub const CIDR_V6_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_V6_REGEX);
pub const CIDR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_REGEX);
pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
pub const PASSWORD_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PASSWORD_REGEX);
pub const UUID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&UUID_REGEX);
pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);
pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX);
pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX);
pub const HOSTNAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOSTNAME_REGEX);
pub const DNS_ALIAS_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&DNS_ALIAS_REGEX);
pub const DAILY_DURATION_FORMAT: ApiStringFormat =
ApiStringFormat::VerifyFn(|s| parse_daily_duration(s).map(drop));
pub const SEARCH_DOMAIN_SCHEMA: Schema =
StringSchema::new("Search domain for host-name lookup.").schema();
pub const FIRST_DNS_SERVER_SCHEMA: Schema =
StringSchema::new("First name server IP address.")
.format(&IP_FORMAT)
.schema();
pub const SECOND_DNS_SERVER_SCHEMA: Schema =
StringSchema::new("Second name server IP address.")
.format(&IP_FORMAT)
.schema();
pub const THIRD_DNS_SERVER_SCHEMA: Schema =
StringSchema::new("Third name server IP address.")
.format(&IP_FORMAT)
.schema();
pub const HOSTNAME_SCHEMA: Schema = StringSchema::new("Hostname (as defined in RFC1123).")
.format(&HOSTNAME_FORMAT)
.schema();
pub const DNS_NAME_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&DNS_NAME_REGEX);
pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX);
pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP address.")
.format(&DNS_NAME_OR_IP_FORMAT)
.schema();
pub const NODE_SCHEMA: Schema = StringSchema::new("Node name (or 'localhost')")
.format(&ApiStringFormat::VerifyFn(|node| {
if node == "localhost" || node == proxmox::tools::nodename() {
Ok(())
} else {
bail!("no such node '{}'", node);
}
}))
.schema();
pub const TIME_ZONE_SCHEMA: Schema = StringSchema::new(
"Time zone. The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names.")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(2)
.max_length(64)
.schema();
pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name (/sys/block/<name>).")
.format(&BLOCKDEVICE_NAME_FORMAT)
.min_length(3)
.max_length(64)
.schema();
pub const DISK_ARRAY_SCHEMA: Schema = ArraySchema::new(
"Disk name list.", &BLOCKDEVICE_NAME_SCHEMA)
.schema();
pub const DISK_LIST_SCHEMA: Schema = StringSchema::new(
"A list of disk names, comma separated.")
.format(&ApiStringFormat::PropertyString(&DISK_ARRAY_SCHEMA))
.schema();
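// As a property string this accepts e.g. "sda,sdb,nvme0n1" (hypothetical
// device names), which the ArraySchema above then validates element-wise.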
pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.")
.format(&PASSWORD_FORMAT)
.min_length(1)
.max_length(1024)
.schema();
pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
.format(&PASSWORD_FORMAT)
.min_length(5)
.max_length(64)
.schema();
pub const REALM_ID_SCHEMA: Schema = StringSchema::new("Realm name.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(2)
.max_length(32)
.schema();
pub const FINGERPRINT_SHA256_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&FINGERPRINT_SHA256_REGEX);
pub const CERT_FINGERPRINT_SHA256_SCHEMA: Schema =
StringSchema::new("X509 certificate fingerprint (sha256).")
.format(&FINGERPRINT_SHA256_FORMAT)
.schema();
pub const PROXMOX_SAFE_ID_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
pub const SINGLE_LINE_COMMENT_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&SINGLE_LINE_COMMENT_REGEX);
pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (single line).")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.schema();
pub const SUBSCRIPTION_KEY_SCHEMA: Schema = StringSchema::new("Proxmox Backup Server subscription key.")
.format(&SUBSCRIPTION_KEY_FORMAT)
.min_length(15)
.max_length(16)
.schema();
pub const SERVICE_ID_SCHEMA: Schema = StringSchema::new("Service ID.")
.max_length(256)
.schema();
pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
"Prevent changes if current configuration file has different \
SHA256 digest. This can be used to prevent concurrent \
modifications.",
)
.format(&PVE_CONFIG_DIGEST_FORMAT)
.schema();
/// API schema format definition for repository URLs
pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX);
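// Accepted forms include (host, user and store names are hypothetical):
//   mystore
//   backup.example.com:mystore
//   admin@pbs@backup.example.com:8007:mystore
//   admin@pbs!token@[fd00::1]:mystore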
// Complex type definitions
#[api()]
#[derive(Default, Serialize, Deserialize)]
/// Storage space usage information.
pub struct StorageStatus {
/// Total space (bytes).
pub total: u64,
/// Used space (bytes).
pub used: u64,
/// Available space (bytes).
pub avail: u64,
}
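// Sketch (hypothetical test, assuming serde_json is available): the struct
// serializes to three plain byte counters, matching the status API output.
#[cfg(test)]
#[test]
fn storage_status_serializes_to_plain_counters() {
    let status = StorageStatus { total: 100, used: 40, avail: 60 };
    let value = serde_json::to_value(&status).unwrap();
    assert_eq!(value, serde_json::json!({ "total": 100, "used": 40, "avail": 60 }));
}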
pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(1)
.max_length(64)
.schema();
#[api]
#[derive(Deserialize, Serialize)]
/// RSA public key information
pub struct RsaPubKeyInfo {
/// Path to key (if stored in a file)
#[serde(skip_serializing_if="Option::is_none")]
pub path: Option<String>,
/// RSA exponent
pub exponent: String,
/// Hex-encoded RSA modulus
pub modulus: String,
/// Key (modulus) length in bits
pub length: usize,
}
impl std::convert::TryFrom<openssl::rsa::Rsa<openssl::pkey::Public>> for RsaPubKeyInfo {
type Error = anyhow::Error;
fn try_from(value: openssl::rsa::Rsa<openssl::pkey::Public>) -> Result<Self, Self::Error> {
let modulus = value.n().to_hex_str()?.to_string();
let exponent = value.e().to_dec_str()?.to_string();
let length = value.size() as usize * 8;
Ok(Self {
path: None,
exponent,
modulus,
length,
})
}
}
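// Usage sketch (hypothetical test): build the public half of a freshly
// generated key via `Rsa::from_public_components` and convert it.
#[cfg(test)]
#[test]
fn rsa_pub_key_info_from_generated_key() {
    use std::convert::TryFrom;
    let private = openssl::rsa::Rsa::generate(2048).unwrap();
    let public = openssl::rsa::Rsa::from_public_components(
        private.n().to_owned().unwrap(),
        private.e().to_owned().unwrap(),
    )
    .unwrap();
    let info = RsaPubKeyInfo::try_from(public).unwrap();
    assert_eq!(info.length, 2048);
    assert_eq!(info.exponent, "65537"); // the default public exponent
}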
#[api()]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "PascalCase")]
/// Describes a package for which an update is available.
pub struct APTUpdateInfo {
/// Package name
pub package: String,
/// Package title
pub title: String,
/// Package architecture
pub arch: String,
/// Human readable package description
pub description: String,
/// New version to be updated to
pub version: String,
/// Old version currently installed
pub old_version: String,
/// Package origin
pub origin: String,
/// Package priority in human-readable form
pub priority: String,
/// Package section
pub section: String,
/// URL under which the package's changelog can be retrieved
pub change_log_url: String,
/// Custom extra field for additional package information
#[serde(skip_serializing_if="Option::is_none")]
pub extra_info: Option<String>,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Node Power command type.
pub enum NodePowerCommand {
/// Restart the server
Reboot,
/// Shutdown the server
Shutdown,
}
#[api()]
#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum TaskStateType {
/// Ok
OK,
/// Warning
Warning,
/// Error
Error,
/// Unknown
Unknown,
}
#[api(
properties: {
upid: { schema: UPID::API_SCHEMA },
},
)]
#[derive(Serialize, Deserialize)]
/// Task properties.
pub struct TaskListItem {
pub upid: String,
/// The node name where the task is running on.
pub node: String,
/// The Unix PID
pub pid: i64,
/// The task's process start time (as read from procfs, in clock ticks)
pub pstart: u64,
/// The task start time (Epoch)
pub starttime: i64,
/// Worker type (arbitrary ASCII string)
pub worker_type: String,
/// Worker ID (arbitrary ASCII string)
pub worker_id: Option<String>,
/// The authenticated entity who started the task
pub user: String,
/// The task end time (Epoch)
#[serde(skip_serializing_if="Option::is_none")]
pub endtime: Option<i64>,
/// Task end status
#[serde(skip_serializing_if="Option::is_none")]
pub status: Option<String>,
}
pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType {
optional: false,
schema: &ArraySchema::new(
"A list of tasks.",
&TaskListItem::API_SCHEMA,
).schema(),
};
#[api()]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "UPPERCASE")]
/// RRD consolidation mode
pub enum RRDMode {
/// Maximum
Max,
/// Average
Average,
}
#[api()]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// RRD time frame
pub enum RRDTimeFrame {
/// Hour
Hour,
/// Day
Day,
/// Week
Week,
/// Month
Month,
/// Year
Year,
/// Decade (10 years)
Decade,
}

View File

@ -1,308 +0,0 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::*;
use crate::{
PROXMOX_SAFE_ID_REGEX,
IP_V4_FORMAT, IP_V6_FORMAT, IP_FORMAT,
CIDR_V4_FORMAT, CIDR_V6_FORMAT, CIDR_FORMAT,
};
pub const NETWORK_INTERFACE_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
pub const IP_V4_SCHEMA: Schema =
StringSchema::new("IPv4 address.")
.format(&IP_V4_FORMAT)
.max_length(15)
.schema();
pub const IP_V6_SCHEMA: Schema =
StringSchema::new("IPv6 address.")
.format(&IP_V6_FORMAT)
.max_length(39)
.schema();
pub const IP_SCHEMA: Schema =
StringSchema::new("IP (IPv4 or IPv6) address.")
.format(&IP_FORMAT)
.max_length(39)
.schema();
pub const CIDR_V4_SCHEMA: Schema =
StringSchema::new("IPv4 address with netmask (CIDR notation).")
.format(&CIDR_V4_FORMAT)
.max_length(18)
.schema();
pub const CIDR_V6_SCHEMA: Schema =
StringSchema::new("IPv6 address with netmask (CIDR notation).")
.format(&CIDR_V6_FORMAT)
.max_length(43)
.schema();
pub const CIDR_SCHEMA: Schema =
StringSchema::new("IP address (IPv4 or IPv6) with netmask (CIDR notation).")
.format(&CIDR_FORMAT)
.max_length(43)
.schema();
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Interface configuration method
pub enum NetworkConfigMethod {
/// Configuration is done manually using other tools
Manual,
/// Define interfaces with statically allocated addresses.
Static,
/// Obtain an address via DHCP
DHCP,
/// Define the loopback interface.
Loopback,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
#[repr(u8)]
/// Linux Bond Mode
pub enum LinuxBondMode {
/// Round-robin policy
balance_rr = 0,
/// Active-backup policy
active_backup = 1,
/// XOR policy
balance_xor = 2,
/// Broadcast policy
broadcast = 3,
/// IEEE 802.3ad Dynamic link aggregation
#[serde(rename = "802.3ad")]
ieee802_3ad = 4,
/// Adaptive transmit load balancing
balance_tlb = 5,
/// Adaptive load balancing
balance_alb = 6,
}
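// Sketch (hypothetical test, assuming serde_json is available): the
// kebab-case rename_all and the explicit "802.3ad" rename interact as
// follows on the wire.
#[cfg(test)]
#[test]
fn bond_mode_wire_names() {
    assert_eq!(
        serde_json::to_value(LinuxBondMode::active_backup).unwrap(),
        serde_json::json!("active-backup")
    );
    assert_eq!(
        serde_json::to_value(LinuxBondMode::ieee802_3ad).unwrap(),
        serde_json::json!("802.3ad")
    );
}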
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
#[repr(u8)]
/// Bond Transmit Hash Policy for LACP (802.3ad)
pub enum BondXmitHashPolicy {
/// Layer 2
layer2 = 0,
/// Layer 2+3
#[serde(rename = "layer2+3")]
layer2_3 = 1,
/// Layer 3+4
#[serde(rename = "layer3+4")]
layer3_4 = 2,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Network interface type
pub enum NetworkInterfaceType {
/// Loopback
Loopback,
/// Physical Ethernet device
Eth,
/// Linux Bridge
Bridge,
/// Linux Bond
Bond,
/// Linux VLAN (eth.10)
Vlan,
/// Interface Alias (eth:1)
Alias,
/// Unknown interface type
Unknown,
}
pub const NETWORK_INTERFACE_NAME_SCHEMA: Schema = StringSchema::new("Network interface name.")
.format(&NETWORK_INTERFACE_FORMAT)
.min_length(1)
.max_length(libc::IFNAMSIZ-1)
.schema();
pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema = ArraySchema::new(
"Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA)
.schema();
pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema = StringSchema::new(
"A list of network devices, comma separated.")
.format(&ApiStringFormat::PropertyString(&NETWORK_INTERFACE_ARRAY_SCHEMA))
.schema();
#[api(
properties: {
name: {
schema: NETWORK_INTERFACE_NAME_SCHEMA,
},
"type": {
type: NetworkInterfaceType,
},
method: {
type: NetworkConfigMethod,
optional: true,
},
method6: {
type: NetworkConfigMethod,
optional: true,
},
cidr: {
schema: CIDR_V4_SCHEMA,
optional: true,
},
cidr6: {
schema: CIDR_V6_SCHEMA,
optional: true,
},
gateway: {
schema: IP_V4_SCHEMA,
optional: true,
},
gateway6: {
schema: IP_V6_SCHEMA,
optional: true,
},
options: {
description: "Option list (inet)",
type: Array,
items: {
description: "Optional attribute line.",
type: String,
},
},
options6: {
description: "Option list (inet6)",
type: Array,
items: {
description: "Optional attribute line.",
type: String,
},
},
comments: {
description: "Comments (inet, may span multiple lines)",
type: String,
optional: true,
},
comments6: {
description: "Comments (inet6, may span multiple lines)",
type: String,
optional: true,
},
bridge_ports: {
schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
optional: true,
},
slaves: {
schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
optional: true,
},
bond_mode: {
type: LinuxBondMode,
optional: true,
},
"bond-primary": {
schema: NETWORK_INTERFACE_NAME_SCHEMA,
optional: true,
},
bond_xmit_hash_policy: {
type: BondXmitHashPolicy,
optional: true,
},
}
)]
#[derive(Debug, Serialize, Deserialize)]
/// Network Interface configuration
pub struct Interface {
/// Autostart interface
#[serde(rename = "autostart")]
pub autostart: bool,
/// Interface is active (UP)
pub active: bool,
/// Interface name
pub name: String,
/// Interface type
#[serde(rename = "type")]
pub interface_type: NetworkInterfaceType,
#[serde(skip_serializing_if="Option::is_none")]
pub method: Option<NetworkConfigMethod>,
#[serde(skip_serializing_if="Option::is_none")]
pub method6: Option<NetworkConfigMethod>,
#[serde(skip_serializing_if="Option::is_none")]
/// IPv4 address with netmask
pub cidr: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
/// IPv4 gateway
pub gateway: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
/// IPv6 address with netmask
pub cidr6: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
/// IPv6 gateway
pub gateway6: Option<String>,
#[serde(skip_serializing_if="Vec::is_empty")]
pub options: Vec<String>,
#[serde(skip_serializing_if="Vec::is_empty")]
pub options6: Vec<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub comments: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub comments6: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
/// Maximum Transmission Unit
pub mtu: Option<u64>,
#[serde(skip_serializing_if="Option::is_none")]
pub bridge_ports: Option<Vec<String>>,
/// Enable bridge vlan support.
#[serde(skip_serializing_if="Option::is_none")]
pub bridge_vlan_aware: Option<bool>,
#[serde(skip_serializing_if="Option::is_none")]
pub slaves: Option<Vec<String>>,
#[serde(skip_serializing_if="Option::is_none")]
pub bond_mode: Option<LinuxBondMode>,
#[serde(skip_serializing_if="Option::is_none")]
#[serde(rename = "bond-primary")]
pub bond_primary: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
}
impl Interface {
pub fn new(name: String) -> Self {
Self {
name,
interface_type: NetworkInterfaceType::Unknown,
autostart: false,
active: false,
method: None,
method6: None,
cidr: None,
gateway: None,
cidr6: None,
gateway6: None,
options: Vec::new(),
options6: Vec::new(),
comments: None,
comments6: None,
mtu: None,
bridge_ports: None,
bridge_vlan_aware: None,
slaves: None,
bond_mode: None,
bond_primary: None,
bond_xmit_hash_policy: None,
}
}
}
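// Construction sketch using only the fields and enums defined above
// (interface and address values are hypothetical):
#[cfg(test)]
fn example_bridge() -> Interface {
    let mut iface = Interface::new("vmbr0".to_string());
    iface.interface_type = NetworkInterfaceType::Bridge;
    iface.autostart = true;
    iface.method = Some(NetworkConfigMethod::Static);
    iface.cidr = Some("192.168.2.10/24".to_string());
    iface.gateway = Some("192.168.2.1".to_string());
    iface.bridge_ports = Some(vec!["eth0".to_string()]);
    iface
}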

View File

@ -1,121 +0,0 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::{
api, ApiStringFormat, ArraySchema, Schema, StringSchema, Updater,
};
use super::{
PROXMOX_SAFE_ID_REGEX, PROXMOX_SAFE_ID_FORMAT, REALM_ID_SCHEMA,
SINGLE_LINE_COMMENT_SCHEMA,
};
pub const OPENID_SCOPE_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
pub const OPENID_SCOPE_SCHEMA: Schema = StringSchema::new("OpenID Scope Name.")
.format(&OPENID_SCOPE_FORMAT)
.schema();
pub const OPENID_SCOPE_ARRAY_SCHEMA: Schema = ArraySchema::new(
"Array of OpenId Scopes.", &OPENID_SCOPE_SCHEMA).schema();
pub const OPENID_SCOPE_LIST_FORMAT: ApiStringFormat =
ApiStringFormat::PropertyString(&OPENID_SCOPE_ARRAY_SCHEMA);
pub const OPENID_DEFAULT_SCOPE_LIST: &str = "email profile";
pub const OPENID_SCOPE_LIST_SCHEMA: Schema = StringSchema::new("OpenID Scope List")
.format(&OPENID_SCOPE_LIST_FORMAT)
.default(OPENID_DEFAULT_SCOPE_LIST)
.schema();
pub const OPENID_ACR_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
pub const OPENID_ACR_SCHEMA: Schema = StringSchema::new("OpenID Authentication Context Class Reference.")
.format(&OPENID_ACR_FORMAT)
.schema();
pub const OPENID_ACR_ARRAY_SCHEMA: Schema = ArraySchema::new(
"Array of OpenId ACRs.", &OPENID_ACR_SCHEMA).schema();
pub const OPENID_ACR_LIST_FORMAT: ApiStringFormat =
ApiStringFormat::PropertyString(&OPENID_ACR_ARRAY_SCHEMA);
pub const OPENID_ACR_LIST_SCHEMA: Schema = StringSchema::new("OpenID ACR List")
.format(&OPENID_ACR_LIST_FORMAT)
.schema();
pub const OPENID_USERNAME_CLAIM_SCHEMA: Schema = StringSchema::new(
"Use the value of this attribute/claim as unique user name. It \
is up to the identity provider to guarantee the uniqueness. The \
OpenID specification only guarantees that Subject ('sub') is \
unique. Also make sure that the user is not allowed to change that \
attribute themselves!")
.max_length(64)
.min_length(1)
.format(&PROXMOX_SAFE_ID_FORMAT)
.schema();
#[api(
properties: {
realm: {
schema: REALM_ID_SCHEMA,
},
"client-key": {
optional: true,
},
"scopes": {
schema: OPENID_SCOPE_LIST_SCHEMA,
optional: true,
},
"acr-values": {
schema: OPENID_ACR_LIST_SCHEMA,
optional: true,
},
prompt: {
description: "OpenID Prompt",
type: String,
format: &PROXMOX_SAFE_ID_FORMAT,
optional: true,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
autocreate: {
optional: true,
default: false,
},
"username-claim": {
schema: OPENID_USERNAME_CLAIM_SCHEMA,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all="kebab-case")]
/// OpenID configuration properties.
pub struct OpenIdRealmConfig {
#[updater(skip)]
pub realm: String,
/// OpenID Issuer Url
pub issuer_url: String,
/// OpenID Client ID
pub client_id: String,
#[serde(skip_serializing_if="Option::is_none")]
pub scopes: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub acr_values: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub prompt: Option<String>,
/// OpenID Client Key
#[serde(skip_serializing_if="Option::is_none")]
pub client_key: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
/// Automatically create users if they do not exist.
#[serde(skip_serializing_if="Option::is_none")]
pub autocreate: Option<bool>,
#[updater(skip)]
#[serde(skip_serializing_if="Option::is_none")]
pub username_claim: Option<String>,
}

View File

@ -1,86 +0,0 @@
use serde::{Deserialize, Serialize};
use super::*;
use proxmox_schema::*;
pub const REMOTE_PASSWORD_SCHEMA: Schema = StringSchema::new("Password or auth token for remote host.")
.format(&PASSWORD_FORMAT)
.min_length(1)
.max_length(1024)
.schema();
pub const REMOTE_PASSWORD_BASE64_SCHEMA: Schema = StringSchema::new("Password or auth token for remote host (stored as base64 string).")
.format(&PASSWORD_FORMAT)
.min_length(1)
.max_length(1024)
.schema();
pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
#[api(
properties: {
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
host: {
schema: DNS_NAME_OR_IP_SCHEMA,
},
port: {
optional: true,
description: "The (optional) port",
type: u16,
},
"auth-id": {
type: Authid,
},
fingerprint: {
optional: true,
schema: CERT_FINGERPRINT_SHA256_SCHEMA,
},
},
)]
#[derive(Serialize,Deserialize,Updater)]
#[serde(rename_all = "kebab-case")]
/// Remote configuration properties.
pub struct RemoteConfig {
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
pub host: String,
#[serde(skip_serializing_if="Option::is_none")]
pub port: Option<u16>,
pub auth_id: Authid,
#[serde(skip_serializing_if="Option::is_none")]
pub fingerprint: Option<String>,
}
#[api(
properties: {
name: {
schema: REMOTE_ID_SCHEMA,
},
config: {
type: RemoteConfig,
},
password: {
schema: REMOTE_PASSWORD_BASE64_SCHEMA,
},
},
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Remote properties.
pub struct Remote {
pub name: String,
// Note: The stored password is base64 encoded
#[serde(skip_serializing_if="String::is_empty")]
#[serde(with = "proxmox::tools::serde::string_as_base64")]
pub password: String,
#[serde(flatten)]
pub config: RemoteConfig,
}

View File

@ -1,91 +0,0 @@
//! Types for tape backup API
mod device;
pub use device::*;
mod changer;
pub use changer::*;
mod drive;
pub use drive::*;
mod media_pool;
pub use media_pool::*;
mod media_status;
pub use media_status::*;
mod media_location;
pub use media_location::*;
mod media;
pub use media::*;
use serde::{Deserialize, Serialize};
use proxmox_schema::{api, const_regex, Schema, StringSchema, ApiStringFormat};
use proxmox_uuid::Uuid;
use crate::{
FINGERPRINT_SHA256_FORMAT, BACKUP_ID_SCHEMA, BACKUP_TYPE_SCHEMA,
};
const_regex!{
pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":", SNAPSHOT_PATH_REGEX_STR!(), r"$");
}
pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&TAPE_RESTORE_SNAPSHOT_REGEX);
pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema = StringSchema::new(
"Tape encryption key fingerprint (sha256)."
)
.format(&FINGERPRINT_SHA256_FORMAT)
.schema();
pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema = StringSchema::new(
"A snapshot in the format: 'store:type/id/time'")
.format(&TAPE_RESTORE_SNAPSHOT_FORMAT)
.type_text("store:type/id/time")
.schema();
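// e.g. "store1:vm/100/2021-06-16T09:55:47Z" (hypothetical store, id and time)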
#[api(
properties: {
pool: {
schema: MEDIA_POOL_NAME_SCHEMA,
optional: true,
},
"label-text": {
schema: MEDIA_LABEL_SCHEMA,
optional: true,
},
"media": {
schema: MEDIA_UUID_SCHEMA,
optional: true,
},
"media-set": {
schema: MEDIA_SET_UUID_SCHEMA,
optional: true,
},
"backup-type": {
schema: BACKUP_TYPE_SCHEMA,
optional: true,
},
"backup-id": {
schema: BACKUP_ID_SCHEMA,
optional: true,
},
},
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all="kebab-case")]
/// Content list filter parameters
pub struct MediaContentListFilter {
pub pool: Option<String>,
pub label_text: Option<String>,
pub media: Option<Uuid>,
pub media_set: Option<Uuid>,
pub backup_type: Option<String>,
pub backup_id: Option<String>,
}

View File

@ -1,122 +0,0 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::{api, Schema, IntegerSchema, StringSchema, Updater};
use crate::{
HumanByte, CIDR_SCHEMA, DAILY_DURATION_FORMAT,
PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
};
pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema = StringSchema::new(
"Timeframe to specify when the rule is active.")
.format(&DAILY_DURATION_FORMAT)
.schema();
pub const TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
pub const TRAFFIC_CONTROL_RATE_SCHEMA: Schema = IntegerSchema::new(
"Rate limit (for Token bucket filter) in bytes/second.")
.minimum(100_000)
.schema();
pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema = IntegerSchema::new(
"Size of the token bucket (for Token bucket filter) in bytes.")
.minimum(1000)
.schema();
#[api(
properties: {
"rate-in": {
type: HumanByte,
optional: true,
},
"burst-in": {
type: HumanByte,
optional: true,
},
"rate-out": {
type: HumanByte,
optional: true,
},
"burst-out": {
type: HumanByte,
optional: true,
},
},
)]
#[derive(Serialize,Deserialize,Default,Clone,Updater)]
#[serde(rename_all = "kebab-case")]
/// Rate Limit Configuration
pub struct RateLimitConfig {
#[serde(skip_serializing_if="Option::is_none")]
pub rate_in: Option<HumanByte>,
#[serde(skip_serializing_if="Option::is_none")]
pub burst_in: Option<HumanByte>,
#[serde(skip_serializing_if="Option::is_none")]
pub rate_out: Option<HumanByte>,
#[serde(skip_serializing_if="Option::is_none")]
pub burst_out: Option<HumanByte>,
}
impl RateLimitConfig {
pub fn with_same_inout(rate: Option<HumanByte>, burst: Option<HumanByte>) -> Self {
Self {
rate_in: rate,
burst_in: burst,
rate_out: rate,
burst_out: burst,
}
}
}
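// Sketch of the intended call pattern. The "100MB"/"200MB" literals and the
// assumption that HumanByte implements FromStr (as its use as a section
// config value suggests) are illustrative, not guaranteed.
#[allow(dead_code)]
fn example_limit() -> Result<RateLimitConfig, anyhow::Error> {
    let rate: HumanByte = "100MB".parse()?;
    let burst: HumanByte = "200MB".parse()?;
    Ok(RateLimitConfig::with_same_inout(Some(rate), Some(burst)))
}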
#[api(
properties: {
name: {
schema: TRAFFIC_CONTROL_ID_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
limit: {
type: RateLimitConfig,
},
network: {
type: Array,
items: {
schema: CIDR_SCHEMA,
},
},
timeframe: {
type: Array,
items: {
schema: TRAFFIC_CONTROL_TIMEFRAME_SCHEMA,
},
optional: true,
},
},
)]
#[derive(Serialize,Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// Traffic control rule
pub struct TrafficControlRule {
#[updater(skip)]
pub name: String,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
/// Rule applies to source IPs within these networks
pub network: Vec<String>,
#[serde(flatten)]
pub limit: RateLimitConfig,
// fixme: expose this?
// /// Bandwidth is shared across all connections
// #[serde(skip_serializing_if="Option::is_none")]
// pub shared: Option<bool>,
/// Enable the rule at specific times
#[serde(skip_serializing_if="Option::is_none")]
pub timeframe: Option<Vec<String>>,
}

View File

@ -1,207 +0,0 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::{
api, BooleanSchema, IntegerSchema, Schema, StringSchema, Updater,
};
use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA};
use super::userid::{Authid, Userid, PROXMOX_TOKEN_ID_SCHEMA};
pub const ENABLE_USER_SCHEMA: Schema = BooleanSchema::new(
"Enable the account (default). You can set this to '0' to disable the account.")
.default(true)
.schema();
pub const EXPIRE_USER_SCHEMA: Schema = IntegerSchema::new(
"Account expiration date (seconds since epoch). '0' means no expiration date.")
.default(0)
.minimum(0)
.schema();
pub const FIRST_NAME_SCHEMA: Schema = StringSchema::new("First name.")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(2)
.max_length(64)
.schema();
pub const LAST_NAME_SCHEMA: Schema = StringSchema::new("Last name.")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(2)
.max_length(64)
.schema();
pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(2)
.max_length(64)
.schema();
#[api(
properties: {
userid: {
type: Userid,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
enable: {
optional: true,
schema: ENABLE_USER_SCHEMA,
},
expire: {
optional: true,
schema: EXPIRE_USER_SCHEMA,
},
firstname: {
optional: true,
schema: FIRST_NAME_SCHEMA,
},
lastname: {
schema: LAST_NAME_SCHEMA,
optional: true,
},
email: {
schema: EMAIL_SCHEMA,
optional: true,
},
tokens: {
type: Array,
optional: true,
description: "List of user's API tokens.",
items: {
type: ApiToken
},
},
}
)]
#[derive(Serialize,Deserialize)]
/// User properties with added list of ApiTokens
pub struct UserWithTokens {
pub userid: Userid,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub enable: Option<bool>,
#[serde(skip_serializing_if="Option::is_none")]
pub expire: Option<i64>,
#[serde(skip_serializing_if="Option::is_none")]
pub firstname: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub lastname: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub email: Option<String>,
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub tokens: Vec<ApiToken>,
}
#[api(
properties: {
tokenid: {
schema: PROXMOX_TOKEN_ID_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
enable: {
optional: true,
schema: ENABLE_USER_SCHEMA,
},
expire: {
optional: true,
schema: EXPIRE_USER_SCHEMA,
},
}
)]
#[derive(Serialize,Deserialize)]
/// ApiToken properties.
pub struct ApiToken {
pub tokenid: Authid,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub enable: Option<bool>,
#[serde(skip_serializing_if="Option::is_none")]
pub expire: Option<i64>,
}
impl ApiToken {
pub fn is_active(&self) -> bool {
if !self.enable.unwrap_or(true) {
return false;
}
if let Some(expire) = self.expire {
let now = proxmox_time::epoch_i64();
if expire > 0 && expire <= now {
return false;
}
}
true
}
}
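// Sketch (hypothetical test): an already-expired token is inactive even
// though `enable` is unset and therefore defaults to true. Assumes Authid
// implements FromStr for the "user@realm!token" notation.
#[cfg(test)]
#[test]
fn expired_token_is_inactive() {
    let token = ApiToken {
        tokenid: "root@pam!ci".parse().unwrap(),
        comment: None,
        enable: None,
        expire: Some(1), // 1970-01-01T00:00:01Z, long in the past
    };
    assert!(!token.is_active());
}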
#[api(
properties: {
userid: {
type: Userid,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
enable: {
optional: true,
schema: ENABLE_USER_SCHEMA,
},
expire: {
optional: true,
schema: EXPIRE_USER_SCHEMA,
},
firstname: {
optional: true,
schema: FIRST_NAME_SCHEMA,
},
lastname: {
schema: LAST_NAME_SCHEMA,
optional: true,
},
email: {
schema: EMAIL_SCHEMA,
optional: true,
},
}
)]
#[derive(Serialize,Deserialize,Updater)]
/// User properties.
pub struct User {
#[updater(skip)]
pub userid: Userid,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub enable: Option<bool>,
#[serde(skip_serializing_if="Option::is_none")]
pub expire: Option<i64>,
#[serde(skip_serializing_if="Option::is_none")]
pub firstname: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub lastname: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub email: Option<String>,
}
impl User {
pub fn is_active(&self) -> bool {
if !self.enable.unwrap_or(true) {
return false;
}
if let Some(expire) = self.expire {
let now = proxmox_time::epoch_i64();
if expire > 0 && expire <= now {
return false;
}
}
true
}
}

View File

@ -1,79 +0,0 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::*;
const_regex! {
pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
}
pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new(
"Pool sector size exponent.")
.minimum(9)
.maximum(16)
.default(12)
.schema();
pub const ZPOOL_NAME_SCHEMA: Schema = StringSchema::new("ZFS Pool Name")
.format(&ApiStringFormat::Pattern(&ZPOOL_NAME_REGEX))
.schema();
#[api(default: "On")]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The ZFS compression algorithm to use.
pub enum ZfsCompressionType {
/// Gnu Zip
Gzip,
/// LZ4
Lz4,
/// LZJB
Lzjb,
/// ZLE
Zle,
/// ZStd
ZStd,
/// Enable compression using the default algorithm.
On,
/// Disable compression.
Off,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The ZFS RAID level to use.
pub enum ZfsRaidLevel {
/// Single Disk
Single,
/// Mirror
Mirror,
/// Raid10
Raid10,
/// RaidZ
RaidZ,
/// RaidZ2
RaidZ2,
/// RaidZ3
RaidZ3,
}
#[api()]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
/// zpool list item
pub struct ZpoolListItem {
/// zpool name
pub name: String,
/// Health
pub health: String,
/// Total size
pub size: u64,
/// Used size
pub alloc: u64,
/// Free space
pub free: u64,
/// ZFS fragmentation level
pub frag: u64,
/// ZFS deduplication ratio
pub dedup: f64,
}

View File

@ -1,9 +0,0 @@
[package]
name = "pbs-buildcfg"
version = "2.1.2"
authors = ["Proxmox Support Team <support@proxmox.com>"]
edition = "2018"
description = "macros used for pbs related paths such as configdir and rundir"
build = "build.rs"
[dependencies]

View File

@ -1,45 +0,0 @@
[package]
name = "pbs-client"
version = "0.1.0"
authors = ["Wolfgang Bumiller <w.bumiller@proxmox.com>"]
edition = "2018"
description = "The main proxmox backup client crate"
[dependencies]
anyhow = "1.0"
bitflags = "1.2.1"
bytes = "1.0"
futures = "0.3"
h2 = { version = "0.3", features = [ "stream" ] }
http = "0.2"
hyper = { version = "0.14", features = [ "full" ] }
lazy_static = "1.4"
libc = "0.2"
nix = "0.19.1"
openssl = "0.10"
percent-encoding = "2.1"
pin-project-lite = "0.2"
regex = "1.2"
rustyline = "7"
serde_json = "1.0"
tokio = { version = "1.6", features = [ "fs", "signal" ] }
tokio-stream = "0.1.0"
tower-service = "0.3.0"
xdg = "2.2"
pathpatterns = "0.1.2"
proxmox = "0.15.3"
proxmox-async = "0.2"
proxmox-fuse = "0.1.1"
proxmox-http = { version = "0.5.4", features = [ "client", "http-helpers", "websocket" ] }
proxmox-io = { version = "1", features = [ "tokio" ] }
proxmox-lang = "1"
proxmox-router = { version = "1.1", features = [ "cli" ] }
proxmox-schema = "1"
proxmox-time = "1"
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
pbs-api-types = { path = "../pbs-api-types" }
pbs-buildcfg = { path = "../pbs-buildcfg" }
pbs-datastore = { path = "../pbs-datastore" }
pbs-tools = { path = "../pbs-tools" }

View File

@ -1,43 +0,0 @@
//! Client side interface to the proxmox backup server
//!
//! This library implements the client side to access the backup
//! server using HTTPS.
pub mod catalog_shell;
pub mod pxar;
pub mod tools;
mod merge_known_chunks;
pub mod pipe_to_stream;
mod http_client;
pub use http_client::*;
mod vsock_client;
pub use vsock_client::*;
mod task_log;
pub use task_log::*;
mod backup_reader;
pub use backup_reader::*;
mod backup_writer;
pub use backup_writer::*;
mod remote_chunk_reader;
pub use remote_chunk_reader::*;
mod pxar_backup_stream;
pub use pxar_backup_stream::*;
mod backup_repo;
pub use backup_repo::*;
mod backup_specification;
pub use backup_specification::*;
mod chunk_stream;
pub use chunk_stream::{ChunkStream, FixedChunkStream};
pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120;

View File

@ -1,31 +0,0 @@
[package]
name = "pbs-config"
version = "0.1.0"
authors = ["Proxmox Support Team <support@proxmox.com>"]
edition = "2018"
description = "Configuration file management for PBS"
[dependencies]
anyhow = "1.0"
hex = "0.4.3"
lazy_static = "1.4"
libc = "0.2"
nix = "0.19.1"
once_cell = "1.3.1"
openssl = "0.10"
regex = "1.2"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
proxmox = "0.15.3"
proxmox-lang = "1"
proxmox-router = { version = "1.1", default-features = false }
proxmox-schema = "1"
proxmox-section-config = "1"
proxmox-time = "1"
proxmox-shared-memory = "0.1.1"
proxmox-sys = "0.1.2"
pbs-api-types = { path = "../pbs-api-types" }
pbs-buildcfg = { path = "../pbs-buildcfg" }
pbs-tools = { path = "../pbs-tools" }

View File

@ -1,121 +0,0 @@
use std::path::Path;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::mem::MaybeUninit;
use anyhow::{bail, Error};
use once_cell::sync::OnceCell;
use nix::sys::stat::Mode;
use proxmox::tools::fs::{create_path, CreateOptions};
// openssl::sha::sha256(b"Proxmox Backup ConfigVersionCache v1.0")[0..8];
pub const PROXMOX_BACKUP_CONFIG_VERSION_CACHE_MAGIC_1_0: [u8; 8] = [25, 198, 168, 230, 154, 132, 143, 131];
const FILE_PATH: &str = pbs_buildcfg::rundir!("/shmem/config-versions");
use proxmox_shared_memory::*;
#[derive(Debug)]
#[repr(C)]
struct ConfigVersionCacheData {
magic: [u8; 8],
// User (user.cfg) cache generation/version.
user_cache_generation: AtomicUsize,
// Traffic control (traffic-control.cfg) generation/version.
traffic_control_generation: AtomicUsize,
// Add further atomics here (and reduce padding size)
padding: [u8; 4096 - 3*8],
}
impl Init for ConfigVersionCacheData {
fn initialize(this: &mut MaybeUninit<Self>) {
unsafe {
let me = &mut *this.as_mut_ptr();
me.magic = PROXMOX_BACKUP_CONFIG_VERSION_CACHE_MAGIC_1_0;
}
}
fn check_type_magic(this: &MaybeUninit<Self>) -> Result<(), Error> {
unsafe {
let me = &*this.as_ptr();
if me.magic != PROXMOX_BACKUP_CONFIG_VERSION_CACHE_MAGIC_1_0 {
bail!("ConfigVersionCache: wrong magic number");
}
Ok(())
}
}
}
pub struct ConfigVersionCache {
shmem: SharedMemory<ConfigVersionCacheData>
}
static INSTANCE: OnceCell<Arc<ConfigVersionCache>> = OnceCell::new();
impl ConfigVersionCache {
/// Open the memory based communication channel singleton.
pub fn new() -> Result<Arc<Self>, Error> {
INSTANCE.get_or_try_init(Self::open).map(Arc::clone)
}
// Actual work of `new`:
fn open() -> Result<Arc<Self>, Error> {
let user = crate::backup_user()?;
let dir_opts = CreateOptions::new()
.perm(Mode::from_bits_truncate(0o770))
.owner(user.uid)
.group(user.gid);
let file_path = Path::new(FILE_PATH);
let dir_path = file_path.parent().unwrap();
create_path(
dir_path,
Some(dir_opts.clone()),
Some(dir_opts))?;
let file_opts = CreateOptions::new()
.perm(Mode::from_bits_truncate(0o660))
.owner(user.uid)
.group(user.gid);
let shmem: SharedMemory<ConfigVersionCacheData> =
SharedMemory::open(file_path, file_opts)?;
Ok(Arc::new(Self { shmem }))
}
/// Returns the user cache generation number.
pub fn user_cache_generation(&self) -> usize {
self.shmem.data()
.user_cache_generation.load(Ordering::Acquire)
}
/// Increase the user cache generation number.
pub fn increase_user_cache_generation(&self) {
self.shmem.data()
.user_cache_generation
.fetch_add(1, Ordering::AcqRel);
}
/// Returns the traffic control generation number.
pub fn traffic_control_generation(&self) -> usize {
self.shmem.data()
.traffic_control_generation.load(Ordering::Acquire)
}
/// Increase the traffic control generation number.
pub fn increase_traffic_control_generation(&self) {
self.shmem.data()
.traffic_control_generation
.fetch_add(1, Ordering::AcqRel);
}
}
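// Usage sketch: another process can detect user.cfg changes by polling the
// generation counter instead of re-reading and hashing the file.
#[allow(dead_code)]
fn user_config_changed(last_seen: usize) -> Result<bool, Error> {
    let cache = ConfigVersionCache::new()?;
    Ok(cache.user_cache_generation() != last_seen)
}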

View File

@ -1,94 +0,0 @@
use anyhow::{Error};
use lazy_static::lazy_static;
use std::collections::HashMap;
use proxmox_schema::{ApiType, Schema};
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
use pbs_api_types::{DataStoreConfig, DATASTORE_SCHEMA};
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
lazy_static! {
pub static ref CONFIG: SectionConfig = init();
}
fn init() -> SectionConfig {
let obj_schema = match DataStoreConfig::API_SCHEMA {
Schema::Object(ref obj_schema) => obj_schema,
_ => unreachable!(),
};
let plugin = SectionConfigPlugin::new("datastore".to_string(), Some(String::from("name")), obj_schema);
let mut config = SectionConfig::new(&DATASTORE_SCHEMA);
config.register_plugin(plugin);
config
}
pub const DATASTORE_CFG_FILENAME: &str = "/etc/proxmox-backup/datastore.cfg";
pub const DATASTORE_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.datastore.lck";
/// Get exclusive lock
pub fn lock_config() -> Result<BackupLockGuard, Error> {
open_backup_lockfile(DATASTORE_CFG_LOCKFILE, None, true)
}
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
let content = proxmox::tools::fs::file_read_optional_string(DATASTORE_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(DATASTORE_CFG_FILENAME, &content)?;
Ok((data, digest))
}
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
let raw = CONFIG.write(DATASTORE_CFG_FILENAME, &config)?;
replace_backup_config(DATASTORE_CFG_FILENAME, raw.as_bytes())
}
// shell completion helper
pub fn complete_datastore_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
match config() {
Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
Err(_) => return vec![],
}
}
pub fn complete_acl_path(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
let mut list = Vec::new();
list.push(String::from("/"));
list.push(String::from("/datastore"));
list.push(String::from("/datastore/"));
if let Ok((data, _digest)) = config() {
for id in data.sections.keys() {
list.push(format!("/datastore/{}", id));
}
}
list.push(String::from("/remote"));
list.push(String::from("/remote/"));
list.push(String::from("/tape"));
list.push(String::from("/tape/"));
list.push(String::from("/tape/drive"));
list.push(String::from("/tape/drive/"));
list.push(String::from("/tape/changer"));
list.push(String::from("/tape/changer/"));
list.push(String::from("/tape/pool"));
list.push(String::from("/tape/pool/"));
list.push(String::from("/tape/job"));
list.push(String::from("/tape/job/"));
list
}
pub fn complete_calendar_event(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
// just give some hints about possible values
["minutely", "hourly", "daily", "mon..fri", "0:0"]
.iter().map(|s| String::from(*s)).collect()
}
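// Sketch of the read-modify-write pattern the digest enables: take the lock,
// re-read, compare against the digest from an earlier read (here a
// hypothetical caller-supplied value), then save.
#[allow(dead_code)]
fn update_guarded(expected_digest: [u8; 32]) -> Result<(), Error> {
    let _lock = lock_config()?;
    let (data, digest) = config()?;
    if digest != expected_digest {
        anyhow::bail!("datastore.cfg was modified concurrently");
    }
    // ... modify `data` here before saving ...
    save_config(&data)
}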

View File

@ -1,68 +0,0 @@
use std::collections::HashMap;
use anyhow::{Error};
use lazy_static::lazy_static;
use proxmox_schema::{ApiType, Schema};
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
use pbs_api_types::{OpenIdRealmConfig, REALM_ID_SCHEMA};
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
lazy_static! {
pub static ref CONFIG: SectionConfig = init();
}
fn init() -> SectionConfig {
let obj_schema = match OpenIdRealmConfig::API_SCHEMA {
Schema::Object(ref obj_schema) => obj_schema,
_ => unreachable!(),
};
let plugin = SectionConfigPlugin::new("openid".to_string(), Some(String::from("realm")), obj_schema);
let mut config = SectionConfig::new(&REALM_ID_SCHEMA);
config.register_plugin(plugin);
config
}
pub const DOMAINS_CFG_FILENAME: &str = "/etc/proxmox-backup/domains.cfg";
pub const DOMAINS_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.domains.lck";
/// Get exclusive lock
pub fn lock_config() -> Result<BackupLockGuard, Error> {
open_backup_lockfile(DOMAINS_CFG_LOCKFILE, None, true)
}
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
let content = proxmox::tools::fs::file_read_optional_string(DOMAINS_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(DOMAINS_CFG_FILENAME, &content)?;
Ok((data, digest))
}
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
let raw = CONFIG.write(DOMAINS_CFG_FILENAME, &config)?;
replace_backup_config(DOMAINS_CFG_FILENAME, raw.as_bytes())
}
// shell completion helper
pub fn complete_realm_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
match config() {
Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
Err(_) => return vec![],
}
}
pub fn complete_openid_realm_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
match config() {
Ok((data, _digest)) => data.sections.iter()
.filter_map(|(id, (t, _))| if t == "openid" { Some(id.to_string()) } else { None })
.collect(),
Err(_) => return vec![],
}
}

View File

@ -1,108 +0,0 @@
pub mod acl;
mod cached_user_info;
pub use cached_user_info::CachedUserInfo;
pub mod datastore;
pub mod domains;
pub mod drive;
pub mod key_config;
pub mod media_pool;
pub mod network;
pub mod remote;
pub mod sync;
pub mod tape_encryption_keys;
pub mod tape_job;
pub mod token_shadow;
pub mod traffic_control;
pub mod user;
pub mod verify;
mod config_version_cache;
pub use config_version_cache::ConfigVersionCache;
use anyhow::{format_err, Error};
pub use pbs_buildcfg::{BACKUP_USER_NAME, BACKUP_GROUP_NAME};
/// Return User info for the 'backup' user (``getpwnam_r(3)``)
pub fn backup_user() -> Result<nix::unistd::User, Error> {
pbs_tools::sys::query_user(BACKUP_USER_NAME)?
.ok_or_else(|| format_err!("Unable to lookup '{}' user.", BACKUP_USER_NAME))
}
/// Return Group info for the 'backup' group (``getgrnam(3)``)
pub fn backup_group() -> Result<nix::unistd::Group, Error> {
pbs_tools::sys::query_group(BACKUP_GROUP_NAME)?
.ok_or_else(|| format_err!("Unable to lookup '{}' group.", BACKUP_GROUP_NAME))
}
pub struct BackupLockGuard(Option<std::fs::File>);
#[doc(hidden)]
/// Note: do not use for production code, this is only intended for tests
pub unsafe fn create_mocked_lock() -> BackupLockGuard {
BackupLockGuard(None)
}
/// Open or create a lock file owned by user "backup" and lock it.
///
/// Owner/Group of the file is set to backup/backup.
/// File mode is 0660.
/// Default timeout is 10 seconds.
///
/// Note: This method needs to be called by user "root" or "backup".
pub fn open_backup_lockfile<P: AsRef<std::path::Path>>(
path: P,
timeout: Option<std::time::Duration>,
exclusive: bool,
) -> Result<BackupLockGuard, Error> {
let user = backup_user()?;
let options = proxmox::tools::fs::CreateOptions::new()
.perm(nix::sys::stat::Mode::from_bits_truncate(0o660))
.owner(user.uid)
.group(user.gid);
let timeout = timeout.unwrap_or(std::time::Duration::new(10, 0));
let file = proxmox::tools::fs::open_file_locked(&path, timeout, exclusive, options)?;
Ok(BackupLockGuard(Some(file)))
}
/// Atomically write data to file owned by "root:backup" with permission "0640"
///
/// Only the superuser can write those files, but group 'backup' can read them.
pub fn replace_backup_config<P: AsRef<std::path::Path>>(
path: P,
data: &[u8],
) -> Result<(), Error> {
let backup_user = backup_user()?;
let mode = nix::sys::stat::Mode::from_bits_truncate(0o0640);
// set the correct owner/group/permissions while saving file
// owner(rw) = root, group(r)= backup
let options = proxmox::tools::fs::CreateOptions::new()
.perm(mode)
.owner(nix::unistd::ROOT)
.group(backup_user.gid);
proxmox::tools::fs::replace_file(path, data, options, true)?;
Ok(())
}
/// Atomically write data to file owned by "root:root" with permission "0600"
///
/// Only the superuser can read and write those files.
pub fn replace_secret_config<P: AsRef<std::path::Path>>(
path: P,
data: &[u8],
) -> Result<(), Error> {
let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
// set the correct owner/group/permissions while saving file
// owner(rw) = root, group(r)= root
let options = proxmox::tools::fs::CreateOptions::new()
.perm(mode)
.owner(nix::unistd::ROOT)
.group(nix::unistd::Gid::from_raw(0));
proxmox::tools::fs::replace_file(path, data, options, true)?;
Ok(())
}
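// Sketch of the intended call pattern (the "example" file names are
// hypothetical): hold the lock guard for the whole read-modify-write cycle.
#[allow(dead_code)]
fn update_example_config(data: &[u8]) -> Result<(), Error> {
    let _lock = open_backup_lockfile("/etc/proxmox-backup/.example.lck", None, true)?;
    replace_backup_config("/etc/proxmox-backup/example.cfg", data)
}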

View File

@ -1,59 +0,0 @@
use std::collections::HashMap;
use anyhow::Error;
use lazy_static::lazy_static;
use proxmox_schema::*;
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
use pbs_api_types::{Remote, REMOTE_ID_SCHEMA};
use crate::{open_backup_lockfile, BackupLockGuard};
lazy_static! {
pub static ref CONFIG: SectionConfig = init();
}
fn init() -> SectionConfig {
let obj_schema = match Remote::API_SCHEMA {
Schema::AllOf(ref allof_schema) => allof_schema,
_ => unreachable!(),
};
let plugin = SectionConfigPlugin::new("remote".to_string(), Some("name".to_string()), obj_schema);
let mut config = SectionConfig::new(&REMOTE_ID_SCHEMA);
config.register_plugin(plugin);
config
}
pub const REMOTE_CFG_FILENAME: &str = "/etc/proxmox-backup/remote.cfg";
pub const REMOTE_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.remote.lck";
/// Get exclusive lock
pub fn lock_config() -> Result<BackupLockGuard, Error> {
open_backup_lockfile(REMOTE_CFG_LOCKFILE, None, true)
}
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
let content = proxmox::tools::fs::file_read_optional_string(REMOTE_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(REMOTE_CFG_FILENAME, &content)?;
Ok((data, digest))
}
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
let raw = CONFIG.write(REMOTE_CFG_FILENAME, &config)?;
crate::replace_backup_config(REMOTE_CFG_FILENAME, raw.as_bytes())
}
// shell completion helper
pub fn complete_remote_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
match config() {
Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
Err(_) => return vec![],
}
}

View File

@ -1,60 +0,0 @@
use std::collections::HashMap;
use anyhow::Error;
use lazy_static::lazy_static;
use proxmox_schema::{ApiType, Schema};
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
use pbs_api_types::{JOB_ID_SCHEMA, SyncJobConfig};
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
lazy_static! {
pub static ref CONFIG: SectionConfig = init();
}
fn init() -> SectionConfig {
let obj_schema = match SyncJobConfig::API_SCHEMA {
Schema::AllOf(ref allof_schema) => allof_schema,
_ => unreachable!(),
};
let plugin = SectionConfigPlugin::new("sync".to_string(), Some(String::from("id")), obj_schema);
let mut config = SectionConfig::new(&JOB_ID_SCHEMA);
config.register_plugin(plugin);
config
}
pub const SYNC_CFG_FILENAME: &str = "/etc/proxmox-backup/sync.cfg";
pub const SYNC_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.sync.lck";
/// Get exclusive lock
pub fn lock_config() -> Result<BackupLockGuard, Error> {
open_backup_lockfile(SYNC_CFG_LOCKFILE, None, true)
}
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
let content = proxmox::tools::fs::file_read_optional_string(SYNC_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(SYNC_CFG_FILENAME, &content)?;
Ok((data, digest))
}
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
let raw = CONFIG.write(SYNC_CFG_FILENAME, &config)?;
replace_backup_config(SYNC_CFG_FILENAME, raw.as_bytes())
}
// shell completion helper
pub fn complete_sync_job_id(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
match config() {
Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
Err(_) => return vec![],
}
}

View File

@ -1,60 +0,0 @@
use anyhow::{Error};
use lazy_static::lazy_static;
use std::collections::HashMap;
use proxmox_schema::{Schema, ApiType};
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
use pbs_api_types::{TapeBackupJobConfig, JOB_ID_SCHEMA};
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
lazy_static! {
pub static ref CONFIG: SectionConfig = init();
}
fn init() -> SectionConfig {
let obj_schema = match TapeBackupJobConfig::API_SCHEMA {
Schema::AllOf(ref allof_schema) => allof_schema,
_ => unreachable!(),
};
let plugin = SectionConfigPlugin::new("backup".to_string(), Some(String::from("id")), obj_schema);
let mut config = SectionConfig::new(&JOB_ID_SCHEMA);
config.register_plugin(plugin);
config
}
pub const TAPE_JOB_CFG_FILENAME: &str = "/etc/proxmox-backup/tape-job.cfg";
pub const TAPE_JOB_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.tape-job.lck";
/// Get exclusive lock
pub fn lock() -> Result<BackupLockGuard, Error> {
open_backup_lockfile(TAPE_JOB_CFG_LOCKFILE, None, true)
}
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
let content = proxmox::tools::fs::file_read_optional_string(TAPE_JOB_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(TAPE_JOB_CFG_FILENAME, &content)?;
Ok((data, digest))
}
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
let raw = CONFIG.write(TAPE_JOB_CFG_FILENAME, &config)?;
replace_backup_config(TAPE_JOB_CFG_FILENAME, raw.as_bytes())
}
// shell completion helper
/// List all tape job IDs
pub fn complete_tape_job_id(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
match config() {
Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
Err(_) => return vec![],
}
}

View File

@ -1,97 +0,0 @@
//! Traffic Control Settings (Network rate limits)
use std::collections::HashMap;
use anyhow::Error;
use lazy_static::lazy_static;
use proxmox_schema::{ApiType, Schema};
use pbs_api_types::{TrafficControlRule, TRAFFIC_CONTROL_ID_SCHEMA};
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
use crate::ConfigVersionCache;
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
lazy_static! {
/// Static [`SectionConfig`] to access parser/writer functions.
pub static ref CONFIG: SectionConfig = init();
}
fn init() -> SectionConfig {
let mut config = SectionConfig::new(&TRAFFIC_CONTROL_ID_SCHEMA);
let obj_schema = match TrafficControlRule::API_SCHEMA {
Schema::AllOf(ref allof_schema) => allof_schema,
_ => unreachable!(),
};
let plugin = SectionConfigPlugin::new("rule".to_string(), Some("name".to_string()), obj_schema);
config.register_plugin(plugin);
config
}
/// Configuration file name
pub const TRAFFIC_CONTROL_CFG_FILENAME: &str = "/etc/proxmox-backup/traffic-control.cfg";
/// Lock file name (used to prevent concurrent access)
pub const TRAFFIC_CONTROL_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.traffic-control.lck";
/// Get exclusive lock
pub fn lock_config() -> Result<BackupLockGuard, Error> {
open_backup_lockfile(TRAFFIC_CONTROL_CFG_LOCKFILE, None, true)
}
/// Read and parse the configuration file
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
let content = proxmox::tools::fs::file_read_optional_string(TRAFFIC_CONTROL_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(TRAFFIC_CONTROL_CFG_FILENAME, &content)?;
Ok((data, digest))
}
/// Save the configuration file
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
let raw = CONFIG.write(TRAFFIC_CONTROL_CFG_FILENAME, &config)?;
replace_backup_config(TRAFFIC_CONTROL_CFG_FILENAME, raw.as_bytes())?;
// increase traffic control version
// We use this in TrafficControlCache
let version_cache = ConfigVersionCache::new()?;
version_cache.increase_traffic_control_generation();
Ok(())
}
// shell completion helper
pub fn complete_traffic_control_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
match config() {
Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
Err(_) => return vec![],
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test1() -> Result<(), Error> {
let content = "rule: rule1
comment localnet at working hours
network 192.168.2.0/24
network 192.168.3.0/24
rate-in 500000
timeframe mon..wed 8:00-16:30
timeframe fri 9:00-12:00
";
let data = CONFIG.parse(TRAFFIC_CONTROL_CFG_FILENAME, &content)?;
eprintln!("GOT {:?}", data);
Ok(())
}
}

Some files were not shown because too many files have changed in this diff.