Compare commits


3 Commits

Author             SHA1        Message                        Date
Thomas Lamprecht   edc876c58e  bump version to 2.0.12-1       2021-10-19 10:48:54 +02:00
                               (Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>)
Wolfgang Bumiller  ac383beb0a  bump d/control                 2021-10-19 10:45:23 +02:00
                               (Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>)
Wolfgang Bumiller  716753f1a8  pbs-tools: drop borrow module  2021-10-19 10:45:23 +02:00
                               (Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>)
538 changed files with 22331 additions and 33062 deletions


@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "2.2.3"
+version = "2.0.12"
 authors = [
     "Dietmar Maurer <dietmar@proxmox.com>",
     "Dominik Csapak <d.csapak@proxmox.com>",
@@ -25,8 +25,11 @@ members = [
     "pbs-config",
     "pbs-datastore",
     "pbs-fuse-loop",
+    "pbs-runtime",
     "proxmox-rest-server",
+    "proxmox-rrd-api-types",
     "proxmox-rrd",
+    "proxmox-systemd",
     "pbs-tape",
     "pbs-tools",
@@ -43,12 +46,12 @@ path = "src/lib.rs"
 [dependencies]
 apt-pkg-native = "0.3.2"
-base64 = "0.13"
+base64 = "0.12"
 bitflags = "1.2.1"
 bytes = "1.0"
-cidr = "0.2.1"
 crc32fast = "1"
 endian_trait = { version = "0.6", features = ["arrays"] }
+env_logger = "0.7"
 flate2 = "1.0"
 anyhow = "1.0"
 thiserror = "1.0"
@@ -60,16 +63,16 @@ http = "0.2"
 hyper = { version = "0.14", features = [ "full" ] }
 lazy_static = "1.4"
 libc = "0.2"
-log = "0.4.17"
-nix = "0.24"
+log = "0.4"
+nix = "0.19.1"
 num-traits = "0.2"
 once_cell = "1.3.1"
-openssl = "0.10.38" # currently patched!
+openssl = "0.10"
 pam = "0.7"
 pam-sys = "0.5"
 percent-encoding = "2.1"
-regex = "1.5.5"
-rustyline = "9"
+regex = "1.2"
+rustyline = "7"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 siphasher = "0.3"
@@ -77,12 +80,13 @@ syslog = "4.0"
 tokio = { version = "1.6", features = [ "fs", "io-util", "io-std", "macros", "net", "parking_lot", "process", "rt", "rt-multi-thread", "signal", "time" ] }
 tokio-openssl = "0.6.1"
 tokio-stream = "0.1.0"
-tokio-util = { version = "0.7", features = [ "codec", "io" ] }
+tokio-util = { version = "0.6", features = [ "codec", "io" ] }
 tower-service = "0.3.0"
-udev = "0.4"
+udev = ">= 0.3, <0.5"
 url = "2.1"
 #valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
 walkdir = "2"
+webauthn-rs = "0.2.5"
 xdg = "2.2"
 nom = "5.1"
 crossbeam-channel = "0.5"
@@ -93,58 +97,39 @@ zstd = { version = "0.6", features = [ "bindgen" ] }
 pathpatterns = "0.1.2"
 pxar = { version = "0.10.1", features = [ "tokio-io" ] }
-proxmox-http = { version = "0.6.1", features = [ "client", "http-helpers", "websocket" ] }
+proxmox = { version = "0.14.0", features = [ "sortable-macro" ] }
+proxmox-http = { version = "0.5.0", features = [ "client", "http-helpers", "websocket" ] }
 proxmox-io = "1"
-proxmox-lang = "1.1"
-proxmox-router = { version = "1.2.2", features = [ "cli" ] }
-proxmox-schema = { version = "1.3.1", features = [ "api-macro" ] }
+proxmox-lang = "1"
+proxmox-router = { version = "1", features = [ "cli" ] }
+proxmox-schema = { version = "1", features = [ "api-macro" ] }
 proxmox-section-config = "1"
-proxmox-tfa = { version = "2", features = [ "api", "api-types" ] }
-proxmox-time = "1.1.2"
+proxmox-tfa = { version = "1", features = [ "u2f" ] }
+proxmox-time = "1"
 proxmox-uuid = "1"
-proxmox-serde = "0.1"
-proxmox-shared-memory = "0.2"
-proxmox-sys = { version = "0.3", features = [ "sortable-macro" ] }
-proxmox-compression = "0.1"
-proxmox-acme-rs = "0.4"
+proxmox-acme-rs = "0.2.1"
 proxmox-apt = "0.8.0"
-proxmox-async = "0.4"
-proxmox-openid = "0.9.0"
+proxmox-openid = "0.8.0"
 pbs-api-types = { path = "pbs-api-types" }
 pbs-buildcfg = { path = "pbs-buildcfg" }
 pbs-client = { path = "pbs-client" }
 pbs-config = { path = "pbs-config" }
 pbs-datastore = { path = "pbs-datastore" }
+pbs-runtime = { path = "pbs-runtime" }
 proxmox-rest-server = { path = "proxmox-rest-server" }
+proxmox-rrd-api-types = { path = "proxmox-rrd-api-types" }
 proxmox-rrd = { path = "proxmox-rrd" }
+proxmox-systemd = { path = "proxmox-systemd" }
 pbs-tools = { path = "pbs-tools" }
 pbs-tape = { path = "pbs-tape" }

 # Local path overrides
 # NOTE: You must run `cargo update` after changing this for it to take effect!
 [patch.crates-io]
-#proxmox-acme-rs = { path = "../proxmox-acme-rs" }
-#proxmox-apt = { path = "../proxmox-apt" }
-#proxmox-async = { path = "../proxmox/proxmox-async" }
-#proxmox-compression = { path = "../proxmox/proxmox-compression" }
-#proxmox-borrow = { path = "../proxmox/proxmox-borrow" }
-#proxmox-fuse = { path = "../proxmox-fuse" }
+#proxmox = { path = "../proxmox/proxmox" }
 #proxmox-http = { path = "../proxmox/proxmox-http" }
-#proxmox-io = { path = "../proxmox/proxmox-io" }
-#proxmox-lang = { path = "../proxmox/proxmox-lang" }
-#proxmox-openid = { path = "../proxmox-openid-rs" }
-#proxmox-router = { path = "../proxmox/proxmox-router" }
-#proxmox-schema = { path = "../proxmox/proxmox-schema" }
-#proxmox-section-config = { path = "../proxmox/proxmox-section-config" }
-#proxmox-shared-memory = { path = "../proxmox/proxmox-shared-memory" }
-#proxmox-sys = { path = "../proxmox/proxmox-sys" }
-#proxmox-serde = { path = "../proxmox/proxmox-serde" }
-#proxmox-tfa = { path = "../proxmox/proxmox-tfa" }
-#proxmox-time = { path = "../proxmox/proxmox-time" }
-#proxmox-uuid = { path = "../proxmox/proxmox-uuid" }
 #pxar = { path = "../pxar" }

 [features]
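As the NOTE above states, toggling one of these overrides requires a lock-file refresh before it takes effect. A minimal sketch of the workflow, assuming the proxmox workspace is checked out next to this repository:

  # uncomment the wanted override (e.g. the pxar line) in Cargo.toml, then:
  # cargo update
  # cargo build --all --release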


@@ -38,8 +38,11 @@ SUBCRATES := \
 	pbs-config \
 	pbs-datastore \
 	pbs-fuse-loop \
+	pbs-runtime \
 	proxmox-rest-server \
+	proxmox-rrd-api-types \
 	proxmox-rrd \
+	proxmox-systemd \
 	pbs-tape \
 	pbs-tools \
 	proxmox-backup-banner \
@@ -221,6 +224,9 @@ install: $(COMPILED_BINS)
 	    install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
 	$(MAKE) -C www install
 	$(MAKE) -C docs install
+ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
+	$(MAKE) test # HACK, only test now to avoid clobbering build files with wrong config
+endif

 .PHONY: upload
 upload: ${SERVER_DEB} ${CLIENT_DEB} ${RESTORE_DEB} ${DOC_DEB} ${DEBUG_DEB}
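The added guard follows the standard Debian ``DEB_BUILD_OPTIONS`` convention, so the test run can still be skipped explicitly. A sketch, using the package build target from the build notes below:

  # DEB_BUILD_OPTIONS=nocheck make deb-all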


@@ -1,7 +1,3 @@
-Build & Release Notes
-*********************
-
 ``rustup`` Toolchain
 ====================
@@ -44,44 +40,41 @@ example for proxmox crate above).
 Build
 =====
-on Debian 11 Bullseye
+on Debian Buster

 Setup:
-  1. # echo 'deb http://download.proxmox.com/debian/devel/ bullseye main' | sudo tee /etc/apt/sources.list.d/proxmox-devel.list
-  2. # sudo wget https://enterprise.proxmox.com/debian/proxmox-release-bullseye.gpg -O /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
+  1. # echo 'deb http://download.proxmox.com/debian/devel/ buster main' >> /etc/apt/sources.list.d/proxmox-devel.list
+  2. # sudo wget http://download.proxmox.com/debian/proxmox-ve-release-6.x.gpg -O /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
   3. # sudo apt update
   4. # sudo apt install devscripts debcargo clang
   5. # git clone git://git.proxmox.com/git/proxmox-backup.git
-  6. # cd proxmox-backup; sudo mk-build-deps -ir
+  6. # sudo mk-build-deps -ir

 Note: 2. may be skipped if you already added the PVE or PBS package repository

-You are now able to build using the Makefile or cargo itself, e.g.::
-
-  # make deb-all
-  # # or for a non-package build
-  # cargo build --all --release
+You are now able to build using the Makefile or cargo itself.

 Design Notes
-************
+============

 Here are some random thought about the software design (unless I find a better place).

 Large chunk sizes
-=================
+-----------------

-It is important to notice that large chunk sizes are crucial for performance.
-We have a multi-user system, where different people can do different operations
-on a datastore at the same time, and most operation involves reading a series
-of chunks.
+It is important to notice that large chunk sizes are crucial for
+performance. We have a multi-user system, where different people can do
+different operations on a datastore at the same time, and most operation
+involves reading a series of chunks.

-So what is the maximal theoretical speed we can get when reading a series of
-chunks? Reading a chunk sequence need the following steps:
+So what is the maximal theoretical speed we can get when reading a
+series of chunks? Reading a chunk sequence need the following steps:

-- seek to the first chunk's start location
+- seek to the first chunk start location
 - read the chunk data
-- seek to the next chunk's start location
+- seek to the first chunk start location
 - read the chunk data
 - ...
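For a feel of the numbers involved, a back-of-the-envelope calculation with assumed values: at 10 ms per seek and 100 MiB/s sequential read, a 64 KiB chunk costs 10 ms + ~0.6 ms, giving roughly 6 MiB/s effective throughput, while a 4 MiB chunk costs 10 ms + 40 ms, giving roughly 80 MiB/s. The per-chunk seek is only amortized by large chunks.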

debian/changelog

@@ -1,522 +1,3 @@
rust-proxmox-backup (2.2.3-1) bullseye; urgency=medium
* datastore: swap dirtying the datastore cache every 60s by just using the
available config digest to detect any changes accurately when they actually
happen
* api: datastore list and datastore status: avoid opening datastore and
possibly iterating over namespace (for lesser privileged users), but
rather use the in-memory ACL tree directly to check if there's access to
any namespace below.
-- Proxmox Support Team <support@proxmox.com> Sat, 04 Jun 2022 16:30:05 +0200
rust-proxmox-backup (2.2.2-3) bullseye; urgency=medium
* datastore: lookup: reuse ChunkStore on stale datastore re-open
* bump tokio (async framework) dependency
-- Proxmox Support Team <support@proxmox.com> Thu, 02 Jun 2022 17:25:01 +0200
rust-proxmox-backup (2.2.2-2) bullseye; urgency=medium
* improvement of error handling when removing status files and locks from
jobs that were never executed.
-- Proxmox Support Team <support@proxmox.com> Wed, 01 Jun 2022 16:22:22 +0200
rust-proxmox-backup (2.2.2-1) bullseye; urgency=medium
* Revert "verify: allow '0' days for reverification", was already possible
by setting "ignore-verified" to false
* ui: datastore permissions: allow ACL path edit & query namespaces
* accessible group iter: allow NS descending with DATASTORE_READ privilege
* prune datastore: rework worker task log
* prune datastore: support max-depth and improve priv checks
* ui: prune input: support opt-in recursive/max-depth field
* add prune job config and api, allowing one to setup a scheduled pruning
for a specific namespace only
* ui: add ui for prune jobs
* api: disable setting prune options in datastore.cfg and transform any
existing prune tasks from datastore config to new prune job config in a
post installation hook
* proxmox-tape: use correct api call for 'load-media-from-slot'
* avoid overly strict privilege restrictions for some API endpoints and
actions when using namespaces. Better support navigating the user
interface when only having Datastore.Admin on a (sub) namespace.
* include required privilege names in some permission errors
* docs: fix some typos
* api: status: include empty entry for stores with ns-only privs
* ui: datastore options: avoid breakage if rrd store or active-ops cannot
be queried
* ui: datastore content: only mask the inner treeview, not the top bar on
error to allow a user to trigger a manual reload
* ui: system config: improve bottom margins and scroll behavior
-- Proxmox Support Team <support@proxmox.com> Wed, 01 Jun 2022 15:09:36 +0200
rust-proxmox-backup (2.2.1-1) bullseye; urgency=medium
* docs: update some screenshots and add new ones
* docs: port over certificate management chapters from Proxmox VE
* ui: datastore/Summary: correctly show the io-delay chart
* ui: sync/verify jobs: use pmxDisplayEditField to fix editing
* ui: server status: use power of two base for memory and swap
* ui: use base 10 (SI) for all storage related displays
* ui: datastore selector: show maintenance mode in selector
* docs: basic maintenance mode section
* docs: storage: refer to options
* storage: add some initial namespace docs
* ui: tape restore: fix form validation for datastore mapping
* ui: namespace selector: show picker empty text if no namespace
-- Proxmox Support Team <support@proxmox.com> Tue, 17 May 2022 13:56:50 +0200
rust-proxmox-backup (2.2.0-2) bullseye; urgency=medium
* client: add CLI auto-completion callbacks for ns parameters
* ui: fix setting protection in namespace
* ui: switch summary repo status to widget toolkit one
* ui: verify outdated: disallow blank and drop wrong empty text
* docs: add namespace section to sync documentation
* ui: datastore summary: add maintenance mask for offline entries
* ui: verify/sync: allow to optionally override ID again
* prune: fix workerid issues
-- Proxmox Support Team <support@proxmox.com> Mon, 16 May 2022 19:01:13 +0200
rust-proxmox-backup (2.2.0-1) bullseye; urgency=medium
* cli: improve namespace integration in proxmox-backup-client and
proxmox-tape
* docs: tape: add information about namespaces
* api: datastore status: make counts for groups and snapshots iterate over
all accessible namespaces recursively
* ui: fix storeId casing to register store correctly, so that we can query
it again for the ACL permission path selector
* ui: trigger datastore update after maintenance mode edit
* ui: namespace selector: set queryMode to local to avoid bogus background
requests on typing
* ui: sync job: fix clearing value of remote target-namespace by mistake on
edit
* ui: remote target ns selector: add clear trigger
* ui: prune group: add namespace info to title
* fix #4001: ui: add prefix to files downloaded through the pxar browser
* ui: datastore: reload content tree on successful datastore add
* ui: datastore: allow deleting currently shown namespace
* docs: rework access control, list available privileges
* docs: access control: add "Objects and Paths" section and fix
add-permission screenshot
-- Proxmox Support Team <support@proxmox.com> Mon, 16 May 2022 11:06:05 +0200
rust-proxmox-backup (2.1.10-1) bullseye; urgency=medium
* datastore: drop bogus chunk size check, can cause trouble
* pull/sync: detect remote lack of namespace support
* pull/sync: correctly query with remote-ns as parent
* ui: sync: add reduced max-depth selector
* ui: group filter: make also local filter NS aware
* api types: set NS_MAX_DEPTH schema default to MAX_NAMESPACE_DEPTH instead
of 0
* tape: notify when arriving at end of media
* tree-wide: rename 'backup-ns' API parameters to 'ns'
* tape: add namespaces/recursion depth to tape backup jobs
* api: tape/restore: add namespace mapping
* tape: bump catalog/snapshot archive magic
* ui: tape: backup overview: show namespaces as their own level above groups
* ui: tape restore: allow simple namespace mapping
-- Proxmox Support Team <support@proxmox.com> Fri, 13 May 2022 14:26:32 +0200
rust-proxmox-backup (2.1.9-2) bullseye; urgency=medium
* api: tape restore: lock the target datastore, not the source one
* chunk store: force write chunk again if it exists but its metadata length
is zero
* completion: fix 'group-filter' parameter name
* implement backup namespaces for datastores, allowing one to reuse a single
chunkstore deduplication domain for multiple sources without naming
conflicts and with fine-grained access control.
* make various datastore related API calls backup namespace aware
* make sync and pull backup namespace aware
* ui: datastore content: show namespaces but only one level at a time
* ui: make various datastore related UI components namespace aware
* fix various bugs, add namespace support to file-restore
-- Proxmox Support Team <support@proxmox.com> Thu, 12 May 2022 14:25:53 +0200
rust-proxmox-backup (2.1.8-1) bullseye; urgency=medium
* api: status: return gc-status again
* proxmox-backup-proxy: stop accept() loop on daemon shutdown to avoid that
new request get accepted while the REST stack is already stopped, for
example on the reload triggered by a package upgrade.
* pull: improve filtering local removal candidates
-- Proxmox Support Team <support@proxmox.com> Mon, 02 May 2022 17:36:11 +0200
rust-proxmox-backup (2.1.7-1) bullseye; urgency=medium
* pbs-tape: sgutils2: check sense data when status is 'CHECK_CONDITION'
* rework & refactor datastore implementation for a more hierarchical access
structure
* datastore: implement Iterator for backup group and snapshot listing to
allow more efficient access for cases where we do not need the whole list
in memory
* pbs-client: extract: rewrite create_zip with sequential decoder
* pbs-client: extract: add top-level dir in tar.zst
* fix #3067: ui: add a separate notes view for longer markdown notes and
copy the markdown primer from Proxmox VE to Proxmox Backup Server docs
* restore-daemon: start disk initialization in parallel to the api
* restore-daemon: put blocking code into 'block_in_place'
* restore-daemon: avoid auto-pre-mounting zpools completely, the upfront
(time) cost can be too big to pay up initially, e.g., if there are many
subvolumes present, so only mount on demand.
* file-restore: add 'timeout' and 'json-error' parameter
* ui: add summary mask when in maintenance mode
* ui: update datastore's navigation icon and tooltip if it is in maintenance
mode
-- Proxmox Support Team <support@proxmox.com> Wed, 27 Apr 2022 19:53:53 +0200
rust-proxmox-backup (2.1.6-1) bullseye; urgency=medium
* api: verify: allow passing '0 days' for immediate re-verification
* fix #3103. node configuration: allow to configure default UI language
* fix #3856: tape: encryption key's password hint parameter is not optional
* re-use PROXMOX_DEBUG environment variable to control log level filter
* ui: WebAuthn: fix stopping store upgrades on destroy and decrease interval
* report: add tape, traffic control and disk infos and tune output order
* fix #3853: cli/api: add force option to tape key change-passphrase
* fix #3323: cli client: add dry-run option for backup command
* tape: make iterating over chunks to backup smarter to avoid some work
* bin: daily-update: make single checks/updates fail gracefully and log
to syslog directly instead of going through stdout indirectly.
* datastore: allow to turn off inode-sorting for chunk iteration. While inode
sorting benefits read-performance on block devices with higher latency
(e.g., spinning disks), it's also some extra work to get the metadata
required for sorting, so it's a trade-off. For setups that have either very
slow or very fast metadata IO the benefits may turn into a net cost.
* docs: explain retention time for event allocation policy in more detail
* docs: add tape schedule examples
* proxmox-backup-debug api: parse parameters before sending to api
* ui: fix panel height in the dashboard for three-column view mode
* fix #3934 tape owner-selector to auth-id (user OR token)
* fix #3067: api: add support for multi-line comments in the node
configuration
* pbs-client: print error when we couldn't download previous FIDX/DIDX for
incremental change tracking
* fix #3854 add command to import a key from a file (json or paper-key
format) to proxmox-tape
* improve IO access pattern for some scenarios like TFA with high user and
login count or the file-restore-for-block-backup VM's internal driver.
* pxar create: fix anchored path pattern matching when adding entries
* docs: client: file exclusion: add note about leading slash
* rest-server: add option to rotate task logs by 'max_days' instead of
'max_files'
* pbs-datastore: add active operations tracking and use it to implement a
graceful transition into the also newly added maintenance mode (read-only
or offline) for datastores. Note that the UI implementation may still show
some rough edges if a datastore is in offline mode for maintenance.
* add new streaming-response type for API call responses and enable it for
the snapshot and task-log list, which can both get rather big. This avoids
allocation of a potentially big intermediate memory buffer and thus
overall memory usage.
* pxar: accompany existing .zip download support with a tar.zst(d) one. The
tar archive supports more file types (e.g., hard links or device nodes)
and zstd allows for an efficient but still effective compression.
-- Proxmox Support Team <support@proxmox.com> Wed, 13 Apr 2022 17:00:53 +0200
rust-proxmox-backup (2.1.5-1) bullseye; urgency=medium
* tell system allocator to always use mmap for allocations >= 128 KiB to
improve reclaimability of free'd memory to the OS and reduce peak and avg.
RSS consumption
* file restore: always wait up to 25s for the file-restore-VM to have
scanned all possible filesystems in a backup. While theoretically there
are some edge cases where the tool waits less now, most common ones should
be waiting more compared to the 12s "worst" case previously.
-- Proxmox Support Team <support@proxmox.com> Wed, 26 Jan 2022 16:23:09 +0100
rust-proxmox-backup (2.1.4-1) bullseye; urgency=medium
* config: add tls ciphers to NodeConfig
* pbs-tools: improve memory foot print of LRU Cache
* update dependencies to avoid a ref-count leak in async helpers
-- Proxmox Support Team <support@proxmox.com> Fri, 21 Jan 2022 10:48:14 +0100
rust-proxmox-backup (2.1.3-1) bullseye; urgency=medium
* fix #3618: proxmox-async: zip: add conditional EFS flag to zip files to
improve non-ascii code point extraction under windows.
* OpenID Connect login: improve error message for disabled users
* ui: tape: backup job: add second tab for group-filters to add/edit window
* ui: sync job: add second tab for group-filters to add/edit window
* ui: calendar event: add once daily example and clarify the workday one
* fix #3794: api types: set backup time (since the UNIX epoch) lower limit
to 1
* ui: fix opening settings window in datastore panel
* api: zfs: create zpool with `relatime=on` flag set
* fix #3763: disable SSL/TLS renegotiation
* node config: add email-from parameter to control notification sender
address
* ui: configuration: rename the "Authentication" tab to "Other" and add a
"General" section with HTTP-proxy and email-from settings
* datastore stats: not include the unavailable `io_ticks` for ZFS
datastores
* ui: hide RRD chart for IO delay if no `io_ticks` are returned
* fix #3058: ui: improve remote edit UX by clarifying ID vs host fields
* docs: fix some minor typos
* api-types: relax nodename API schema, make it a simple regex check like in
Proxmox VE
-- Proxmox Support Team <support@proxmox.com> Wed, 12 Jan 2022 16:49:13 +0100
rust-proxmox-backup (2.1.2-1) bullseye; urgency=medium
* docs: backup-client: fix wrong reference
* docs: remotes: note that protected flags will not be synced
* sync job: correctly apply rate limit
-- Proxmox Support Team <support@proxmox.com> Tue, 23 Nov 2021 13:56:15 +0100
rust-proxmox-backup (2.1.1-2) bullseye; urgency=medium
* docs: update and add traffic control related screenshots
* docs: mention traffic control (bandwidth limits) for sync jobs
-- Proxmox Support Team <support@proxmox.com> Mon, 22 Nov 2021 16:07:39 +0100
rust-proxmox-backup (2.1.1-1) bullseye; urgency=medium
* fix proxmox-backup-manager sync-job list
* ui, api: sync-job: allow one to configure a rate limit
* api: snapshot list: set default for 'protected' flag
* ui: datastore content: rework rendering protection state
* docs: update traffic control docs (use HumanBytes)
* ui: traffic-control: include ipv6 in 'all' networks
* ui: traffic-control edit: add spaces between networks for more
readability
* tape: fix passing-through key-fingerprint
* avoid a bogus error regarding logrotate-path due to a reversed check
-- Proxmox Support Team <support@proxmox.com> Mon, 22 Nov 2021 12:24:31 +0100
rust-proxmox-backup (2.1.0-1) bullseye; urgency=medium
* rest server: make successful-ticket auth log a debug one to avoid
syslog spam
* traffic-controls: add API/CLI to show current traffic
* docs: add traffic control section
* ui: use TFA widgets from widget toolkit
* sync: allow pulling groups selectively
* fix #3533: tape backup: filter groups according to config
* proxmox-tape: add missing notify-user option to backup command
* openid: allow arbitrary username-claims
* openid: support configuring the prompt, scopes and ACR values
* use human-byte for traffic-control rate-in/out and burst-in/out config
* ui: add traffic control view and editor
-- Proxmox Support Team <support@proxmox.com> Sat, 20 Nov 2021 22:44:07 +0100
rust-proxmox-backup (2.0.14-1) bullseye; urgency=medium
* fix directory permission problems
* add traffic control configuration config with API
* proxmox-backup-proxy: implement traffic control
* proxmox-backup-client: add rate/burst parameter to backup/restore CLI
* openid_login: verify that firstname, lastname and email fit our
schema definitions
* docs: add info about protection flag to client docs
* fix #3602: ui: datastore/Content: add action to set protection status
* ui: add protected icon to snapshot (if they are protected)
* ui: PruneInputPanel: add keepReason 'protected' for protected backups
* proxmox-backup-client: add 'protected' commands
* acme: interpret no TOS as accepted
* acme: new_account: prevent replacing existing accounts
-- Proxmox Support Team <support@proxmox.com> Fri, 12 Nov 2021 08:04:55 +0100
rust-proxmox-backup (2.0.13-1) bullseye; urgency=medium
* tape: simplify export_media_set for pool writer
* tape: improve export_media error message for not found tape
* rest-server: use hashmap for parameter errors
* proxmox-rrd: use new file format with higher resolution
* proxmox-rrd: use a journal to reduce amount of bytes written
* use new fsync parameter to replace_file and atomic_open_or_create
* docs: language and formatting fixup
* docs: Update for new features/functionality
-- Proxmox Support Team <support@proxmox.com> Thu, 21 Oct 2021 08:17:00 +0200
rust-proxmox-backup (2.0.12-1) bullseye; urgency=medium
* proxmox-backup-proxy: clean up old tasks when their reference was rotated

debian/control

@@ -8,15 +8,14 @@ Build-Depends: debhelper (>= 12),
 libstd-rust-dev,
 librust-anyhow-1+default-dev,
 librust-apt-pkg-native-0.3+default-dev (>= 0.3.2-~~),
-librust-base64-0.13+default-dev,
+librust-base64-0.12+default-dev,
 librust-bitflags-1+default-dev (>= 1.2.1-~~),
 librust-bytes-1+default-dev,
-librust-cidr-0.2+default-dev (>= 0.2.1-~~),
 librust-crc32fast-1+default-dev,
 librust-crossbeam-channel-0.5+default-dev,
 librust-endian-trait-0.6+arrays-dev,
 librust-endian-trait-0.6+default-dev,
-librust-env-logger-0.9+default-dev,
+librust-env-logger-0.7+default-dev,
 librust-flate2-1+default-dev,
 librust-foreign-types-0.3+default-dev,
 librust-futures-0.3+default-dev,
@@ -24,61 +23,46 @@ Build-Depends: debhelper (>= 12),
 librust-h2-0.3+stream-dev,
 librust-handlebars-3+default-dev,
 librust-hex-0.4+default-dev (>= 0.4.3-~~),
-librust-hex-0.4+serde-dev (>= 0.4.3-~~),
 librust-http-0.2+default-dev,
-librust-hyper-0.14+default-dev (>= 0.14.5-~~),
-librust-hyper-0.14+full-dev (>= 0.14.5-~~),
+librust-hyper-0.14+default-dev,
+librust-hyper-0.14+full-dev,
 librust-lazy-static-1+default-dev (>= 1.4-~~),
 librust-libc-0.2+default-dev,
-librust-log-0.4+default-dev (>= 0.4.17-~~) <!nocheck>,
-librust-nix-0.24+default-dev,
+librust-log-0.4+default-dev,
+librust-nix-0.19+default-dev (>= 0.19.1-~~),
 librust-nom-5+default-dev (>= 5.1-~~),
 librust-num-traits-0.2+default-dev,
 librust-once-cell-1+default-dev (>= 1.3.1-~~),
-librust-openssl-0.10+default-dev (>= 0.10.38-~~),
+librust-openssl-0.10+default-dev,
 librust-pam-0.7+default-dev,
 librust-pam-sys-0.5+default-dev,
 librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
 librust-percent-encoding-2+default-dev (>= 2.1-~~),
 librust-pin-project-lite-0.2+default-dev,
-librust-proxmox-acme-rs-0.4+default-dev,
+librust-proxmox-0.14+sortable-macro-dev,
+librust-proxmox-acme-rs-0.2+default-dev (>= 0.2.1-~~),
 librust-proxmox-apt-0.8+default-dev,
-librust-proxmox-async-0.4+default-dev,
 librust-proxmox-borrow-1+default-dev,
-librust-proxmox-compression-0.1+default-dev (>= 0.1.1-~~),
 librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
-librust-proxmox-http-0.6+client-dev (>= 0.6.1-~~),
-librust-proxmox-http-0.6+default-dev (>= 0.6.1-~~),
-librust-proxmox-http-0.6+http-helpers-dev (>= 0.6.1-~~),
-librust-proxmox-http-0.6+websocket-dev (>= 0.6.1-~~),
-librust-proxmox-io-1+default-dev (>= 1.0.1-~~),
-librust-proxmox-io-1+tokio-dev (>= 1.0.1-~~),
-librust-proxmox-lang-1+default-dev (>= 1.1-~~),
-librust-proxmox-openid-0.9+default-dev,
-librust-proxmox-router-1+cli-dev (>= 1.2-~~),
-librust-proxmox-router-1+default-dev (>= 1.2.2-~~),
-librust-proxmox-schema-1+api-macro-dev (>= 1.3.1-~~),
-librust-proxmox-schema-1+default-dev (>= 1.3.1-~~),
-librust-proxmox-schema-1+upid-api-impl-dev (>= 1.3.1-~~),
+librust-proxmox-http-0.5+client-dev,
+librust-proxmox-http-0.5+default-dev,
+librust-proxmox-http-0.5+http-helpers-dev,
+librust-proxmox-http-0.5+websocket-dev,
+librust-proxmox-io-1+tokio-dev,
+librust-proxmox-lang-1+default-dev,
+librust-proxmox-openid-0.8+default-dev,
+librust-proxmox-router-1+cli-dev (>= 1.1.0-~~),
+librust-proxmox-schema-1+api-macro-dev,
 librust-proxmox-section-config-1+default-dev,
-librust-proxmox-serde-0.1+default-dev,
-librust-proxmox-shared-memory-0.2+default-dev,
-librust-proxmox-sys-0.3+default-dev,
-librust-proxmox-sys-0.3+logrotate-dev,
-librust-proxmox-sys-0.3+sortable-macro-dev,
-librust-proxmox-tfa-2+api-dev,
-librust-proxmox-tfa-2+api-types-dev,
-librust-proxmox-tfa-2+default-dev,
-librust-proxmox-time-1+default-dev (>= 1.1.2-~~),
+librust-proxmox-tfa-1+u2f-dev,
+librust-proxmox-time-1+default-dev,
 librust-proxmox-uuid-1+default-dev,
-librust-proxmox-uuid-1+serde-dev,
 librust-pxar-0.10+default-dev (>= 0.10.1-~~),
 librust-pxar-0.10+tokio-io-dev (>= 0.10.1-~~),
-librust-regex-1+default-dev (>= 1.5.5-~~),
-librust-rustyline-9+default-dev,
+librust-regex-1+default-dev (>= 1.2-~~),
+librust-rustyline-7+default-dev,
 librust-serde-1+default-dev,
 librust-serde-1+derive-dev,
-librust-serde-cbor-0.11+default-dev (>= 0.11.1-~~),
 librust-serde-json-1+default-dev,
 librust-siphasher-0.3+default-dev,
 librust-syslog-4+default-dev,
@@ -94,17 +78,17 @@ Build-Depends: debhelper (>= 12),
 librust-tokio-1+rt-dev (>= 1.6-~~),
 librust-tokio-1+rt-multi-thread-dev (>= 1.6-~~),
 librust-tokio-1+signal-dev (>= 1.6-~~),
-librust-tokio-1+sync-dev (>= 1.6-~~),
 librust-tokio-1+time-dev (>= 1.6-~~),
 librust-tokio-openssl-0.6+default-dev (>= 0.6.1-~~),
 librust-tokio-stream-0.1+default-dev,
-librust-tokio-util-0.7+codec-dev,
-librust-tokio-util-0.7+default-dev,
-librust-tokio-util-0.7+io-dev,
+librust-tokio-util-0.6+codec-dev,
+librust-tokio-util-0.6+default-dev,
+librust-tokio-util-0.6+io-dev,
 librust-tower-service-0.3+default-dev,
-librust-udev-0.4+default-dev,
+librust-udev-0.4+default-dev | librust-udev-0.3+default-dev,
 librust-url-2+default-dev (>= 2.1-~~),
 librust-walkdir-2+default-dev,
+librust-webauthn-rs-0.2+default-dev (>= 0.2.5-~~),
 librust-xdg-2+default-dev (>= 2.2-~~),
 librust-zstd-0.6+bindgen-dev,
 librust-zstd-0.6+default-dev,
@@ -152,7 +136,7 @@ Depends: fonts-font-awesome,
 postfix | mail-transport-agent,
 proxmox-backup-docs,
 proxmox-mini-journalreader,
-proxmox-widget-toolkit (>= 3.4-3),
+proxmox-widget-toolkit (>= 3.3-2),
 pve-xtermjs (>= 4.7.0-1),
 sg3-utils,
 smartmontools,

debian/postinst

@@ -4,14 +4,6 @@ set -e
 #DEBHELPER#

-update_sync_job() {
-    job="$1"
-
-    echo "Updating sync job '$job' to make old 'remove-vanished' default explicit.."
-    proxmox-backup-manager sync-job update "$job" --remove-vanished true \
-        || echo "Failed, please check sync.cfg manually!"
-}
-
 case "$1" in
     configure)
         # need to have user backup in the tape group
@@ -40,36 +32,6 @@ case "$1" in
             echo "Fixing up termproxy user id in task log..."
             flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active || true
         fi
-
-        if dpkg --compare-versions "$2" 'lt' '2.2.2~'; then
-            echo "moving prune schedule from datacenter config to new prune job config"
-            proxmox-backup-manager update-to-prune-jobs-config \
-                || echo "Failed to move prune jobs, please check manually"
-            true
-        fi
-        if dpkg --compare-versions "$2" 'lt' '2.1.3~' && test -e /etc/proxmox-backup/sync.cfg; then
-            prev_job=""
-
-            # read from HERE doc because POSIX sh limitations
-            while read -r key value; do
-                if test "$key" = "sync:"; then
-                    if test -n "$prev_job"; then
-                        # previous job doesn't have an explicit value
-                        update_sync_job "$prev_job"
-                    fi
-                    prev_job=$value
-                else
-                    prev_job=""
-                fi
-            done <<EOF
-$(grep -e '^sync:' -e 'remove-vanished' /etc/proxmox-backup/sync.cfg)
-EOF
-            if test -n "$prev_job"; then
-                # last job doesn't have an explicit value
-                update_sync_job "$prev_job"
-            fi
-        fi
     fi
     ;;
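The removed migration loop keyed off the raw section-config layout of sync.cfg. Hypothetical grep output for two jobs, where only the second already sets the option explicitly (job names invented for illustration):

  sync: job-a
  sync: job-b
          remove-vanished true

A `sync:` line immediately followed by another `sync:` line thus marked a job without an explicit `remove-vanished`, which the loop then updated via `update_sync_job`.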

debian/rules

@@ -32,6 +32,9 @@ override_dh_auto_build:
 override_dh_missing:
 	dh_missing --fail-missing

+override_dh_auto_test:
+	# ignore here to avoid rebuilding the binaries with the wrong target
+
 override_dh_auto_install:
 	dh_auto_install -- \
 	    PROXY_USER=backup \


@@ -71,7 +71,7 @@ Environment Variables
 .. Note:: Passwords must be valid UTF-8 and may not contain newlines. For your
-   convenience, Proxmox Backup Server only uses the first line as password, so
+   convienience, Proxmox Backup Server only uses the first line as password, so
    you can add arbitrary comments after the first newline.
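A small shell sketch of that first-line convention (the file path and value are invented; ``PBS_PASSWORD`` is one of the environment variables this chapter covers):

   # cat /root/.pbs-password
   my-secret-password
   comment: anything after the first newline is ignored
   # export PBS_PASSWORD="$(cat /root/.pbs-password)"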
@@ -120,11 +120,11 @@ This will prompt you for a password, then upload a file archive named
 (i.e. ``--include-dev /boot/efi``). You can use this option
 multiple times for each mount point that should be included.

-The ``--repository`` option can get quite long and is used by all commands. You
-can avoid having to enter this value by setting the environment variable
-``PBS_REPOSITORY``. Note that if you would like this to remain set over
-multiple sessions, you should instead add the below line to your ``.bashrc``
-file.
+The ``--repository`` option can get quite long and is used by all
+commands. You can avoid having to enter this value by setting the
+environment variable ``PBS_REPOSITORY``. Note that if you would like this to
+remain set over multiple sessions, you should instead add the below line to your
+``.bashrc`` file.

 .. code-block:: console
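For illustration, such a ``.bashrc`` line could look like the following (server and datastore names invented):

   export PBS_REPOSITORY=backup-server:store1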
@@ -142,16 +142,9 @@ you want to back up two disks mounted at ``/mnt/disk1`` and ``/mnt/disk2``:
 This creates a backup of both disks.

-If you want to use a namespace for the backup target you can add the `--ns`
-parameter:
-
-.. code-block:: console
-
-    # proxmox-backup-client backup disk1.pxar:/mnt/disk1 disk2.pxar:/mnt/disk2 --ns a/b/c
-
-The backup command takes a list of backup specifications, which include the
-archive name on the server, the type of the archive, and the archive source at
-the client. The format is:
+The backup command takes a list of backup specifications, which
+include the archive name on the server, the type of the archive, and the
+archive source at the client. The format is:

     <archive-name>.<type>:<source-path>
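For illustration, a specification list mixing a file archive and a block-device image (paths invented):

   # proxmox-backup-client backup etc.pxar:/etc disk.img:/dev/sdb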
@@ -166,25 +159,21 @@ device images. To create a backup of a block device, run the following command:
 Excluding Files/Directories from a Backup
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-Sometimes it is desired to exclude certain files or directories from a backup
-archive. To tell the Proxmox Backup client when and how to ignore files and
-directories, place a text file named ``.pxarexclude`` in the filesystem
-hierarchy. Whenever the backup client encounters such a file in a directory,
-it interprets each line as a glob match pattern for files and directories that
-are to be excluded from the backup.
+Sometimes it is desired to exclude certain files or directories from a backup archive.
+To tell the Proxmox Backup client when and how to ignore files and directories,
+place a text file named ``.pxarexclude`` in the filesystem hierarchy.
+Whenever the backup client encounters such a file in a directory, it interprets
+each line as a glob match pattern for files and directories that are to be excluded
+from the backup.

 The file must contain a single glob pattern per line. Empty lines and lines
 starting with ``#`` (indicating a comment) are ignored.

-A ``!`` at the beginning of a line reverses the glob match pattern from an
-exclusion to an explicit inclusion. This makes it possible to exclude all
-entries in a directory except for a few single files/subdirectories.
+A ``!`` at the beginning of a line reverses the glob match pattern from an exclusion
+to an explicit inclusion. This makes it possible to exclude all entries in a
+directory except for a few single files/subdirectories.

 Lines ending in ``/`` match only on directories.

-The directory containing the ``.pxarexclude`` file is considered to be the root
-of the given patterns. It is only possible to match files in this directory and
-its subdirectories.
-
-.. Note:: Patterns without a leading ``/`` will also match in subdirectories,
-   while patterns with a leading ``/`` will only match in the current directory.
+The directory containing the ``.pxarexclude`` file is considered to be the root of
+the given patterns. It is only possible to match files in this directory and its subdirectories.

 ``\`` is used to escape special glob characters.
 ``?`` matches any single character.
@@ -193,15 +182,15 @@ its subdirectories.
 the pattern ``**/*.tmp``, it would exclude all files ending in ``.tmp`` within
 a directory and its subdirectories.

 ``[...]`` matches a single character from any of the provided characters within
-the brackets. ``[!...]`` does the complementary and matches any single
-character not contained within the brackets. It is also possible to specify
-ranges with two characters separated by ``-``. For example, ``[a-z]`` matches
-any lowercase alphabetic character, and ``[0-9]`` matches any single digit.
+the brackets. ``[!...]`` does the complementary and matches any single character
+not contained within the brackets. It is also possible to specify ranges with two
+characters separated by ``-``. For example, ``[a-z]`` matches any lowercase
+alphabetic character, and ``[0-9]`` matches any single digit.

 The order of the glob match patterns defines whether a file is included or
 excluded, that is to say, later entries override earlier ones.

-This is also true for match patterns encountered deeper down the directory
-tree, which can override a previous exclusion.
+This is also true for match patterns encountered deeper down the directory tree,
+which can override a previous exclusion.

 .. Note:: Excluded directories will **not** be read by the backup client. Thus,
    a ``.pxarexclude`` file in an excluded subdirectory will have no effect.
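Putting these rules together, a small invented ``.pxarexclude`` could read:

   # ignore temporary files anywhere below this directory
   *.tmp
   # ignore the cache directory itself
   /cache/
   # but explicitly keep a single temporary file
   !/keep-me.tmp

Here the later ``!`` entry overrides the earlier ``*.tmp`` exclusion for that one file.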
@@ -416,11 +405,6 @@ list command provides a list of all the snapshots on the server:
 ├────────────────────────────────┼─────────────┼────────────────────────────────────┤
 ...

-.. tip:: List will by default only output the backup snapshots of the root
-   namespace itself. To list backups from another namespace use the ``--ns
-   <ns>`` option
-
 You can inspect the catalog to find specific files.

 .. code-block:: console
@@ -578,10 +562,10 @@ user that has ``Datastore.Modify`` privileges on the datastore.
     # proxmox-backup-client change-owner vm/103 john@pbs

 This can also be done from within the web interface, by navigating to the
-`Content` section of the datastore that contains the backup group and selecting
-the user icon under the `Actions` column. Common cases for this could be to
-change the owner of a sync job from ``root@pam``, or to repurpose a backup
-group.
+`Content` section of the datastore that contains the backup group and
+selecting the user icon under the `Actions` column. Common cases for this could
+be to change the owner of a sync job from ``root@pam``, or to repurpose a
+backup group.

 .. _backup-pruning:
@@ -589,24 +573,16 @@ group.
 Pruning and Removing Backups
 ----------------------------

-You can manually delete a backup snapshot using the ``forget`` command:
+You can manually delete a backup snapshot using the ``forget``
+command:

 .. code-block:: console

     # proxmox-backup-client snapshot forget <snapshot>

-.. caution:: This command removes all archives in this backup snapshot. They
-   will be inaccessible and *unrecoverable*.
+.. caution:: This command removes all archives in this backup
+   snapshot. They will be inaccessible and unrecoverable.

-Don't forget to add the namespace ``--ns`` parameter if you want to forget a
-snapshot that is contained in the root namespace:
-
-.. code-block:: console
-
-    # proxmox-backup-client snapshot forget <snapshot> --ns <ns>
-
 Although manual removal is sometimes required, the ``prune``
@@ -678,25 +654,6 @@ shows the list of existing snapshots and what actions prune would take.
 in the chunk-store. The chunk-store still contains the data blocks. To free
 space you need to perform :ref:`client_garbage-collection`.

-It is also possible to protect single snapshots from being pruned or deleted:
-
-.. code-block:: console
-
-    # proxmox-backup-client snapshot protected update <snapshot> true
-
-This will set the protected flag on the snapshot and prevent pruning or manual
-deletion of this snapshot until the flag is removed again with:
-
-.. code-block:: console
-
-    # proxmox-backup-client snapshot protected update <snapshot> false
-
-When a group with a protected snapshot is deleted, only the non-protected
-ones are removed, and the group will remain.
-
-.. note:: This flag will not be synced when using pull or sync jobs. If you
-   want to protect a synced snapshot, you have to manually do this again on
-   the target backup server.
-
 .. _client_garbage-collection:


@@ -1,10 +1,10 @@
 Backup Protocol
 ===============

-Proxmox Backup Server uses a REST-based API. While the management
-interface uses normal HTTP, the actual backup and restore interface uses
+Proxmox Backup Server uses a REST based API. While the management
+interface use normal HTTP, the actual backup and restore interface use
 HTTP/2 for improved performance. Both HTTP and HTTP/2 are well known
-standards, so the following section assumes that you are familiar with
+standards, so the following section assumes that you are familiar on
 how to use them.
@@ -13,35 +13,35 @@ Backup Protocol API
 To start a new backup, the API call ``GET /api2/json/backup`` needs to
 be upgraded to a HTTP/2 connection using
-``proxmox-backup-protocol-v1`` as the protocol name::
+``proxmox-backup-protocol-v1`` as protocol name::

   GET /api2/json/backup HTTP/1.1
   UPGRADE: proxmox-backup-protocol-v1

-The server replies with the ``HTTP 101 Switching Protocol`` status code,
-and you can then issue REST commands on the updated HTTP/2 connection.
+The server replies with HTTP 101 Switching Protocol status code,
+and you can then issue REST commands on that updated HTTP/2 connection.

 The backup protocol allows you to upload three different kind of files:

 - Chunks and blobs (binary data)

-- Fixed indexes (List of chunks with fixed size)
+- Fixed Indexes (List of chunks with fixed size)

-- Dynamic indexes (List of chunks with variable size)
+- Dynamic Indexes (List of chunk with variable size)

-The following section provides a short introduction on how to upload such
+The following section gives a short introduction how to upload such
 files. Please use the `API Viewer <api-viewer/index.html>`_ for
-details about the available REST commands.
+details about available REST commands.

 Upload Blobs
 ~~~~~~~~~~~~

-Blobs are uploaded using ``POST /blob``. The HTTP body contains the
-data encoded as :ref:`Data Blob <data-blob-format>`.
+Uploading blobs is done using ``POST /blob``. The HTTP body contains the
+data encoded as :ref:`Data Blob <data-blob-format>`).

-The file name must end with ``.blob``, and is automatically added
-to the backup manifest, following the call to ``POST /finish``.
+The file name needs to end with ``.blob``, and is automatically added
+to the backup manifest.

 Upload Chunks
@@ -56,41 +56,40 @@ encoded as :ref:`Data Blob <data-blob-format>`).
 Upload Fixed Indexes
 ~~~~~~~~~~~~~~~~~~~~

-Fixed indexes are used to store VM image data. The VM image is split
+Fixed indexes are use to store VM image data. The VM image is split
 into equally sized chunks, which are uploaded individually. The index
-file simply contains a list of chunk digests.
+file simply contains a list to chunk digests.

-You create a fixed index with ``POST /fixed_index``. Then, upload
+You create a fixed index with ``POST /fixed_index``. Then upload
 chunks with ``POST /fixed_chunk``, and append them to the index with
 ``PUT /fixed_index``. When finished, you need to close the index using
 ``POST /fixed_close``.

 The file name needs to end with ``.fidx``, and is automatically added
-to the backup manifest, following the call to ``POST /finish``.
+to the backup manifest.

 Upload Dynamic Indexes
 ~~~~~~~~~~~~~~~~~~~~~~

-Dynamic indexes are used to store file archive data. The archive data
+Dynamic indexes are use to store file archive data. The archive data
 is split into dynamically sized chunks, which are uploaded
-individually. The index file simply contains a list of chunk digests
+individually. The index file simply contains a list to chunk digests
 and offsets.

-You can create a dynamically sized index with ``POST /dynamic_index``. Then,
+You create a dynamic sized index with ``POST /dynamic_index``. Then
 upload chunks with ``POST /dynamic_chunk``, and append them to the index with
 ``PUT /dynamic_index``. When finished, you need to close the index using
 ``POST /dynamic_close``.

-The filename needs to end with ``.didx``, and is automatically added
-to the backup manifest, following the call to ``POST /finish``.
+The file name needs to end with ``.didx``, and is automatically added
+to the backup manifest.

 Finish Backup
 ~~~~~~~~~~~~~

-Once you have uploaded all data, you need to call ``POST /finish``. This
-commits all data and ends the backup protocol.
+Once you have uploaded all data, you need to call ``POST
+/finish``. This commits all data and ends the backup protocol.
 Restore/Reader Protocol API
@@ -103,39 +102,39 @@ be upgraded to a HTTP/2 connection using
   GET /api2/json/reader HTTP/1.1
   UPGRADE: proxmox-backup-reader-protocol-v1

-The server replies with the ``HTTP 101 Switching Protocol`` status code,
+The server replies with HTTP 101 Switching Protocol status code,
 and you can then issue REST commands on that updated HTTP/2 connection.

-The reader protocol allows you to download three different kinds of files:
+The reader protocol allows you to download three different kind of files:

 - Chunks and blobs (binary data)

-- Fixed indexes (list of chunks with fixed size)
+- Fixed Indexes (List of chunks with fixed size)

-- Dynamic indexes (list of chunks with variable size)
+- Dynamic Indexes (List of chunk with variable size)

-The following section provides a short introduction on how to download such
+The following section gives a short introduction how to download such
 files. Please use the `API Viewer <api-viewer/index.html>`_ for details about
-the available REST commands.
+available REST commands.

 Download Blobs
 ~~~~~~~~~~~~~~

-Blobs are downloaded using ``GET /download``. The HTTP body contains the
+Downloading blobs is done using ``GET /download``. The HTTP body contains the
 data encoded as :ref:`Data Blob <data-blob-format>`.

 Download Chunks
 ~~~~~~~~~~~~~~~

-Chunks are downloaded using ``GET /chunk``. The HTTP body contains the
-data encoded as :ref:`Data Blob <data-blob-format>`.
+Downloading chunks is done using ``GET /chunk``. The HTTP body contains the
+data encoded as :ref:`Data Blob <data-blob-format>`).

 Download Index Files
 ~~~~~~~~~~~~~~~~~~~~

-Index files are downloaded using ``GET /download``. The HTTP body
+Downloading index files is done using ``GET /download``. The HTTP body
 contains the data encoded as :ref:`Fixed Index <fixed-index-format>`
 or :ref:`Dynamic Index <dynamic-index-format>`.


@@ -37,7 +37,7 @@ Each field can contain multiple values in the following formats:
 * and a combination of the above: e.g., 01,05..10,12/02
 * or a `*` for every possible value: e.g., \*:00

-There are some special values that have a specific meaning:
+There are some special values that have specific meaning:

 ================================= ==============================
 Value                             Syntax
@@ -81,19 +81,19 @@ Not all features of systemd calendar events are implemented:
 * no Unix timestamps (e.g. `@12345`): instead use date and time to specify
   a specific point in time
-* no timezone: all schedules use the timezone of the server
+* no timezone: all schedules use the set timezone on the server
 * no sub-second resolution
 * no reverse day syntax (e.g. 2020-03~01)
 * no repetition of ranges (e.g. 1..10/2)

-Notes on Scheduling
+Notes on scheduling
 -------------------

-In `Proxmox Backup`_, scheduling for most tasks is done in the
+In `Proxmox Backup`_ scheduling for most tasks is done in the
 `proxmox-backup-proxy`. This daemon checks all job schedules
-every minute, to see if any are due. This means that even though
+if they are due every minute. This means that even if
 `calendar events` can contain seconds, it will only be checked
-once per minute.
+once a minute.

 Also, all schedules will be checked against the timezone set
 in the `Proxmox Backup`_ server.

View File

@ -1,333 +0,0 @@
.. _sysadmin_certificate_management:
Certificate Management
----------------------
Access to the API and thus the web-based administration interface is always
encrypted through ``https``. Each `Proxmox Backup`_ host creates by default its
own (self-signed) certificate. This certificate is used for encrypted
communication with the host's ``proxmox-backup-proxy`` service, for any API
call between a user or backup-client and the web-interface.
Certificate verification when sending backups to a `Proxmox Backup`_ server
is either done based on pinning the certificate fingerprints in the storage/remote
configuration, or by using certificates signed by a trusted certificate authority.
.. _sysadmin_certs_api_gui:
Certificates for the API and SMTP
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
`Proxmox Backup`_ stores its certificate and key in:
- ``/etc/proxmox-backup/proxy.pem``
- ``/etc/proxmox-backup/proxy.key``
You have the following options for the certificate:
1. Keep using the default self-signed certificate in
``/etc/proxmox-backup/proxy.pem``.
2. Use an externally provided certificate (for example, signed by a
commercial Certificate Authority (CA)).
3. Use an ACME provider like Let's Encrypt to get a trusted certificate
with automatic renewal; this is also integrated in the `Proxmox Backup`_
API and web interface.
Certificates are managed through the `Proxmox Backup`_
web-interface/API or using the ``proxmox-backup-manager`` CLI tool.
.. _sysadmin_certs_upload_custom:
Upload Custom Certificate
~~~~~~~~~~~~~~~~~~~~~~~~~
If you already have a certificate which you want to use for a
`Proxmox Backup`_ host, you can simply upload that certificate over the web
interface.
.. image:: images/screenshots/pbs-gui-certs-upload-custom.png
:align: right
:alt: Upload a custom certificate
Note that any certificate key files must not be password protected.
.. _sysadmin_certs_get_trusted_acme_cert:
Trusted certificates via Let's Encrypt (ACME)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
`Proxmox Backup`_ includes an implementation of the **A**\ utomatic
**C**\ ertificate **M**\ anagement **E**\ nvironment (**ACME**)
protocol, allowing `Proxmox Backup`_ admins to use an ACME provider
like Lets Encrypt for easy setup of TLS certificates, which are
accepted and trusted by modern operating systems and web browsers out of
the box.
Currently, the two ACME endpoints implemented are the `Let's Encrypt
(LE) <https://letsencrypt.org>`_ production and staging environments.
Our ACME client supports validation of ``http-01`` challenges using a
built-in web server and validation of ``dns-01`` challenges using a DNS
plugin supporting all the DNS API endpoints
`acme.sh <https://acme.sh>`_ does.
.. _sysadmin_certs_acme_account:
ACME Account
^^^^^^^^^^^^
.. image:: images/screenshots/pbs-gui-acme-create-account.png
:align: right
:alt: Create ACME Account
You need to register an ACME account per cluster, with the endpoint you
want to use. The email address used for that account will serve as the
contact point for renewal-due or similar notifications from the ACME
endpoint.
You can register or deactivate ACME accounts over the web interface
``Certificates -> ACME Accounts`` or using the ``proxmox-backup-manager`` command
line tool.
::
proxmox-backup-manager acme account register <account-name> <mail@example.com>
.. tip::
Because of
`rate-limits <https://letsencrypt.org/docs/rate-limits/>`_, you
should use LE ``staging`` for experiments, or when you use ACME for the
very first time, until everything works there, and only then switch over
to the production directory.
.. _sysadmin_certs_acme_plugins:
ACME Plugins
^^^^^^^^^^^^
The ACME plugin's role is to provide automatic verification that you,
and thus the `Proxmox Backup`_ server under your operation, are the
real owner of a domain. This is the basic building block of automatic
certificate management.
The ACME protocol specifies different types of challenges, for example
the ``http-01``, where a web server provides a file with a specific
token to prove that it controls a domain. Sometimes this isn't possible,
either because of technical limitations or if the address of a record is
not reachable from the public internet. The ``dns-01`` challenge can be
used in such cases. This challenge is fulfilled by creating a certain
DNS record in the domain's zone.
.. image:: images/screenshots/pbs-gui-acme-create-challenge-plugin.png
:align: right
:alt: Create ACME Account
`Proxmox Backup`_ supports both of those challenge types out of the
box. You can configure plugins either over the web interface under
``Certificates -> ACME Challenges``, or using the
``proxmox-backup-manager acme plugin add`` command.
ACME Plugin configurations are stored in ``/etc/proxmox-backup/acme/plugins.cfg``.
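For illustration, adding a DNS challenge plugin on the command line could look
like the following sketch. The plugin ID is made up, and the ``--api`` and
``--data`` parameters are assumptions modelled on the equivalent Proxmox VE
tooling, so check ``proxmox-backup-manager acme plugin add --help`` for the
exact syntax.

::

  proxmox-backup-manager acme plugin add dns example-plugin --api <provider> --data <credentials-file>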
.. _domains:
Domains
^^^^^^^
You can add new or manage existing domain entries under
``Certificates``, or using the ``proxmox-backup-manager`` command.
.. image:: images/screenshots/pbs-gui-acme-add-domain.png
:align: right
:alt: Add a Domain for ACME verification
After configuring the desired domain(s) for a node and ensuring that the
desired ACME account is selected, you can order your new certificate
over the web-interface. On success, the interface will reload after
roughly 10 seconds.
Renewal will happen `automatically <#sysadmin-certs-acme-automatic-renewal>`_.
.. _sysadmin_certs_acme_http_challenge:
ACME HTTP Challenge Plugin
~~~~~~~~~~~~~~~~~~~~~~~~~~
There is always an implicitly configured ``standalone`` plugin for
validating ``http-01`` challenges via the built-in web server spawned on
port 80.
.. note::
The name ``standalone`` means that it can provide the validation on
its own, without any third party service.
There are a few prerequisites to use this for certificate management
with Let's Encrypt's ACME.
- You have to accept the ToS of Let's Encrypt to register an account.
- **Port 80** of the node needs to be reachable from the internet.
- There **must** be no other listener on port 80.
- The requested (sub)domain needs to resolve to a public IP of the
`Proxmox Backup`_ host.
.. _sysadmin_certs_acme_dns_challenge:
ACME DNS API Challenge Plugin
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
On systems where external access for validation via the ``http-01``
method is not possible or desired, it is possible to use the ``dns-01``
validation method. This validation method requires a DNS server that
allows provisioning of ``TXT`` records via an API.
.. _sysadmin_certs_acme_dns_api_config:
Configuring ACME DNS APIs for validation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
`Proxmox Backup`_ re-uses the DNS plugins developed for the
``acme.sh`` [1]_ project. Please refer to its documentation for details
on configuration of specific APIs.
The easiest way to configure a new plugin with the DNS API is using the
web interface (``Certificates -> ACME Accounts/Challenges``).
Here you can add a new challenge plugin by selecting your API provider
and entering the credential data to access your account over their API.
.. tip::
See the acme.sh `How to use DNS
API <https://github.com/acmesh-official/acme.sh/wiki/dnsapi#how-to-use-dns-api>`_
wiki for more detailed information about getting API credentials for
your provider. Configuration values do not need to be quoted with
single or double quotes; for some plugins that is even an error.
As there are many DNS providers and API endpoints, `Proxmox Backup`_
automatically generates the form for the credentials, but not all
providers are annotated yet. For those, you will see a bigger text area,
into which you simply need to copy all the credential
``KEY``\ =\ ``VALUE`` pairs.
.. _dns_validation_through_cname_alias:
DNS Validation through CNAME Alias
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A special ``alias`` mode can be used to handle validation on a different
domain/DNS server, in case your primary/real DNS does not support
provisioning via an API. Manually set up a permanent ``CNAME`` record
for ``_acme-challenge.domain1.example`` pointing to
``_acme-challenge.domain2.example``, and set the ``alias`` property in
the `Proxmox Backup`_ node configuration file ``/etc/proxmox-backup/node.cfg``
to ``domain2.example`` to allow the DNS server of ``domain2.example`` to
validate all challenges for ``domain1.example``.
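In zone-file notation, the permanent record from this example would look like
the following (plain DNS syntax, nothing Proxmox-specific):

::

  _acme-challenge.domain1.example. IN CNAME _acme-challenge.domain2.example.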
.. _sysadmin_certs_acme_dns_wildcard:
Wildcard Certificates
^^^^^^^^^^^^^^^^^^^^^
Wildcard DNS names start with a ``*.`` prefix and are considered valid
for all (one-level) subdomain names of the verified domain. So a
certificate for ``*.domain.example`` is valid for ``foo.domain.example``
and ``bar.domain.example``, but not for ``baz.foo.domain.example``.
Currently, you can only create wildcard certificates with the `DNS
challenge
type <https://letsencrypt.org/docs/challenge-types/#dns-01-challenge>`_.
.. _combination_of_plugins:
Combination of Plugins
^^^^^^^^^^^^^^^^^^^^^^
Combining ``http-01`` and ``dns-01`` validation is possible in case your
node is reachable via multiple domains with different requirements / DNS
provisioning capabilities. Mixing DNS APIs from multiple providers or
instances is also possible by specifying different plugin instances per
domain.
.. tip::
Accessing the same service over multiple domains increases complexity
and should be avoided if possible.
.. _sysadmin_certs_acme_automatic_renewal:
Automatic renewal of ACME certificates
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If a node has been successfully configured with an ACME-provided
certificate (either via ``proxmox-backup-manager`` or via the web-interface/API), the
certificate will be renewed automatically by the ``proxmox-backup-daily-update.service``.
Currently, renewal is triggered if the certificate either has already
expired or if it will expire in the next 30 days.
.. _manually_change_certificate_over_command_line:
Manually Change Certificate over Command-Line
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you want to get rid of certificate verification warnings, you have to
generate a valid certificate for your server.
Log in to your `Proxmox Backup`_ server via SSH or use the console:
::
openssl req -newkey rsa:2048 -nodes -keyout key.pem -out req.pem
Follow the instructions on the screen, for example:
::
Country Name (2 letter code) [AU]: AT
State or Province Name (full name) [Some-State]:Vienna
Locality Name (eg, city) []:Vienna
Organization Name (eg, company) [Internet Widgits Pty Ltd]: Proxmox GmbH
Organizational Unit Name (eg, section) []:Proxmox Backup
Common Name (eg, YOUR name) []: yourproxmox.yourdomain.com
Email Address []:support@yourdomain.com
Please enter the following 'extra' attributes to be sent with your certificate request
A challenge password []: not necessary
An optional company name []: not necessary
After you have finished the certificate request, you have to send the
file ``req.pem`` to your Certification Authority (CA). The CA will issue
the certificate (BASE64 encoded), based on your request. Save this file
as ``cert.pem`` on your `Proxmox Backup`_ server.
To activate the new certificate, do the following on your `Proxmox Backup`_ server
::
cp key.pem /etc/proxmox-backup/proxy.key
cp cert.pem /etc/proxmox-backup/proxy.pem
Then restart the API servers:
::
systemctl restart proxmox-backup-proxy
Test your new certificate using your browser.
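You can also inspect the served certificate from the command line. The sketch
below assumes the default web-interface port ``8007``:

::

  openssl s_client -connect yourproxmox.yourdomain.com:8007 </dev/null | openssl x509 -noout -subject -dates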
.. note::
To transfer files to and from your `Proxmox Backup`_, you can use
secure copy: If your desktop runs Linux, you can use the ``scp``
command line tool. If your desktop PC runs Windows, please use an SCP
client like WinSCP (see https://winscp.net/).
.. [1]
acme.sh https://github.com/acmesh-official/acme.sh

View File

@ -6,37 +6,22 @@ Command Line Tools
.. include:: proxmox-backup-client/description.rst .. include:: proxmox-backup-client/description.rst
``proxmox-file-restore``
~~~~~~~~~~~~~~~~~~~~~~~~~
.. include:: proxmox-file-restore/description.rst
``proxmox-backup-manager`` ``proxmox-backup-manager``
~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~
.. include:: proxmox-backup-manager/description.rst .. include:: proxmox-backup-manager/description.rst
``proxmox-tape``
~~~~~~~~~~~~~~~~
.. include:: proxmox-tape/description.rst
``pmt``
~~~~~~~
.. include:: pmt/description.rst
``pmtx``
~~~~~~~~
.. include:: pmtx/description.rst
``pxar`` ``pxar``
~~~~~~~~ ~~~~~~~~
.. include:: pxar/description.rst .. include:: pxar/description.rst
``proxmox-file-restore``
~~~~~~~~~~~~~~~~~~~~~~~~~
.. include:: proxmox-file-restore/description.rst
``proxmox-backup-debug`` ``proxmox-backup-debug``
~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~
.. include:: proxmox-backup-debug/description.rst .. include:: proxmox-backup-debug/description.rst

View File

@ -10,7 +10,7 @@ Command Syntax
Catalog Shell Commands Catalog Shell Commands
~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~
The following commands are available in an interactive restore shell: Those command are available when you start an interactive restore shell:
.. code-block:: console .. code-block:: console
@ -51,13 +51,3 @@ The following commands are available in an interactive restore shell:
-------- --------
.. include:: pxar/synopsis.rst .. include:: pxar/synopsis.rst
``proxmox-file-restore``
------------------------
.. include:: proxmox-file-restore/synopsis.rst
``proxmox-backup-debug``
------------------------
.. include:: proxmox-backup-debug/synopsis.rst

View File

@ -77,7 +77,7 @@ project = 'Proxmox Backup'
copyright = '2019-2021, Proxmox Server Solutions GmbH' copyright = '2019-2021, Proxmox Server Solutions GmbH'
author = 'Proxmox Support Team' author = 'Proxmox Support Team'
# The version info for the project you're documenting, acts as a replacement for # The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the # |version| and |release|, also used in various other places throughout the
# built documents. # built documents.
# #
@ -108,14 +108,11 @@ today_fmt = '%A, %d %B %Y'
exclude_patterns = [ exclude_patterns = [
'_build', 'Thumbs.db', '.DS_Store', '_build', 'Thumbs.db', '.DS_Store',
'*/man1.rst', '*/man1.rst',
'certificate-management.rst',
'config/*/man5.rst', 'config/*/man5.rst',
'epilog.rst', 'epilog.rst',
'pbs-copyright.rst', 'pbs-copyright.rst',
'local-zfs.rst', 'local-zfs.rst'
'package-repositories.rst', 'package-repositories.rst',
'system-booting.rst',
'traffic-control.rst',
] ]
# The reST default role (used for this markup: `text`) to use for all # The reST default role (used for this markup: `text`) to use for all

View File

@ -2,13 +2,13 @@ This file contains the access control list for the Proxmox Backup
Server API. Server API.
Each line starts with ``acl:``, followed by 4 additional values Each line starts with ``acl:``, followed by 4 additional values
separated by colons. separated by collon.
:propagate: Propagate permissions down the hierarchy :propagate: Propagate permissions down the hierachrchy
:path: The object path :path: The object path
:User/Token: List of users and tokens :User/Token: List of users and token
:Role: List of assigned roles :Role: List of assigned roles
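An illustrative entry could look like this (path, user and role are examples):

::

  acl:1:/datastore/store1:bob@pbs:DatastoreAdmin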

View File

@ -1,5 +1,5 @@
This file contains a list of datastore configuration sections. Each The file contains a list of datastore configuration sections. Each
section starts with the header ``datastore: <name>``, followed by the section starts with a header ``datastore: <name>``, followed by the
datastore configuration options. datastore configuration options.
:: ::

View File

@ -1,4 +1,4 @@
Each entry starts with the header ``pool: <name>``, followed by the Each entry starts with a header ``pool: <name>``, followed by the
media pool configuration options. media pool configuration options.
:: ::

View File

@ -1,6 +1,6 @@
This file contains information used to access remote servers. This file contains information used to access remote servers.
Each entry starts with the header ``remote: <name>``, followed by the Each entry starts with a header ``remote: <name>``, followed by the
remote configuration options. remote configuration options.
:: ::

View File

@ -1,4 +1,4 @@
Each entry starts with the header ``sync: <name>``, followed by the Each entry starts with a header ``sync: <name>``, followed by the
job configuration options. job configuration options.
:: ::

View File

@ -1,4 +1,4 @@
Each entry starts with the header ``backup: <name>``, followed by the Each entry starts with a header ``backup: <name>``, followed by the
job configuration options. job configuration options.
:: ::

View File

@ -1,7 +1,7 @@
Each LTO drive configuration section starts with the header ``lto: <name>``, Each LTO drive configuration section starts with a header ``lto: <name>``,
followed by the drive configuration options. followed by the drive configuration options.
Tape changer configurations start with the header ``changer: <name>``, Tape changer configurations starts with ``changer: <name>``,
followed by the changer configuration options. followed by the changer configuration options.
:: ::
@ -18,5 +18,5 @@ followed by the changer configuration options.
You can use the ``proxmox-tape drive`` and ``proxmox-tape changer`` You can use the ``proxmox-tape drive`` and ``proxmox-tape changer``
commands to manipulate this file. commands to manipulate this file.
.. NOTE:: The ``virtual:`` drive type is experimental and should only be used .. NOTE:: The ``virtual:`` drive type is experimental and onyl used
for debugging. for debugging.

View File

@ -1,9 +1,9 @@
This file contains the list of API users and API tokens. This file contains the list of API users and API tokens.
Each user configuration section starts with the header ``user: <name>``, Each user configuration section starts with a header ``user: <name>``,
followed by the user configuration options. followed by the user configuration options.
API token configuration starts with the header ``token: API token configuration starts with a header ``token:
<userid!token_name>``, followed by the token configuration. The data <userid!token_name>``, followed by the token configuration. The data
used to authenticate tokens is stored in a separate file used to authenticate tokens is stored in a separate file
(``token.shadow``). (``token.shadow``).

View File

@ -1,4 +1,4 @@
Each entry starts with the header ``verification: <name>``, followed by the Each entry starts with a header ``verification: <name>``, followed by the
job configuration options. job configuration options.
:: ::

View File

@ -1,7 +1,7 @@
Configuration Files Configuration Files
=================== ===================
All Proxmox Backup Server configuration files reside in the directory All Proxmox Backup Server configuration files resides inside directory
``/etc/proxmox-backup/``. ``/etc/proxmox-backup/``.

View File

@ -35,7 +35,7 @@
.. _ZFS: https://en.wikipedia.org/wiki/ZFS .. _ZFS: https://en.wikipedia.org/wiki/ZFS
.. _Proxmox VE: https://pve.proxmox.com .. _Proxmox VE: https://pve.proxmox.com
.. _RFC3339: https://tools.ietf.org/html/rfc3339 .. _RFC3399: https://tools.ietf.org/html/rfc3339
.. _UTC: https://en.wikipedia.org/wiki/Coordinated_Universal_Time .. _UTC: https://en.wikipedia.org/wiki/Coordinated_Universal_Time
.. _ISO Week date: https://en.wikipedia.org/wiki/ISO_week_date .. _ISO Week date: https://en.wikipedia.org/wiki/ISO_week_date

View File

@ -29,7 +29,7 @@ How long will my Proxmox Backup Server version be supported?
+=======================+======================+===============+============+====================+ +=======================+======================+===============+============+====================+
|Proxmox Backup 2.x | Debian 11 (Bullseye) | 2021-07 | tba | tba | |Proxmox Backup 2.x | Debian 11 (Bullseye) | 2021-07 | tba | tba |
+-----------------------+----------------------+---------------+------------+--------------------+ +-----------------------+----------------------+---------------+------------+--------------------+
|Proxmox Backup 1.x | Debian 10 (Buster) | 2020-11 | 2022-08 | 2022-07 | |Proxmox Backup 1.x | Debian 10 (Buster) | 2020-11 | ~Q2/2022 | Q2-Q3/2022 |
+-----------------------+----------------------+---------------+------------+--------------------+ +-----------------------+----------------------+---------------+------------+--------------------+
@ -69,6 +69,6 @@ be able to read the data.
Is the backup incremental/deduplicated? Is the backup incremental/deduplicated?
--------------------------------------- ---------------------------------------
With Proxmox Backup Server, backups are sent incrementally to the server, and With Proxmox Backup Server, backups are sent incremental and data is
data is then deduplicated on the server. This minimizes both the storage deduplicated on the server.
consumed and the impact on the network. This minimizes both the storage consumed and the network impact.

View File

@ -14,8 +14,7 @@ Proxmox File Archive Format (``.pxar``)
Data Blob Format (``.blob``) Data Blob Format (``.blob``)
---------------------------- ----------------------------
The data blob format is used to store small binary data. The magic number The data blob format is used to store small binary data. The magic number decides the exact format:
decides the exact format:
.. list-table:: .. list-table::
:widths: auto :widths: auto
@ -33,8 +32,7 @@ decides the exact format:
- encrypted - encrypted
- compressed - compressed
The compression algorithm used is ``zstd``. The encryption cipher is Compression algorithm is ``zstd``. Encryption cipher is ``AES_256_GCM``.
``AES_256_GCM``.
Unencrypted blobs use the following format: Unencrypted blobs use the following format:
@ -45,9 +43,9 @@ Unencrypted blobs use the following format:
* - ``CRC32: [u8; 4]`` * - ``CRC32: [u8; 4]``
* - ``Data: (max 16MiB)`` * - ``Data: (max 16MiB)``
Encrypted blobs additionally contain a 16 byte initialization vector (IV), Encrypted blobs additionally contains a 16 byte IV, followed by a 16
followed by a 16 byte authenticated encryption (AE) tag, followed by the byte Authenticated Encyryption (AE) tag, followed by the encrypted
encrypted data: data:
.. list-table:: .. list-table::
@ -74,19 +72,19 @@ All numbers are stored as little-endian.
* - ``ctime: i64``, * - ``ctime: i64``,
- Creation Time (epoch) - Creation Time (epoch)
* - ``index_csum: [u8; 32]``, * - ``index_csum: [u8; 32]``,
- SHA-256 over the index (without header) ``SHA256(digest1||digest2||...)`` - Sha256 over the index (without header) ``SHA256(digest1||digest2||...)``
* - ``size: u64``, * - ``size: u64``,
- Image size - Image size
* - ``chunk_size: u64``, * - ``chunk_size: u64``,
- Chunk size - Chunk size
* - ``reserved: [u8; 4016]``, * - ``reserved: [u8; 4016]``,
- Overall header size is one page (4096 bytes) - overall header size is one page (4096 bytes)
* - ``digest1: [u8; 32]`` * - ``digest1: [u8; 32]``
- First chunk digest - first chunk digest
* - ``digest2: [u8; 32]`` * - ``digest2: [u8; 32]``
- Second chunk digest - next chunk
* - ... * - ...
- Next chunk digest ... - next chunk ...
.. _dynamic-index-format: .. _dynamic-index-format:
@ -105,16 +103,16 @@ All numbers are stored as little-endian.
* - ``ctime: i64``, * - ``ctime: i64``,
- Creation Time (epoch) - Creation Time (epoch)
* - ``index_csum: [u8; 32]``, * - ``index_csum: [u8; 32]``,
- SHA-256 over the index (without header) ``SHA256(offset1||digest1||offset2||digest2||...)`` - Sha256 over the index (without header) ``SHA256(offset1||digest1||offset2||digest2||...)``
* - ``reserved: [u8; 4032]``, * - ``reserved: [u8; 4032]``,
- Overall header size is one page (4096 bytes) - Overall header size is one page (4096 bytes)
* - ``offset1: u64`` * - ``offset1: u64``
- End of first chunk - End of first chunk
* - ``digest1: [u8; 32]`` * - ``digest1: [u8; 32]``
- First chunk digest - first chunk digest
* - ``offset2: u64`` * - ``offset2: u64``
- End of second chunk - End of second chunk
* - ``digest2: [u8; 32]`` * - ``digest2: [u8; 32]``
- Second chunk digest - second chunk digest
* - ... * - ...
- Next chunk offset/digest - next chunk offset/digest

View File

@ -11,7 +11,7 @@ Glossary
`Container`_ `Container`_
A container is an isolated user space. Programs run directly on A container is an isolated user space. Programs run directly on
the host's kernel, but with limited access to the host's resources. the host's kernel, but with limited access to the host resources.
Datastore Datastore
@ -23,19 +23,19 @@ Glossary
Rust is a new, fast and memory-efficient system programming Rust is a new, fast and memory-efficient system programming
language. It has no runtime or garbage collector. Rusts rich type language. It has no runtime or garbage collector. Rusts rich type
system and ownership model guarantee memory-safety and system and ownership model guarantee memory-safety and
thread-safety. This can eliminate many classes of bugs thread-safety. I can eliminate many classes of bugs
at compile-time. at compile-time.
`Sphinx`_ `Sphinx`_
Is a tool that makes it easy to create intelligent and nicely formatted Is a tool that makes it easy to create intelligent and
documentation. It was originally created for the documentation of the beautiful documentation. It was originally created for the
Python programming language. It has excellent facilities for the documentation of the Python programming language. It has excellent facilities for the
documentation of software projects in a range of languages. documentation of software projects in a range of languages.
`reStructuredText`_ `reStructuredText`_
Is an easy-to-read, what-you-see-is-what-you-get, plaintext Is an easy-to-read, what-you-see-is-what-you-get plaintext
markup syntax and parser system. markup syntax and parser system.
`FUSE` `FUSE`

View File

@ -50,7 +50,6 @@ in the section entitled "GNU Free Documentation License".
file-formats.rst file-formats.rst
backup-protocol.rst backup-protocol.rst
calendarevents.rst calendarevents.rst
markdown-primer.rst
glossary.rst glossary.rst
GFDL.rst GFDL.rst

View File

@ -5,11 +5,10 @@ What is Proxmox Backup Server?
------------------------------ ------------------------------
Proxmox Backup Server is an enterprise-class, client-server backup solution that Proxmox Backup Server is an enterprise-class, client-server backup solution that
is capable of backing up :term:`virtual machine<Virtual machine>`\ s, is capable of backing up :term:`virtual machine`\ s, :term:`container`\ s, and
:term:`container<Container>`\ s, and physical hosts. It is specially optimized physical hosts. It is specially optimized for the `Proxmox Virtual Environment`_
for the `Proxmox Virtual Environment`_ platform and allows you to back up your platform and allows you to back up your data securely, even between remote
data securely, even between remote sites, providing easy management through a sites, providing easy management through a web-based user interface.
web-based user interface.
It supports deduplication, compression, and authenticated It supports deduplication, compression, and authenticated
encryption (AE_). Using :term:`Rust` as the implementation language guarantees encryption (AE_). Using :term:`Rust` as the implementation language guarantees
@ -35,18 +34,18 @@ For QEMU_ and LXC_ within `Proxmox Virtual Environment`_, we deliver an
integrated client. integrated client.
A single backup is allowed to contain several archives. For example, when you A single backup is allowed to contain several archives. For example, when you
backup a :term:`virtual machine<Virtual machine>`, each disk is stored as a backup a :term:`virtual machine`, each disk is stored as a separate archive
separate archive inside that backup. The VM configuration itself is stored as inside that backup. The VM configuration itself is stored as an extra file.
an extra file. This way, it's easy to access and restore only the important This way, it's easy to access and restore only the important parts of the
parts of the backup, without the need to scan the whole backup. backup, without the need to scan the whole backup.
Main Features Main Features
------------- -------------
:Support for Proxmox VE: The `Proxmox Virtual Environment`_ is fully :Support for Proxmox VE: The `Proxmox Virtual Environment`_ is fully
supported, and you can easily backup :term:`virtual machine<Virtual machine>`\ s and supported, and you can easily backup :term:`virtual machine`\ s and
:term:`container<Container>`\ s. :term:`container`\ s.
:Performance: The whole software stack is written in :term:`Rust`, :Performance: The whole software stack is written in :term:`Rust`,
in order to provide high speed and memory efficiency. in order to provide high speed and memory efficiency.

View File

@ -191,12 +191,12 @@ With `systemd-boot`:
.. code-block:: console .. code-block:: console
# proxmox-boot-tool format <new ESP> # pve-efiboot-tool format <new disk's ESP>
# proxmox-boot-tool init <new ESP> # pve-efiboot-tool init <new disk's ESP>
.. NOTE:: `ESP` stands for EFI System Partition, which is setup as partition #2 on .. NOTE:: `ESP` stands for EFI System Partition, which is setup as partition #2 on
bootable disks setup by the `Proxmox Backup`_ installer. For details, see bootable disks setup by the {pve} installer since version 5.4. For details, see
:ref:`Setting up a new partition for use as synced ESP <systembooting-proxmox-boot-setup>`. xref:sysboot_systemd_boot_setup[Setting up a new partition for use as synced ESP].
With `grub`: With `grub`:
@ -211,22 +211,27 @@ Usually `grub.cfg` is located in `/boot/grub/grub.cfg`
Activate e-mail notification Activate e-mail notification
^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
ZFS comes with an event daemon ``ZED``, which monitors events generated by the ZFS comes with an event daemon, which monitors events generated by the
ZFS kernel module. The daemon can also send emails on ZFS events like pool ZFS kernel module. The daemon can also send emails on ZFS events like
errors. Newer ZFS packages ship the daemon in a separate package ``zfs-zed``, pool errors. Newer ZFS packages ship the daemon in a separate package,
which should already be installed by default in `Proxmox Backup`_. and you can install it using `apt-get`:
You can configure the daemon via the file ``/etc/zfs/zed.d/zed.rc`` with your .. code-block:: console
favorite editor. The required setting for email notification is
``ZED_EMAIL_ADDR``, which is set to ``root`` by default. # apt-get install zfs-zed
To activate the daemon, it is necessary to uncomment the ZED_EMAIL_ADDR
setting in the file `/etc/zfs/zed.d/zed.rc`.
.. code-block:: console .. code-block:: console
ZED_EMAIL_ADDR="root" ZED_EMAIL_ADDR="root"
Please note that `Proxmox Backup`_ forwards mails to `root` to the email address Please note that Proxmox Backup forwards mails to `root` to the email address
configured for the root user. configured for the root user.
IMPORTANT: The only setting that is required is `ZED_EMAIL_ADDR`. All
other settings are optional.
Limit ZFS memory usage Limit ZFS memory usage
^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^
@ -249,7 +254,6 @@ The above example limits the usage to 8 GiB ('8 * 2^30^').
configuration in `/etc/modprobe.d/zfs.conf`, with: configuration in `/etc/modprobe.d/zfs.conf`, with:
.. code-block:: console .. code-block:: console
options zfs zfs_arc_min=8589934591 options zfs zfs_arc_min=8589934591
options zfs zfs_arc_max=8589934592 options zfs zfs_arc_max=8589934592
@ -269,7 +273,8 @@ Swap on ZFS
^^^^^^^^^^^ ^^^^^^^^^^^
Swap-space created on a zvol may cause some issues, such as blocking the Swap-space created on a zvol may cause some issues, such as blocking the
server or generating a high IO load. server or generating a high IO load, often seen when starting a Backup
to an external Storage.
We strongly recommend using enough memory, so that you normally do not We strongly recommend using enough memory, so that you normally do not
run into low memory situations. Should you need or want to add swap, it is run into low memory situations. Should you need or want to add swap, it is
@ -306,20 +311,18 @@ ZFS compression
^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^
To activate compression: To activate compression:
.. code-block:: console .. code-block:: console
# zpool set compression=lz4 <pool> # zpool set compression=lz4 <pool>
We recommend using the `lz4` algorithm, since it adds very little CPU overhead. We recommend using the `lz4` algorithm, since it adds very little CPU overhead.
Other algorithms such as `lzjb`, `zstd` and `gzip-N` (where `N` is an integer from `1-9` Other algorithms such as `lzjb` and `gzip-N` (where `N` is an integer from `1-9`
representing the compression ratio, where 1 is fastest and 9 is best representing the compression ratio, where 1 is fastest and 9 is best
compression) are also available. Depending on the algorithm and how compression) are also available. Depending on the algorithm and how
compressible the data is, having compression enabled can even increase I/O compressible the data is, having compression enabled can even increase I/O
performance. performance.
You can disable compression at any time with: You can disable compression at any time with:
.. code-block:: console .. code-block:: console
# zfs set compression=off <dataset> # zfs set compression=off <dataset>
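To check how effective compression is on existing data, you can query the
``compressratio`` property at any time:

.. code-block:: console

  # zfs get compressratio <pool>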

View File

@ -173,10 +173,6 @@ scheduled verification, garbage-collection and synchronization tasks results.
By default, notifications are sent to the email address configured for the By default, notifications are sent to the email address configured for the
`root@pam` user. You can instead set this user for each datastore. `root@pam` user. You can instead set this user for each datastore.
.. image:: images/screenshots/pbs-gui-datastore-options.png
:align: right
:alt: Datastore Options
You can also change the level of notification received per task type, the You can also change the level of notification received per task type, the
following options are available: following options are available:
@ -186,20 +182,3 @@ following options are available:
* Errors: send a notification for any scheduled task that results in an error * Errors: send a notification for any scheduled task that results in an error
* Never: do not send any notification at all * Never: do not send any notification at all
.. _maintenance_mode:
Maintenance Mode
----------------
Proxmox Backup Server implements setting the `read-only` and `offline`
maintenance modes for a datastore.
Once enabled, depending on the mode, new reads and/or writes to the datastore
are blocked, allowing an administrator to safely execute maintenance tasks, for
example, on the underlying storage.
Internally, Proxmox Backup Server tracks whether each datastore access is a
write or read operation, so that it can gracefully enter the respective mode
by allowing conflicting operations that started before enabling the maintenance
mode to finish.
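As a sketch, setting a maintenance mode from the command line could look like
the following; the exact parameter syntax is an assumption, so consult
``proxmox-backup-manager datastore update --help``:

.. code-block:: console

  # proxmox-backup-manager datastore update store1 --maintenance-mode offline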

View File

@ -1,5 +1,5 @@
Managing Remotes & Sync Managing Remotes
======================= ================
.. _backup_remote: .. _backup_remote:
@ -85,125 +85,12 @@ To set up sync jobs, the configuring user needs the following permissions:
#. ``Remote.Read`` on the ``/remote/{remote}/{remote-store}`` path #. ``Remote.Read`` on the ``/remote/{remote}/{remote-store}`` path
#. At least ``Datastore.Backup`` on the local target datastore (``/datastore/{store}``) #. At least ``Datastore.Backup`` on the local target datastore (``/datastore/{store}``)
.. note:: A sync job can only sync backup groups that the configured remote's
user/API token can read. If a remote is configured with a user/API token that
only has ``Datastore.Backup`` privileges, only the limited set of accessible
snapshots owned by that user/API token can be synced.
If the ``remove-vanished`` option is set, ``Datastore.Prune`` is required on If the ``remove-vanished`` option is set, ``Datastore.Prune`` is required on
the local datastore as well. If the ``owner`` option is not set (defaulting to the local datastore as well. If the ``owner`` option is not set (defaulting to
``root@pam``) or is set to something other than the configuring user, ``root@pam``) or is set to something other than the configuring user,
``Datastore.Modify`` is required as well. ``Datastore.Modify`` is required as well.
If the ``group-filter`` option is set, only backup groups matching at least one .. note:: A sync job can only sync backup groups that the configured remote's
of the specified criteria are synced. The available criteria are: user/API token can read. If a remote is configured with a user/API token that
only has ``Datastore.Backup`` privileges, only the limited set of accessible
* backup type, for example to only sync groups of the `ct` (Container) type: snapshots owned by that user/API token can be synced.
.. code-block:: console
# proxmox-backup-manager sync-job update ID --group-filter type:ct
* full group identifier
.. code-block:: console
# proxmox-backup-manager sync-job update ID --group-filter group:vm/100
* regular expression matched against the full group identifier
.. todo:: add example for regex
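A sketch of such a filter, assuming the ``regex:`` prefix follows the same
pattern as the other criteria:

.. code-block:: console

  # proxmox-backup-manager sync-job update ID --group-filter 'regex:^vm/1\d\d$'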
The same filter is applied to local groups for handling of the
``remove-vanished`` option.
.. note:: The ``protected`` flag of remote backup snapshots will not be synced.
Namespace Support
^^^^^^^^^^^^^^^^^
Sync jobs can be configured to not only sync datastores, but also subsets of
datastores in the form of namespaces or namespace sub-trees. The following
parameters influence how namespaces are treated as part of a sync job
execution:
- ``remote-ns``: the remote namespace anchor (default: the root namespace)
- ``ns``: the local namespace anchor (default: the root namespace)
- ``max-depth``: whether to recursively iterate over sub-namespaces of the remote
namespace anchor (default: `None`)
If ``max-depth`` is set to `0`, groups are synced from ``remote-ns`` into
``ns``, without any recursion. If it is set to `None` (left empty), recursion
depth will depend on the value of ``remote-ns`` and the remote side's
availability of namespace support:
- ``remote-ns`` set to something other than the root namespace: remote *must*
support namespaces, full recursion starting at ``remote-ns``.
- ``remote-ns`` set to root namespace and remote *supports* namespaces: full
recursion starting at root namespace.
- ``remote-ns`` set to root namespace and remote *does not support* namespaces:
backwards-compat mode, only root namespace will be synced into ``ns``, no
recursion.
Any other value of ``max-depth`` will limit recursion to at most ``max-depth``
levels, for example: ``remote-ns`` set to `location_a/department_b` and
``max-depth`` set to `1` will result in `location_a/department_b` and at most
one more level of sub-namespaces being synced.
The namespace tree starting at ``remote-ns`` will be mapped into ``ns`` up to a
depth of ``max-depth``.
For example, with the following namespaces at the remote side:
- `location_a`
- `location_a/department_x`
- `location_a/department_x/team_one`
- `location_a/department_x/team_two`
- `location_a/department_y`
- `location_a/department_y/team_one`
- `location_a/department_y/team_two`
- `location_b`
and with ``remote-ns`` set to `location_a/department_x` and ``ns`` set to
`location_a_dep_x`, the following namespace tree results on the sync
target:
- `location_a_dep_x` (containing the remote's `location_a/department_x`)
- `location_a_dep_x/team_one` (containing the remote's `location_a/department_x/team_one`)
- `location_a_dep_x/team_two` (containing the remote's `location_a/department_x/team_two`)
with the rest of the remote namespaces and groups not being synced (by this
sync job).
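Expressed as a command, this example could be configured roughly as follows
(a sketch; `ID` stands for the sync job identifier):

.. code-block:: console

  # proxmox-backup-manager sync-job update ID --remote-ns location_a/department_x --ns location_a_dep_x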
If a remote namespace is included in the sync job scope, but does not exist
locally, it will be created (provided the sync job owner has sufficient
privileges).
If the ``remove-vanished`` option is set, namespaces that are included in the
sync job scope but only exist locally are treated as vanished and removed
(provided the sync job owner has sufficient privileges).
.. note:: All other limitations on sync scope (such as remote user/API token
privileges, group filters) also apply for sync jobs involving one or
multiple namespaces.
Bandwidth Limit
^^^^^^^^^^^^^^^
Syncing a datastore to an archive can produce lots of traffic and impact other
users of the network. So, to avoid network or storage congestion, you can limit
the bandwidth of the sync job by setting the ``rate-in`` option either in the
web interface or using the ``proxmox-backup-manager`` command-line tool:
.. code-block:: console
# proxmox-backup-manager sync-job update ID --rate-in 20MiB

View File

@ -1,178 +0,0 @@
.. _markdown-primer:
Markdown Primer
===============
"Markdown is a text-to-HTML conversion tool for web writers. Markdown allows
you to write using an easy-to-read, easy-to-write plain text format, then
convert it to structurally valid XHTML (or HTML)."
-- John Gruber, https://daringfireball.net/projects/markdown/
The Proxmox Backup Server (PBS) web-interface has support for using Markdown to
render rich text formatting in node and virtual guest notes.
PBS supports CommonMark with most extensions of GFM (GitHub Flavoured Markdown),
like tables or task-lists.
.. _markdown_basics:
Markdown Basics
---------------
Note that we only describe the basics here; please search the web for more
extensive resources, for example on https://www.markdownguide.org/
Headings
~~~~~~~~
.. code-block:: md
# This is a Heading h1
## This is a Heading h2
##### This is a Heading h5
Emphasis
~~~~~~~~
Use ``*text*`` or ``_text_`` for emphasis.
Use ``**text**`` or ``__text__`` for bold, heavy-weight text.
Combinations are also possible, for example:
.. code-block:: md
_You **can** combine them_
Links
~~~~~
You can use automatic detection of links; for example,
``https://forum.proxmox.com/`` is transformed into a clickable link.
You can also control the link text, for example:
.. code-block:: md
Now, [the part in brackets will be the link text](https://forum.proxmox.com/).
Lists
~~~~~
Unordered Lists
^^^^^^^^^^^^^^^
Use ``*`` or ``-`` for unordered lists, for example:
.. code-block:: md
* Item 1
* Item 2
* Item 2a
* Item 2b
Adding an indentation can be used to create nested lists.
Ordered Lists
^^^^^^^^^^^^^
.. code-block:: md
1. Item 1
1. Item 2
1. Item 3
1. Item 3a
1. Item 3b
NOTE: The integers in ordered lists do not need to be correct; items will be numbered automatically.
Task Lists
^^^^^^^^^^
Task lists use an empty box ``[ ]`` for unfinished tasks and a box with an `X` for finished tasks.
For example:
.. code-block:: md
- [X] First task already done!
- [X] Second one too
- [ ] This one is still to-do
- [ ] So is this one
Tables
~~~~~~
Tables use the pipe symbol ``|`` to separate columns, and ``-`` to separate the
table header from the table body. In that separator row, one can also set the
text alignment, making a column left-, center-, or right-aligned.
.. code-block:: md
| Left columns | Right columns | Some | More | Cols.| Centering Works Too
| ------------- |--------------:|--------|------|------|:------------------:|
| left foo | right foo | First | Row | Here | >center< |
| left bar | right bar | Second | Row | Here | 12345 |
| left baz | right baz | Third | Row | Here | Test |
| left zab | right zab | Fourth | Row | Here | ☁️☁️☁️ |
| left rab | right rab | And | Last | Here | The End |
Note that you do not need to align the columns nicely with white space, but
doing so makes editing tables easier.
Block Quotes
~~~~~~~~~~~~
You can enter block quotes by prefixing a line with ``>``, similar to plain-text emails.
.. code-block:: md
> Markdown is a lightweight markup language with plain-text-formatting syntax,
> created in 2004 by John Gruber with Aaron Swartz.
>
>> Markdown is often used to format readme files, for writing messages in online discussion forums,
>> and to create rich text using a plain text editor.
Code and Snippets
~~~~~~~~~~~~~~~~~
You can use backticks to avoid processing for a few words or paragraphs. That
is useful to avoid having a code or configuration hunk mistakenly interpreted
as Markdown.
Inline code
^^^^^^^^^^^
Surrounding part of a line with single backticks allows you to write code
inline, for example:
.. code-block:: md
This host's IP address is `10.0.0.1`.
Whole blocks of code
^^^^^^^^^^^^^^^^^^^^
For code blocks spanning several lines you can use triple-backticks to start
and end such a block, for example:
.. code-block:: md
```
# This is the network config I want to remember here
auto vmbr2
iface vmbr2 inet static
address 10.0.0.1/24
bridge-ports ens20
bridge-stp off
bridge-fd 0
bridge-vlan-aware yes
bridge-vids 2-4094
```

View File

@ -3,10 +3,6 @@
Network Management Network Management
================== ==================
.. image:: images/screenshots/pbs-gui-system-config.png
:align: right
:alt: System and Network Configuration Overview
Proxmox Backup Server provides both a web interface and a command line tool for Proxmox Backup Server provides both a web interface and a command line tool for
network configuration. You can find the configuration options in the web network configuration. You can find the configuration options in the web
interface under the **Network Interfaces** section of the **Configuration** menu interface under the **Network Interfaces** section of the **Configuration** menu
@ -35,6 +31,10 @@ To get a list of available interfaces, use the following command:
│ ens19 │ eth │ 1 │ manual │ │ │ │ │ ens19 │ eth │ 1 │ manual │ │ │ │
└───────┴────────┴───────────┴────────┴─────────────┴──────────────┴──────────────┘ └───────┴────────┴───────────┴────────┴─────────────┴──────────────┴──────────────┘
.. image:: images/screenshots/pbs-gui-network-create-bond.png
:align: right
:alt: Add a network interface
To add a new network interface, use the ``create`` subcommand with the relevant To add a new network interface, use the ``create`` subcommand with the relevant
parameters. For example, you may want to set up a bond, for the purpose of parameters. For example, you may want to set up a bond, for the purpose of
network redundancy. The following command shows a template for creating the bond shown network redundancy. The following command shows a template for creating the bond shown
@ -44,10 +44,6 @@ in the list above:
# proxmox-backup-manager network create bond0 --type bond --bond_mode active-backup --slaves ens18,ens19 --autostart true --cidr x.x.x.x/x --gateway x.x.x.x # proxmox-backup-manager network create bond0 --type bond --bond_mode active-backup --slaves ens18,ens19 --autostart true --cidr x.x.x.x/x --gateway x.x.x.x
.. image:: images/screenshots/pbs-gui-network-create-bond.png
:align: right
:alt: Add a network interface
You can make changes to the configuration of a network interface with the You can make changes to the configuration of a network interface with the
``update`` subcommand: ``update`` subcommand:
@ -93,5 +89,3 @@ You can also configure DNS settings, from the **DNS** section
of **Configuration** or by using the ``dns`` subcommand of of **Configuration** or by using the ``dns`` subcommand of
``proxmox-backup-manager``. ``proxmox-backup-manager``.
.. include:: traffic-control.rst

View File

@ -1,5 +1,5 @@
Most commands that produce output support the ``--output-format`` Most commands producing output supports the ``--output-format``
parameter. This accepts the following values: parameter. It accepts the following values:
:``text``: Text format (default). Structured data is rendered as a table. :``text``: Text format (default). Structured data is rendered as a table.
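For example, to render a command's output as indented JSON (``json-pretty``
is assumed here to be among the accepted values):

.. code-block:: console

  # proxmox-backup-manager task list --output-format json-pretty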

View File

@ -27,10 +27,6 @@ update``.
In addition, you need a package repository from Proxmox to get Proxmox Backup In addition, you need a package repository from Proxmox to get Proxmox Backup
updates. updates.
.. image:: images/screenshots/pbs-gui-administration-apt-repos.png
:align: right
:alt: APT Repository Management in the Web Interface
.. _package_repos_secure_apt: .. _package_repos_secure_apt:
SecureApt SecureApt

View File

@ -51,7 +51,7 @@ ENVIRONMENT
:CHANGER: If set, replaces the `--device` option :CHANGER: If set, replaces the `--device` option
:PROXMOX_TAPE_DRIVE: If set, use the Proxmox Backup Server :PROXMOX_TAPE_DRIVE: If set, use the Proxmox Backup Server
configuration to find the associated changer device. configuration to find the associcated changer device.
.. include:: ../pbs-copyright.rst .. include:: ../pbs-copyright.rst

View File

@ -11,13 +11,8 @@ Disk Management
:alt: List of disks :alt: List of disks
Proxmox Backup Server comes with a set of disk utilities, which are Proxmox Backup Server comes with a set of disk utilities, which are
accessed using the ``disk`` subcommand or the web interface. This subcommand accessed using the ``disk`` subcommand. This subcommand allows you to initialize
allows you to initialize disks, create various filesystems, and get information disks, create various filesystems, and get information about the disks.
about the disks.
.. image:: images/screenshots/pbs-gui-disks.png
:align: right
:alt: Web Interface Administration: Disks
To view the disks connected to the system, navigate to **Administration -> To view the disks connected to the system, navigate to **Administration ->
Storage/Disks** in the web interface or use the ``list`` subcommand of Storage/Disks** in the web interface or use the ``list`` subcommand of
@ -95,10 +90,6 @@ display S.M.A.R.T. attributes from the web interface or by using the command:
:term:`Datastore` :term:`Datastore`
----------------- -----------------
.. image:: images/screenshots/pbs-gui-datastore-summary.png
:align: right
:alt: Datastore Usage Overview
A datastore refers to a location at which backups are stored. The current A datastore refers to a location at which backups are stored. The current
implementation uses a directory inside a standard Unix file system (``ext4``, implementation uses a directory inside a standard Unix file system (``ext4``,
``xfs`` or ``zfs``) to store the backup data. ``xfs`` or ``zfs``) to store the backup data.
@ -120,7 +111,7 @@ Datastore Configuration
.. image:: images/screenshots/pbs-gui-datastore-content.png .. image:: images/screenshots/pbs-gui-datastore-content.png
:align: right :align: right
:alt: Datastore Content Overview :alt: Datastore Overview
You can configure multiple datastores. A minimum of one datastore needs to be You can configure multiple datastores. A minimum of one datastore needs to be
configured. The datastore is identified by a simple *name* and points to a configured. The datastore is identified by a simple *name* and points to a
@ -137,7 +128,7 @@ run periodically, based on a configured schedule (see
Creating a Datastore Creating a Datastore
^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^
.. image:: images/screenshots/pbs-gui-datastore-create.png .. image:: images/screenshots/pbs-gui-datastore-create-general.png
:align: right :align: right
:alt: Create a datastore :alt: Create a datastore
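For reference, a datastore can also be created on the command line; name and
path here are examples:

.. code-block:: console

  # proxmox-backup-manager datastore create store1 /backup/disk1/store1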
@ -261,57 +252,3 @@ categorized by checksum, after a backup operation has been executed.
276490 drwxr-x--- 1 backup backup 1.1M Jul 8 12:35 . 276490 drwxr-x--- 1 backup backup 1.1M Jul 8 12:35 .
Once you have uploaded some backups or created namespaces, you may see the
backup type (`ct`, `vm`, `host`) and the start of the namespace hierarchy (`ns`).
.. _storage_namespaces:
Backup Namespaces
~~~~~~~~~~~~~~~~~
A datastore can host many backups as long as the underlying storage is big
enough and provides the performance required for one's use case.
But, without any hierarchy or separation, it's easy to run into naming conflicts,
especially when using the same datastore for multiple Proxmox VE instances or
multiple users.
The backup namespace hierarchy allows you to clearly separate different users
or backup sources in general, avoiding naming conflicts and providing a
well-organized view of the backup contents.
Each namespace level can host any backup type (CT, VM or Host), but also other
namespaces, up to a depth of 8 levels, where the root namespace is the first
level.
Namespace Permissions
^^^^^^^^^^^^^^^^^^^^^
You can make the permission configuration of a datastore more fine-grained by
setting permissions only on a specific namespace.
To see a datastore, you need a permission that includes at least one of the
`AUDIT`, `MODIFY`, `READ` or `BACKUP` privileges on any namespace it contains.
To create or delete a namespace, you require the `MODIFY` privilege on the parent
namespace. So, to initially create namespaces, you need to have a permission
with an access role that includes the `MODIFY` privilege on the datastore itself.
For backup groups, the existing privilege rules still apply; you either need a
sufficiently powerful permission or must be the owner of the backup group.
.. todo:: continue
Options
~~~~~~~
.. image:: images/screenshots/pbs-gui-datastore-options.png
:align: right
:alt: Datastore Options
There are a few per-datastore options:
* :ref:`Notifications <maintenance_notification>`
* :ref:`Maintenance Mode <maintenance_mode>`
* Verification of incoming backups

View File

@ -15,8 +15,10 @@ through that channel. In addition, we provide our own package
repository to roll out all Proxmox related packages. This includes repository to roll out all Proxmox related packages. This includes
updates to some Debian packages when necessary. updates to some Debian packages when necessary.
We also deliver a specially optimized Linux kernel, based on the Ubuntu We also deliver a specially optimized Linux kernel, where we enable
kernel. That kernel includes drivers for ZFS_. all required virtualization and container features. That kernel
includes drivers for ZFS_, as well as several hardware drivers. For example,
we ship Intel network card drivers to support their newest hardware.
The following sections will concentrate on backup related topics. They The following sections will concentrate on backup related topics. They
will explain things which are different on `Proxmox Backup`_, or will explain things which are different on `Proxmox Backup`_, or
@ -26,10 +28,4 @@ please refer to the standard Debian documentation.
.. include:: local-zfs.rst .. include:: local-zfs.rst
.. include:: system-booting.rst
.. include:: certificate-management.rst
.. include:: services.rst .. include:: services.rst
.. include:: command-line-tools.rst
View File
@ -1,379 +0,0 @@
.. _chapter-systembooting:
Host Bootloader
---------------
`Proxmox Backup`_ currently uses one of two bootloaders depending on the disk setup
selected in the installer.
For EFI systems installed with ZFS as the root filesystem, ``systemd-boot`` is
used. All other deployments use the standard ``grub`` bootloader (this usually
also applies to systems which are installed on top of Debian).
.. _systembooting-installer-part-scheme:
Partitioning Scheme Used by the Installer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The `Proxmox Backup`_ installer creates 3 partitions on all disks selected for
installation.
The created partitions are:
* a 1 MB BIOS Boot Partition (gdisk type EF02)
* a 512 MB EFI System Partition (ESP, gdisk type EF00)
* a third partition spanning the configured ``hdsize`` parameter, or the
remaining space, used for the chosen storage type
Systems using ZFS as the root filesystem are booted with a kernel and initrd
image stored on the 512 MB EFI System Partition. For legacy BIOS systems,
``grub`` is used; for EFI systems, ``systemd-boot`` is used. Both are installed
and configured to point to the ESPs.
``grub`` in BIOS mode (``--target i386-pc``) is installed onto the BIOS Boot
Partition of all selected disks on all systems booted with ``grub`` (these are
all installs with root on ``ext4`` or ``xfs``, and installs with root on ZFS on
non-EFI systems).
.. _systembooting-proxmox-boot-tool:
Synchronizing the content of the ESP with ``proxmox-boot-tool``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``proxmox-boot-tool`` is a utility used to keep the contents of the EFI System
Partitions properly configured and synchronized. It copies certain kernel
versions to all ESPs and configures the respective bootloader to boot from
the ``vfat`` formatted ESPs. In the context of ZFS as root filesystem, this
means that you can use all optional features on your root pool, instead of
being limited to the subset also present in the ZFS implementation in ``grub``,
or having to create a separate, small boot pool (see: `Booting ZFS on root
with grub <https://github.com/zfsonlinux/zfs/wiki/Debian-Stretch-Root-on-ZFS>`_).
In setups with redundancy, all disks are partitioned with an ESP by the
installer. This ensures the system boots even if the first boot device fails
or if the BIOS can only boot from a particular disk.
The ESPs are not kept mounted during regular operation. This helps to prevent
filesystem corruption of the ``vfat`` formatted ESPs in case of a system crash,
and removes the need to manually adapt ``/etc/fstab`` in case the primary boot
device fails.
``proxmox-boot-tool`` handles the following tasks:
* formatting and setting up a new partition
* copying and configuring new kernel images and initrd images to all listed ESPs
* synchronizing the configuration on kernel upgrades and other maintenance tasks
* managing the list of kernel versions which are synchronized
* configuring the boot-loader to boot a particular kernel version (pinning)
You can view the currently configured ESPs and their state by running:
.. code-block:: console
# proxmox-boot-tool status
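On a UEFI system with two synced ESPs, the output might look similar to the
following (UUIDs and kernel versions are, of course, system-specific):

.. code-block:: console

   # proxmox-boot-tool status
   Re-executing '/usr/sbin/proxmox-boot-tool' in new private mount namespace..
   System currently booted with uefi
   1225-5749 is configured with: uefi (versions: 5.11.22-1-pve, 5.11.22-2-pve)
   1225-E8EE is configured with: uefi (versions: 5.11.22-1-pve, 5.11.22-2-pve)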
.. _systembooting-proxmox-boot-setup:
Setting up a new partition for use as synced ESP
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To format and initialize a partition as a synced ESP, e.g., after replacing a
failed vdev in an rpool, ``proxmox-boot-tool`` from ``pve-kernel-helper`` can
be used.
WARNING: the ``format`` command will format the ``<partition>``; make sure to
pass in the right device/partition!
For example, to format an empty partition ``/dev/sda2`` as ESP, run the following:
.. code-block:: console
# proxmox-boot-tool format /dev/sda2
To set up an existing, unmounted ESP located on ``/dev/sda2`` for inclusion in
`Proxmox Backup`_'s kernel update synchronization mechanism, use the following:
.. code-block:: console
# proxmox-boot-tool init /dev/sda2
Afterwards, ``/etc/kernel/proxmox-boot-uuids`` should contain a new line with the
UUID of the newly added partition. The ``init`` command will also automatically
trigger a refresh of all configured ESPs.
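To verify this, you can inspect the file directly; each line holds the
filesystem UUID of one synced ESP (the values shown are examples):

.. code-block:: console

   # cat /etc/kernel/proxmox-boot-uuids
   1225-5749
   1225-E8EE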
.. _systembooting-proxmox-boot-refresh:
Updating the configuration on all ESPs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To copy and configure all bootable kernels and keep all ESPs listed in
``/etc/kernel/proxmox-boot-uuids`` in sync, you just need to run:
.. code-block:: console
# proxmox-boot-tool refresh
(This is the equivalent of running ``update-grub`` on systems with ``ext4`` or ``xfs`` on root.)
This is necessary should you make changes to the kernel commandline, or want to
sync all kernels and initrds.
.. NOTE:: Both ``update-initramfs`` and ``apt`` (when necessary) will automatically
trigger a refresh.
Kernel Versions considered by ``proxmox-boot-tool``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following kernel versions are configured by default:
* the currently running kernel
* the version being newly installed on package updates
* the two latest already installed kernels
* the latest version of the second-to-last kernel series (e.g. 5.0, 5.3), if applicable
* any manually selected kernels
Manually keeping a kernel bootable
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Should you wish to add a certain kernel and initrd image to the list of
bootable kernels, use ``proxmox-boot-tool kernel add``.
For example, run the following to add the kernel with ABI version
``5.0.15-1-pve`` to the list of kernels to keep installed and synced to all
ESPs:
.. code-block:: console
# proxmox-boot-tool kernel add 5.0.15-1-pve
``proxmox-boot-tool kernel list`` will list all kernel versions currently selected
for booting:
.. code-block:: console
# proxmox-boot-tool kernel list
Manually selected kernels:
5.0.15-1-pve
Automatically selected kernels:
5.0.12-1-pve
4.15.18-18-pve
Run ``proxmox-boot-tool kernel remove`` to remove a kernel from the list of
manually selected kernels, for example:
.. code-block:: console
# proxmox-boot-tool kernel remove 5.0.15-1-pve
.. NOTE:: It's required to run ``proxmox-boot-tool refresh`` to update all EFI System
Partitions (ESPs) after manually adding or removing a kernel, as shown above.
.. _systembooting-determine-bootloader:
Determine which Bootloader is Used
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. image:: images/screenshots/boot-grub.png
:target: _images/boot-grub.png
:align: left
:alt: Grub boot screen
The simplest and most reliable way to determine which bootloader is used is to
watch the boot process of the `Proxmox Backup`_ node.
You will either see the blue box of ``grub`` or the simple black-on-white
``systemd-boot``.
.. image:: images/screenshots/boot-systemdboot.png
:target: _images/boot-systemdboot.png
:align: right
:alt: systemd-boot screen
Determining the bootloader from a running system might not be 100% accurate. The
safest way is to run the following command:
.. code-block:: console
# efibootmgr -v
If it returns a message that EFI variables are not supported, ``grub`` is used in
BIOS/Legacy mode.
If the output contains a line that looks similar to the following, ``grub`` is
used in UEFI mode.
.. code-block:: console
Boot0005* proxmox [...] File(\EFI\proxmox\grubx64.efi)
If the output contains a line similar to the following, ``systemd-boot`` is used.
.. code-block:: console
Boot0006* Linux Boot Manager [...] File(\EFI\systemd\systemd-bootx64.efi)
By running:
.. code-block:: console
# proxmox-boot-tool status
you can find out if ``proxmox-boot-tool`` is configured, which is a good
indication of how the system is booted.
.. _systembooting-grub:
Grub
~~~~
``grub`` has been the de-facto standard for booting Linux systems for many years
and is quite well documented
(see the `Grub Manual
<https://www.gnu.org/software/grub/manual/grub/grub.html>`_).
Configuration
^^^^^^^^^^^^^
Changes to the ``grub`` configuration are done via the defaults file
``/etc/default/grub`` or config snippets in ``/etc/default/grub.d``. To regenerate
the configuration file after a change, run:
.. code-block:: console
# update-grub
.. NOTE:: Systems using ``proxmox-boot-tool`` will call
``proxmox-boot-tool refresh`` upon ``update-grub``.
.. _systembooting-systemdboot:
Systemd-boot
~~~~~~~~~~~~
``systemd-boot`` is a lightweight EFI bootloader. It reads the kernel and initrd
images directly from the EFI System Partition (ESP) where it is installed.
The main advantage of directly loading the kernel from the ESP is that it does
not need to reimplement the drivers for accessing the storage. In `Proxmox
Backup`_ :ref:`proxmox-boot-tool <systembooting-proxmox-boot-tool>` is used to
keep the configuration on the ESPs synchronized.
.. _systembooting-systemd-boot-config:
Configuration
^^^^^^^^^^^^^
``systemd-boot`` is configured via the file ``loader/loader.conf`` in the root
directory of an EFI System Partition (ESP). See the ``loader.conf(5)`` manpage
for details.
Each bootloader entry is placed in a file of its own in the directory
``loader/entries/``.
An example entry.conf looks like this (``/`` refers to the root of the ESP):
.. code-block:: console
title Proxmox
version 5.0.15-1-pve
options root=ZFS=rpool/ROOT/pve-1 boot=zfs
linux /EFI/proxmox/5.0.15-1-pve/vmlinuz-5.0.15-1-pve
initrd /EFI/proxmox/5.0.15-1-pve/initrd.img-5.0.15-1-pve
.. _systembooting-edit-kernel-cmdline:
Editing the Kernel Commandline
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can modify the kernel commandline in the following places, depending on the
bootloader used:
Grub
^^^^
The kernel commandline needs to be placed in the variable
``GRUB_CMDLINE_LINUX_DEFAULT`` in the file ``/etc/default/grub``. Running
``update-grub`` appends its content to all ``linux`` entries in
``/boot/grub/grub.cfg``.
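For example, to boot with the well-known ``quiet`` flag, the variable in
``/etc/default/grub`` could look like this, followed by a run of
``update-grub`` to apply it:

.. code-block:: console

   GRUB_CMDLINE_LINUX_DEFAULT="quiet"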
Systemd-boot
^^^^^^^^^^^^
The kernel commandline needs to be placed as one line in ``/etc/kernel/cmdline``.
To apply your changes, run ``proxmox-boot-tool refresh``, which sets it as the
``options`` line for all config files in ``loader/entries/proxmox-*.conf``.
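A minimal example for a ZFS root setup, mirroring the entry shown earlier
(pool and dataset names are illustrative), could look like this:

.. code-block:: console

   # cat /etc/kernel/cmdline
   root=ZFS=rpool/ROOT/pve-1 boot=zfs quiet
   # proxmox-boot-tool refresh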
.. _systembooting-kernel-pin:
Override the Kernel-Version for next Boot
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To select a kernel that is not currently the default kernel, you can either:
* use the boot loader menu that is displayed at the beginning of the boot
process
* use the ``proxmox-boot-tool`` to ``pin`` the system to a kernel version either
once or permanently (until the pin is reset).
This should help you work around incompatibilities between a newer kernel
version and the hardware.
.. NOTE:: Such a pin should be removed as soon as possible so that all current
security patches of the latest kernel are also applied to the system.
For example, to permanently select the version ``5.15.30-1-pve`` for booting,
you would run:
.. code-block:: console
# proxmox-boot-tool kernel pin 5.15.30-1-pve
.. TIP:: The pinning functionality works for all `Proxmox Backup`_ systems, not only those using
``proxmox-boot-tool`` to synchronize the contents of the ESPs. If your system
does not use ``proxmox-boot-tool`` for synchronizing, you can also skip the
final ``proxmox-boot-tool refresh`` call.
You can also set a kernel version to be booted on the next system boot only.
This is useful, for example, to test if an updated kernel has resolved an
issue which caused you to ``pin`` a version in the first place:
.. code-block:: console
# proxmox-boot-tool kernel pin 5.15.30-1-pve --next-boot
To remove any pinned version configuration use the ``unpin`` subcommand:
.. code-block:: console
# proxmox-boot-tool kernel unpin
While ``unpin`` has a ``--next-boot`` option as well, it is used to clear a pinned
version set with ``--next-boot``. As that already happens automatically on boot,
invoking it manually is of little use.
After setting or clearing pinned versions, you also need to synchronize the
content and configuration on the ESPs, by running the ``refresh`` subcommand.
.. TIP:: You will be prompted to do this automatically for ``proxmox-boot-tool``
managed systems if you call the tool interactively.
.. code-block:: console
# proxmox-boot-tool refresh
View File
@ -500,7 +500,7 @@ a single media pool, so a job only uses tapes from that pool.
is less space efficient, because the media from the last set is less space efficient, because the media from the last set
may not be fully written, leaving the remaining space unused. may not be fully written, leaving the remaining space unused.
The advantage is that this produces media sets of minimal The advantage is that this procudes media sets of minimal
size. Small sets are easier to handle, can be moved more conveniently size. Small sets are easier to handle, can be moved more conveniently
to an off-site vault, and can be restored much faster. to an off-site vault, and can be restored much faster.
@ -519,9 +519,8 @@ a single media pool, so a job only uses tapes from that pool.
This balances between space efficiency and media count. This balances between space efficiency and media count.
.. NOTE:: Retention period starts on the creation time of the next .. NOTE:: Retention period starts when the calendar event
media-set or, if that does not exist, when the calendar event triggers.
triggers the next time after the current media-set start time.
Additionally, the following events may allocate a new media set: Additionally, the following events may allocate a new media set:
@ -565,6 +564,13 @@ a single media pool, so a job only uses tapes from that pool.
the password. Please make sure to remember the password, in case the password. Please make sure to remember the password, in case
you need to restore the key. you need to restore the key.
.. NOTE:: We use a global content namespace, meaning that we do not store the
source datastore name. Because of this, it is impossible to distinguish
store1:/vm/100 from store2:/vm/100. Please use different media pools
if the sources are from different namespaces with conflicting names
(for example, if the sources are from different Proxmox VE clusters).
.. image:: images/screenshots/pbs-gui-tape-pools-add.png .. image:: images/screenshots/pbs-gui-tape-pools-add.png
:align: right :align: right
:alt: Tape Backup: Add a media pool :alt: Tape Backup: Add a media pool
@ -681,16 +687,6 @@ To remove a job, please use:
# proxmox-tape backup-job remove job2 # proxmox-tape backup-job remove job2
By default, all (recursive) namespaces of the datastore are included in a tape
backup. You can specify a single namespace with ``ns`` and a depth with
``max-depth``. For example:
.. code-block:: console
# proxmox-tape backup-job update job2 --ns mynamespace --max-depth 3
If no ``max-depth`` is given, all namespaces below the given one are included
recursively.
.. image:: images/screenshots/pbs-gui-tape-backup-jobs-add.png .. image:: images/screenshots/pbs-gui-tape-backup-jobs-add.png
:align: right :align: right
:alt: Tape Backup: Add a backup job :alt: Tape Backup: Add a backup job
@ -807,16 +803,6 @@ The following options are available:
media set into import-export slots. The operator can then pick up media set into import-export slots. The operator can then pick up
those tapes and move them to a media vault. those tapes and move them to a media vault.
--ns The namespace to backup.
  Use this if you only want to backup a specific namespace. If omitted, the
  root namespace is assumed.
--max-depth The depth to recurse namespaces.
  ``0`` means no recursion at all (only the given namespace). If omitted,
  all namespaces are recursed (below the given one).
Restore from Tape Restore from Tape
~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~
@ -851,53 +837,6 @@ data disk (datastore):
# proxmox-tape restore 9da37a55-aac7-4deb-91c6-482b3b675f30 mystore # proxmox-tape restore 9da37a55-aac7-4deb-91c6-482b3b675f30 mystore
Single Snapshot Restore
^^^^^^^^^^^^^^^^^^^^^^^
Sometimes it is not necessary to restore a whole media-set, but only some
specific snapshots from the tape. This can be achieved with the ``snapshots``
parameter:
.. code-block:: console
// proxmox-tape restore <media-set-uuid> <datastore> [<snapshot>]
# proxmox-tape restore 9da37a55-aac7-4deb-91c6-482b3b675f30 mystore sourcestore:host/hostname/2022-01-01T00:01:00Z
This first restores the snapshot to a temporary location, then restores the relevant
chunk archives, and finally restores the snapshot data to the target datastore.
The ``snapshot`` parameter can be given multiple times, so one can restore
multiple snapshots with one restore action.
.. NOTE:: When using the single snapshot restore, the tape must be traversed
more than once, which, if you restore many snapshots at once, can take longer
than restoring the whole datastore.
Namespaces
^^^^^^^^^^
It is also possible to select and map specific namespaces from a media-set
during a restore. This can be done with the ``namespaces`` parameter.
The format of the parameter is
.. code-block:: console
store=<source-datastore>[,source=<source-ns>][,target=<target-ns>][,max-depth=<depth>]
If ``source`` or ``target`` is not given, the root namespace is assumed.
When no ``max-depth`` is given, the source namespace will be fully recursed.
An example restore command:
.. code-block:: console
# proxmox-tape restore 9da37a55-aac7-4deb-91c6-482b3b675f30 mystore --namespaces store=sourcedatastore,source=ns1,target=ns2,max-depth=2
The parameter can be given multiple times. It can also be combined with the
``snapshots`` parameter to only restore those snapshots and map them to different
namespaces.
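For instance, a sketch that combines two such mappings in a single invocation,
reusing the media-set UUID and datastore from the examples above, might look
like this:

.. code-block:: console

   # proxmox-tape restore 9da37a55-aac7-4deb-91c6-482b3b675f30 mystore \
       --namespaces store=sourcedatastore,source=ns1,target=ns2 \
       --namespaces store=sourcedatastore,source=ns3,max-depth=0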
Update Inventory Update Inventory
~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~
@ -1039,76 +978,3 @@ This command does the following:
- run drive cleaning operation - run drive cleaning operation
- unload the cleaning tape (to slot 3) - unload the cleaning tape (to slot 3)
Example Setups
--------------
Here are a few example setups for how to manage media pools and schedules.
This is not an exhaustive list, and there are many more possible combinations
of useful settings.
Single Continued Media Set
~~~~~~~~~~~~~~~~~~~~~~~~~~
The simplest setup: always continue the media-set and never expire.
Allocation policy:
continue
Retention policy:
keep
This setup has the advantage of being easy to manage and reuses the benefits
of deduplication as much as possible. However, it is also vulnerable to the
failure of any single tape, which would render all backups referring to chunks
from that tape unusable.
If you want to start a new media-set manually, you can set the currently
writable media of the set either to 'full', or set the location to an
offsite vault.
Weekday Scheme
~~~~~~~~~~~~~~
A slightly more complex scheme, where the goal is to have an independent
tape or media set for each weekday, for example from Monday to Friday.
This can be solved by having a separate media pool for each day, so 'Monday',
'Tuesday', etc.
Allocation policy:
should be 'mon' for the 'Monday' pool, 'tue' for the Tuesday pool and so on.
Retention policy:
overwrite
There should be one (or more) tape backup jobs for each pool on the
corresponding weekday. This scheme is still very manageable, with one media
set per weekday, and can easily be moved off-site.
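Assuming pools are created on the command line, a sketch for such a weekday
scheme could look like this (the ``--allocation`` and ``--retention`` values
mirror the policies above; pool names are illustrative):

.. code-block:: console

   # proxmox-tape pool create Monday --allocation mon --retention overwrite
   # proxmox-tape pool create Tuesday --allocation tue --retention overwrite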
Multiple Pools with Different Policies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Complex setups are also possible with multiple media pools configured with
different allocation and retention policies.
An example would be to have two media pools. The first is configured with
weekly allocation and a few weeks of retention:
Allocation policy:
mon
Retention policy:
3 weeks
The second pool is configured with yearly allocation and does not expire:
Allocation policy:
yearly
Retention policy:
keep
In combination with suitable prune settings and tape backup schedules, this
achieves long-term storage of some backups, while keeping the current
backups on smaller media sets that expire after three weeks plus the current
week (~4 weeks).
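A sketch of how these two pools could be created on the command line (pool
names are illustrative):

.. code-block:: console

   # proxmox-tape pool create weekly --allocation mon --retention "3 weeks"
   # proxmox-tape pool create longterm --allocation yearly --retention keep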
View File
@ -61,15 +61,6 @@ The manifest contains a list of all backed up files, and their
sizes and checksums. It is used to verify the consistency of a sizes and checksums. It is used to verify the consistency of a
backup. backup.
Backup Namespace
----------------
Namespaces allow for the reuse of a single chunk store deduplication domain
for multiple sources, while avoiding naming conflicts and allowing more
fine-grained access control.
Essentially, they are implemented as a simple directory structure and need no
separate configuration.
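For instance, a namespace ``a`` with a child namespace ``b`` conceptually maps
to nested ``ns/`` directories below the datastore root, roughly like this
(a simplified sketch of the on-disk layout):

.. code-block:: console

   <datastore>/ns/a/ns/b/vm/100/2022-01-01T00:01:00Z/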
Backup Type Backup Type
----------- -----------
@ -77,14 +68,13 @@ Backup Type
The backup server groups backups by *type*, where *type* is one of: The backup server groups backups by *type*, where *type* is one of:
``vm`` ``vm``
This type is used for :term:`virtual machine<Virtual machine>`\ s. It This type is used for :term:`virtual machine`\ s. It typically
typically consists of the virtual machine's configuration file and an image consists of the virtual machine's configuration file and an image archive
archive for each disk. for each disk.
``ct`` ``ct``
This type is used for :term:`container<Container>`\ s. It consists of the This type is used for :term:`container`\ s. It consists of the container's
container's configuration and a single file archive for the filesystem's configuration and a single file archive for the filesystem's contents.
contents.
``host`` ``host``
This type is used for file/directory backups created from within a machine. This type is used for file/directory backups created from within a machine.
@ -92,25 +82,25 @@ The backup server groups backups by *type*, where *type* is one of:
or container. Such backups may contain file and image archives; there are no or container. Such backups may contain file and image archives; there are no
restrictions in this regard. restrictions in this regard.
Backup ID Backup ID
--------- ---------
A unique ID for a specific Backup Type and Backup Namespace. Usually the A unique ID. Usually the virtual machine or container ID. ``host``
virtual machine or container ID. ``host`` type backups normally use the type backups normally use the hostname.
hostname.
Backup Time Backup Time
----------- -----------
The time when the backup was made with second resolution. The time when the backup was made.
Backup Group Backup Group
------------ ------------
The tuple ``<type>/<id>`` is called a backup group. Such a group may contain The tuple ``<type>/<ID>`` is called a backup group. Such a group
one or more backup snapshots. may contain one or more backup snapshots.
.. _term_backup_snapshot: .. _term_backup_snapshot:
@ -126,7 +116,7 @@ uniquely identifies a specific backup within a datastore.
vm/104/2019-10-09T08:01:06Z vm/104/2019-10-09T08:01:06Z
host/elsa/2019-11-08T09:48:14Z host/elsa/2019-11-08T09:48:14Z
As you can see, the time format is RFC3339_ with Coordinated As you can see, the time format is RFC3399_ with Coordinated
Universal Time (UTC_, identified by the trailing *Z*). Universal Time (UTC_, identified by the trailing *Z*).
View File
@ -1,101 +0,0 @@
.. _sysadmin_traffic_control:
Traffic Control
---------------
.. image:: images/screenshots/pbs-gui-traffic-control-add.png
:align: right
:alt: Add a traffic control limit
Creating and restoring backups can produce lots of traffic and impact other
users of the network or shared storages.
Proxmox Backup Server allows you to limit network traffic for clients within
specified networks, using a token bucket filter (TBF).
This allows you to avoid network congestion, or to prioritize traffic from
certain hosts.
You can manage the traffic controls either via the web interface or using the
``traffic-control`` commands of the ``proxmox-backup-manager`` command-line
tool.
.. note:: Sync jobs on the server are not affected by its rate-in limits. If
you want to limit the incoming traffic that a pull-based sync job
generates, you need to set up a job-specific rate-in limit. See
:ref:`syncjobs`.
The following command adds a traffic control rule to limit all IPv4 clients
(network ``0.0.0.0/0``) to 100 MB/s:
.. code-block:: console
# proxmox-backup-manager traffic-control create rule0 --network 0.0.0.0/0 \
--rate-in 100MB --rate-out 100MB \
--comment "Default rate limit (100MB/s) for all clients"
.. note:: To limit both IPv4 and IPv6 network spaces you need to pass two
network parameters ``::/0`` and ``0.0.0.0/0``.
It is possible to restrict rules to certain time frames, for example the
company office hours:
.. tip:: You can use SI (base 10: KB, MB, ...) or IEC (base 2: KiB, MiB, ...)
units.
.. code-block:: console
# proxmox-backup-manager traffic-control update rule0 \
--timeframe "mon..fri 8-12" \
--timeframe "mon..fri 14:30-18"
If more than one rule matches a client, the server uses the rule with the
smaller network. For example, we can override the setting for our private
network (and the server itself) with:
.. code-block:: console
# proxmox-backup-manager traffic-control create rule1 \
--network 192.168.2.0/24 \
--network 127.0.0.0/8 \
--rate-in 20GB --rate-out 20GB \
--comment "Use 20GB/s for the local network"
.. note:: The behavior is undefined if there are several rules for the same network.
If there are multiple rules that match the same network, all of them will be
applied, which means that the smallest one wins, as its bucket fills up the
fastest.
To list the current rules use:
.. code-block:: console
# proxmox-backup-manager traffic-control list
┌───────┬─────────────┬─────────────┬─────────────────────────┬────────────...─┐
│ name │ rate-in │ rate-out │ network │ timeframe ... │
╞═══════╪═════════════╪═════════════╪═════════════════════════╪════════════...═╡
│ rule0 │ 100 MB │ 100 MB │ ["0.0.0.0/0"] │ ["mon..fri ... │
├───────┼─────────────┼─────────────┼─────────────────────────┼────────────...─┤
│ rule1 │ 20 GB │ 20 GB │ ["192.168.2.0/24", ...] │ ... │
└───────┴─────────────┴─────────────┴─────────────────────────┴────────────...─┘
Rules can also be removed:
.. code-block:: console
# proxmox-backup-manager traffic-control remove rule1
To show the state (current data rate) of all configured rules use:
.. code-block:: console
# proxmox-backup-manager traffic-control traffic
┌───────┬─────────────┬──────────────┐
│ name │ cur-rate-in │ cur-rate-out │
╞═══════╪═════════════╪══════════════╡
│ rule0 │ 0 B │ 0 B │
├───────┼─────────────┼──────────────┤
│ rule1 │ 1.161 GiB │ 19.146 KiB │
└───────┴─────────────┴──────────────┘
View File
@ -157,133 +157,34 @@ Access Control
-------------- --------------
By default, new users and API tokens do not have any permissions. Instead you By default, new users and API tokens do not have any permissions. Instead you
need to specify what is allowed and what is not. need to specify what is allowed and what is not. You can do this by assigning
roles to users/tokens on specific objects, like datastores or remotes. The
Proxmox Backup Server uses a role and path based permission management system. following roles exist:
An entry in the permissions table allows a user, group or token to take on a
specific role when accessing an 'object' or 'path'. This means that such an
access rule can be represented as a triple of '(path, user, role)', '(path,
group, role)' or '(path, token, role)', with the role containing a set of
allowed actions, and the path representing the target of these actions.
Privileges
~~~~~~~~~~
Privileges are the atoms that access roles are made of. They are internally
used to enforce the actual permission checks in the API.
We currently support the following privileges:
**Sys.Audit**
Sys.Audit allows one to know about the system and its status.
**Sys.Modify**
Sys.Modify allows one to modify system-level configuration and apply updates.
**Sys.PowerManagement**
Sys.PowerManagement allows one to power off or reboot the system.
**Datastore.Audit**
Datastore.Audit allows one to know about a datastore, including reading the
configuration entry and listing its contents.
**Datastore.Allocate**
Datastore.Allocate allows one to create or delete datastores.
**Datastore.Modify**
Datastore.Modify allows one to modify a datastore and its contents, and to
create or delete namespaces inside a datastore.
**Datastore.Read**
Datastore.Read allows one to read arbitrary backup contents, independent of
the backup group owner.
**Datastore.Verify**
Allows verifying the backup snapshots in a datastore.
**Datastore.Backup**
Datastore.Backup allows one to create new backup snapshots and also grants the
privileges of Datastore.Read and Datastore.Verify, but only if the backup
group is owned by the user or one of its tokens.
**Datastore.Prune**
Datastore.Prune allows one to delete snapshots, but additionally requires
backup ownership.
**Permissions.Modify**
Permissions.Modify allows one to modify ACLs.
.. note:: One can always configure privileges for their own API tokens, as
they will be clamped by the user's privileges anyway.
**Remote.Audit**
Remote.Audit allows one to read the remote and the sync configuration entries
**Remote.Modify**
Remote.Modify allows one to modify the remote configuration
**Remote.Read**
Remote.Read allows one to read data from a configured `Remote`
**Sys.Console**
Sys.Console allows one to access the system's console; note that for all
but `root@pam`, a valid system login is still required.
**Tape.Audit**
Tape.Audit allows one to read the configuration and status of tape drives,
changers and backups
**Tape.Modify**
Tape.Modify allows one to modify the configuration of tape drives, changers
and backups
**Tape.Write**
Tape.Write allows one to write to tape media
**Tape.Read**
Tape.Read allows one to read tape backup configuration and contents from
tape media
**Realm.Allocate**
Realm.Allocate allows one to view, create, modify and delete authentication
realms for users
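To check which privileges a given user or token actually ends up with on a
specific path, you can query them on the command line; a sketch (user and
path are hypothetical):

.. code-block:: console

   # proxmox-backup-manager user permissions john@pbs --path /datastore/store1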
Access Roles
~~~~~~~~~~~~
An access role combines one or more privileges into something that can be
assigned to a user or API token on an object path.
Currently, there are only built-in roles; this means that you cannot create
your own custom roles.
The following roles exist:
**NoAccess** **NoAccess**
Disable Access - nothing is allowed. Disable Access - nothing is allowed.
**Admin** **Admin**
Can do anything, on the object path assigned. Can do anything.
**Audit** **Audit**
Can view the status and configuration of things, but is not allowed to change Can view things, but is not allowed to change settings.
settings.
**DatastoreAdmin** **DatastoreAdmin**
Can do anything on *existing* datastores. Can do anything on datastores.
**DatastoreAudit** **DatastoreAudit**
Can view datastore metrics, settings and list content. But is not allowed to Can view datastore settings and list content. But
read the actual data. is not allowed to read the actual data.
**DatastoreReader** **DatastoreReader**
Can inspect a datastore's or namespaces content and do restores. Can Inspect datastore content and do restores.
**DatastoreBackup** **DatastoreBackup**
Can backup and restore owned backups. Can backup and restore owned backups.
**DatastorePowerUser** **DatastorePowerUser**
Can backup, restore, and prune *owned* backups. Can backup, restore, and prune owned backups.
**RemoteAdmin** **RemoteAdmin**
Can do anything on remotes. Can do anything on remotes.
@ -294,62 +195,19 @@ The following roles exist:
**RemoteSyncOperator** **RemoteSyncOperator**
Is allowed to read data from a remote. Is allowed to read data from a remote.
**TapeAdmin** **TapeAudit**
Can view tape related configuration and status
**TapeAdmin**
Can do anything related to tape backup Can do anything related to tape backup
**TapeAudit**
Can view tape related metrics, configuration and status
**TapeOperator** **TapeOperator**
Can do tape backup and restore, but cannot change any configuration Can do tape backup and restore (but no configuration changes)
**TapeReader** **TapeReader**
Can read and inspect tape configuration and media content Can read and inspect tape configuration and media content
Objects and Paths .. image:: images/screenshots/pbs-gui-user-management-add-user.png
~~~~~~~~~~~~~~~~~
Access permissions are assigned to objects, such as a datastore, a namespace or
some system resources.
We use file-system-like paths to address these objects. These paths form a
natural tree, and permissions of higher levels (shorter paths) can optionally
be propagated down within this hierarchy.
Paths can be templated, meaning that they can refer to the actual id of a
configuration entry. When an API call requires permissions on a templated
path, the path may contain references to parameters of the API call. These
references are specified in curly braces.
Some examples are:
* `/datastore`: Access to *all* datastores on a Proxmox Backup server
* `/datastore/{store}`: Access to a specific datastore on a Proxmox Backup
server
* `/datastore/{store}/{ns}`: Access to a specific namespace on a specific
datastore
* `/remote`: Access to all remote entries
* `/system/network`: Access to configuring the host network
* `/tape/`: Access to tape devices, pools and jobs
* `/access/users`: User administration
* `/access/openid/{id}`: Administrative access to a specific OpenID Connect realm
Inheritance
^^^^^^^^^^^
As mentioned earlier, object paths form a file-system-like tree, and
permissions can be inherited by objects down that tree through the propagate
flag, which is set by default. We use the following inheritance rules:
* Permissions for API tokens are always clamped to those of the user.
* Permissions on deeper, more specific levels replace those inherited from an
upper level.
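To review the resulting access rules, including their propagate flag, you can
list all configured ACL entries (the entry shown is illustrative):

.. code-block:: console

   # proxmox-backup-manager acl list
   ┌─────────┬───────────────────────┬───────────┬─────────────────┐
   │ ugid    │ path                  │ propagate │ roleid          │
   ╞═════════╪═══════════════════════╪═══════════╪═════════════════╡
   │ dev@pbs │ /datastore/store1/dev │         1 │ DatastoreBackup │
   └─────────┴───────────────────────┴───────────┴─────────────────┘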
Configuration & Management
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. image:: images/screenshots/pbs-gui-permissions-add.png
:align: right :align: right
:alt: Add permissions for user :alt: Add permissions for user
View File
@ -3,6 +3,7 @@ use anyhow::Error;
// chacha20-poly1305 // chacha20-poly1305
fn rate_test(name: &str, bench: &dyn Fn() -> usize) { fn rate_test(name: &str, bench: &dyn Fn() -> usize) {
print!("{:<20} ", name); print!("{:<20} ", name);
let start = std::time::SystemTime::now(); let start = std::time::SystemTime::now();
@ -13,19 +14,20 @@ fn rate_test(name: &str, bench: &dyn Fn() -> usize) {
loop { loop {
bytes += bench(); bytes += bench();
let elapsed = start.elapsed().unwrap(); let elapsed = start.elapsed().unwrap();
if elapsed > duration { if elapsed > duration { break; }
break;
}
} }
let elapsed = start.elapsed().unwrap(); let elapsed = start.elapsed().unwrap();
let elapsed = (elapsed.as_secs() as f64) + (elapsed.subsec_millis() as f64) / 1000.0; let elapsed = (elapsed.as_secs() as f64) +
(elapsed.subsec_millis() as f64)/1000.0;
println!("{:>8.1} MB/s", (bytes as f64) / (elapsed * 1024.0 * 1024.0)); println!("{:>8.1} MB/s", (bytes as f64)/(elapsed*1024.0*1024.0));
} }
fn main() -> Result<(), Error> { fn main() -> Result<(), Error> {
let input = proxmox_sys::linux::random_data(1024 * 1024)?;
let input = proxmox::sys::linux::random_data(1024*1024)?;
rate_test("crc32", &|| { rate_test("crc32", &|| {
let mut crchasher = crc32fast::Hasher::new(); let mut crchasher = crc32fast::Hasher::new();
@ -44,23 +46,35 @@ fn main() -> Result<(), Error> {
input.len() input.len()
}); });
let key = proxmox_sys::linux::random_data(32)?; let key = proxmox::sys::linux::random_data(32)?;
let iv = proxmox_sys::linux::random_data(16)?; let iv = proxmox::sys::linux::random_data(16)?;
let cipher = openssl::symm::Cipher::aes_256_gcm(); let cipher = openssl::symm::Cipher::aes_256_gcm();
rate_test("aes-256-gcm", &|| { rate_test("aes-256-gcm", &|| {
let mut tag = [0u8; 16]; let mut tag = [0u8;16];
openssl::symm::encrypt_aead(cipher, &key, Some(&iv), b"", &input, &mut tag).unwrap(); openssl::symm::encrypt_aead(
cipher,
&key,
Some(&iv),
b"",
&input,
&mut tag).unwrap();
input.len() input.len()
}); });
let cipher = openssl::symm::Cipher::chacha20_poly1305(); let cipher = openssl::symm::Cipher::chacha20_poly1305();
rate_test("chacha20-poly1305", &|| { rate_test("chacha20-poly1305", &|| {
let mut tag = [0u8; 16]; let mut tag = [0u8;16];
openssl::symm::encrypt_aead(cipher, &key, Some(&iv[..12]), b"", &input, &mut tag).unwrap(); openssl::symm::encrypt_aead(
cipher,
&key,
Some(&iv[..12]),
b"",
&input,
&mut tag).unwrap();
input.len() input.len()
}); });
View File
@ -1,7 +1,7 @@
use anyhow::Error; use anyhow::{Error};
use proxmox_router::cli::*;
use proxmox_schema::*; use proxmox_schema::*;
use proxmox_router::cli::*;
#[api( #[api(
input: { input: {
@ -16,7 +16,9 @@ use proxmox_schema::*;
/// Echo command. Print the passed text. /// Echo command. Print the passed text.
/// ///
/// Returns: nothing /// Returns: nothing
fn echo_command(text: String) -> Result<(), Error> { fn echo_command(
text: String,
) -> Result<(), Error> {
println!("{}", text); println!("{}", text);
Ok(()) Ok(())
} }
@ -35,7 +37,9 @@ fn echo_command(text: String) -> Result<(), Error> {
/// Hello command. /// Hello command.
/// ///
/// Returns: nothing /// Returns: nothing
fn hello_command(verbose: Option<bool>) -> Result<(), Error> { fn hello_command(
verbose: Option<bool>,
) -> Result<(), Error> {
if verbose.unwrap_or(false) { if verbose.unwrap_or(false) {
println!("Hello, how are you!"); println!("Hello, how are you!");
} else { } else {
@ -50,6 +54,7 @@ fn hello_command(verbose: Option<bool>) -> Result<(), Error> {
/// ///
/// Returns: nothing /// Returns: nothing
fn quit_command() -> Result<(), Error> { fn quit_command() -> Result<(), Error> {
println!("Goodbye."); println!("Goodbye.");
std::process::exit(0); std::process::exit(0);
@ -59,9 +64,8 @@ fn cli_definition() -> CommandLineInterface {
let cmd_def = CliCommandMap::new() let cmd_def = CliCommandMap::new()
.insert("quit", CliCommand::new(&API_METHOD_QUIT_COMMAND)) .insert("quit", CliCommand::new(&API_METHOD_QUIT_COMMAND))
.insert("hello", CliCommand::new(&API_METHOD_HELLO_COMMAND)) .insert("hello", CliCommand::new(&API_METHOD_HELLO_COMMAND))
.insert( .insert("echo", CliCommand::new(&API_METHOD_ECHO_COMMAND)
"echo", .arg_param(&["text"])
CliCommand::new(&API_METHOD_ECHO_COMMAND).arg_param(&["text"]),
) )
.insert_help(); .insert_help();
@ -69,6 +73,7 @@ fn cli_definition() -> CommandLineInterface {
} }
fn main() -> Result<(), Error> { fn main() -> Result<(), Error> {
let helper = CliHelper::new(cli_definition()); let helper = CliHelper::new(cli_definition());
let mut rl = rustyline::Editor::<CliHelper>::new(); let mut rl = rustyline::Editor::<CliHelper>::new();
View File
@ -2,14 +2,15 @@ use std::io::Write;
use anyhow::Error; use anyhow::Error;
use pbs_api_types::{Authid, BackupNamespace, BackupType}; use pbs_api_types::Authid;
use pbs_client::{BackupReader, HttpClient, HttpClientOptions}; use pbs_client::{HttpClient, HttpClientOptions, BackupReader};
pub struct DummyWriter { pub struct DummyWriter {
bytes: usize, bytes: usize,
} }
impl Write for DummyWriter { impl Write for DummyWriter {
fn write(&mut self, data: &[u8]) -> Result<usize, std::io::Error> { fn write(&mut self, data: &[u8]) -> Result<usize, std::io::Error> {
self.bytes += data.len(); self.bytes += data.len();
Ok(data.len()) Ok(data.len())
@ -20,7 +21,9 @@ impl Write for DummyWriter {
} }
} }
async fn run() -> Result<(), Error> { async fn run() -> Result<(), Error> {
let host = "localhost"; let host = "localhost";
let auth_id = Authid::root_auth_id(); let auth_id = Authid::root_auth_id();
@ -33,15 +36,8 @@ async fn run() -> Result<(), Error> {
let backup_time = proxmox_time::parse_rfc3339("2019-06-28T10:49:48Z")?; let backup_time = proxmox_time::parse_rfc3339("2019-06-28T10:49:48Z")?;
let client = BackupReader::start( let client = BackupReader::start(client, None, "store2", "host", "elsa", backup_time, true)
client, .await?;
None,
"store2",
&BackupNamespace::root(),
&(BackupType::Host, "elsa".to_string(), backup_time).into(),
true,
)
.await?;
let start = std::time::SystemTime::now(); let start = std::time::SystemTime::now();
@ -54,19 +50,16 @@ async fn run() -> Result<(), Error> {
} }
let elapsed = start.elapsed().unwrap(); let elapsed = start.elapsed().unwrap();
let elapsed = (elapsed.as_secs() as f64) + (elapsed.subsec_millis() as f64) / 1000.0; let elapsed = (elapsed.as_secs() as f64) +
(elapsed.subsec_millis() as f64)/1000.0;
println!( println!("Downloaded {} bytes, {} MB/s", bytes, (bytes as f64)/(elapsed*1024.0*1024.0));
"Downloaded {} bytes, {} MB/s",
bytes,
(bytes as f64) / (elapsed * 1024.0 * 1024.0)
);
Ok(()) Ok(())
} }
fn main() { fn main() {
if let Err(err) = proxmox_async::runtime::main(run()) { if let Err(err) = pbs_runtime::main(run()) {
eprintln!("ERROR: {}", err); eprintln!("ERROR: {}", err);
} }
println!("DONE"); println!("DONE");

View File
use std::io::Write;
use std::path::PathBuf;
use std::thread; use std::thread;
use std::path::PathBuf;
use std::io::Write;
use anyhow::{bail, Error}; use anyhow::{bail, Error};
@ -19,15 +19,15 @@ use anyhow::{bail, Error};
// Error: detected shrunk file "./dyntest1/testfile0.dat" (22020096 < 12679380992) // Error: detected shrunk file "./dyntest1/testfile0.dat" (22020096 < 12679380992)
fn create_large_file(path: PathBuf) { fn create_large_file(path: PathBuf) {
println!("TEST {:?}", path); println!("TEST {:?}", path);
let mut file = std::fs::OpenOptions::new() let mut file = std::fs::OpenOptions::new()
.write(true) .write(true)
.create_new(true) .create_new(true)
.open(&path) .open(&path).unwrap();
.unwrap();
let buffer = vec![0u8; 64 * 1024]; let buffer = vec![0u8; 64*1024];
loop { loop {
for _ in 0..64 { for _ in 0..64 {
@ -40,6 +40,7 @@ fn create_large_file(path: PathBuf) {
} }
fn main() -> Result<(), Error> { fn main() -> Result<(), Error> {
let base = PathBuf::from("dyntest1"); let base = PathBuf::from("dyntest1");
let _ = std::fs::create_dir(&base); let _ = std::fs::create_dir(&base);
View File
@ -69,7 +69,7 @@ fn send_request(
} }
fn main() -> Result<(), Error> { fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run()) pbs_runtime::main(run())
} }
async fn run() -> Result<(), Error> { async fn run() -> Result<(), Error> {
View File
@ -69,7 +69,7 @@ fn send_request(
} }
fn main() -> Result<(), Error> { fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run()) pbs_runtime::main(run())
} }
async fn run() -> Result<(), Error> { async fn run() -> Result<(), Error> {
View File
@ -9,7 +9,7 @@ use tokio::net::{TcpListener, TcpStream};
use pbs_buildcfg::configdir; use pbs_buildcfg::configdir;
fn main() -> Result<(), Error> { fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run()) pbs_runtime::main(run())
} }
async fn run() -> Result<(), Error> { async fn run() -> Result<(), Error> {
View File
@ -5,7 +5,7 @@ use hyper::{Body, Request, Response};
use tokio::net::{TcpListener, TcpStream}; use tokio::net::{TcpListener, TcpStream};
fn main() -> Result<(), Error> { fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run()) pbs_runtime::main(run())
} }
async fn run() -> Result<(), Error> { async fn run() -> Result<(), Error> {
View File
@ -2,7 +2,7 @@ extern crate proxmox_backup;
// also see https://www.johndcook.com/blog/standard_deviation/ // also see https://www.johndcook.com/blog/standard_deviation/
use anyhow::Error; use anyhow::{Error};
use std::io::{Read, Write}; use std::io::{Read, Write};
use pbs_datastore::Chunker; use pbs_datastore::Chunker;
@ -21,6 +21,7 @@ struct ChunkWriter {
} }
impl ChunkWriter { impl ChunkWriter {
fn new(chunk_size: usize) -> Self { fn new(chunk_size: usize) -> Self {
ChunkWriter { ChunkWriter {
chunker: Chunker::new(chunk_size), chunker: Chunker::new(chunk_size),
@ -36,6 +37,7 @@ impl ChunkWriter {
} }
fn record_stat(&mut self, chunk_size: f64) { fn record_stat(&mut self, chunk_size: f64) {
self.chunk_count += 1; self.chunk_count += 1;
if self.chunk_count == 1 { if self.chunk_count == 1 {
@ -43,30 +45,28 @@ impl ChunkWriter {
self.m_new = chunk_size; self.m_new = chunk_size;
self.s_old = 0.0; self.s_old = 0.0;
} else { } else {
self.m_new = self.m_old + (chunk_size - self.m_old) / (self.chunk_count as f64); self.m_new = self.m_old + (chunk_size - self.m_old)/(self.chunk_count as f64);
self.s_new = self.s_old + (chunk_size - self.m_old) * (chunk_size - self.m_new); self.s_new = self.s_old +
(chunk_size - self.m_old)*(chunk_size - self.m_new);
// set up for next iteration // set up for next iteration
self.m_old = self.m_new; self.m_old = self.m_new;
self.s_old = self.s_new; self.s_old = self.s_new;
} }
let variance = if self.chunk_count > 1 { let variance = if self.chunk_count > 1 {
self.s_new / ((self.chunk_count - 1) as f64) self.s_new/((self.chunk_count -1)as f64)
} else { } else { 0.0 };
0.0
};
let std_deviation = variance.sqrt(); let std_deviation = variance.sqrt();
let deviation_per = (std_deviation * 100.0) / self.m_new; let deviation_per = (std_deviation*100.0)/self.m_new;
println!( println!("COUNT {:10} SIZE {:10} MEAN {:10} DEVIATION {:3}%", self.chunk_count, chunk_size, self.m_new as usize, deviation_per as usize);
"COUNT {:10} SIZE {:10} MEAN {:10} DEVIATION {:3}%",
self.chunk_count, chunk_size, self.m_new as usize, deviation_per as usize
);
} }
} }
impl Write for ChunkWriter { impl Write for ChunkWriter {
fn write(&mut self, data: &[u8]) -> std::result::Result<usize, std::io::Error> { fn write(&mut self, data: &[u8]) -> std::result::Result<usize, std::io::Error> {
let chunker = &mut self.chunker; let chunker = &mut self.chunker;
let pos = chunker.scan(data); let pos = chunker.scan(data);
@ -80,6 +80,7 @@ impl Write for ChunkWriter {
self.last_chunk = self.chunk_offset; self.last_chunk = self.chunk_offset;
Ok(pos) Ok(pos)
} else { } else {
self.chunk_offset += data.len(); self.chunk_offset += data.len();
Ok(data.len()) Ok(data.len())
@ -92,23 +93,23 @@ impl Write for ChunkWriter {
} }
fn main() -> Result<(), Error> { fn main() -> Result<(), Error> {
let mut file = std::fs::File::open("/dev/urandom")?; let mut file = std::fs::File::open("/dev/urandom")?;
let mut bytes = 0; let mut bytes = 0;
let mut buffer = [0u8; 64 * 1024]; let mut buffer = [0u8; 64*1024];
let mut writer = ChunkWriter::new(4096 * 1024); let mut writer = ChunkWriter::new(4096*1024);
loop { loop {
file.read_exact(&mut buffer)?; file.read_exact(&mut buffer)?;
bytes += buffer.len(); bytes += buffer.len();
writer.write_all(&buffer)?; writer.write_all(&buffer)?;
if bytes > 1024 * 1024 * 1024 { if bytes > 1024*1024*1024 { break; }
break;
}
} }
Ok(()) Ok(())
View File
@ -3,16 +3,17 @@ extern crate proxmox_backup;
use pbs_datastore::Chunker; use pbs_datastore::Chunker;
fn main() { fn main() {
let mut buffer = Vec::new(); let mut buffer = Vec::new();
for i in 0..20 * 1024 * 1024 { for i in 0..20*1024*1024 {
for j in 0..4 { for j in 0..4 {
let byte = ((i >> (j << 3)) & 0xff) as u8; let byte = ((i >> (j<<3))&0xff) as u8;
//println!("BYTE {}", byte); //println!("BYTE {}", byte);
buffer.push(byte); buffer.push(byte);
} }
} }
let mut chunker = Chunker::new(64 * 1024); let mut chunker = Chunker::new(64*1024);
let count = 5; let count = 5;
@ -38,14 +39,11 @@ fn main() {
} }
let elapsed = start.elapsed().unwrap(); let elapsed = start.elapsed().unwrap();
let elapsed = (elapsed.as_secs() as f64) + (elapsed.subsec_millis() as f64) / 1000.0; let elapsed = (elapsed.as_secs() as f64) +
(elapsed.subsec_millis() as f64)/1000.0;
let mbytecount = ((count * buffer.len()) as f64) / (1024.0 * 1024.0); let mbytecount = ((count*buffer.len()) as f64) / (1024.0*1024.0);
let avg_chunk_size = mbytecount / (chunk_count as f64); let avg_chunk_size = mbytecount/(chunk_count as f64);
let mbytes_per_sec = mbytecount / elapsed; let mbytes_per_sec = mbytecount/elapsed;
println!( println!("SPEED = {} MB/s, avg chunk size = {} KB", mbytes_per_sec, avg_chunk_size*1024.0);
"SPEED = {} MB/s, avg chunk size = {} KB",
mbytes_per_sec,
avg_chunk_size * 1024.0
);
} }
View File
@ -1,4 +1,4 @@
use anyhow::Error; use anyhow::{Error};
use futures::*; use futures::*;
extern crate proxmox_backup; extern crate proxmox_backup;
@ -13,12 +13,13 @@ use pbs_client::ChunkStream;
// Note: I can currently get about 830MB/s // Note: I can currently get about 830MB/s
fn main() { fn main() {
if let Err(err) = proxmox_async::runtime::main(run()) { if let Err(err) = pbs_runtime::main(run()) {
panic!("ERROR: {}", err); panic!("ERROR: {}", err);
} }
} }
async fn run() -> Result<(), Error> { async fn run() -> Result<(), Error> {
let file = tokio::fs::File::open("random-test.dat").await?; let file = tokio::fs::File::open("random-test.dat").await?;
let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new()) let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
@ -33,7 +34,7 @@ async fn run() -> Result<(), Error> {
let mut repeat = 0; let mut repeat = 0;
let mut stream_len = 0; let mut stream_len = 0;
while let Some(chunk) = chunk_stream.try_next().await? { while let Some(chunk) = chunk_stream.try_next().await? {
if chunk.len() > 16 * 1024 * 1024 { if chunk.len() > 16*1024*1024 {
panic!("Chunk too large {}", chunk.len()); panic!("Chunk too large {}", chunk.len());
} }
@ -43,19 +44,10 @@ async fn run() -> Result<(), Error> {
println!("Got chunk {}", chunk.len()); println!("Got chunk {}", chunk.len());
} }
let speed = let speed = ((stream_len*1_000_000)/(1024*1024))/(start_time.elapsed().as_micros() as usize);
((stream_len * 1_000_000) / (1024 * 1024)) / (start_time.elapsed().as_micros() as usize); println!("Uploaded {} chunks in {} seconds ({} MB/s).", repeat, start_time.elapsed().as_secs(), speed);
println!( println!("Average chunk size was {} bytes.", stream_len/repeat);
"Uploaded {} chunks in {} seconds ({} MB/s).", println!("time per request: {} microseconds.", (start_time.elapsed().as_micros())/(repeat as u128));
repeat,
start_time.elapsed().as_secs(),
speed
);
println!("Average chunk size was {} bytes.", stream_len / repeat);
println!(
"time per request: {} microseconds.",
(start_time.elapsed().as_micros()) / (repeat as u128)
);
Ok(()) Ok(())
} }
View File
@ -1,9 +1,10 @@
use anyhow::Error; use anyhow::{Error};
use pbs_api_types::{Authid, BackupNamespace, BackupType}; use pbs_client::{HttpClient, HttpClientOptions, BackupWriter};
use pbs_client::{BackupWriter, HttpClient, HttpClientOptions}; use pbs_api_types::Authid;
async fn upload_speed() -> Result<f64, Error> { async fn upload_speed() -> Result<f64, Error> {
let host = "localhost"; let host = "localhost";
let datastore = "store2"; let datastore = "store2";
@ -17,16 +18,7 @@ async fn upload_speed() -> Result<f64, Error> {
let backup_time = proxmox_time::epoch_i64(); let backup_time = proxmox_time::epoch_i64();
let client = BackupWriter::start( let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false, true).await?;
client,
None,
datastore,
&BackupNamespace::root(),
&(BackupType::Host, "speedtest".to_string(), backup_time).into(),
false,
true,
)
.await?;
println!("start upload speed test"); println!("start upload speed test");
let res = client.upload_speedtest(true).await?; let res = client.upload_speedtest(true).await?;
@ -34,8 +26,8 @@ async fn upload_speed() -> Result<f64, Error> {
Ok(res) Ok(res)
} }
fn main() { fn main() {
match proxmox_async::runtime::main(upload_speed()) { match pbs_runtime::main(upload_speed()) {
Ok(mbs) => { Ok(mbs) => {
println!("average upload speed: {} MB/s", mbs); println!("average upload speed: {} MB/s", mbs);
} }
View File
@ -7,15 +7,19 @@ description = "general API type helpers for PBS"
[dependencies] [dependencies]
anyhow = "1.0" anyhow = "1.0"
hex = "0.4.3"
lazy_static = "1.4" lazy_static = "1.4"
percent-encoding = "2.1" libc = "0.2"
regex = "1.5.5" nix = "0.19.1"
openssl = "0.10"
regex = "1.2"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_plain = "1"
proxmox = "0.14.0"
proxmox-lang = "1.0.0" proxmox-lang = "1.0.0"
proxmox-schema = { version = "1.2.1", features = [ "api-macro" ] } proxmox-schema = { version = "1.0.0", features = [ "api-macro" ] }
proxmox-serde = "0.1" proxmox-time = "1.0.0"
proxmox-time = "1.1.1"
proxmox-uuid = { version = "1.0.0", features = [ "serde" ] } proxmox-uuid = { version = "1.0.0", features = [ "serde" ] }
proxmox-rrd-api-types = { path = "../proxmox-rrd-api-types" }
proxmox-systemd = { path = "../proxmox-systemd" }
pbs-tools = { path = "../pbs-tools" }
View File
@ -73,17 +73,6 @@ constnamedbitmap! {
} }
} }
pub fn privs_to_priv_names(privs: u64) -> Vec<&'static str> {
PRIVILEGES
.iter()
.fold(Vec::new(), |mut priv_names, (name, value)| {
if value & privs != 0 {
priv_names.push(name);
}
priv_names
})
}
/// Admin always has all privileges. It can do everything except a few actions /// Admin always has all privileges. It can do everything except a few actions
/// which are limited to the 'root@pam` superuser /// which are limited to the 'root@pam` superuser
pub const ROLE_ADMIN: u64 = u64::MAX; pub const ROLE_ADMIN: u64 = u64::MAX;
View File
@ -1,78 +0,0 @@
//! Predefined Regular Expressions
//!
//! This is a collection of useful regular expressions
use lazy_static::lazy_static;
use regex::Regex;
#[rustfmt::skip]
#[macro_export]
macro_rules! IPV4OCTET { () => (r"(?:25[0-5]|(?:2[0-4]|1[0-9]|[1-9])?[0-9])") }
#[rustfmt::skip]
#[macro_export]
macro_rules! IPV6H16 { () => (r"(?:[0-9a-fA-F]{1,4})") }
#[rustfmt::skip]
#[macro_export]
macro_rules! IPV6LS32 { () => (concat!(r"(?:(?:", IPV4RE!(), "|", IPV6H16!(), ":", IPV6H16!(), "))" )) }
/// Returns the regular expression string to match IPv4 addresses
#[rustfmt::skip]
#[macro_export]
macro_rules! IPV4RE { () => (concat!(r"(?:(?:", IPV4OCTET!(), r"\.){3}", IPV4OCTET!(), ")")) }
/// Returns the regular expression string to match IPv6 addresses
#[rustfmt::skip]
#[macro_export]
macro_rules! IPV6RE { () => (concat!(r"(?:",
r"(?:(?:", r"(?:", IPV6H16!(), r":){6})", IPV6LS32!(), r")|",
r"(?:(?:", r"::(?:", IPV6H16!(), r":){5})", IPV6LS32!(), r")|",
r"(?:(?:(?:", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){4})", IPV6LS32!(), r")|",
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,1}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){3})", IPV6LS32!(), r")|",
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,2}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){2})", IPV6LS32!(), r")|",
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,3}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){1})", IPV6LS32!(), r")|",
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,4}", IPV6H16!(), r")?::", ")", IPV6LS32!(), r")|",
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,5}", IPV6H16!(), r")?::", ")", IPV6H16!(), r")|",
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,6}", IPV6H16!(), r")?::", ")))"))
}
/// Returns the regular expression string to match IP addresses (v4 or v6)
#[rustfmt::skip]
#[macro_export]
macro_rules! IPRE { () => (concat!(r"(?:", IPV4RE!(), "|", IPV6RE!(), ")")) }
/// Regular expression string to match IP addresses where IPv6 addresses require brackets around
/// them, while for IPv4 they are forbidden.
#[rustfmt::skip]
#[macro_export]
macro_rules! IPRE_BRACKET { () => (
concat!(r"(?:",
IPV4RE!(),
r"|\[(?:",
IPV6RE!(),
r")\]",
r")"))
}
lazy_static! {
pub static ref IP_REGEX: Regex = Regex::new(concat!(r"^", IPRE!(), r"$")).unwrap();
pub static ref IP_BRACKET_REGEX: Regex =
Regex::new(concat!(r"^", IPRE_BRACKET!(), r"$")).unwrap();
pub static ref SHA256_HEX_REGEX: Regex = Regex::new(r"^[a-f0-9]{64}$").unwrap();
pub static ref SYSTEMD_DATETIME_REGEX: Regex =
Regex::new(r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$").unwrap();
}
#[test]
fn test_regexes() {
assert!(IP_REGEX.is_match("127.0.0.1"));
assert!(IP_REGEX.is_match("::1"));
assert!(IP_REGEX.is_match("2014:b3a::27"));
assert!(IP_REGEX.is_match("2014:b3a::192.168.0.1"));
assert!(IP_REGEX.is_match("2014:b3a:0102:adf1:1234:4321:4afA:BCDF"));
assert!(IP_BRACKET_REGEX.is_match("127.0.0.1"));
assert!(IP_BRACKET_REGEX.is_match("[::1]"));
assert!(IP_BRACKET_REGEX.is_match("[2014:b3a::27]"));
assert!(IP_BRACKET_REGEX.is_match("[2014:b3a::192.168.0.1]"));
assert!(IP_BRACKET_REGEX.is_match("[2014:b3a:0102:adf1:1234:4321:4afA:BCDF]"));
}
View File
@@ -5,6 +5,8 @@
 use serde::{Deserialize, Serialize};

 use proxmox_schema::api;

+use pbs_tools::format::{as_fingerprint, bytes_as_fingerprint};
+
 #[api(default: "encrypt")]
 #[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
 #[serde(rename_all = "kebab-case")]
@@ -33,9 +35,6 @@ impl Fingerprint {
     pub fn bytes(&self) -> &[u8; 32] {
         &self.bytes
     }
-
-    pub fn signature(&self) -> String {
-        as_fingerprint(&self.bytes)
-    }
 }

 /// Display as short key ID
@@ -51,45 +50,8 @@ impl std::str::FromStr for Fingerprint {
     fn from_str(s: &str) -> Result<Self, Error> {
         let mut tmp = s.to_string();
         tmp.retain(|c| c != ':');
-        let mut bytes = [0u8; 32];
-        hex::decode_to_slice(&tmp, &mut bytes)?;
+        let bytes = proxmox::tools::hex_to_digest(&tmp)?;
         Ok(Fingerprint::new(bytes))
     }
 }
-
-fn as_fingerprint(bytes: &[u8]) -> String {
-    hex::encode(bytes)
-        .as_bytes()
-        .chunks(2)
-        .map(|v| unsafe { std::str::from_utf8_unchecked(v) }) // it's a hex string
-        .collect::<Vec<&str>>()
-        .join(":")
-}
-
-pub mod bytes_as_fingerprint {
-    use std::mem::MaybeUninit;
-
-    use serde::{Deserialize, Deserializer, Serializer};
-
-    pub fn serialize<S>(bytes: &[u8; 32], serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        let s = super::as_fingerprint(bytes);
-        serializer.serialize_str(&s)
-    }
-
-    pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; 32], D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        // TODO: more efficiently implement with a Visitor implementing visit_str using split() and
-        // hex::decode by-byte
-        let mut s = String::deserialize(deserializer)?;
-        s.retain(|c| c != ':');
-        let mut out = MaybeUninit::<[u8; 32]>::uninit();
-        hex::decode_to_slice(s.as_bytes(), unsafe { &mut (*out.as_mut_ptr())[..] })
-            .map_err(serde::de::Error::custom)?;
-        Ok(unsafe { out.assume_init() })
-    }
-}
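For context, the colon-separated fingerprint form produced by the helper being removed here looks like this (illustrative sketch, assuming the hex crate):

fn fingerprint_format_sketch() {
    let bytes = [0xdeu8, 0xad, 0xbe, 0xef];
    // pairs of hex digits joined by ':'
    let fp = hex::encode(bytes)
        .as_bytes()
        .chunks(2)
        .map(|v| std::str::from_utf8(v).unwrap()) // chunks of a hex string are valid UTF-8
        .collect::<Vec<_>>()
        .join(":");
    assert_eq!(fp, "de:ad:be:ef");
}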

File diff suppressed because it is too large

View File

@ -1,358 +0,0 @@
use anyhow::{bail, Error};
use proxmox_schema::{ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType};
/// Size units for byte sizes
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum SizeUnit {
Byte,
// SI (base 10)
KByte,
MByte,
GByte,
TByte,
PByte,
// IEC (base 2)
Kibi,
Mebi,
Gibi,
Tebi,
Pebi,
}
impl SizeUnit {
/// Returns the scaling factor
pub fn factor(&self) -> f64 {
match self {
SizeUnit::Byte => 1.0,
// SI (base 10)
SizeUnit::KByte => 1_000.0,
SizeUnit::MByte => 1_000_000.0,
SizeUnit::GByte => 1_000_000_000.0,
SizeUnit::TByte => 1_000_000_000_000.0,
SizeUnit::PByte => 1_000_000_000_000_000.0,
// IEC (base 2)
SizeUnit::Kibi => 1024.0,
SizeUnit::Mebi => 1024.0 * 1024.0,
SizeUnit::Gibi => 1024.0 * 1024.0 * 1024.0,
SizeUnit::Tebi => 1024.0 * 1024.0 * 1024.0 * 1024.0,
SizeUnit::Pebi => 1024.0 * 1024.0 * 1024.0 * 1024.0 * 1024.0,
}
}
    /// Gets the biggest possible unit that still has a value greater than zero before the decimal point.
    /// 'binary' specifies whether IEC (base 2) units should be used or SI (base 10) ones
pub fn auto_scale(size: f64, binary: bool) -> SizeUnit {
if binary {
let bits = 64 - (size as u64).leading_zeros();
match bits {
51.. => SizeUnit::Pebi,
41..=50 => SizeUnit::Tebi,
31..=40 => SizeUnit::Gibi,
21..=30 => SizeUnit::Mebi,
11..=20 => SizeUnit::Kibi,
_ => SizeUnit::Byte,
}
} else if size >= 1_000_000_000_000_000.0 {
SizeUnit::PByte
} else if size >= 1_000_000_000_000.0 {
SizeUnit::TByte
} else if size >= 1_000_000_000.0 {
SizeUnit::GByte
} else if size >= 1_000_000.0 {
SizeUnit::MByte
} else if size >= 1_000.0 {
SizeUnit::KByte
} else {
SizeUnit::Byte
}
}
}
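Worked example (illustrative only) of the bit-width bucketing above: 3 GiB has 32 significant bits, which lands in the 31..=40 range, i.e. Gibi:

#[test]
fn auto_scale_example() {
    let size = 3.0 * 1024.0 * 1024.0 * 1024.0; // 3 GiB in bytes
    assert_eq!(64 - (size as u64).leading_zeros(), 32);
    assert_eq!(SizeUnit::auto_scale(size, true), SizeUnit::Gibi);
    assert_eq!(SizeUnit::auto_scale(size, false), SizeUnit::GByte); // SI scaling picks GB instead
}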
/// Returns the string representation
impl std::fmt::Display for SizeUnit {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
SizeUnit::Byte => write!(f, "B"),
// SI (base 10)
SizeUnit::KByte => write!(f, "KB"),
SizeUnit::MByte => write!(f, "MB"),
SizeUnit::GByte => write!(f, "GB"),
SizeUnit::TByte => write!(f, "TB"),
SizeUnit::PByte => write!(f, "PB"),
// IEC (base 2)
SizeUnit::Kibi => write!(f, "KiB"),
SizeUnit::Mebi => write!(f, "MiB"),
SizeUnit::Gibi => write!(f, "GiB"),
SizeUnit::Tebi => write!(f, "TiB"),
SizeUnit::Pebi => write!(f, "PiB"),
}
}
}
/// Strips a trailing SizeUnit, including any trailing whitespace.
/// Supports both IEC and SI based scales; the B/b byte symbol is optional.
fn strip_unit(v: &str) -> (&str, SizeUnit) {
let v = v.strip_suffix(&['b', 'B'][..]).unwrap_or(v); // byte is implied anyway
let (v, binary) = match v.strip_suffix('i') {
Some(n) => (n, true),
None => (v, false),
};
let mut unit = SizeUnit::Byte;
#[rustfmt::skip]
let value = v.strip_suffix(|c: char| match c {
'k' | 'K' if !binary => { unit = SizeUnit::KByte; true }
'm' | 'M' if !binary => { unit = SizeUnit::MByte; true }
'g' | 'G' if !binary => { unit = SizeUnit::GByte; true }
't' | 'T' if !binary => { unit = SizeUnit::TByte; true }
'p' | 'P' if !binary => { unit = SizeUnit::PByte; true }
// binary (IEC recommended) variants
'k' | 'K' if binary => { unit = SizeUnit::Kibi; true }
'm' | 'M' if binary => { unit = SizeUnit::Mebi; true }
'g' | 'G' if binary => { unit = SizeUnit::Gibi; true }
't' | 'T' if binary => { unit = SizeUnit::Tebi; true }
'p' | 'P' if binary => { unit = SizeUnit::Pebi; true }
_ => false
}).unwrap_or(v).trim_end();
(value, unit)
}
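A few illustrative inputs for strip_unit; the numeric text is returned untouched (trailing whitespace trimmed) and the unit is recognized case-insensitively:

#[test]
fn strip_unit_examples() {
    assert!(matches!(strip_unit("1.5 KiB"), ("1.5", SizeUnit::Kibi)));
    assert!(matches!(strip_unit("2gb"), ("2", SizeUnit::GByte)));
    assert!(matches!(strip_unit("14"), ("14", SizeUnit::Byte))); // bare numbers default to bytes
}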
/// Byte size which can be displayed in a human friendly way
#[derive(Debug, Copy, Clone, UpdaterType)]
pub struct HumanByte {
    /// The significant value; it does not include any factor of the `unit`
size: f64,
/// The scale/unit of the value
unit: SizeUnit,
}
fn verify_human_byte(s: &str) -> Result<(), Error> {
match s.parse::<HumanByte>() {
Ok(_) => Ok(()),
Err(err) => bail!("byte-size parse error for '{}': {}", s, err),
}
}
impl ApiType for HumanByte {
const API_SCHEMA: Schema = StringSchema::new(
"Byte size with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).",
)
.format(&ApiStringFormat::VerifyFn(verify_human_byte))
.min_length(1)
.max_length(64)
.schema();
}
impl HumanByte {
/// Create instance with size and unit (size must be positive)
pub fn with_unit(size: f64, unit: SizeUnit) -> Result<Self, Error> {
if size < 0.0 {
bail!("byte size may not be negative");
}
Ok(HumanByte { size, unit })
}
/// Create a new instance with optimal binary unit computed
pub fn new_binary(size: f64) -> Self {
let unit = SizeUnit::auto_scale(size, true);
HumanByte {
size: size / unit.factor(),
unit,
}
}
/// Create a new instance with optimal decimal unit computed
pub fn new_decimal(size: f64) -> Self {
let unit = SizeUnit::auto_scale(size, false);
HumanByte {
size: size / unit.factor(),
unit,
}
}
/// Returns the size as u64 number of bytes
pub fn as_u64(&self) -> u64 {
self.as_f64() as u64
}
/// Returns the size as f64 number of bytes
pub fn as_f64(&self) -> f64 {
self.size * self.unit.factor()
}
/// Returns a copy with optimal binary unit computed
pub fn auto_scale_binary(self) -> Self {
HumanByte::new_binary(self.as_f64())
}
/// Returns a copy with optimal decimal unit computed
pub fn auto_scale_decimal(self) -> Self {
HumanByte::new_decimal(self.as_f64())
}
}
impl From<u64> for HumanByte {
fn from(v: u64) -> Self {
HumanByte::new_binary(v as f64)
}
}
impl From<usize> for HumanByte {
fn from(v: usize) -> Self {
HumanByte::new_binary(v as f64)
}
}
impl std::fmt::Display for HumanByte {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let precision = f.precision().unwrap_or(3) as f64;
let precision_factor = 1.0 * 10.0_f64.powf(precision);
// this could cause loss of information, rust has sadly no shortest-max-X flt2dec fmt yet
let size = ((self.size * precision_factor).round()) / precision_factor;
write!(f, "{} {}", size, self.unit)
}
}
impl std::str::FromStr for HumanByte {
type Err = Error;
fn from_str(v: &str) -> Result<Self, Error> {
let (v, unit) = strip_unit(v);
HumanByte::with_unit(v.parse()?, unit)
}
}
proxmox_serde::forward_deserialize_to_from_str!(HumanByte);
proxmox_serde::forward_serialize_to_display!(HumanByte);
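Since (de)serialization is forwarded to FromStr/Display above, HumanByte can be embedded directly in config types. A minimal round-trip sketch (serde_json and the struct name are illustrative assumptions, not part of this crate):

#[derive(serde::Serialize, serde::Deserialize)]
struct ExampleLimits {
    rate: HumanByte,
}

fn example_round_trip() -> Result<(), Error> {
    // "2 MiB" parses via FromStr and re-serializes via Display
    let l: ExampleLimits = serde_json::from_str(r#"{ "rate": "2 MiB" }"#)?;
    assert_eq!(l.rate.as_u64(), 2 * 1024 * 1024);
    assert_eq!(serde_json::to_string(&l)?, r#"{"rate":"2 MiB"}"#);
    Ok(())
}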
#[test]
fn test_human_byte_parser() -> Result<(), Error> {
assert!("-10".parse::<HumanByte>().is_err()); // negative size
fn do_test(v: &str, size: f64, unit: SizeUnit, as_str: &str) -> Result<(), Error> {
let h: HumanByte = v.parse()?;
if h.size != size {
bail!("got unexpected size for '{}' ({} != {})", v, h.size, size);
}
if h.unit != unit {
bail!(
"got unexpected unit for '{}' ({:?} != {:?})",
v,
h.unit,
unit
);
}
let new = h.to_string();
if &new != as_str {
bail!("to_string failed for '{}' ({:?} != {:?})", v, new, as_str);
}
Ok(())
}
fn test(v: &str, size: f64, unit: SizeUnit, as_str: &str) -> bool {
match do_test(v, size, unit, as_str) {
Ok(_) => true,
Err(err) => {
eprintln!("{}", err); // makes debugging easier
false
}
}
}
assert!(test("14", 14.0, SizeUnit::Byte, "14 B"));
assert!(test("14.4", 14.4, SizeUnit::Byte, "14.4 B"));
assert!(test("14.45", 14.45, SizeUnit::Byte, "14.45 B"));
assert!(test("14.456", 14.456, SizeUnit::Byte, "14.456 B"));
assert!(test("14.4567", 14.4567, SizeUnit::Byte, "14.457 B"));
let h: HumanByte = "1.2345678".parse()?;
assert_eq!(&format!("{:.0}", h), "1 B");
assert_eq!(&format!("{:.0}", h.as_f64()), "1"); // use as_f64 to get raw bytes without unit
assert_eq!(&format!("{:.1}", h), "1.2 B");
assert_eq!(&format!("{:.2}", h), "1.23 B");
assert_eq!(&format!("{:.3}", h), "1.235 B");
assert_eq!(&format!("{:.4}", h), "1.2346 B");
assert_eq!(&format!("{:.5}", h), "1.23457 B");
assert_eq!(&format!("{:.6}", h), "1.234568 B");
assert_eq!(&format!("{:.7}", h), "1.2345678 B");
assert_eq!(&format!("{:.8}", h), "1.2345678 B");
assert!(test(
"987654321",
987654321.0,
SizeUnit::Byte,
"987654321 B"
));
assert!(test("1300b", 1300.0, SizeUnit::Byte, "1300 B"));
assert!(test("1300B", 1300.0, SizeUnit::Byte, "1300 B"));
assert!(test("1300 B", 1300.0, SizeUnit::Byte, "1300 B"));
assert!(test("1300 b", 1300.0, SizeUnit::Byte, "1300 B"));
assert!(test("1.5KB", 1.5, SizeUnit::KByte, "1.5 KB"));
assert!(test("1.5kb", 1.5, SizeUnit::KByte, "1.5 KB"));
assert!(test("1.654321MB", 1.654_321, SizeUnit::MByte, "1.654 MB"));
assert!(test("2.0GB", 2.0, SizeUnit::GByte, "2 GB"));
assert!(test("1.4TB", 1.4, SizeUnit::TByte, "1.4 TB"));
assert!(test("1.4tb", 1.4, SizeUnit::TByte, "1.4 TB"));
assert!(test("2KiB", 2.0, SizeUnit::Kibi, "2 KiB"));
assert!(test("2Ki", 2.0, SizeUnit::Kibi, "2 KiB"));
assert!(test("2kib", 2.0, SizeUnit::Kibi, "2 KiB"));
assert!(test("2.3454MiB", 2.3454, SizeUnit::Mebi, "2.345 MiB"));
assert!(test("2.3456MiB", 2.3456, SizeUnit::Mebi, "2.346 MiB"));
assert!(test("4gib", 4.0, SizeUnit::Gibi, "4 GiB"));
Ok(())
}
#[test]
fn test_human_byte_auto_unit_decimal() {
fn convert(b: u64) -> String {
HumanByte::new_decimal(b as f64).to_string()
}
assert_eq!(convert(987), "987 B");
assert_eq!(convert(1022), "1.022 KB");
assert_eq!(convert(9_000), "9 KB");
assert_eq!(convert(1_000), "1 KB");
assert_eq!(convert(1_000_000), "1 MB");
assert_eq!(convert(1_000_000_000), "1 GB");
assert_eq!(convert(1_000_000_000_000), "1 TB");
assert_eq!(convert(1_000_000_000_000_000), "1 PB");
assert_eq!(convert((1 << 30) + 103 * (1 << 20)), "1.182 GB");
assert_eq!(convert((1 << 30) + 128 * (1 << 20)), "1.208 GB");
assert_eq!(convert((2 << 50) + 500 * (1 << 40)), "2.802 PB");
}
#[test]
fn test_human_byte_auto_unit_binary() {
fn convert(b: u64) -> String {
HumanByte::from(b).to_string()
}
assert_eq!(convert(0), "0 B");
assert_eq!(convert(987), "987 B");
assert_eq!(convert(1022), "1022 B");
assert_eq!(convert(9_000), "8.789 KiB");
assert_eq!(convert(10_000_000), "9.537 MiB");
assert_eq!(convert(10_000_000_000), "9.313 GiB");
assert_eq!(convert(10_000_000_000_000), "9.095 TiB");
assert_eq!(convert(1 << 10), "1 KiB");
assert_eq!(convert((1 << 10) * 10), "10 KiB");
assert_eq!(convert(1 << 20), "1 MiB");
assert_eq!(convert(1 << 30), "1 GiB");
assert_eq!(convert(1 << 40), "1 TiB");
assert_eq!(convert(1 << 50), "1 PiB");
assert_eq!(convert((1 << 30) + 103 * (1 << 20)), "1.101 GiB");
assert_eq!(convert((1 << 30) + 128 * (1 << 20)), "1.125 GiB");
assert_eq!(convert((1 << 40) + 128 * (1 << 30)), "1.125 TiB");
assert_eq!(convert((2 << 50) + 512 * (1 << 40)), "2.5 PiB");
}

View File

@@ -1,24 +1,18 @@
-use anyhow::format_err;
-use std::str::FromStr;
-
-use regex::Regex;
 use serde::{Deserialize, Serialize};

 use proxmox_schema::*;

 use crate::{
-    Authid, BackupNamespace, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA,
-    BACKUP_NAMESPACE_SCHEMA, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
-    NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA,
-    SINGLE_LINE_COMMENT_SCHEMA,
+    Userid, Authid, REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
+    SINGLE_LINE_COMMENT_SCHEMA, PROXMOX_SAFE_ID_FORMAT, DATASTORE_SCHEMA,
 };

-const_regex! {
+const_regex!{
     /// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
     pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
-    /// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:(?:LOCAL_NS_ANCHOR:)ACTUAL_JOB_ID'
-    pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r")(?::(", BACKUP_NS_RE!(), r"))?:");
+    /// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:ACTUAL_JOB_ID'
+    pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
 }
 pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
@@ -27,41 +21,34 @@ pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
     .max_length(32)
     .schema();

-pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run sync job at specified schedule.")
-    .format(&ApiStringFormat::VerifyFn(
-        proxmox_time::verify_calendar_event,
-    ))
+pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
+    "Run sync job at specified schedule.")
+    .format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
     .type_text("<calendar-event>")
     .schema();

-pub const GC_SCHEDULE_SCHEMA: Schema =
-    StringSchema::new("Run garbage collection job at specified schedule.")
-        .format(&ApiStringFormat::VerifyFn(
-            proxmox_time::verify_calendar_event,
-        ))
-        .type_text("<calendar-event>")
-        .schema();
-
-pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run prune job at specified schedule.")
-    .format(&ApiStringFormat::VerifyFn(
-        proxmox_time::verify_calendar_event,
-    ))
+pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
+    "Run garbage collection job at specified schedule.")
+    .format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
     .type_text("<calendar-event>")
     .schema();

-pub const VERIFICATION_SCHEDULE_SCHEMA: Schema =
-    StringSchema::new("Run verify job at specified schedule.")
-        .format(&ApiStringFormat::VerifyFn(
-            proxmox_time::verify_calendar_event,
-        ))
-        .type_text("<calendar-event>")
-        .schema();
+pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
+    "Run prune job at specified schedule.")
+    .format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
+    .type_text("<calendar-event>")
+    .schema();
+
+pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new(
+    "Run verify job at specified schedule.")
+    .format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
+    .type_text("<calendar-event>")
+    .schema();

 pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
-    "Delete vanished backups. This remove the local copy if the remote backup was deleted.",
-)
-.default(false)
-.schema();
+    "Delete vanished backups. This remove the local copy if the remote backup was deleted.")
+    .default(true)
+    .schema();

 #[api(
     properties: {
@@ -87,17 +74,17 @@ pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
     },
 }
 )]
-#[derive(Serialize, Deserialize, Default)]
-#[serde(rename_all = "kebab-case")]
+#[derive(Serialize,Deserialize,Default)]
+#[serde(rename_all="kebab-case")]
 /// Job Scheduling Status
 pub struct JobScheduleStatus {
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub next_run: Option<i64>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub last_run_state: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub last_run_upid: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub last_run_endtime: Option<i64>,
 }
@@ -141,23 +128,20 @@ pub struct DatastoreNotify {
     pub sync: Option<Notify>,
 }

-pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema =
-    StringSchema::new("Datastore notification setting")
-        .format(&ApiStringFormat::PropertyString(
-            &DatastoreNotify::API_SCHEMA,
-        ))
-        .schema();
+pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
+    "Datastore notification setting")
+    .format(&ApiStringFormat::PropertyString(&DatastoreNotify::API_SCHEMA))
+    .schema();

 pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
-    "Do not verify backups that are already verified if their verification is not outdated.",
-)
-.default(true)
-.schema();
+    "Do not verify backups that are already verified if their verification is not outdated.")
+    .default(true)
+    .schema();

-pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema =
-    IntegerSchema::new("Days after that a verification becomes outdated. (0 is deprecated)'")
-        .minimum(0)
-        .schema();
+pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new(
+    "Days after that a verification becomes outdated")
+    .minimum(1)
+    .schema();

 #[api(
     properties: {
@@ -183,53 +167,29 @@ pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema =
             optional: true,
             schema: VERIFICATION_SCHEDULE_SCHEMA,
         },
-        ns: {
-            optional: true,
-            schema: BACKUP_NAMESPACE_SCHEMA,
-        },
-        "max-depth": {
-            optional: true,
-            schema: crate::NS_MAX_DEPTH_SCHEMA,
-        },
     }
 )]
-#[derive(Serialize, Deserialize, Updater)]
-#[serde(rename_all = "kebab-case")]
+#[derive(Serialize,Deserialize,Updater)]
+#[serde(rename_all="kebab-case")]
 /// Verification Job
 pub struct VerificationJobConfig {
     /// unique ID to address this job
     #[updater(skip)]
     pub id: String,
-    /// the datastore ID this verification job affects
+    /// the datastore ID this verificaiton job affects
     pub store: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     /// if not set to false, check the age of the last snapshot verification to filter
     /// out recent ones, depending on 'outdated_after' configuration.
     pub ignore_verified: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     /// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false.
     pub outdated_after: Option<i64>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub comment: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     /// when to schedule this job in calendar event notation
     pub schedule: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none", default)]
-    /// on which backup namespace to run the verification recursively
-    pub ns: Option<BackupNamespace>,
-    #[serde(skip_serializing_if = "Option::is_none", default)]
-    /// how deep the verify should go from the `ns` level downwards. Passing 0 verifies only the
-    /// snapshots on the same level as the passed `ns`, or the datastore root if none.
-    pub max_depth: Option<usize>,
-}
-
-impl VerificationJobConfig {
-    pub fn acl_path(&self) -> Vec<&str> {
-        match self.ns.as_ref() {
-            Some(ns) => ns.acl_path(&self.store),
-            None => vec!["datastore", &self.store],
-        }
-    }
 }
 #[api(
@@ -242,8 +202,8 @@ impl VerificationJobConfig {
     },
 },
 )]
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
+#[derive(Serialize,Deserialize)]
+#[serde(rename_all="kebab-case")]
 /// Status of Verification Job
 pub struct VerificationJobStatus {
     #[serde(flatten)]
@@ -282,42 +242,24 @@ pub struct VerificationJobStatus {
             optional: true,
             type: Userid,
         },
-        "group-filter": {
-            schema: GROUP_FILTER_LIST_SCHEMA,
-            optional: true,
-        },
-        ns: {
-            type: BackupNamespace,
-            optional: true,
-        },
-        "max-depth": {
-            schema: crate::NS_MAX_DEPTH_SCHEMA,
-            optional: true,
-        },
     }
 )]
-#[derive(Serialize, Deserialize, Clone, Updater)]
-#[serde(rename_all = "kebab-case")]
+#[derive(Serialize,Deserialize,Clone,Updater)]
+#[serde(rename_all="kebab-case")]
 /// Tape Backup Job Setup
 pub struct TapeBackupJobSetup {
     pub store: String,
     pub pool: String,
     pub drive: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub eject_media: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub export_media_set: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub latest_only: Option<bool>,
     /// Send job email notification to this user
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub notify_user: Option<Userid>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub group_filter: Option<Vec<GroupFilter>>,
-    #[serde(skip_serializing_if = "Option::is_none", default)]
-    pub ns: Option<BackupNamespace>,
-    #[serde(skip_serializing_if = "Option::is_none", default)]
-    pub max_depth: Option<usize>,
 }

 #[api(
@@ -338,17 +280,17 @@ pub struct TapeBackupJobSetup {
     },
 }
 )]
-#[derive(Serialize, Deserialize, Clone, Updater)]
-#[serde(rename_all = "kebab-case")]
+#[derive(Serialize,Deserialize,Clone,Updater)]
+#[serde(rename_all="kebab-case")]
 /// Tape Backup Job
 pub struct TapeBackupJobConfig {
     #[updater(skip)]
     pub id: String,
     #[serde(flatten)]
     pub setup: TapeBackupJobSetup,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub comment: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub schedule: Option<String>,
 }

@@ -362,8 +304,8 @@ pub struct TapeBackupJobConfig {
     },
 },
 )]
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
+#[derive(Serialize,Deserialize)]
+#[serde(rename_all="kebab-case")]
 /// Status of Tape Backup Job
 pub struct TapeBackupJobStatus {
     #[serde(flatten)]
@@ -371,62 +313,10 @@ pub struct TapeBackupJobStatus {
     #[serde(flatten)]
     pub status: JobScheduleStatus,
     /// Next tape used (best guess)
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub next_media_label: Option<String>,
 }
#[derive(Clone, Debug)]
/// Filter for matching `BackupGroup`s, for use with `BackupGroup::filter`.
pub enum GroupFilter {
/// BackupGroup type - either `vm`, `ct`, or `host`.
BackupType(BackupType),
/// Full identifier of BackupGroup, including type
Group(String),
/// A regular expression matched against the full identifier of the BackupGroup
Regex(Regex),
}
impl std::str::FromStr for GroupFilter {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.split_once(':') {
Some(("group", value)) => BACKUP_GROUP_SCHEMA.parse_simple_value(value).map(|_| GroupFilter::Group(value.to_string())),
Some(("type", value)) => Ok(GroupFilter::BackupType(value.parse()?)),
Some(("regex", value)) => Ok(GroupFilter::Regex(Regex::new(value)?)),
Some((ty, _value)) => Err(format_err!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty)),
None => Err(format_err!("input doesn't match expected format '<group:GROUP||type:<vm|ct|host>|regex:REGEX>'")),
}.map_err(|err| format_err!("'{}' - {}", s, err))
}
}
// used for serializing below, caution!
impl std::fmt::Display for GroupFilter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
GroupFilter::BackupType(backup_type) => write!(f, "type:{}", backup_type),
GroupFilter::Group(backup_group) => write!(f, "group:{}", backup_group),
GroupFilter::Regex(regex) => write!(f, "regex:{}", regex.as_str()),
}
}
}
proxmox_serde::forward_deserialize_to_from_str!(GroupFilter);
proxmox_serde::forward_serialize_to_display!(GroupFilter);
fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> {
GroupFilter::from_str(input).map(|_| ())
}
pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new(
"Group filter based on group identifier ('group:GROUP'), group type ('type:<vm|ct|host>'), or regex ('regex:RE').")
.format(&ApiStringFormat::VerifyFn(verify_group_filter))
.type_text("<type:<vm|ct|host>|group:GROUP|regex:RE>")
.schema();
pub const GROUP_FILTER_LIST_SCHEMA: Schema =
ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema();
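For reference, the dropped GroupFilter accepts exactly the three prefixes documented above; an illustrative sketch against the 2.2.x type:

fn group_filter_examples() -> Result<(), anyhow::Error> {
    use std::str::FromStr;
    // type filter: vm, ct or host
    assert!(matches!(GroupFilter::from_str("type:vm")?, GroupFilter::BackupType(_)));
    // exact group match, validated against BACKUP_GROUP_SCHEMA
    assert!(matches!(GroupFilter::from_str("group:vm/100")?, GroupFilter::Group(_)));
    // regular expression over the full group identifier
    assert!(matches!(GroupFilter::from_str(r"regex:^vm/1\d+$")?, GroupFilter::Regex(_)));
    // unknown prefixes are rejected
    assert!(GroupFilter::from_str("bogus:foo").is_err());
    Ok(())
}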
 #[api(
     properties: {
         id: {
@@ -435,10 +325,6 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema =
         store: {
             schema: DATASTORE_SCHEMA,
         },
-        ns: {
-            type: BackupNamespace,
-            optional: true,
-        },
         "owner": {
             type: Authid,
             optional: true,
@@ -449,71 +335,37 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema =
         "remote-store": {
             schema: DATASTORE_SCHEMA,
         },
-        "remote-ns": {
-            type: BackupNamespace,
-            optional: true,
-        },
         "remove-vanished": {
             schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
             optional: true,
         },
-        "max-depth": {
-            schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
-            optional: true,
-        },
         comment: {
             optional: true,
             schema: SINGLE_LINE_COMMENT_SCHEMA,
         },
-        limit: {
-            type: RateLimitConfig,
-        },
         schedule: {
             optional: true,
             schema: SYNC_SCHEDULE_SCHEMA,
         },
-        "group-filter": {
-            schema: GROUP_FILTER_LIST_SCHEMA,
-            optional: true,
-        },
     }
 )]
-#[derive(Serialize, Deserialize, Clone, Updater)]
-#[serde(rename_all = "kebab-case")]
+#[derive(Serialize,Deserialize,Clone,Updater)]
+#[serde(rename_all="kebab-case")]
 /// Sync Job
 pub struct SyncJobConfig {
     #[updater(skip)]
     pub id: String,
     pub store: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub ns: Option<BackupNamespace>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
    pub owner: Option<Authid>,
     pub remote: String,
     pub remote_store: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub remote_ns: Option<BackupNamespace>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub remove_vanished: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub max_depth: Option<usize>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub comment: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub schedule: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub group_filter: Option<Vec<GroupFilter>>,
-    #[serde(flatten)]
-    pub limit: RateLimitConfig,
-}
-
-impl SyncJobConfig {
-    pub fn acl_path(&self) -> Vec<&str> {
-        match self.ns.as_ref() {
-            Some(ns) => ns.acl_path(&self.store),
-            None => vec!["datastore", &self.store],
-        }
-    }
 }

 #[api(
@@ -526,8 +378,9 @@ impl SyncJobConfig {
     },
 },
 )]
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
+
+#[derive(Serialize,Deserialize)]
+#[serde(rename_all="kebab-case")]
 /// Status of Sync Job
 pub struct SyncJobStatus {
     #[serde(flatten)]
@@ -535,186 +388,3 @@ pub struct SyncJobStatus {
     #[serde(flatten)]
     pub status: JobScheduleStatus,
 }
/// These are sometimes used separately, without `ns`/`max-depth`, in the API, specifically in
/// the call to prune a specific group, where `max-depth` makes no sense.
#[api(
properties: {
"keep-last": {
schema: crate::PRUNE_SCHEMA_KEEP_LAST,
optional: true,
},
"keep-hourly": {
schema: crate::PRUNE_SCHEMA_KEEP_HOURLY,
optional: true,
},
"keep-daily": {
schema: crate::PRUNE_SCHEMA_KEEP_DAILY,
optional: true,
},
"keep-weekly": {
schema: crate::PRUNE_SCHEMA_KEEP_WEEKLY,
optional: true,
},
"keep-monthly": {
schema: crate::PRUNE_SCHEMA_KEEP_MONTHLY,
optional: true,
},
"keep-yearly": {
schema: crate::PRUNE_SCHEMA_KEEP_YEARLY,
optional: true,
},
}
)]
#[derive(Serialize, Deserialize, Default, Updater)]
#[serde(rename_all = "kebab-case")]
/// Common pruning options
pub struct KeepOptions {
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_last: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_hourly: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_daily: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_weekly: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_monthly: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_yearly: Option<u64>,
}
impl KeepOptions {
pub fn keeps_something(&self) -> bool {
self.keep_last.unwrap_or(0)
+ self.keep_hourly.unwrap_or(0)
+ self.keep_daily.unwrap_or(0)
+ self.keep_weekly.unwrap_or(0)
+ self.keep_monthly.unwrap_or(0)
+ self.keep_yearly.unwrap_or(0)
> 0
}
}
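Illustrative sketch: keeps_something() simply answers whether any retention count is set and non-zero:

fn keep_options_example() {
    let mut opts = KeepOptions::default();
    assert!(!opts.keeps_something()); // nothing configured, a prune would keep nothing
    opts.keep_daily = Some(7);
    assert!(opts.keeps_something());
}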
#[api(
properties: {
keep: {
type: KeepOptions,
},
ns: {
type: BackupNamespace,
optional: true,
},
"max-depth": {
schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
optional: true,
},
}
)]
#[derive(Serialize, Deserialize, Default, Updater)]
#[serde(rename_all = "kebab-case")]
/// Common pruning options
pub struct PruneJobOptions {
#[serde(flatten)]
pub keep: KeepOptions,
/// The (optional) recursion depth
#[serde(skip_serializing_if = "Option::is_none")]
pub max_depth: Option<usize>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ns: Option<BackupNamespace>,
}
impl PruneJobOptions {
pub fn keeps_something(&self) -> bool {
self.keep.keeps_something()
}
pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> {
match &self.ns {
Some(ns) => ns.acl_path(store),
None => vec!["datastore", store],
}
}
}
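Illustrative sketch of the namespace-aware ACL path resolution above:

fn acl_path_example() {
    let opts = PruneJobOptions::default();
    // without a namespace, the ACL object path is just the datastore
    assert_eq!(opts.acl_path("store1"), vec!["datastore", "store1"]);
}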
#[api(
properties: {
disable: {
type: Boolean,
optional: true,
default: false,
},
id: {
schema: JOB_ID_SCHEMA,
},
store: {
schema: DATASTORE_SCHEMA,
},
schedule: {
schema: PRUNE_SCHEDULE_SCHEMA,
optional: true,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
options: {
type: PruneJobOptions,
},
},
)]
#[derive(Deserialize, Serialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// Prune configuration.
pub struct PruneJobConfig {
/// unique ID to address this job
#[updater(skip)]
pub id: String,
pub store: String,
/// Disable this job.
#[serde(default, skip_serializing_if = "is_false")]
#[updater(serde(skip_serializing_if = "Option::is_none"))]
pub disable: bool,
pub schedule: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
#[serde(flatten)]
pub options: PruneJobOptions,
}
impl PruneJobConfig {
pub fn acl_path(&self) -> Vec<&str> {
self.options.acl_path(&self.store)
}
}
fn is_false(b: &bool) -> bool {
!b
}
#[api(
properties: {
config: {
type: PruneJobConfig,
},
status: {
type: JobScheduleStatus,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Status of prune job
pub struct PruneJobStatus {
#[serde(flatten)]
pub config: PruneJobConfig,
#[serde(flatten)]
pub status: JobScheduleStatus,
}

View File

@@ -39,7 +39,7 @@ impl Default for Kdf {
 /// Encryption Key Information
 pub struct KeyInfo {
     /// Path to key (if stored in a file)
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub path: Option<String>,
     pub kdf: Kdf,
     /// Key creation time
@@ -47,9 +47,10 @@ pub struct KeyInfo {
     /// Key modification time
     pub modified: i64,
     /// Key fingerprint
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub fingerprint: Option<String>,
     /// Password hint
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub hint: Option<String>,
 }

View File

@@ -1,14 +1,12 @@
 //! Basic API types used by most of the PBS code.

 use serde::{Deserialize, Serialize};
+use anyhow::bail;

-pub mod common_regex;
-pub mod percent_encoding;
-
 use proxmox_schema::{
-    api, const_regex, ApiStringFormat, ApiType, ArraySchema, ReturnType, Schema, StringSchema,
+    api, const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, ReturnType,
 };
-use proxmox_time::parse_daily_duration;
+use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE};

 #[rustfmt::skip]
 #[macro_export]
@@ -26,62 +24,26 @@ macro_rules! BACKUP_TYPE_RE { () => (r"(?:host|vm|ct)") }
 #[macro_export]
 macro_rules! BACKUP_TIME_RE { () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z") }
#[rustfmt::skip]
#[macro_export]
macro_rules! BACKUP_NS_RE {
() => (
concat!("(?:",
"(?:", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}", PROXMOX_SAFE_ID_REGEX_STR!(),
")?")
);
}
#[rustfmt::skip]
#[macro_export]
macro_rules! BACKUP_NS_PATH_RE {
() => (
concat!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/")
);
}
 #[rustfmt::skip]
 #[macro_export]
 macro_rules! SNAPSHOT_PATH_REGEX_STR {
     () => (
-        concat!(
-            r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")",
-        )
+        concat!(r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")")
     );
 }
#[rustfmt::skip]
#[macro_export]
macro_rules! GROUP_OR_SNAPSHOT_PATH_REGEX_STR {
() => {
concat!(
r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")(?:/(", BACKUP_TIME_RE!(), r"))?",
)
};
}
 mod acl;
 pub use acl::*;

 mod datastore;
 pub use datastore::*;

-mod human_byte;
-pub use human_byte::HumanByte;
-
 mod jobs;
 pub use jobs::*;

 mod key_derivation;
 pub use key_derivation::{Kdf, KeyInfo};

-mod maintenance;
-pub use maintenance::*;
-
 mod network;
 pub use network::*;

@@ -101,25 +63,20 @@ pub use user::*;
 pub use proxmox_schema::upid::*;

 mod crypto;
-pub use crypto::{bytes_as_fingerprint, CryptMode, Fingerprint};
+pub use crypto::{CryptMode, Fingerprint};

 pub mod file_restore;

-mod openid;
-pub use openid::*;
-
 mod remote;
 pub use remote::*;

 mod tape;
 pub use tape::*;

-mod traffic_control;
-pub use traffic_control::*;
-
 mod zfs;
 pub use zfs::*;

 #[rustfmt::skip]
 #[macro_use]
 mod local_macros {
@@ -155,9 +112,6 @@ const_regex! {
     pub FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";

-    // just a rough check - dummy acceptor is used before persisting
-    pub OPENSSL_CIPHERS_REGEX = r"^[0-9A-Za-z_:, +!\-@=.]+$";
-
     /// Regex for safe identifiers.
     ///
     /// This
@@ -169,8 +123,6 @@ const_regex! {
     pub SINGLE_LINE_COMMENT_REGEX = r"^[[:^cntrl:]]*$";

-    pub MULTI_LINE_COMMENT_REGEX = r"(?m)^([[:^cntrl:]]*)$";
-
     pub BACKUP_REPO_URL_REGEX = concat!(
         r"^^(?:(?:(",
         USER_ID_REGEX_STR!(), "|", APITOKEN_ID_REGEX_STR!(),
@@ -192,33 +144,29 @@ pub const CIDR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_REGEX);
 pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
 pub const PASSWORD_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PASSWORD_REGEX);
 pub const UUID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&UUID_REGEX);
-pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);
-pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX);
-pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX);
+pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);
+pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX);
+pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX);
 pub const HOSTNAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOSTNAME_REGEX);

-pub const OPENSSL_CIPHERS_TLS_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&OPENSSL_CIPHERS_REGEX);
-
-pub const DNS_ALIAS_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_ALIAS_REGEX);
-
-pub const DAILY_DURATION_FORMAT: ApiStringFormat =
-    ApiStringFormat::VerifyFn(|s| parse_daily_duration(s).map(drop));
+pub const DNS_ALIAS_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&DNS_ALIAS_REGEX);

 pub const SEARCH_DOMAIN_SCHEMA: Schema =
     StringSchema::new("Search domain for host-name lookup.").schema();

-pub const FIRST_DNS_SERVER_SCHEMA: Schema = StringSchema::new("First name server IP address.")
+pub const FIRST_DNS_SERVER_SCHEMA: Schema =
+    StringSchema::new("First name server IP address.")
     .format(&IP_FORMAT)
     .schema();

-pub const SECOND_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Second name server IP address.")
+pub const SECOND_DNS_SERVER_SCHEMA: Schema =
+    StringSchema::new("Second name server IP address.")
     .format(&IP_FORMAT)
     .schema();

-pub const THIRD_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Third name server IP address.")
+pub const THIRD_DNS_SERVER_SCHEMA: Schema =
+    StringSchema::new("Third name server IP address.")
     .format(&IP_FORMAT)
     .schema();
@@ -226,47 +174,45 @@ pub const HOSTNAME_SCHEMA: Schema = StringSchema::new("Hostname (as defined in R
     .format(&HOSTNAME_FORMAT)
     .schema();

-pub const OPENSSL_CIPHERS_TLS_1_2_SCHEMA: Schema =
-    StringSchema::new("OpenSSL cipher list used by the proxy for TLS <= 1.2")
-        .format(&OPENSSL_CIPHERS_TLS_FORMAT)
-        .schema();
-
-pub const OPENSSL_CIPHERS_TLS_1_3_SCHEMA: Schema =
-    StringSchema::new("OpenSSL ciphersuites list used by the proxy for TLS 1.3")
-        .format(&OPENSSL_CIPHERS_TLS_FORMAT)
-        .schema();
-
-pub const DNS_NAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_NAME_REGEX);
-
-pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX);
+pub const DNS_NAME_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&DNS_NAME_REGEX);
+
+pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX);

 pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP address.")
     .format(&DNS_NAME_OR_IP_FORMAT)
     .schema();

 pub const NODE_SCHEMA: Schema = StringSchema::new("Node name (or 'localhost')")
-    .format(&HOSTNAME_FORMAT)
+    .format(&ApiStringFormat::VerifyFn(|node| {
+        if node == "localhost" || node == proxmox::tools::nodename() {
+            Ok(())
+        } else {
+            bail!("no such node '{}'", node);
+        }
+    }))
     .schema();

 pub const TIME_ZONE_SCHEMA: Schema = StringSchema::new(
-    "Time zone. The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names.",
-)
-.format(&SINGLE_LINE_COMMENT_FORMAT)
-.min_length(2)
-.max_length(64)
-.schema();
+    "Time zone. The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names.")
+    .format(&SINGLE_LINE_COMMENT_FORMAT)
+    .min_length(2)
+    .max_length(64)
+    .schema();

-pub const BLOCKDEVICE_NAME_SCHEMA: Schema =
-    StringSchema::new("Block device name (/sys/block/<name>).")
-        .format(&BLOCKDEVICE_NAME_FORMAT)
-        .min_length(3)
-        .max_length(64)
-        .schema();
+pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name (/sys/block/<name>).")
+    .format(&BLOCKDEVICE_NAME_FORMAT)
+    .min_length(3)
+    .max_length(64)
+    .schema();

-pub const DISK_ARRAY_SCHEMA: Schema =
-    ArraySchema::new("Disk name list.", &BLOCKDEVICE_NAME_SCHEMA).schema();
+pub const DISK_ARRAY_SCHEMA: Schema = ArraySchema::new(
+    "Disk name list.", &BLOCKDEVICE_NAME_SCHEMA)
+    .schema();

-pub const DISK_LIST_SCHEMA: Schema = StringSchema::new("A list of disk names, comma separated.")
+pub const DISK_LIST_SCHEMA: Schema = StringSchema::new(
+    "A list of disk names, comma separated.")
     .format(&ApiStringFormat::PropertyString(&DISK_ARRAY_SCHEMA))
     .schema();
@@ -306,21 +252,15 @@ pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (singl
     .format(&SINGLE_LINE_COMMENT_FORMAT)
     .schema();

-pub const MULTI_LINE_COMMENT_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&MULTI_LINE_COMMENT_REGEX);
-
-pub const MULTI_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (multiple lines).")
-    .format(&MULTI_LINE_COMMENT_FORMAT)
-    .schema();
-
-pub const SUBSCRIPTION_KEY_SCHEMA: Schema =
-    StringSchema::new("Proxmox Backup Server subscription key.")
-        .format(&SUBSCRIPTION_KEY_FORMAT)
-        .min_length(15)
-        .max_length(16)
-        .schema();
-
-pub const SERVICE_ID_SCHEMA: Schema = StringSchema::new("Service ID.").max_length(256).schema();
+pub const SUBSCRIPTION_KEY_SCHEMA: Schema = StringSchema::new("Proxmox Backup Server subscription key.")
+    .format(&SUBSCRIPTION_KEY_FORMAT)
+    .min_length(15)
+    .max_length(16)
+    .schema();
+
+pub const SERVICE_ID_SCHEMA: Schema = StringSchema::new("Service ID.")
+    .max_length(256)
+    .schema();

 pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
     "Prevent changes if current configuration file has different \
@@ -333,8 +273,10 @@ pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
 /// API schema format definition for repository URLs
 pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX);

+
 // Complex type definitions
+
 #[api()]
 #[derive(Default, Serialize, Deserialize)]
 /// Storage space usage information.
@@ -353,6 +295,39 @@ pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.")
     .max_length(64)
     .schema();
#[api]
#[derive(Deserialize, Serialize)]
/// RSA public key information
pub struct RsaPubKeyInfo {
/// Path to key (if stored in a file)
#[serde(skip_serializing_if="Option::is_none")]
pub path: Option<String>,
/// RSA exponent
pub exponent: String,
/// Hex-encoded RSA modulus
pub modulus: String,
/// Key (modulus) length in bits
pub length: usize,
}
impl std::convert::TryFrom<openssl::rsa::Rsa<openssl::pkey::Public>> for RsaPubKeyInfo {
type Error = anyhow::Error;
fn try_from(value: openssl::rsa::Rsa<openssl::pkey::Public>) -> Result<Self, Self::Error> {
let modulus = value.n().to_hex_str()?.to_string();
let exponent = value.e().to_dec_str()?.to_string();
let length = value.size() as usize * 8;
Ok(Self {
path: None,
exponent,
modulus,
length,
})
}
}
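A usage sketch for the conversion above (illustrative; assumes the openssl crate's key generation API):

fn rsa_pub_key_info_example() -> Result<(), anyhow::Error> {
    use std::convert::TryFrom;

    let key = openssl::rsa::Rsa::generate(2048)?;
    let public = openssl::rsa::Rsa::public_key_from_pem(&key.public_key_to_pem()?)?;
    let info = RsaPubKeyInfo::try_from(public)?;
    assert_eq!(info.length, 2048); // size() in bytes times 8
    assert_eq!(info.exponent, "65537"); // the usual public exponent F4
    Ok(())
}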
 #[api()]
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "PascalCase")]
@@ -379,10 +354,11 @@ pub struct APTUpdateInfo {
     /// URL under which the package's changelog can be retrieved
     pub change_log_url: String,
     /// Custom extra field for additional package information
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub extra_info: Option<String>,
 }

+
 #[api()]
 #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "lowercase")]
@@ -394,6 +370,7 @@ pub enum NodePowerCommand {
     Shutdown,
 }

+
 #[api()]
 #[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "lowercase")]
@@ -432,85 +409,19 @@ pub struct TaskListItem {
     /// The authenticated entity who started the task
     pub user: String,
     /// The task end time (Epoch)
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub endtime: Option<i64>,
     /// Task end status
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub status: Option<String>,
 }

 pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType {
     optional: false,
-    schema: &ArraySchema::new("A list of tasks.", &TaskListItem::API_SCHEMA).schema(),
+    schema: &ArraySchema::new(
+        "A list of tasks.",
+        &TaskListItem::API_SCHEMA,
+    ).schema(),
 };

-#[api()]
+pub use proxmox_rrd_api_types::{RRDMode, RRDTimeFrameResolution};
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "UPPERCASE")]
/// RRD consolidation mode
pub enum RRDMode {
/// Maximum
Max,
/// Average
Average,
}
#[api()]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// RRD time frame
pub enum RRDTimeFrame {
/// Hour
Hour,
/// Day
Day,
/// Week
Week,
/// Month
Month,
/// Year
Year,
/// Decade (10 years)
Decade,
}
#[api]
#[derive(Deserialize, Serialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
/// type of the realm
pub enum RealmType {
/// The PAM realm
Pam,
/// The PBS realm
Pbs,
/// An OpenID Connect realm
OpenId,
}
#[api(
properties: {
realm: {
schema: REALM_ID_SCHEMA,
},
"type": {
type: RealmType,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
},
)]
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
/// Basic Information about a realm
pub struct BasicRealmInfo {
pub realm: String,
#[serde(rename = "type")]
pub ty: RealmType,
/// True if it is the default realm
#[serde(skip_serializing_if = "Option::is_none")]
pub default: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
}
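Sketch of how a realm entry serializes (kebab-case, optional fields skipped when unset); serde_json is an illustrative choice here:

fn realm_info_example() -> Result<(), anyhow::Error> {
    let info = BasicRealmInfo {
        realm: "pam".to_string(),
        ty: RealmType::Pam,
        default: Some(true),
        comment: None,
    };
    assert_eq!(
        serde_json::to_string(&info)?,
        r#"{"realm":"pam","type":"pam","default":true}"#
    );
    Ok(())
}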

View File

@ -1,92 +0,0 @@
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};
const_regex! {
pub MAINTENANCE_MESSAGE_REGEX = r"^[[:^cntrl:]]*$";
}
pub const MAINTENANCE_MESSAGE_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&MAINTENANCE_MESSAGE_REGEX);
pub const MAINTENANCE_MESSAGE_SCHEMA: Schema =
StringSchema::new("Message describing the reason for the maintenance.")
.format(&MAINTENANCE_MESSAGE_FORMAT)
.max_length(64)
.schema();
#[derive(Clone, Copy, Debug)]
/// Operation requirements, used when checking for maintenance mode.
pub enum Operation {
/// for any read operation like backup restore or RRD metric collection
Read,
/// for any write/delete operation, like backup create or GC
Write,
/// for any purely logical operation on the in-memory state of the datastore, e.g., to check if
/// some mutex could be locked (e.g., GC already running?)
///
    /// NOTE: one must *not* do any IO operations when only holding this Op state
Lookup,
// GarbageCollect or Delete?
}
#[api]
#[derive(Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Maintenance type.
pub enum MaintenanceType {
// TODO:
// - Add "unmounting" once we got pluggable datastores
// - Add "GarbageCollection" or "DeleteOnly" as type and track GC (or all deletes) as separate
// operation, so that one can enable a mode where nothing new can be added but stuff can be
// cleaned
/// Only read operations are allowed on the datastore.
ReadOnly,
/// Neither read nor write operations are allowed on the datastore.
Offline,
}
#[api(
properties: {
type: {
type: MaintenanceType,
},
message: {
optional: true,
schema: MAINTENANCE_MESSAGE_SCHEMA,
}
},
default_key: "type",
)]
#[derive(Deserialize, Serialize)]
/// Maintenance mode
pub struct MaintenanceMode {
/// Type of maintenance ("read-only" or "offline").
#[serde(rename = "type")]
ty: MaintenanceType,
/// Reason for maintenance.
#[serde(skip_serializing_if = "Option::is_none")]
message: Option<String>,
}
impl MaintenanceMode {
pub fn check(&self, operation: Option<Operation>) -> Result<(), Error> {
let message = percent_encoding::percent_decode_str(self.message.as_deref().unwrap_or(""))
.decode_utf8()
.unwrap_or(Cow::Borrowed(""));
if let Some(Operation::Lookup) = operation {
return Ok(());
} else if self.ty == MaintenanceType::Offline {
bail!("offline maintenance mode: {}", message);
} else if self.ty == MaintenanceType::ReadOnly {
if let Some(Operation::Write) = operation {
bail!("read-only maintenance mode: {}", message);
}
}
Ok(())
}
}
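Illustrative sketch (written as if inside this module, since the fields are private) of how the mode gates operations:

fn maintenance_mode_example() {
    let mode = MaintenanceMode {
        ty: MaintenanceType::ReadOnly,
        message: None,
    };
    assert!(mode.check(Some(Operation::Read)).is_ok());
    assert!(mode.check(Some(Operation::Lookup)).is_ok()); // lookups always pass
    assert!(mode.check(Some(Operation::Write)).is_err()); // writes are blocked
}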

View File

@@ -3,43 +3,49 @@ use serde::{Deserialize, Serialize};
 use proxmox_schema::*;

 use crate::{
-    CIDR_FORMAT, CIDR_V4_FORMAT, CIDR_V6_FORMAT, IP_FORMAT, IP_V4_FORMAT, IP_V6_FORMAT,
     PROXMOX_SAFE_ID_REGEX,
+    IP_V4_FORMAT, IP_V6_FORMAT, IP_FORMAT,
+    CIDR_V4_FORMAT, CIDR_V6_FORMAT, CIDR_FORMAT,
 };

 pub const NETWORK_INTERFACE_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);

-pub const IP_V4_SCHEMA: Schema = StringSchema::new("IPv4 address.")
+pub const IP_V4_SCHEMA: Schema =
+    StringSchema::new("IPv4 address.")
     .format(&IP_V4_FORMAT)
     .max_length(15)
     .schema();

-pub const IP_V6_SCHEMA: Schema = StringSchema::new("IPv6 address.")
+pub const IP_V6_SCHEMA: Schema =
+    StringSchema::new("IPv6 address.")
     .format(&IP_V6_FORMAT)
     .max_length(39)
     .schema();

-pub const IP_SCHEMA: Schema = StringSchema::new("IP (IPv4 or IPv6) address.")
+pub const IP_SCHEMA: Schema =
+    StringSchema::new("IP (IPv4 or IPv6) address.")
     .format(&IP_FORMAT)
     .max_length(39)
     .schema();

-pub const CIDR_V4_SCHEMA: Schema = StringSchema::new("IPv4 address with netmask (CIDR notation).")
+pub const CIDR_V4_SCHEMA: Schema =
+    StringSchema::new("IPv4 address with netmask (CIDR notation).")
     .format(&CIDR_V4_FORMAT)
     .max_length(18)
     .schema();

-pub const CIDR_V6_SCHEMA: Schema = StringSchema::new("IPv6 address with netmask (CIDR notation).")
+pub const CIDR_V6_SCHEMA: Schema =
+    StringSchema::new("IPv6 address with netmask (CIDR notation).")
     .format(&CIDR_V6_FORMAT)
     .max_length(43)
     .schema();

 pub const CIDR_SCHEMA: Schema =
     StringSchema::new("IP address (IPv4 or IPv6) with netmask (CIDR notation).")
     .format(&CIDR_FORMAT)
     .max_length(43)
     .schema();
 #[api()]
 #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
@@ -121,18 +127,17 @@ pub enum NetworkInterfaceType {
 pub const NETWORK_INTERFACE_NAME_SCHEMA: Schema = StringSchema::new("Network interface name.")
     .format(&NETWORK_INTERFACE_FORMAT)
     .min_length(1)
-    .max_length(15) // libc::IFNAMSIZ-1
+    .max_length(libc::IFNAMSIZ-1)
     .schema();

-pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema =
-    ArraySchema::new("Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA).schema();
+pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema = ArraySchema::new(
+    "Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA)
+    .schema();

-pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema =
-    StringSchema::new("A list of network devices, comma separated.")
-        .format(&ApiStringFormat::PropertyString(
-            &NETWORK_INTERFACE_ARRAY_SCHEMA,
-        ))
-        .schema();
+pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema = StringSchema::new(
+    "A list of network devices, comma separated.")
+    .format(&ApiStringFormat::PropertyString(&NETWORK_INTERFACE_ARRAY_SCHEMA))
+    .schema();

 #[api(
     properties: {
/// Interface type /// Interface type
#[serde(rename = "type")] #[serde(rename = "type")]
pub interface_type: NetworkInterfaceType, pub interface_type: NetworkInterfaceType,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub method: Option<NetworkConfigMethod>, pub method: Option<NetworkConfigMethod>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub method6: Option<NetworkConfigMethod>, pub method6: Option<NetworkConfigMethod>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
/// IPv4 address with netmask /// IPv4 address with netmask
pub cidr: Option<String>, pub cidr: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
/// IPv4 gateway /// IPv4 gateway
pub gateway: Option<String>, pub gateway: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
/// IPv6 address with netmask /// IPv6 address with netmask
pub cidr6: Option<String>, pub cidr6: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
/// IPv6 gateway /// IPv6 gateway
pub gateway6: Option<String>, pub gateway6: Option<String>,
#[serde(skip_serializing_if = "Vec::is_empty")] #[serde(skip_serializing_if="Vec::is_empty")]
pub options: Vec<String>, pub options: Vec<String>,
#[serde(skip_serializing_if = "Vec::is_empty")] #[serde(skip_serializing_if="Vec::is_empty")]
pub options6: Vec<String>, pub options6: Vec<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub comments: Option<String>, pub comments: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub comments6: Option<String>, pub comments6: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
/// Maximum Transmission Unit /// Maximum Transmission Unit
pub mtu: Option<u64>, pub mtu: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub bridge_ports: Option<Vec<String>>, pub bridge_ports: Option<Vec<String>>,
/// Enable bridge vlan support. /// Enable bridge vlan support.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub bridge_vlan_aware: Option<bool>, pub bridge_vlan_aware: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub slaves: Option<Vec<String>>, pub slaves: Option<Vec<String>>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub bond_mode: Option<LinuxBondMode>, pub bond_mode: Option<LinuxBondMode>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
#[serde(rename = "bond-primary")] #[serde(rename = "bond-primary")]
pub bond_primary: Option<String>, pub bond_primary: Option<String>,
pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>, pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
@ -276,7 +281,7 @@ pub struct Interface {
impl Interface { impl Interface {
pub fn new(name: String) -> Self { pub fn new(name: String) -> Self {
Self { Self {
name, name,
interface_type: NetworkInterfaceType::Unknown, interface_type: NetworkInterfaceType::Unknown,
autostart: false, autostart: false,
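The attribute churn in this struct is whitespace-only; on both sides, skip_serializing_if omits unset optional fields from the emitted JSON instead of writing "field": null. A standalone demonstration (the struct is illustrative, modeled loosely on Interface, not the real API type):

// Demonstrates serde's skip_serializing_if on optional/empty fields.
use serde::Serialize;

#[derive(Serialize)]
struct Iface {
    name: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    cidr: Option<String>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    options: Vec<String>,
}

fn main() {
    let iface = Iface { name: "eth0".into(), cidr: None, options: Vec::new() };
    // Prints {"name":"eth0"}; neither "cidr":null nor "options":[] appears.
    println!("{}", serde_json::to_string(&iface).unwrap());
}

The next hunk (@@ -1,119 +0,0 @@) removes a whole file on one side of this compare: the OpenID realm configuration types, reproduced below.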


@@ -1,119 +0,0 @@
use serde::{Deserialize, Serialize};

use proxmox_schema::{api, ApiStringFormat, ArraySchema, Schema, StringSchema, Updater};

use super::{
    PROXMOX_SAFE_ID_FORMAT, PROXMOX_SAFE_ID_REGEX, REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
};

pub const OPENID_SCOPE_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);

pub const OPENID_SCOPE_SCHEMA: Schema = StringSchema::new("OpenID Scope Name.")
    .format(&OPENID_SCOPE_FORMAT)
    .schema();

pub const OPENID_SCOPE_ARRAY_SCHEMA: Schema =
    ArraySchema::new("Array of OpenId Scopes.", &OPENID_SCOPE_SCHEMA).schema();

pub const OPENID_SCOPE_LIST_FORMAT: ApiStringFormat =
    ApiStringFormat::PropertyString(&OPENID_SCOPE_ARRAY_SCHEMA);

pub const OPENID_DEFAILT_SCOPE_LIST: &str = "email profile";
pub const OPENID_SCOPE_LIST_SCHEMA: Schema = StringSchema::new("OpenID Scope List")
    .format(&OPENID_SCOPE_LIST_FORMAT)
    .default(OPENID_DEFAILT_SCOPE_LIST)
    .schema();

pub const OPENID_ACR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);

pub const OPENID_ACR_SCHEMA: Schema =
    StringSchema::new("OpenID Authentication Context Class Reference.")
        .format(&OPENID_SCOPE_FORMAT)
        .schema();

pub const OPENID_ACR_ARRAY_SCHEMA: Schema =
    ArraySchema::new("Array of OpenId ACRs.", &OPENID_ACR_SCHEMA).schema();

pub const OPENID_ACR_LIST_FORMAT: ApiStringFormat =
    ApiStringFormat::PropertyString(&OPENID_ACR_ARRAY_SCHEMA);

pub const OPENID_ACR_LIST_SCHEMA: Schema = StringSchema::new("OpenID ACR List")
    .format(&OPENID_ACR_LIST_FORMAT)
    .schema();

pub const OPENID_USERNAME_CLAIM_SCHEMA: Schema = StringSchema::new(
    "Use the value of this attribute/claim as unique user name. It \
    is up to the identity provider to guarantee the uniqueness. The \
    OpenID specification only guarantees that Subject ('sub') is \
    unique. Also make sure that the user is not allowed to change that \
    attribute by himself!",
)
.max_length(64)
.min_length(1)
.format(&PROXMOX_SAFE_ID_FORMAT)
.schema();

#[api(
    properties: {
        realm: {
            schema: REALM_ID_SCHEMA,
        },
        "client-key": {
            optional: true,
        },
        "scopes": {
            schema: OPENID_SCOPE_LIST_SCHEMA,
            optional: true,
        },
        "acr-values": {
            schema: OPENID_ACR_LIST_SCHEMA,
            optional: true,
        },
        prompt: {
            description: "OpenID Prompt",
            type: String,
            format: &PROXMOX_SAFE_ID_FORMAT,
            optional: true,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        autocreate: {
            optional: true,
            default: false,
        },
        "username-claim": {
            schema: OPENID_USERNAME_CLAIM_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// OpenID configuration properties.
pub struct OpenIdRealmConfig {
    #[updater(skip)]
    pub realm: String,
    /// OpenID Issuer Url
    pub issuer_url: String,
    /// OpenID Client ID
    pub client_id: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub scopes: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub acr_values: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt: Option<String>,
    /// OpenID Client Key
    #[serde(skip_serializing_if = "Option::is_none")]
    pub client_key: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    /// Automatically create users if they do not exist.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub autocreate: Option<bool>,
    #[updater(skip)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub username_claim: Option<String>,
}
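Scopes, ACR values, and the username claim above are all constrained by PROXMOX_SAFE_ID_FORMAT plus length bounds. A hedged sketch of such a check with the regex crate; the pattern is an assumption written from the safe-id naming convention, not copied from pbs-api-types:

// Sketch of the safe-id + length check behind OPENID_USERNAME_CLAIM_SCHEMA.
// The pattern is an assumption, not the crate's PROXMOX_SAFE_ID_REGEX.
use regex::Regex;

fn is_valid_username_claim(value: &str) -> bool {
    // In real code, compile this once (e.g. via lazy_static).
    let safe_id = Regex::new(r"^(?i)[a-z0-9_][a-z0-9._\-]*$").unwrap();
    (1..=64).contains(&value.len()) && safe_id.is_match(value)
}

fn main() {
    assert!(is_valid_username_claim("email"));
    assert!(is_valid_username_claim("preferred_username"));
    assert!(!is_valid_username_claim("")); // violates min_length(1)
}

The following file's hunks cover the remote host configuration types.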


@@ -3,19 +3,17 @@ use serde::{Deserialize, Serialize};
 use super::*;
 use proxmox_schema::*;

-pub const REMOTE_PASSWORD_SCHEMA: Schema =
-    StringSchema::new("Password or auth token for remote host.")
-    .format(&PASSWORD_FORMAT)
-    .min_length(1)
-    .max_length(1024)
-    .schema();
+pub const REMOTE_PASSWORD_SCHEMA: Schema = StringSchema::new("Password or auth token for remote host.")
+    .format(&PASSWORD_FORMAT)
+    .min_length(1)
+    .max_length(1024)
+    .schema();

-pub const REMOTE_PASSWORD_BASE64_SCHEMA: Schema =
-    StringSchema::new("Password or auth token for remote host (stored as base64 string).")
-    .format(&PASSWORD_FORMAT)
-    .min_length(1)
-    .max_length(1024)
-    .schema();
+pub const REMOTE_PASSWORD_BASE64_SCHEMA: Schema = StringSchema::new("Password or auth token for remote host (stored as base64 string).")
+    .format(&PASSWORD_FORMAT)
+    .min_length(1)
+    .max_length(1024)
+    .schema();

 pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
     .format(&PROXMOX_SAFE_ID_FORMAT)
@@ -23,6 +21,7 @@ pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
     .max_length(32)
     .schema();
 #[api(
     properties: {
         comment: {
@@ -46,17 +45,17 @@ pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
         },
     },
 )]
-#[derive(Serialize, Deserialize, Updater)]
+#[derive(Serialize,Deserialize,Updater)]
 #[serde(rename_all = "kebab-case")]
 /// Remote configuration properties.
 pub struct RemoteConfig {
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub comment: Option<String>,
     pub host: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub port: Option<u16>,
     pub auth_id: Authid,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub fingerprint: Option<String>,
 }
@@ -73,34 +72,15 @@ pub struct RemoteConfig {
         },
     },
 )]
-#[derive(Serialize, Deserialize)]
+#[derive(Serialize,Deserialize)]
 #[serde(rename_all = "kebab-case")]
 /// Remote properties.
 pub struct Remote {
     pub name: String,
     // Note: The stored password is base64 encoded
-    #[serde(skip_serializing_if = "String::is_empty")]
-    #[serde(with = "proxmox_serde::string_as_base64")]
+    #[serde(skip_serializing_if="String::is_empty")]
+    #[serde(with = "proxmox::tools::serde::string_as_base64")]
     pub password: String,
     #[serde(flatten)]
     pub config: RemoteConfig,
 }
-
-#[api(
-    properties: {
-        name: {
-            schema: REMOTE_ID_SCHEMA,
-        },
-        config: {
-            type: RemoteConfig,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-/// Remote properties.
-pub struct RemoteWithoutPassword {
-    pub name: String,
-    #[serde(flatten)]
-    pub config: RemoteConfig,
-}
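The string_as_base64 adapter (moved from proxmox::tools::serde into the proxmox_serde crate on one side of this diff) keeps the in-memory field a plain String while the serialized form is base64. A minimal sketch of such a #[serde(with = "...")] module, built on the base64 crate's encode/decode free functions; this is an assumption mirroring, not reproducing, the proxmox implementation:

// Sketch of a string_as_base64-style adapter for #[serde(with = "...")].
mod string_as_base64 {
    use serde::{Deserialize, Deserializer, Serialize, Serializer};

    pub fn serialize<S: Serializer>(v: &str, s: S) -> Result<S::Ok, S::Error> {
        base64::encode(v).serialize(s)
    }

    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<String, D::Error> {
        let encoded = String::deserialize(d)?;
        let bytes = base64::decode(&encoded).map_err(serde::de::Error::custom)?;
        String::from_utf8(bytes).map_err(serde::de::Error::custom)
    }
}

#[derive(serde::Serialize, serde::Deserialize)]
struct Secret {
    #[serde(with = "string_as_base64")]
    password: String,
}

fn main() {
    let s = Secret { password: "hunter2".into() };
    // Prints {"password":"aHVudGVyMg=="}
    println!("{}", serde_json::to_string(&s).unwrap());
}

The removed RemoteWithoutPassword type, present on only one side of the compare, is the same configuration minus the password field, presumably so list/read API calls never echo the stored secret. The last file shown covers the tape device API types.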


@@ -3,23 +3,23 @@ use ::serde::{Deserialize, Serialize};
 use proxmox_schema::api;

 #[api()]
-#[derive(Serialize, Deserialize)]
+#[derive(Serialize,Deserialize)]
 #[serde(rename_all = "kebab-case")]
 /// Optional Device Identification Attributes
 pub struct OptionalDeviceIdentification {
     /// Vendor (autodetected)
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub vendor: Option<String>,
     /// Model (autodetected)
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub model: Option<String>,
     /// Serial number (autodetected)
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub serial: Option<String>,
 }

 #[api()]
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug,Serialize,Deserialize)]
 #[serde(rename_all = "kebab-case")]
 /// Kind of device
 pub enum DeviceKind {
@@ -36,7 +36,7 @@ pub enum DeviceKind {
         },
     },
 )]
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug,Serialize,Deserialize)]
 /// Tape device information
 pub struct TapeDeviceInfo {
     pub kind: DeviceKind,
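With rename_all = "kebab-case", multi-word variant and field names serialize in kebab form: vendor stays vendor, but a variant like TapeDrive becomes "tape-drive". A small demo; the variant names are placeholders, since the diff truncates the real DeviceKind list:

// rename_all = "kebab-case" demo; DemoKind's variants are invented here.
use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
enum DemoKind {
    TapeDrive,
    TapeChanger,
}

fn main() {
    // Prints "tape-drive" (unit variants serialize as plain strings).
    println!("{}", serde_json::to_string(&DemoKind::TapeDrive).unwrap());
}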

Some files were not shown because too many files have changed in this diff.