Compare commits


No commits in common. "master" and "2.1.1" have entirely different histories.

480 changed files with 15468 additions and 27014 deletions

Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "2.2.3"
+version = "2.1.2"
 authors = [
     "Dietmar Maurer <dietmar@proxmox.com>",
     "Dominik Csapak <d.csapak@proxmox.com>",
@@ -49,6 +49,7 @@ bytes = "1.0"
 cidr = "0.2.1"
 crc32fast = "1"
 endian_trait = { version = "0.6", features = ["arrays"] }
+env_logger = "0.7"
 flate2 = "1.0"
 anyhow = "1.0"
 thiserror = "1.0"
@@ -60,16 +61,16 @@ http = "0.2"
 hyper = { version = "0.14", features = [ "full" ] }
 lazy_static = "1.4"
 libc = "0.2"
-log = "0.4.17"
-nix = "0.24"
+log = "0.4"
+nix = "0.19.1"
 num-traits = "0.2"
 once_cell = "1.3.1"
-openssl = "0.10.38" # currently patched!
+openssl = "0.10"
 pam = "0.7"
 pam-sys = "0.5"
 percent-encoding = "2.1"
-regex = "1.5.5"
-rustyline = "9"
+regex = "1.2"
+rustyline = "7"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 siphasher = "0.3"
@@ -77,7 +78,7 @@ syslog = "4.0"
 tokio = { version = "1.6", features = [ "fs", "io-util", "io-std", "macros", "net", "parking_lot", "process", "rt", "rt-multi-thread", "signal", "time" ] }
 tokio-openssl = "0.6.1"
 tokio-stream = "0.1.0"
-tokio-util = { version = "0.7", features = [ "codec", "io" ] }
+tokio-util = { version = "0.6", features = [ "codec", "io" ] }
 tower-service = "0.3.0"
 udev = "0.4"
 url = "2.1"
@@ -93,24 +94,22 @@ zstd = { version = "0.6", features = [ "bindgen" ] }
 pathpatterns = "0.1.2"
 pxar = { version = "0.10.1", features = [ "tokio-io" ] }
-proxmox-http = { version = "0.6.1", features = [ "client", "http-helpers", "websocket" ] }
+proxmox = { version = "0.15.3", features = [ "sortable-macro" ] }
+proxmox-http = { version = "0.5.4", features = [ "client", "http-helpers", "websocket" ] }
 proxmox-io = "1"
-proxmox-lang = "1.1"
-proxmox-router = { version = "1.2.2", features = [ "cli" ] }
-proxmox-schema = { version = "1.3.1", features = [ "api-macro" ] }
+proxmox-lang = "1"
+proxmox-router = { version = "1.1", features = [ "cli" ] }
+proxmox-schema = { version = "1", features = [ "api-macro" ] }
 proxmox-section-config = "1"
-proxmox-tfa = { version = "2", features = [ "api", "api-types" ] }
-proxmox-time = "1.1.2"
+proxmox-tfa = { version = "1.3", features = [ "api", "api-types" ] }
+proxmox-time = "1"
 proxmox-uuid = "1"
-proxmox-serde = "0.1"
-proxmox-shared-memory = "0.2"
-proxmox-sys = { version = "0.3", features = [ "sortable-macro" ] }
-proxmox-compression = "0.1"
-proxmox-acme-rs = "0.4"
+proxmox-shared-memory = "0.1.1"
+proxmox-sys = "0.1.2"
+proxmox-acme-rs = "0.3"
 proxmox-apt = "0.8.0"
-proxmox-async = "0.4"
+proxmox-async = "0.2"
 proxmox-openid = "0.9.0"
 
 pbs-api-types = { path = "pbs-api-types" }
@@ -126,25 +125,10 @@ pbs-tape = { path = "pbs-tape" }
 # Local path overrides
 # NOTE: You must run `cargo update` after changing this for it to take effect!
 [patch.crates-io]
-#proxmox-acme-rs = { path = "../proxmox-acme-rs" }
-#proxmox-apt = { path = "../proxmox-apt" }
-#proxmox-async = { path = "../proxmox/proxmox-async" }
-#proxmox-compression = { path = "../proxmox/proxmox-compression" }
-#proxmox-borrow = { path = "../proxmox/proxmox-borrow" }
-#proxmox-fuse = { path = "../proxmox-fuse" }
+#proxmox = { path = "../proxmox/proxmox" }
 #proxmox-http = { path = "../proxmox/proxmox-http" }
-#proxmox-io = { path = "../proxmox/proxmox-io" }
-#proxmox-lang = { path = "../proxmox/proxmox-lang" }
-#proxmox-openid = { path = "../proxmox-openid-rs" }
-#proxmox-router = { path = "../proxmox/proxmox-router" }
-#proxmox-schema = { path = "../proxmox/proxmox-schema" }
-#proxmox-section-config = { path = "../proxmox/proxmox-section-config" }
-#proxmox-shared-memory = { path = "../proxmox/proxmox-shared-memory" }
-#proxmox-sys = { path = "../proxmox/proxmox-sys" }
-#proxmox-serde = { path = "../proxmox/proxmox-serde" }
 #proxmox-tfa = { path = "../proxmox/proxmox-tfa" }
-#proxmox-time = { path = "../proxmox/proxmox-time" }
-#proxmox-uuid = { path = "../proxmox/proxmox-uuid" }
+#proxmox-schema = { path = "../proxmox/proxmox-schema" }
 #pxar = { path = "../pxar" }
 
 [features]
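The NOTE quoted above applies whenever one of the ``[patch.crates-io]`` overrides is toggled; as an illustrative reminder (not a command taken from this page), the override only becomes effective once the lockfile is refreshed:

    # after uncommenting an override in Cargo.toml
    cargo update

Until then, cargo keeps resolving the version recorded in Cargo.lock.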

Makefile

@@ -221,6 +221,9 @@ install: $(COMPILED_BINS)
 	    install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
 	$(MAKE) -C www install
 	$(MAKE) -C docs install
+ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
+	$(MAKE) test # HACK, only test now to avoid clobbering build files with wrong config
+endif
 
 .PHONY: upload
 upload: ${SERVER_DEB} ${CLIENT_DEB} ${RESTORE_DEB} ${DOC_DEB} ${DEBUG_DEB}
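The ``$(filter nocheck,$(DEB_BUILD_OPTIONS))`` guard added on the 2.1.1 side follows the standard Debian convention: the test step runs unless ``nocheck`` is present in ``DEB_BUILD_OPTIONS``. An illustrative invocation that skips it (not a command from this repository):

    DEB_BUILD_OPTIONS=nocheck make install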

README.rst

@@ -1,7 +1,3 @@
-Build & Release Notes
-*********************
-
 ``rustup`` Toolchain
 ====================
@@ -44,44 +40,41 @@ example for proxmox crate above).
 Build
 =====
 
-on Debian 11 Bullseye
+on Debian Buster
 
 Setup:
-  1. # echo 'deb http://download.proxmox.com/debian/devel/ bullseye main' | sudo tee /etc/apt/sources.list.d/proxmox-devel.list
-  2. # sudo wget https://enterprise.proxmox.com/debian/proxmox-release-bullseye.gpg -O /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
+  1. # echo 'deb http://download.proxmox.com/debian/devel/ buster main' >> /etc/apt/sources.list.d/proxmox-devel.list
+  2. # sudo wget http://download.proxmox.com/debian/proxmox-ve-release-6.x.gpg -O /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
   3. # sudo apt update
   4. # sudo apt install devscripts debcargo clang
   5. # git clone git://git.proxmox.com/git/proxmox-backup.git
-  6. # cd proxmox-backup; sudo mk-build-deps -ir
+  6. # sudo mk-build-deps -ir
 
 Note: 2. may be skipped if you already added the PVE or PBS package repository
 
-You are now able to build using the Makefile or cargo itself, e.g.::
-
-  # make deb-all
-  # # or for a non-package build
-  # cargo build --all --release
+You are now able to build using the Makefile or cargo itself.
 
 Design Notes
-************
+============
 
 Here are some random thought about the software design (unless I find a better place).
 
 Large chunk sizes
-=================
+-----------------
 
-It is important to notice that large chunk sizes are crucial for performance.
-We have a multi-user system, where different people can do different operations
-on a datastore at the same time, and most operation involves reading a series
-of chunks.
+It is important to notice that large chunk sizes are crucial for
+performance. We have a multi-user system, where different people can do
+different operations on a datastore at the same time, and most operation
+involves reading a series of chunks.
 
-So what is the maximal theoretical speed we can get when reading a series of
-chunks? Reading a chunk sequence need the following steps:
+So what is the maximal theoretical speed we can get when reading a
+series of chunks? Reading a chunk sequence need the following steps:
 
-- seek to the first chunk's start location
+- seek to the first chunk start location
 - read the chunk data
-- seek to the next chunk's start location
+- seek to the first chunk start location
 - read the chunk data
 - ...
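A back-of-the-envelope example of why chunk size dominates here (the numbers are assumptions for illustration, not from this repository): with roughly 10 ms average seek time and 100 MB/s sequential read speed,

    time per chunk = 0.010 s + chunk size / (100 MB/s)

    64 KiB chunks: ~0.0107 s/chunk  ->  ~6 MB/s effective read speed
    4 MiB chunks:  ~0.052 s/chunk   ->  ~80 MB/s effective read speed

so large chunks amortize the per-chunk seek cost and approach the raw sequential speed of the disk.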

debian/changelog

@@ -1,405 +1,3 @@
rust-proxmox-backup (2.2.3-1) bullseye; urgency=medium
* datastore: swap dirtying the datastore cache every 60s by just using the
available config digest to detect any changes accurately when they actually
happen
* api: datastore list and datastore status: avoid opening datastore and
possibly iterating over namespace (for lesser privileged users), but
rather use the in-memory ACL tree directly to check if there's access to
any namespace below.
-- Proxmox Support Team <support@proxmox.com> Sat, 04 Jun 2022 16:30:05 +0200
rust-proxmox-backup (2.2.2-3) bullseye; urgency=medium
* datastore: lookup: reuse ChunkStore on stale datastore re-open
* bump tokio (async framework) dependency
-- Proxmox Support Team <support@proxmox.com> Thu, 02 Jun 2022 17:25:01 +0200
rust-proxmox-backup (2.2.2-2) bullseye; urgency=medium
* improvement of error handling when removing status files and locks from
jobs that were never executed.
-- Proxmox Support Team <support@proxmox.com> Wed, 01 Jun 2022 16:22:22 +0200
rust-proxmox-backup (2.2.2-1) bullseye; urgency=medium
* Revert "verify: allow '0' days for reverification", was already possible
by setting "ignore-verified" to false
* ui: datastore permissions: allow ACL path edit & query namespaces
* accessible group iter: allow NS descending with DATASTORE_READ privilege
* prune datastore: rework worker task log
* prune datastore: support max-depth and improve priv checks
* ui: prune input: support opt-in recursive/max-depth field
* add prune job config and api, allowing one to setup a scheduled pruning
for a specific namespace only
* ui: add ui for prune jobs
* api: disable setting prune options in datastore.cfg and transform any
existing prune tasks from datastore config to new prune job config in a
post installation hook
* proxmox-tape: use correct api call for 'load-media-from-slot'
* avoid overly strict privilege restrictions for some API endpoints and
actions when using namespaces. Better support navigating the user
interface when only having Datastore.Admin on a (sub) namespace.
* include required privilege names in some permission errors
* docs: fix some typos
* api: status: include empty entry for stores with ns-only privs
* ui: datastore options: avoid breakage if rrd store or active-ops cannot
be queried
* ui: datastore content: only mask the inner treeview, not the top bar on
error to allow a user to trigger a manual reload
* ui: system config: improve bottom margins and scroll behavior
-- Proxmox Support Team <support@proxmox.com> Wed, 01 Jun 2022 15:09:36 +0200
rust-proxmox-backup (2.2.1-1) bullseye; urgency=medium
* docs: update some screenshots and add new ones
* docs: port over certificate management chapters from Proxmox VE
* ui: datastore/Summary: correctly show the io-delay chart
* ui: sync/verify jobs: use pmxDisplayEditField to fix editing
* ui: server status: use power of two base for memory and swap
* ui: use base 10 (SI) for all storage related displays
* ui: datastore selector: show maintenance mode in selector
* docs: basic maintenance mode section
* docs: storage: refer to options
* storage: add some initial namespace docs
* ui: tape restore: fix form validation for datastore mapping
* ui: namespace selector: show picker empty text if no namespace
-- Proxmox Support Team <support@proxmox.com> Tue, 17 May 2022 13:56:50 +0200
rust-proxmox-backup (2.2.0-2) bullseye; urgency=medium
* client: add CLI auto-completion callbacks for ns parameters
* ui: fix setting protection in namespace
* ui: switch summary repo status to widget toolkit one
* ui: verify outdated: disallow blank and drop wrong empty text
* docs: add namespace section to sync documentation
* ui: datastore summary: add maintenance mask for offline entries
* ui: verify/sync: allow to optionally override ID again
* prune: fix workerid issues
-- Proxmox Support Team <support@proxmox.com> Mon, 16 May 2022 19:01:13 +0200
rust-proxmox-backup (2.2.0-1) bullseye; urgency=medium
* cli: improve namespace integration in proxmox-backup-client and
proxmox-tape
* docs: tape: add information about namespaces
* api: datastore status: make counts for groups and snapshots iterate over
all accessible namespaces recursively
* ui: fix storeId casing to register store correctly, so that we can query
it again for the ACL permission path selector
* ui: trigger datastore update after maintenance mode edit
* ui: namespace selector: set queryMode to local to avoid bogus background
requests on typing
* ui: sync job: fix clearing value of remote target-namespace by mistake on
edit
* ui: remote target ns selector: add clear trigger
* ui: prune group: add namespace info to title
* fix #4001: ui: add prefix to files downloaded through the pxar browser
* ui: datastore: reload content tree on successful datastore add
* ui: datastore: allow deleting currently shown namespace
* docs: rework access control, list available privileges
* docs: access control: add "Objects and Paths" section and fix
add-permission screenshot
-- Proxmox Support Team <support@proxmox.com> Mon, 16 May 2022 11:06:05 +0200
rust-proxmox-backup (2.1.10-1) bullseye; urgency=medium
* datastore: drop bogus chunk size check, can cause trouble
* pull/sync: detect remote lack of namespace support
* pull/sync: correctly query with remote-ns as parent
* ui: sync: add reduced max-depth selector
* ui: group filter: make also local filter NS aware
* api types: set NS_MAX_DEPTH schema default to MAX_NAMESPACE_DEPTH instead
of 0
* tape: notify when arriving at end of media
* tree-wide: rename 'backup-ns' API parameters to 'ns'
* tape: add namespaces/recursion depth to tape backup jobs
* api: tape/restore: add namespace mapping
* tape: bump catalog/snapshot archive magic
* ui: tape: backup overview: show namespaces as their own level above groups
* ui: tape restore: allow simple namespace mapping
-- Proxmox Support Team <support@proxmox.com> Fri, 13 May 2022 14:26:32 +0200
rust-proxmox-backup (2.1.9-2) bullseye; urgency=medium
* api: tape restore: lock the target datastore, not the source one
* chunk store: force write chunk again if it exists but its metadata length
is zero
* completion: fix 'group-filter' parameter name
* implement backup namespaces for datastores, allowing one to reuse a single
chunkstore deduplication domain for multiple sources without naming
conflicts and with fine-grained access control.
* make various datastore related API calls backup namespace aware
* make sync and pull backup namespace aware
* ui: datastore content: show namespaces but only one level at a time
* ui: make various datastore related UI components namespace aware
* fix various bugs, add namespace support to file-restore
-- Proxmox Support Team <support@proxmox.com> Thu, 12 May 2022 14:25:53 +0200
rust-proxmox-backup (2.1.8-1) bullseye; urgency=medium
* api: status: return gc-status again
* proxmox-backup-proxy: stop accept() loop on daemon shutdown to avoid that
new request get accepted while the REST stack is already stopped, for
example on the reload triggered by a package upgrade.
* pull: improve filtering local removal candidates
-- Proxmox Support Team <support@proxmox.com> Mon, 02 May 2022 17:36:11 +0200
rust-proxmox-backup (2.1.7-1) bullseye; urgency=medium
* pbs-tape: sgutils2: check sense data when status is 'CHECK_CONDITION'
* rework & refactor datastore implementation for a more hierarchical access
structure
* datastore: implement Iterator for backup group and snapshot listing to
allow more efficient access for cases where we do not need the whole list
in memory
* pbs-client: extract: rewrite create_zip with sequential decoder
* pbs-client: extract: add top-level dir in tar.zst
* fix #3067: ui: add a separate notes view for longer markdown notes and
copy the markdown primer from Proxmox VE to Proxmox Backup Server docs
* restore-daemon: start disk initialization in parallel to the api
* restore-daemon: put blocking code into 'block_in_place'
* restore-daemon: avoid auto-pre-mounting zpools completely, the upfront
(time) cost can be too big to pay up initially, e.g., if there are many
subvolumes present, so only mount on demand.
* file-restore: add 'timeout' and 'json-error' parameter
* ui: add summary mask when in maintenance mode
* ui: update datastore's navigation icon and tooltip if it is in maintenance
mode
-- Proxmox Support Team <support@proxmox.com> Wed, 27 Apr 2022 19:53:53 +0200
rust-proxmox-backup (2.1.6-1) bullseye; urgency=medium
* api: verify: allow passing '0 days' for immediate re-verification
* fix #3103: node configuration: allow to configure default UI language
* fix #3856: tape: encryption key's password hint parameter is not optional
* re-use PROXMOX_DEBUG environment variable to control log level filter
* ui: WebAuthn: fix stopping store upgrades on destroy and decrease interval
* report: add tape, traffic control and disk infos and tune output order
* fix #3853: cli/api: add force option to tape key change-passphrase
* fix #3323: cli client: add dry-run option for backup command
* tape: make iterating over chunks to backup smarter to avoid some work
* bin: daily-update: make single checks/updates fail gracefully and log
to syslog directly instead of going through stdout indirectly.
* datastore: allow to turn off inode-sorting for chunk iteration. While inode
sorting benefits read-performance on block devices with higher latency
(e.g., spinning disks), it's also some extra work to get the metadata
required for sorting, so it's a trade-off. For setups that have either very
slow or very fast metadata IO the benefits may turn into a net cost.
* docs: explain retention time for event allocation policy in more detail
* docs: add tape schedule examples
* proxmox-backup-debug api: parse parameters before sending to api
* ui: fix panel height in the dashboard for three-column view mode
* fix #3934: tape owner-selector to auth-id (user OR token)
* fix #3067: api: add support for multi-line comments in the node
configuration
* pbs-client: print error when we couldn't download previous FIDX/DIDX for
incremental change tracking
* fix #3854 add command to import a key from a file (json or paper-key
format) to proxmox-tape
* improve IO access pattern for some scenarios like TFA with high user and
login count or the file-restore-for-block-backup VM's internal driver.
* pxar create: fix anchored path pattern matching when adding entries
* docs: client: file exclusion: add note about leading slash
* rest-server: add option to rotate task logs by 'max_days' instead of
'max_files'
* pbs-datastore: add active operations tracking and use it to implement a
graceful transition into the also newly added maintenance mode (read-only
or offline) for datastores. Note that the UI implementation may still show
some rough edges if a datastore is in offline mode for maintenance.
* add new streaming-response type for API call responses and enable it for
the snapshot and task-log list, which can both get rather big. This avoids
allocation of a potentially big intermediate memory buffer and thus
overall memory usage.
* pxar: accompany existing .zip download support with a tar.zst(d) one. The
tar archive supports more file types (e.g., hard links or device nodes)
and zstd allows for an efficient but still effective compression.
-- Proxmox Support Team <support@proxmox.com> Wed, 13 Apr 2022 17:00:53 +0200
rust-proxmox-backup (2.1.5-1) bullseye; urgency=medium
* tell system allocator to always use mmap for allocations >= 128 KiB to
improve reclaimability of free'd memory to the OS and reduce peak and avg.
RSS consumption
* file restore: always wait up to 25s for the file-restore-VM to have
scanned all possible filesystems in a backup. While theoretically there
are some edge cases where the tool waits less now, most common ones should
be waiting more compared to the 12s "worst" case previously.
-- Proxmox Support Team <support@proxmox.com> Wed, 26 Jan 2022 16:23:09 +0100
rust-proxmox-backup (2.1.4-1) bullseye; urgency=medium
* config: add tls ciphers to NodeConfig
* pbs-tools: improve memory foot print of LRU Cache
* update dependencies to avoid a ref-count leak in async helpers
-- Proxmox Support Team <support@proxmox.com> Fri, 21 Jan 2022 10:48:14 +0100
rust-proxmox-backup (2.1.3-1) bullseye; urgency=medium
* fix #3618: proxmox-async: zip: add conditional EFS flag to zip files to
improve non-ascii code point extraction under windows.
* OpenID Connect login: improve error message for disabled users
* ui: tape: backup job: add second tab for group-filters to add/edit window
* ui: sync job: add second tab for group-filters to add/edit window
* ui: calendar event: add once daily example and clarify the workday one
* fix #3794: api types: set backup time (since the UNIX epoch) lower limit
to 1
* ui: fix opening settings window in datastore panel
* api: zfs: create zpool with `relatime=on` flag set
* fix #3763: disable SSL/TLS renegotiation
* node config: add email-from parameter to control notification sender
address
* ui: configuration: rename the "Authentication" tab to "Other" and add a
"General" section with HTTP-proxy and email-from settings
* datastore stats: not include the unavailable `io_ticks` for ZFS
datastores
* ui: hide RRD chart for IO delay if no `io_ticks` are returned
* fix #3058: ui: improve remote edit UX by clarifying ID vs host fields
* docs: fix some minor typos
* api-types: relax nodename API schema, make it a simple regex check like in
Proxmox VE
-- Proxmox Support Team <support@proxmox.com> Wed, 12 Jan 2022 16:49:13 +0100
rust-proxmox-backup (2.1.2-1) bullseye; urgency=medium

  * docs: backup-client: fix wrong reference

debian/control

@@ -16,7 +16,7 @@ Build-Depends: debhelper (>= 12),
 librust-crossbeam-channel-0.5+default-dev,
 librust-endian-trait-0.6+arrays-dev,
 librust-endian-trait-0.6+default-dev,
-librust-env-logger-0.9+default-dev,
+librust-env-logger-0.7+default-dev,
 librust-flate2-1+default-dev,
 librust-foreign-types-0.3+default-dev,
 librust-futures-0.3+default-dev,
@@ -24,58 +24,56 @@ Build-Depends: debhelper (>= 12),
 librust-h2-0.3+stream-dev,
 librust-handlebars-3+default-dev,
 librust-hex-0.4+default-dev (>= 0.4.3-~~),
-librust-hex-0.4+serde-dev (>= 0.4.3-~~),
 librust-http-0.2+default-dev,
 librust-hyper-0.14+default-dev (>= 0.14.5-~~),
 librust-hyper-0.14+full-dev (>= 0.14.5-~~),
 librust-lazy-static-1+default-dev (>= 1.4-~~),
 librust-libc-0.2+default-dev,
-librust-log-0.4+default-dev (>= 0.4.17-~~) <!nocheck>,
-librust-nix-0.24+default-dev,
+librust-log-0.4+default-dev,
+librust-nix-0.19+default-dev (>= 0.19.1-~~),
 librust-nom-5+default-dev (>= 5.1-~~),
 librust-num-traits-0.2+default-dev,
 librust-once-cell-1+default-dev (>= 1.3.1-~~),
-librust-openssl-0.10+default-dev (>= 0.10.38-~~),
+librust-openssl-0.10+default-dev,
 librust-pam-0.7+default-dev,
 librust-pam-sys-0.5+default-dev,
 librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
 librust-percent-encoding-2+default-dev (>= 2.1-~~),
 librust-pin-project-lite-0.2+default-dev,
-librust-proxmox-acme-rs-0.4+default-dev,
+librust-proxmox-0.15+default-dev (>= 0.15.3-~~),
+librust-proxmox-0.15+sortable-macro-dev (>= 0.15.3-~~),
+librust-proxmox-0.15+tokio-dev (>= 0.15.3-~~),
+librust-proxmox-acme-rs-0.3+default-dev,
 librust-proxmox-apt-0.8+default-dev,
-librust-proxmox-async-0.4+default-dev,
+librust-proxmox-async-0.2+default-dev,
 librust-proxmox-borrow-1+default-dev,
-librust-proxmox-compression-0.1+default-dev (>= 0.1.1-~~),
 librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
-librust-proxmox-http-0.6+client-dev (>= 0.6.1-~~),
-librust-proxmox-http-0.6+default-dev (>= 0.6.1-~~),
-librust-proxmox-http-0.6+http-helpers-dev (>= 0.6.1-~~),
-librust-proxmox-http-0.6+websocket-dev (>= 0.6.1-~~),
-librust-proxmox-io-1+default-dev (>= 1.0.1-~~),
-librust-proxmox-io-1+tokio-dev (>= 1.0.1-~~),
-librust-proxmox-lang-1+default-dev (>= 1.1-~~),
+librust-proxmox-http-0.5+client-dev (>= 0.5.4-~~),
+librust-proxmox-http-0.5+default-dev (>= 0.5.4-~~),
+librust-proxmox-http-0.5+http-helpers-dev (>= 0.5.4-~~),
+librust-proxmox-http-0.5+websocket-dev (>= 0.5.4-~~),
+librust-proxmox-io-1+default-dev,
+librust-proxmox-io-1+tokio-dev,
+librust-proxmox-lang-1+default-dev,
 librust-proxmox-openid-0.9+default-dev,
-librust-proxmox-router-1+cli-dev (>= 1.2-~~),
-librust-proxmox-router-1+default-dev (>= 1.2.2-~~),
-librust-proxmox-schema-1+api-macro-dev (>= 1.3.1-~~),
-librust-proxmox-schema-1+default-dev (>= 1.3.1-~~),
-librust-proxmox-schema-1+upid-api-impl-dev (>= 1.3.1-~~),
+librust-proxmox-router-1+cli-dev (>= 1.1-~~),
+librust-proxmox-router-1+default-dev (>= 1.1-~~),
+librust-proxmox-schema-1+api-macro-dev (>= 1.0.1-~~),
+librust-proxmox-schema-1+default-dev (>= 1.0.1-~~),
+librust-proxmox-schema-1+upid-api-impl-dev (>= 1.0.1-~~),
 librust-proxmox-section-config-1+default-dev,
-librust-proxmox-serde-0.1+default-dev,
-librust-proxmox-shared-memory-0.2+default-dev,
-librust-proxmox-sys-0.3+default-dev,
-librust-proxmox-sys-0.3+logrotate-dev,
-librust-proxmox-sys-0.3+sortable-macro-dev,
-librust-proxmox-tfa-2+api-dev,
-librust-proxmox-tfa-2+api-types-dev,
-librust-proxmox-tfa-2+default-dev,
-librust-proxmox-time-1+default-dev (>= 1.1.2-~~),
+librust-proxmox-shared-memory-0.1+default-dev (>= 0.1.1-~~),
+librust-proxmox-sys-0.1+default-dev (>= 0.1.2-~~),
+librust-proxmox-tfa-1+api-dev (>= 1.3-~~),
+librust-proxmox-tfa-1+api-types-dev (>= 1.3-~~),
+librust-proxmox-tfa-1+default-dev (>= 1.3-~~),
+librust-proxmox-time-1+default-dev (>= 1.1-~~),
 librust-proxmox-uuid-1+default-dev,
 librust-proxmox-uuid-1+serde-dev,
 librust-pxar-0.10+default-dev (>= 0.10.1-~~),
 librust-pxar-0.10+tokio-io-dev (>= 0.10.1-~~),
-librust-regex-1+default-dev (>= 1.5.5-~~),
-librust-rustyline-9+default-dev,
+librust-regex-1+default-dev (>= 1.2-~~),
+librust-rustyline-7+default-dev,
 librust-serde-1+default-dev,
 librust-serde-1+derive-dev,
 librust-serde-cbor-0.11+default-dev (>= 0.11.1-~~),
@@ -98,9 +96,9 @@ Build-Depends: debhelper (>= 12),
 librust-tokio-1+time-dev (>= 1.6-~~),
 librust-tokio-openssl-0.6+default-dev (>= 0.6.1-~~),
 librust-tokio-stream-0.1+default-dev,
-librust-tokio-util-0.7+codec-dev,
-librust-tokio-util-0.7+default-dev,
-librust-tokio-util-0.7+io-dev,
+librust-tokio-util-0.6+codec-dev,
+librust-tokio-util-0.6+default-dev,
+librust-tokio-util-0.6+io-dev,
 librust-tower-service-0.3+default-dev,
 librust-udev-0.4+default-dev,
 librust-url-2+default-dev (>= 2.1-~~),

debian/postinst

@@ -41,14 +41,7 @@ case "$1" in
 	    flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active || true
 	fi
-	if dpkg --compare-versions "$2" 'lt' '2.2.2~'; then
-	    echo "moving prune schedule from datacenter config to new prune job config"
-	    proxmox-backup-manager update-to-prune-jobs-config \
-		|| echo "Failed to move prune jobs, please check manually"
-	    true
-	fi
-	if dpkg --compare-versions "$2" 'lt' '2.1.3~' && test -e /etc/proxmox-backup/sync.cfg; then
+	if dpkg --compare-versions "$2" 'lt' '7.1-1' && test -e /etc/proxmox-backup/sync.cfg; then
 	    prev_job=""
 
 	    # read from HERE doc because POSIX sh limitations
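For context, the version guards above work because ``dpkg --compare-versions`` exits with status 0 exactly when the stated relation holds, so it can be used directly in ``if``; a minimal illustration with made-up versions:

    if dpkg --compare-versions "2.1.0-1" 'lt' '2.2.2~'; then
        echo "the 2.2.2 upgrade fixup would run"
    fi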

debian/rules

@@ -32,6 +32,9 @@ override_dh_auto_build:
 override_dh_missing:
 	dh_missing --fail-missing
 
+override_dh_auto_test:
+	# ignore here to avoid rebuilding the binaries with the wrong target
+
 override_dh_auto_install:
 	dh_auto_install -- \
 	    PROXY_USER=backup \
docs/backup-client.rst

@@ -71,7 +71,7 @@ Environment Variables
 
 .. Note:: Passwords must be valid UTF-8 and may not contain newlines. For your
-   convenience, Proxmox Backup Server only uses the first line as password, so
+   convienience, Proxmox Backup Server only uses the first line as password, so
    you can add arbitrary comments after the first newline.
@@ -120,11 +120,11 @@ This will prompt you for a password, then upload a file archive named
 (i.e. ``--include-dev /boot/efi``). You can use this option
 multiple times for each mount point that should be included.
 
-The ``--repository`` option can get quite long and is used by all commands. You
-can avoid having to enter this value by setting the environment variable
-``PBS_REPOSITORY``. Note that if you would like this to remain set over
-multiple sessions, you should instead add the below line to your ``.bashrc``
-file.
+The ``--repository`` option can get quite long and is used by all
+commands. You can avoid having to enter this value by setting the
+environment variable ``PBS_REPOSITORY``. Note that if you would like this to
+remain set over multiple sessions, you should instead add the below line to your
+``.bashrc`` file.
 
 .. code-block:: console
@@ -142,16 +142,9 @@ you want to back up two disks mounted at ``/mnt/disk1`` and ``/mnt/disk2``:
 
 This creates a backup of both disks.
 
-If you want to use a namespace for the backup target you can add the `--ns`
-parameter:
-
-.. code-block:: console
-
-    # proxmox-backup-client backup disk1.pxar:/mnt/disk1 disk2.pxar:/mnt/disk2 --ns a/b/c
-
-The backup command takes a list of backup specifications, which include the
-archive name on the server, the type of the archive, and the archive source at
-the client. The format is:
+The backup command takes a list of backup specifications, which
+include the archive name on the server, the type of the archive, and the
+archive source at the client. The format is:
 
     <archive-name>.<type>:<source-path>
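As an illustrative combination of such specifications (the paths are examples), one run can mix a file archive and a block-device image archive:

.. code-block:: console

    # proxmox-backup-client backup root.pxar:/ mydata.img:/dev/mylvm/mydata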
@@ -166,25 +159,21 @@ device images. To create a backup of a block device, run the following command:
 Excluding Files/Directories from a Backup
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Sometimes it is desired to exclude certain files or directories from a backup
-archive. To tell the Proxmox Backup client when and how to ignore files and
-directories, place a text file named ``.pxarexclude`` in the filesystem
-hierarchy. Whenever the backup client encounters such a file in a directory,
-it interprets each line as a glob match pattern for files and directories that
-are to be excluded from the backup.
+Sometimes it is desired to exclude certain files or directories from a backup archive.
+To tell the Proxmox Backup client when and how to ignore files and directories,
+place a text file named ``.pxarexclude`` in the filesystem hierarchy.
+Whenever the backup client encounters such a file in a directory, it interprets
+each line as a glob match pattern for files and directories that are to be excluded
+from the backup.
 
 The file must contain a single glob pattern per line. Empty lines and lines
 starting with ``#`` (indicating a comment) are ignored.
 
-A ``!`` at the beginning of a line reverses the glob match pattern from an
-exclusion to an explicit inclusion. This makes it possible to exclude all
-entries in a directory except for a few single files/subdirectories.
+A ``!`` at the beginning of a line reverses the glob match pattern from an exclusion
+to an explicit inclusion. This makes it possible to exclude all entries in a
+directory except for a few single files/subdirectories.
 
 Lines ending in ``/`` match only on directories.
 
-The directory containing the ``.pxarexclude`` file is considered to be the root
-of the given patterns. It is only possible to match files in this directory and
-its subdirectories.
-
-.. Note:: Patterns without a leading ``/`` will also match in subdirectories,
-   while patterns with a leading ``/`` will only match in the current directory.
+The directory containing the ``.pxarexclude`` file is considered to be the root of
+the given patterns. It is only possible to match files in this directory and its subdirectories.
 
 ``\`` is used to escape special glob characters.
 ``?`` matches any single character.
@@ -193,15 +182,15 @@ its subdirectories.
 the pattern ``**/*.tmp``, it would exclude all files ending in ``.tmp`` within
 a directory and its subdirectories.
 
 ``[...]`` matches a single character from any of the provided characters within
-the brackets. ``[!...]`` does the complementary and matches any single
-character not contained within the brackets. It is also possible to specify
-ranges with two characters separated by ``-``. For example, ``[a-z]`` matches
-any lowercase alphabetic character, and ``[0-9]`` matches any single digit.
+the brackets. ``[!...]`` does the complementary and matches any single character
+not contained within the brackets. It is also possible to specify ranges with two
+characters separated by ``-``. For example, ``[a-z]`` matches any lowercase
+alphabetic character, and ``[0-9]`` matches any single digit.
 
 The order of the glob match patterns defines whether a file is included or
 excluded, that is to say, later entries override earlier ones.
 
-This is also true for match patterns encountered deeper down the directory
-tree, which can override a previous exclusion.
+This is also true for match patterns encountered deeper down the directory tree,
+which can override a previous exclusion.
 
 .. Note:: Excluded directories will **not** be read by the backup client. Thus,
    a ``.pxarexclude`` file in an excluded subdirectory will have no effect.
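To make the pattern rules concrete, a small illustrative ``.pxarexclude`` (entries are made up)::

    # exclude all log files anywhere below this directory
    **/*.log
    # but re-include this one (later entries override earlier ones)
    !important.log
    # exclude directories named build (trailing slash: directories only)
    build/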
@@ -416,11 +405,6 @@ list command provides a list of all the snapshots on the server:
 ├────────────────────────────────┼─────────────┼────────────────────────────────────┤
 ...
 
-.. tip:: List will by default only output the backup snapshots of the root
-   namespace itself. To list backups from another namespace use the ``--ns
-   <ns>`` option
-
 You can inspect the catalog to find specific files.
 
 .. code-block:: console
@@ -578,10 +562,10 @@ user that has ``Datastore.Modify`` privileges on the datastore.
 
     # proxmox-backup-client change-owner vm/103 john@pbs
 
-This can also be done from within the web interface, by navigating to the
-`Content` section of the datastore that contains the backup group and selecting
-the user icon under the `Actions` column. Common cases for this could be to
-change the owner of a sync job from ``root@pam``, or to repurpose a backup
-group.
+This can also be done from within the web interface, by navigating to the
+`Content` section of the datastore that contains the backup group and
+selecting the user icon under the `Actions` column. Common cases for this could
+be to change the owner of a sync job from ``root@pam``, or to repurpose a
+backup group.
 
 .. _backup-pruning:
@@ -589,24 +573,16 @@ group.
 Pruning and Removing Backups
 ----------------------------
 
-You can manually delete a backup snapshot using the ``forget`` command:
+You can manually delete a backup snapshot using the ``forget``
+command:
 
 .. code-block:: console
 
     # proxmox-backup-client snapshot forget <snapshot>
 
-.. caution:: This command removes all archives in this backup snapshot. They
-   will be inaccessible and *unrecoverable*.
-
-Don't forget to add the namespace ``--ns`` parameter if you want to forget a
-snapshot that is not contained in the root namespace:
-
-.. code-block:: console
-
-    # proxmox-backup-client snapshot forget <snapshot> --ns <ns>
+.. caution:: This command removes all archives in this backup
+   snapshot. They will be inaccessible and unrecoverable.
 
 Although manual removal is sometimes required, the ``prune``

docs/certificate-management.rst

@@ -1,333 +0,0 @@
.. _sysadmin_certificate_management:
Certificate Management
----------------------
Access to the API and thus the web-based administration interface is always
encrypted through ``https``. Each `Proxmox Backup`_ host creates by default its
own (self-signed) certificate. This certificate is used for encrypted
communication with the host's ``proxmox-backup-proxy`` service, for any API
call between a user or backup-client and the web-interface.
Certificate verification when sending backups to a `Proxmox Backup`_ server
is either done based on pinning the certificate fingerprints in the storage/remote
configuration, or by using certificates, signed by a trusted certificate authority.
.. _sysadmin_certs_api_gui:
Certificates for the API and SMTP
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
`Proxmox Backup`_ stores its certificate and key in:
- ``/etc/proxmox-backup/proxy.pem``
- ``/etc/proxmox-backup/proxy.key``
You have the following options for the certificate:
1. Keep using the default self-signed certificate in
``/etc/proxmox-backup/proxy.pem``.
2. Use an externally provided certificate (for example, signed by a
commercial Certificate Authority (CA)).
3. Use an ACME provider like Let's Encrypt to get a trusted certificate
with automatic renewal; this is also integrated in the `Proxmox Backup`_
API and web interface.
Certificates are managed through the `Proxmox Backup`_
web-interface/API or using the ``proxmox-backup-manager`` CLI tool.
.. _sysadmin_certs_upload_custom:
Upload Custom Certificate
~~~~~~~~~~~~~~~~~~~~~~~~~
If you already have a certificate which you want to use for a
`Proxmox Backup`_ host, you can simply upload that certificate over the web
interface.
.. image:: images/screenshots/pbs-gui-certs-upload-custom.png
:align: right
:alt: Upload a custom certificate
Note that any certificate key files must not be password protected.
.. _sysadmin_certs_get_trusted_acme_cert:
Trusted certificates via Let's Encrypt (ACME)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
`Proxmox Backup`_ includes an implementation of the **A**\ utomatic
**C**\ ertificate **M**\ anagement **E**\ nvironment (**ACME**)
protocol, allowing `Proxmox Backup`_ admins to use an ACME provider
like Let's Encrypt for easy setup of TLS certificates, which are
accepted and trusted by modern operating systems and web browsers out of
the box.
Currently, the two ACME endpoints implemented are the `Let's Encrypt
(LE) <https://letsencrypt.org>`_ production and staging environments.
Our ACME client supports validation of ``http-01`` challenges using a
built-in web server and validation of ``dns-01`` challenges using a DNS
plugin supporting all the DNS API endpoints
`acme.sh <https://acme.sh>`_ does.
.. _sysadmin_certs_acme_account:
ACME Account
^^^^^^^^^^^^
.. image:: images/screenshots/pbs-gui-acme-create-account.png
:align: right
:alt: Create ACME Account
You need to register an ACME account per cluster, with the endpoint you
want to use. The email address used for that account will serve as the
contact point for renewal-due or similar notifications from the ACME
endpoint.
You can register or deactivate ACME accounts over the web interface
``Certificates -> ACME Accounts`` or using the ``proxmox-backup-manager`` command
line tool.
::
proxmox-backup-manager acme account register <account-name> <mail@example.com>
.. tip::
Because of
`rate-limits <https://letsencrypt.org/docs/rate-limits/>`_ you
should use LE ``staging`` for experiments or if you use ACME for the
very first time until all is working there, and only then switch over
to the production directory.
.. _sysadmin_certs_acme_plugins:
ACME Plugins
^^^^^^^^^^^^
The ACME plugin's role is to provide automatic verification that you,
and thus the `Proxmox Backup`_ server under your operation, are the
real owner of a domain. This is the basic building block of automatic
certificate management.
The ACME protocol specifies different types of challenges, for example
the ``http-01``, where a web server provides a file with a specific
token to prove that it controls a domain. Sometimes this isn't possible,
either because of technical limitations or if the address of a record is
not reachable from the public internet. The ``dns-01`` challenge can be
used in such cases. This challenge is fulfilled by creating a certain
DNS record in the domain's zone.
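Once fulfilled, such a challenge results in a DNS record along these lines (the token value is made up)::

    _acme-challenge.example.com. 300 IN TXT "dGhpcy1pcy1hLW1hZGUtdXAtdG9rZW4"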
.. image:: images/screenshots/pbs-gui-acme-create-challenge-plugin.png
:align: right
:alt: Create ACME Account
`Proxmox Backup`_ supports both of those challenge types out of the
box, you can configure plugins either over the web interface under
``Certificates -> ACME Challenges``, or using the
``proxmox-backup-manager acme plugin add`` command.
ACME Plugin configurations are stored in ``/etc/proxmox-backup/acme/plugins.cfg``.
.. _domains:
Domains
^^^^^^^
You can add new or manage existing domain entries under
``Certificates``, or using the ``proxmox-backup-manager`` command.
.. image:: images/screenshots/pbs-gui-acme-add-domain.png
:align: right
:alt: Add a Domain for ACME verification
After configuring the desired domain(s) for a node and ensuring that the
desired ACME account is selected, you can order your new certificate
over the web-interface. On success, the interface will reload after
roughly 10 seconds.
Renewal will happen `automatically <#sysadmin-certs-acme-automatic-renewal>`_.
.. _sysadmin_certs_acme_http_challenge:
ACME HTTP Challenge Plugin
~~~~~~~~~~~~~~~~~~~~~~~~~~
There is always an implicitly configured ``standalone`` plugin for
validating ``http-01`` challenges via the built-in web server spawned on
port 80.
.. note::
The name ``standalone`` means that it can provide the validation on
its own, without any third party service.
There are a few prerequisites to use this for certificate management
with Let's Encrypt's ACME.
- You have to accept the ToS of Let's Encrypt to register an account.
- **Port 80** of the node needs to be reachable from the internet.
- There **must** be no other listener on port 80.
- The requested (sub)domain needs to resolve to a public IP of the
`Proxmox Backup`_ host.
.. _sysadmin_certs_acme_dns_challenge:
ACME DNS API Challenge Plugin
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
On systems where external access for validation via the ``http-01``
method is not possible or desired, it is possible to use the ``dns-01``
validation method. This validation method requires a DNS server that
allows provisioning of ``TXT`` records via an API.
.. _sysadmin_certs_acme_dns_api_config:
Configuring ACME DNS APIs for validation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
`Proxmox Backup`_ re-uses the DNS plugins developed for the
``acme.sh`` [1]_ project. Please refer to its documentation for details
on configuration of specific APIs.
The easiest way to configure a new plugin with the DNS API is using the
web interface (``Certificates -> ACME Accounts/Challenges``).
Here you can add a new challenge plugin by selecting your API provider
and entering the credential data to access your account over their API.
.. tip::
See the acme.sh `How to use DNS
API <https://github.com/acmesh-official/acme.sh/wiki/dnsapi#how-to-use-dns-api>`_
wiki for more detailed information about getting API credentials for
your provider. Configuration values do not need to be quoted with
single or double quotes; for some plugins that is even an error.
As there are many DNS providers and API endpoints, `Proxmox Backup`_
automatically generates the form for the credentials, but not all
providers are annotated yet. For those you will see a bigger text area,
into which you simply need to copy all the credentials
``KEY``\ =\ ``VALUE`` pairs.
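For such providers the pasted credentials are plain ``KEY=VALUE`` lines, one pair per line; for example, for a hypothetical Cloudflare-style plugin (values are made up)::

    CF_Key=0123456789abcdef
    CF_Email=admin@example.com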
.. _dns_validation_through_cname_alias:
DNS Validation through CNAME Alias
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A special ``alias`` mode can be used to handle validation on a different
domain/DNS server, in case your primary/real DNS does not support
provisioning via an API. Manually set up a permanent ``CNAME`` record
for ``_acme-challenge.domain1.example`` pointing to
``_acme-challenge.domain2.example``, and set the ``alias`` property in
the `Proxmox Backup`_ node configuration file ``/etc/proxmox-backup/node.cfg``
to ``domain2.example`` to allow the DNS server of ``domain2.example`` to
validate all challenges for ``domain1.example``.
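In zone-file syntax, the permanent record described above would look like this (using the placeholder domains from the text)::

    _acme-challenge.domain1.example. 3600 IN CNAME _acme-challenge.domain2.example.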
.. _sysadmin_certs_acme_dns_wildcard:
Wildcard Certificates
^^^^^^^^^^^^^^^^^^^^^
Wildcard DNS names start with a ``*.`` prefix and are considered valid
for all (one-level) subdomain names of the verified domain. So a
certificate for ``*.domain.example`` is valid for ``foo.domain.example``
and ``bar.domain.example``, but not for ``baz.foo.domain.example``.
Currently, you can only create wildcard certificates with the `DNS
challenge
type <https://letsencrypt.org/docs/challenge-types/#dns-01-challenge>`_.
.. _combination_of_plugins:
Combination of Plugins
^^^^^^^^^^^^^^^^^^^^^^
Combining ``http-01`` and ``dns-01`` validation is possible in case your
node is reachable via multiple domains with different requirements / DNS
provisioning capabilities. Mixing DNS APIs from multiple providers or
instances is also possible by specifying different plugin instances per
domain.
.. tip::
Accessing the same service over multiple domains increases complexity
and should be avoided if possible.
.. _sysadmin_certs_acme_automatic_renewal:
Automatic renewal of ACME certificates
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If a node has been successfully configured with an ACME-provided
certificate (either via ``proxmox-backup-manager`` or via the web-interface/API), the
certificate will be renewed automatically by the ``proxmox-backup-daily-update.service``.
Currently, renewal is triggered if the certificate either has already
expired or if it will expire in the next 30 days.
.. _manually_change_certificate_over_command_line:
Manually Change Certificate over Command-Line
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you want to get rid of certificate verification warnings, you have to
generate a valid certificate for your server.
Log in to your `Proxmox Backup`_ via ssh or use the console:
::
openssl req -newkey rsa:2048 -nodes -keyout key.pem -out req.pem
Follow the instructions on the screen, for example:
::
Country Name (2 letter code) [AU]: AT
State or Province Name (full name) [Some-State]:Vienna
Locality Name (eg, city) []:Vienna
Organization Name (eg, company) [Internet Widgits Pty Ltd]: Proxmox GmbH
Organizational Unit Name (eg, section) []:Proxmox Backup
Common Name (eg, YOUR name) []: yourproxmox.yourdomain.com
Email Address []:support@yourdomain.com
Please enter the following 'extra' attributes to be sent with your certificate request
A challenge password []: not necessary
An optional company name []: not necessary
After you have finished the certificate request, you have to send the
file ``req.pem`` to your Certification Authority (CA). The CA will issue
the certificate (BASE64 encoded) based on your request. Save this file
as ``cert.pem`` on your `Proxmox Backup`_.
To activate the new certificate, do the following on your `Proxmox Backup`_
::
cp key.pem /etc/proxmox-backup/proxy.key
cp cert.pem /etc/proxmox-backup/proxy.pem
Then restart the API servers:
::
systemctl restart proxmox-backup-proxy
Test your new certificate using your browser.
.. note::
To transfer files to and from your `Proxmox Backup`_, you can use
secure copy: If your desktop runs Linux, you can use the ``scp``
command line tool. If your desktop PC runs Windows, please use an scp
client like WinSCP (see https://winscp.net/).
.. [1]
acme.sh https://github.com/acmesh-official/acme.sh

docs/command-line-tools.rst

@@ -6,37 +6,22 @@ Command Line Tools
 
 .. include:: proxmox-backup-client/description.rst
 
-``proxmox-file-restore``
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. include:: proxmox-file-restore/description.rst
-
 ``proxmox-backup-manager``
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 .. include:: proxmox-backup-manager/description.rst
 
-``proxmox-tape``
-~~~~~~~~~~~~~~~~
-
-.. include:: proxmox-tape/description.rst
-
-``pmt``
-~~~~~~~
-
-.. include:: pmt/description.rst
-
-``pmtx``
-~~~~~~~~
-
-.. include:: pmtx/description.rst
-
 ``pxar``
 ~~~~~~~~
 
 .. include:: pxar/description.rst
 
+``proxmox-file-restore``
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. include:: proxmox-file-restore/description.rst
+
 ``proxmox-backup-debug``
-~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~
 
 .. include:: proxmox-backup-debug/description.rst

docs/command-syntax.rst

@@ -51,13 +51,3 @@ The following commands are available in an interactive restore shell:
 --------
 
 .. include:: pxar/synopsis.rst
-
-``proxmox-file-restore``
-------------------------
-
-.. include:: proxmox-file-restore/synopsis.rst
-
-``proxmox-backup-debug``
-------------------------
-
-.. include:: proxmox-backup-debug/synopsis.rst

docs/conf.py

@@ -77,7 +77,7 @@ project = 'Proxmox Backup'
 copyright = '2019-2021, Proxmox Server Solutions GmbH'
 author = 'Proxmox Support Team'
 
-# The version info for the project you're documenting, acts as a replacement for
+# The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
@@ -108,14 +108,11 @@ today_fmt = '%A, %d %B %Y'
 exclude_patterns = [
     '_build', 'Thumbs.db', '.DS_Store',
     '*/man1.rst',
-    'certificate-management.rst',
     'config/*/man5.rst',
     'epilog.rst',
     'pbs-copyright.rst',
-    'local-zfs.rst',
+    'local-zfs.rst'
     'package-repositories.rst',
-    'system-booting.rst',
-    'traffic-control.rst',
 ]
 
 # The reST default role (used for this markup: `text`) to use for all

docs/epilog.rst

@@ -35,7 +35,7 @@
 
 .. _ZFS: https://en.wikipedia.org/wiki/ZFS
 .. _Proxmox VE: https://pve.proxmox.com
-.. _RFC3339: https://tools.ietf.org/html/rfc3339
+.. _RFC3399: https://tools.ietf.org/html/rfc3339
 .. _UTC: https://en.wikipedia.org/wiki/Coordinated_Universal_Time
 .. _ISO Week date: https://en.wikipedia.org/wiki/ISO_week_date

docs/faq.rst

@@ -29,7 +29,7 @@ How long will my Proxmox Backup Server version be supported?
 +=======================+======================+===============+============+====================+
 |Proxmox Backup 2.x     | Debian 11 (Bullseye) | 2021-07       | tba        | tba                |
 +-----------------------+----------------------+---------------+------------+--------------------+
-|Proxmox Backup 1.x     | Debian 10 (Buster)   | 2020-11       | 2022-08    | 2022-07            |
+|Proxmox Backup 1.x     | Debian 10 (Buster)   | 2020-11       | ~Q2/2022   | Q2-Q3/2022         |
 +-----------------------+----------------------+---------------+------------+--------------------+

Binary image files changed; contents not shown.
View File

@ -50,7 +50,6 @@ in the section entitled "GNU Free Documentation License".
file-formats.rst file-formats.rst
backup-protocol.rst backup-protocol.rst
calendarevents.rst calendarevents.rst
markdown-primer.rst
glossary.rst glossary.rst
GFDL.rst GFDL.rst

View File

@ -5,11 +5,10 @@ What is Proxmox Backup Server?
------------------------------ ------------------------------
Proxmox Backup Server is an enterprise-class, client-server backup solution that Proxmox Backup Server is an enterprise-class, client-server backup solution that
is capable of backing up :term:`virtual machine<Virtual machine>`\ s, is capable of backing up :term:`virtual machine`\ s, :term:`container`\ s, and
:term:`container<Container>`\ s, and physical hosts. It is specially optimized physical hosts. It is specially optimized for the `Proxmox Virtual Environment`_
for the `Proxmox Virtual Environment`_ platform and allows you to back up your platform and allows you to back up your data securely, even between remote
data securely, even between remote sites, providing easy management through a sites, providing easy management through a web-based user interface.
web-based user interface.
It supports deduplication, compression, and authenticated It supports deduplication, compression, and authenticated
encryption (AE_). Using :term:`Rust` as the implementation language guarantees encryption (AE_). Using :term:`Rust` as the implementation language guarantees
@ -35,18 +34,18 @@ For QEMU_ and LXC_ within `Proxmox Virtual Environment`_, we deliver an
integrated client. integrated client.
A single backup is allowed to contain several archives. For example, when you A single backup is allowed to contain several archives. For example, when you
backup a :term:`virtual machine<Virtual machine>`, each disk is stored as a backup a :term:`virtual machine`, each disk is stored as a separate archive
separate archive inside that backup. The VM configuration itself is stored as inside that backup. The VM configuration itself is stored as an extra file.
an extra file. This way, it's easy to access and restore only the important This way, it's easy to access and restore only the important parts of the
parts of the backup, without the need to scan the whole backup. backup, without the need to scan the whole backup.
Main Features Main Features
------------- -------------
:Support for Proxmox VE: The `Proxmox Virtual Environment`_ is fully :Support for Proxmox VE: The `Proxmox Virtual Environment`_ is fully
supported, and you can easily backup :term:`virtual machine<Virtual machine>`\ s and supported, and you can easily backup :term:`virtual machine`\ s and
:term:`container<Container>`\ s. :term:`container`\ s.
:Performance: The whole software stack is written in :term:`Rust`, :Performance: The whole software stack is written in :term:`Rust`,
in order to provide high speed and memory efficiency. in order to provide high speed and memory efficiency.

View File

@ -191,12 +191,12 @@ With `systemd-boot`:
.. code-block:: console .. code-block:: console
# proxmox-boot-tool format <new ESP> # pve-efiboot-tool format <new disk's ESP>
# proxmox-boot-tool init <new ESP> # pve-efiboot-tool init <new disk's ESP>
.. NOTE:: `ESP` stands for EFI System Partition, which is setup as partition #2 on .. NOTE:: `ESP` stands for EFI System Partition, which is setup as partition #2 on
bootable disks setup by the `Proxmox Backup`_ installer. For details, see bootable disks setup by the {pve} installer since version 5.4. For details, see
:ref:`Setting up a new partition for use as synced ESP <systembooting-proxmox-boot-setup>`. xref:sysboot_systemd_boot_setup[Setting up a new partition for use as synced ESP].
With `grub`: With `grub`:
@ -211,22 +211,27 @@ Usually `grub.cfg` is located in `/boot/grub/grub.cfg`
Activate e-mail notification Activate e-mail notification
^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
ZFS comes with an event daemon ``ZED``, which monitors events generated by the ZFS comes with an event daemon, which monitors events generated by the
ZFS kernel module. The daemon can also send emails on ZFS events like pool ZFS kernel module. The daemon can also send emails on ZFS events like
errors. Newer ZFS packages ship the daemon in a separate package ``zfs-zed``, pool errors. Newer ZFS packages ship the daemon in a separate package,
which should already be installed by default in `Proxmox Backup`_. and you can install it using `apt-get`:
You can configure the daemon via the file ``/etc/zfs/zed.d/zed.rc`` with your .. code-block:: console
favorite editor. The required setting for email notification is
``ZED_EMAIL_ADDR``, which is set to ``root`` by default. # apt-get install zfs-zed
To activate the daemon, it is necessary to uncomment the ZED_EMAIL_ADDR
setting in the file `/etc/zfs/zed.d/zed.rc`.
.. code-block:: console .. code-block:: console
ZED_EMAIL_ADDR="root" ZED_EMAIL_ADDR="root"
Please note that `Proxmox Backup`_ forwards mails to `root` to the email address Please note that Proxmox Backup forwards mails to `root` to the email address
configured for the root user. configured for the root user.
IMPORTANT: The only setting that is required is `ZED_EMAIL_ADDR`. All
other settings are optional.
Limit ZFS memory usage Limit ZFS memory usage
^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^
@ -249,7 +254,6 @@ The above example limits the usage to 8 GiB ('8 * 2^30^').
configuration in `/etc/modprobe.d/zfs.conf`, with: configuration in `/etc/modprobe.d/zfs.conf`, with:
.. code-block:: console .. code-block:: console
options zfs zfs_arc_min=8589934591 options zfs zfs_arc_min=8589934591
options zfs zfs_arc_max=8589934592 options zfs zfs_arc_max=8589934592
@ -269,7 +273,8 @@ Swap on ZFS
^^^^^^^^^^^ ^^^^^^^^^^^
Swap-space created on a zvol may cause some issues, such as blocking the Swap-space created on a zvol may cause some issues, such as blocking the
server or generating a high IO load. server or generating a high IO load, often seen when starting a backup
to external storage.
We strongly recommend using enough memory, so that you normally do not We strongly recommend using enough memory, so that you normally do not
run into low memory situations. Should you need or want to add swap, it is run into low memory situations. Should you need or want to add swap, it is
@ -306,20 +311,18 @@ ZFS compression
^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^
To activate compression: To activate compression:
.. code-block:: console .. code-block:: console
# zpool set compression=lz4 <pool> # zpool set compression=lz4 <pool>
We recommend using the `lz4` algorithm, since it adds very little CPU overhead. We recommend using the `lz4` algorithm, since it adds very little CPU overhead.
Other algorithms such as `lzjb`, `zstd` and `gzip-N` (where `N` is an integer from `1-9` Other algorithms such as `lzjb` and `gzip-N` (where `N` is an integer from `1-9`
representing the compression ratio, where 1 is fastest and 9 is best representing the compression ratio, where 1 is fastest and 9 is best
compression) are also available. Depending on the algorithm and how compression) are also available. Depending on the algorithm and how
compressible the data is, having compression enabled can even increase I/O compressible the data is, having compression enabled can even increase I/O
performance. performance.
You can disable compression at any time with: You can disable compression at any time with:
.. code-block:: console .. code-block:: console
# zfs set compression=off <dataset> # zfs set compression=off <dataset>

View File

@ -173,10 +173,6 @@ scheduled verification, garbage-collection and synchronization tasks results.
By default, notifications are sent to the email address configured for the By default, notifications are sent to the email address configured for the
`root@pam` user. You can instead set this user for each datastore. `root@pam` user. You can instead set this user for each datastore.
.. image:: images/screenshots/pbs-gui-datastore-options.png
:align: right
:alt: Datastore Options
You can also change the level of notification received per task type, the You can also change the level of notification received per task type, the
following options are available: following options are available:
@ -186,20 +182,3 @@ following options are available:
* Errors: send a notification for any scheduled task that results in an error * Errors: send a notification for any scheduled task that results in an error
* Never: do not send any notification at all * Never: do not send any notification at all
.. _maintenance_mode:
Maintenance Mode
----------------
Proxmox Backup Server implements setting the `read-only` and `offline`
maintenance modes for a datastore.
Once enabled, depending on the mode, new reads and/or writes to the datastore
are blocked, allowing an administrator to safely execute maintenance tasks, for
example, on the underlying storage.
Internally, Proxmox Backup Server tracks whether each datastore access is a
write or a read operation, so that it can gracefully enter the respective mode
by allowing conflicting operations that started before enabling the maintenance
mode to finish.
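As a sketch, the maintenance mode could be set and later cleared on the command
line like this (the option syntax is an assumption based on the datastore
configuration; `store1` is a made-up name):

.. code-block:: console
# proxmox-backup-manager datastore update store1 --maintenance-mode offline
# proxmox-backup-manager datastore update store1 --delete maintenance-mode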

View File

@ -1,5 +1,5 @@
Managing Remotes & Sync Managing Remotes
======================= ================
.. _backup_remote: .. _backup_remote:
@ -107,7 +107,6 @@ of the specified criteria are synced. The available criteria are:
# proxmox-backup-manager sync-job update ID --group-filter group:vm/100 # proxmox-backup-manager sync-job update ID --group-filter group:vm/100
* regular expression matched against the full group identifier * regular expression matched against the full group identifier
.. todo:: add example for regex .. todo:: add example for regex
The same filter is applied to local groups for handling of the The same filter is applied to local groups for handling of the
@ -115,93 +114,12 @@ The same filter is applied to local groups for handling of the
.. note:: The ``protected`` flag of remote backup snapshots will not be synced. .. note:: The ``protected`` flag of remote backup snapshots will not be synced.
Namespace Support
^^^^^^^^^^^^^^^^^
Sync jobs can be configured to not only sync datastores, but also sub-sets of
datastores in the form of namespaces or namespace sub-trees. The following
parameters influence how namespaces are treated as part of a sync job
execution:
- ``remote-ns``: the remote namespace anchor (default: the root namespace)
- ``ns``: the local namespace anchor (default: the root namespace)
- ``max-depth``: whether to recursively iterate over sub-namespaces of the remote
namespace anchor (default: `None`)
If ``max-depth`` is set to `0`, groups are synced from ``remote-ns`` into
``ns``, without any recursion. If it is set to `None` (left empty), recursion
depth will depend on the value of ``remote-ns`` and the remote side's
availability of namespace support:
- ``remote-ns`` set to something other than the root namespace: remote *must*
support namespaces, full recursion starting at ``remote-ns``.
- ``remote-ns`` set to root namespace and remote *supports* namespaces: full
recursion starting at root namespace.
- ``remote-ns`` set to root namespace and remote *does not support* namespaces:
backwards-compat mode, only root namespace will be synced into ``ns``, no
recursion.
Any other value of ``max-depth`` will limit recursion to at most ``max-depth``
levels, for example: ``remote-ns`` set to `location_a/department_b` and
``max-depth`` set to `1` will result in `location_a/department_b` and at most
one more level of sub-namespaces being synced.
The namespace tree starting at ``remote-ns`` will be mapped into ``ns`` up to a
depth of ``max-depth``.
For example, with the following namespaces at the remote side:
- `location_a`
- `location_a/department_x`
- `location_a/department_x/team_one`
- `location_a/department_x/team_two`
- `location_a/department_y`
- `location_a/department_y/team_one`
- `location_a/department_y/team_two`
- `location_b`
and with ``remote-ns`` set to `location_a/department_x` and ``ns`` set to
`location_a_dep_x`, the following namespace tree results on the sync
target:
- `location_a_dep_x` (containing the remote's `location_a/department_x`)
- `location_a_dep_x/team_one` (containing the remote's `location_a/department_x/team_one`)
- `location_a_dep_x/team_two` (containing the remote's `location_a/department_x/team_two`)
with the rest of the remote namespaces and groups not being synced (by this
sync job).
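A sketch of how this example could be configured from the command line,
assuming the ``remote-ns``, ``ns`` and ``max-depth`` parameters above are
exposed as options of ``proxmox-backup-manager sync-job update`` (the job ID is
a placeholder):

.. code-block:: console
# proxmox-backup-manager sync-job update ID --remote-ns location_a/department_x --ns location_a_dep_x --max-depth 1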
If a remote namespace is included in the sync job scope, but does not exist
locally, it will be created (provided the sync job owner has sufficient
privileges).
If the ``remove-vanished`` option is set, namespaces that are included in the
sync job scope but only exist locally are treated as vanished and removed
(provided the sync job owner has sufficient privileges).
.. note:: All other limitations on sync scope (such as remote user/API token
privileges, group filters) also apply for sync jobs involving one or
multiple namespaces.
Bandwidth Limit Bandwidth Limit
^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^
Syncing a datastore to an archive can produce lots of traffic and impact other Syncing datastores to an archive can produce lots of traffic and impact other
users of the network. So, to avoid network or storage congestion you can limit users of the network. So, to avoid network or storage congestion you can limit
the bandwidth of the sync job by setting the ``rate-in`` option either in the the bandwidth of the sync job by setting the ``rate-in`` option either in the
web interface or using the ``proxmox-backup-manager`` command-line tool: web interface or using the ``proxmox-backup-manager`` command-line tool:
.. code-block:: console .. code-block:: console

View File

@ -1,178 +0,0 @@
.. _markdown-primer:
Markdown Primer
===============
"Markdown is a text-to-HTML conversion tool for web writers. Markdown allows
you to write using an easy-to-read, easy-to-write plain text format, then
convert it to structurally valid XHTML (or HTML)."
-- John Gruber, https://daringfireball.net/projects/markdown/
The Proxmox Backup Server (PBS) web-interface has support for using Markdown to
render rich text formatting in node and virtual guest notes.
PBS supports CommonMark with most extensions of GFM (GitHub Flavoured Markdown),
like tables or task-lists.
.. _markdown_basics:
Markdown Basics
---------------
Note that we only describe the basics here; please search the web for more
extensive resources, for example on https://www.markdownguide.org/
Headings
~~~~~~~~
.. code-block:: md
# This is a Heading h1
## This is a Heading h2
##### This is a Heading h5
Emphasis
~~~~~~~~
Use ``*text*`` or ``_text_`` for emphasis.
Use ``**text**`` or ``__text__`` for bold, heavy-weight text.
Combinations are also possible, for example:
.. code-block:: md
_You **can** combine them_
Links
~~~~~
You can use automatic detection of links, for example,
``https://forum.proxmox.com/`` would transform it into a clickable link.
You can also control the link text, for example:
.. code-block:: md
Now, [the part in brackets will be the link text](https://forum.proxmox.com/).
Lists
~~~~~
Unordered Lists
^^^^^^^^^^^^^^^
Use ``*`` or ``-`` for unordered lists, for example:
.. code-block:: md
* Item 1
* Item 2
* Item 2a
* Item 2b
Adding an indentation can be used to create nested lists.
Ordered Lists
^^^^^^^^^^^^^
.. code-block:: md
1. Item 1
1. Item 2
1. Item 3
1. Item 3a
1. Item 3b
NOTE: The integers in ordered lists do not need to be correct; the items will be numbered automatically.
Task Lists
^^^^^^^^^^
Task lists use an empty box ``[ ]`` for unfinished tasks and a box with an `X` for finished tasks.
For example:
.. code-block:: md
- [X] First task already done!
- [X] Second one too
- [ ] This one is still to-do
- [ ] So is this one
Tables
~~~~~~
Tables use the pipe symbol ``|`` to separate columns, and ``-`` to separate the
table header from the table body. In that separator line, one can also set the
text alignment, making a column left-, center-, or right-aligned.
.. code-block:: md
| Left columns | Right columns | Some | More | Cols.| Centering Works Too
| ------------- |--------------:|--------|------|------|:------------------:|
| left foo | right foo | First | Row | Here | >center< |
| left bar | right bar | Second | Row | Here | 12345 |
| left baz | right baz | Third | Row | Here | Test |
| left zab | right zab | Fourth | Row | Here | ☁️☁️☁️ |
| left rab | right rab | And | Last | Here | The End |
Note that you do not need to align the columns nicely with white space, but
doing so makes editing tables easier.
Block Quotes
~~~~~~~~~~~~
You can enter block quotes by prefixing a line with ``>``, similar to plain-text emails.
.. code-block:: md
> Markdown is a lightweight markup language with plain-text-formatting syntax,
> created in 2004 by John Gruber with Aaron Swartz.
>
>> Markdown is often used to format readme files, for writing messages in online discussion forums,
>> and to create rich text using a plain text editor.
Code and Snippets
~~~~~~~~~~~~~~~~~
You can use backticks to avoid processing for a few words or paragraphs. That is
useful to avoid a code or configuration hunk being mistakenly interpreted as
Markdown.
Inline code
^^^^^^^^^^^
Surrounding part of a line with single backticks allows you to write code inline,
for example:
.. code-block:: md
This host's IP address is `10.0.0.1`.
Whole blocks of code
^^^^^^^^^^^^^^^^^^^^
For code blocks spanning several lines you can use triple-backticks to start
and end such a block, for example:
.. code-block:: md
```
# This is the network config I want to remember here
auto vmbr2
iface vmbr2 inet static
address 10.0.0.1/24
bridge-ports ens20
bridge-stp off
bridge-fd 0
bridge-vlan-aware yes
bridge-vids 2-4094
```

View File

@ -3,10 +3,6 @@
Network Management Network Management
================== ==================
.. image:: images/screenshots/pbs-gui-system-config.png
:align: right
:alt: System and Network Configuration Overview
Proxmox Backup Server provides both a web interface and a command line tool for Proxmox Backup Server provides both a web interface and a command line tool for
network configuration. You can find the configuration options in the web network configuration. You can find the configuration options in the web
interface under the **Network Interfaces** section of the **Configuration** menu interface under the **Network Interfaces** section of the **Configuration** menu
@ -35,6 +31,10 @@ To get a list of available interfaces, use the following command:
│ ens19 │ eth │ 1 │ manual │ │ │ │ │ ens19 │ eth │ 1 │ manual │ │ │ │
└───────┴────────┴───────────┴────────┴─────────────┴──────────────┴──────────────┘ └───────┴────────┴───────────┴────────┴─────────────┴──────────────┴──────────────┘
.. image:: images/screenshots/pbs-gui-network-create-bond.png
:align: right
:alt: Add a network interface
To add a new network interface, use the ``create`` subcommand with the relevant To add a new network interface, use the ``create`` subcommand with the relevant
parameters. For example, you may want to set up a bond, for the purpose of parameters. For example, you may want to set up a bond, for the purpose of
network redundancy. The following command shows a template for creating the bond shown network redundancy. The following command shows a template for creating the bond shown
@ -44,10 +44,6 @@ in the list above:
# proxmox-backup-manager network create bond0 --type bond --bond_mode active-backup --slaves ens18,ens19 --autostart true --cidr x.x.x.x/x --gateway x.x.x.x # proxmox-backup-manager network create bond0 --type bond --bond_mode active-backup --slaves ens18,ens19 --autostart true --cidr x.x.x.x/x --gateway x.x.x.x
.. image:: images/screenshots/pbs-gui-network-create-bond.png
:align: right
:alt: Add a network interface
You can make changes to the configuration of a network interface with the You can make changes to the configuration of a network interface with the
``update`` subcommand: ``update`` subcommand:

View File

@ -27,10 +27,6 @@ update``.
In addition, you need a package repository from Proxmox to get Proxmox Backup In addition, you need a package repository from Proxmox to get Proxmox Backup
updates. updates.
.. image:: images/screenshots/pbs-gui-administration-apt-repos.png
:align: right
:alt: APT Repository Management in the Web Interface
.. _package_repos_secure_apt: .. _package_repos_secure_apt:
SecureApt SecureApt

View File

@ -51,7 +51,7 @@ ENVIRONMENT
:CHANGER: If set, replaces the `--device` option :CHANGER: If set, replaces the `--device` option
:PROXMOX_TAPE_DRIVE: If set, use the Proxmox Backup Server :PROXMOX_TAPE_DRIVE: If set, use the Proxmox Backup Server
configuration to find the associated changer device. configuration to find the associated changer device.
.. include:: ../pbs-copyright.rst .. include:: ../pbs-copyright.rst

View File

@ -11,13 +11,8 @@ Disk Management
:alt: List of disks :alt: List of disks
Proxmox Backup Server comes with a set of disk utilities, which are Proxmox Backup Server comes with a set of disk utilities, which are
accessed using the ``disk`` subcommand or the web interface. This subcommand accessed using the ``disk`` subcommand. This subcommand allows you to initialize
allows you to initialize disks, create various filesystems, and get information disks, create various filesystems, and get information about the disks.
about the disks.
.. image:: images/screenshots/pbs-gui-disks.png
:align: right
:alt: Web Interface Administration: Disks
To view the disks connected to the system, navigate to **Administration -> To view the disks connected to the system, navigate to **Administration ->
Storage/Disks** in the web interface or use the ``list`` subcommand of Storage/Disks** in the web interface or use the ``list`` subcommand of
@ -95,10 +90,6 @@ display S.M.A.R.T. attributes from the web interface or by using the command:
:term:`Datastore` :term:`Datastore`
----------------- -----------------
.. image:: images/screenshots/pbs-gui-datastore-summary.png
:align: right
:alt: Datastore Usage Overview
A datastore refers to a location at which backups are stored. The current A datastore refers to a location at which backups are stored. The current
implementation uses a directory inside a standard Unix file system (``ext4``, implementation uses a directory inside a standard Unix file system (``ext4``,
``xfs`` or ``zfs``) to store the backup data. ``xfs`` or ``zfs``) to store the backup data.
@ -120,7 +111,7 @@ Datastore Configuration
.. image:: images/screenshots/pbs-gui-datastore-content.png .. image:: images/screenshots/pbs-gui-datastore-content.png
:align: right :align: right
:alt: Datastore Content Overview :alt: Datastore Overview
You can configure multiple datastores. A minimum of one datastore needs to be You can configure multiple datastores. A minimum of one datastore needs to be
configured. The datastore is identified by a simple *name* and points to a configured. The datastore is identified by a simple *name* and points to a
@ -137,7 +128,7 @@ run periodically, based on a configured schedule (see
Creating a Datastore Creating a Datastore
^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^
.. image:: images/screenshots/pbs-gui-datastore-create.png .. image:: images/screenshots/pbs-gui-datastore-create-general.png
:align: right :align: right
:alt: Create a datastore :alt: Create a datastore
@ -261,57 +252,3 @@ categorized by checksum, after a backup operation has been executed.
276490 drwxr-x--- 1 backup backup 1.1M Jul 8 12:35 . 276490 drwxr-x--- 1 backup backup 1.1M Jul 8 12:35 .
Once you have uploaded some backups or created namespaces, you may see the
Backup Type (`ct`, `vm`, `host`) and the start of the namespace hierarchy (`ns`).
.. _storage_namespaces:
Backup Namespaces
~~~~~~~~~~~~~~~~~
A datastore can host many backups as long as the underlying storage is big
enough and provides the performance required for one's use case.
But, without any hierarchy or separation, it's easy to run into naming conflicts,
especially when using the same datastore for multiple Proxmox VE instances or
multiple users.
The backup namespace hierarchy allows you to clearly separate different users
or backup sources in general, avoiding naming conflicts and providing
a well-organized view of the backup content.
Each namespace level can host any backup type (CT, VM or Host), but also other
namespaces, up to a depth of 8 levels, where the root namespace is the first
level.
Namespace Permissions
^^^^^^^^^^^^^^^^^^^^^
You can make the permission configuration of a datastore more fine-grained by
setting permissions only on a specific namespace.
To see a datastore, you need a permission that includes at least one of the
`AUDIT`, `MODIFY`, `READ` or `BACKUP` privileges on any namespace it contains.
To create or delete a namespace you require the modify privilege on the parent
namespace. So, to initially create namespaces you need to have a permission
with an access role that includes the `MODIFY` privilege on the datastore itself.
For backup groups, the existing privilege rules still apply: you either need a
sufficiently powerful permission or must be the owner of the backup group; nothing has changed here.
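For example, a permission granting the `DatastoreBackup` role only on a
specific namespace could be added like this (datastore, namespace and user
names are made up; the ACL path scheme is the one described in the access
control chapter):

.. code-block:: console
# proxmox-backup-manager acl update /datastore/store1/ns1 DatastoreBackup --auth-id john@pbs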
.. todo:: continue
Options
~~~~~~~
.. image:: images/screenshots/pbs-gui-datastore-options.png
:align: right
:alt: Datastore Options
There are a few per-datastore options:
* :ref:`Notifications <maintenance_notification>`
* :ref:`Maintenance Mode <maintenance_mode>`
* Verification of incoming backups

View File

@ -15,8 +15,10 @@ through that channel. In addition, we provide our own package
repository to roll out all Proxmox related packages. This includes repository to roll out all Proxmox related packages. This includes
updates to some Debian packages when necessary. updates to some Debian packages when necessary.
We also deliver a specially optimized Linux kernel, based on the Ubuntu We also deliver a specially optimized Linux kernel, where we enable
kernel. That kernel includes drivers for ZFS_. all required virtualization and container features. That kernel
includes drivers for ZFS_, as well as several hardware drivers. For example,
we ship Intel network card drivers to support their newest hardware.
The following sections will concentrate on backup related topics. They The following sections will concentrate on backup related topics. They
will explain things which are different on `Proxmox Backup`_, or will explain things which are different on `Proxmox Backup`_, or
@ -26,10 +28,4 @@ please refer to the standard Debian documentation.
.. include:: local-zfs.rst .. include:: local-zfs.rst
.. include:: system-booting.rst
.. include:: certificate-management.rst
.. include:: services.rst .. include:: services.rst
.. include:: command-line-tools.rst

View File

@ -1,379 +0,0 @@
.. _chapter-systembooting:
Host Bootloader
---------------
`Proxmox Backup`_ currently uses one of two bootloaders depending on the disk setup
selected in the installer.
For EFI systems installed with ZFS as the root filesystem, ``systemd-boot`` is
used. All other deployments use the standard ``grub`` bootloader (this usually
also applies to systems which are installed on top of Debian).
.. _systembooting-installer-part-scheme:
Partitioning Scheme Used by the Installer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The `Proxmox Backup`_ installer creates 3 partitions on all disks selected for
installation.
The created partitions are:
* a 1 MB BIOS Boot Partition (gdisk type EF02)
* a 512 MB EFI System Partition (ESP, gdisk type EF00)
* a third partition spanning the set ``hdsize`` parameter or the remaining space
used for the chosen storage type
Systems using ZFS as root filesystem are booted with a kernel and initrd image
stored on the 512 MB EFI System Partition. For legacy BIOS systems, ``grub`` is
used, for EFI systems ``systemd-boot`` is used. Both are installed and configured
to point to the ESPs.
``grub`` in BIOS mode (``--target i386-pc``) is installed onto the BIOS Boot
Partition of all selected disks on all systems booted with ``grub`` (These are
all installs with root on ``ext4`` or ``xfs`` and installs with root on ZFS on
non-EFI systems).
.. _systembooting-proxmox-boot-tool:
Synchronizing the content of the ESP with ``proxmox-boot-tool``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``proxmox-boot-tool`` is a utility used to keep the contents of the EFI System
Partitions properly configured and synchronized. It copies certain kernel
versions to all ESPs and configures the respective bootloader to boot from
the ``vfat`` formatted ESPs. In the context of ZFS as root filesystem this means
that you can use all optional features on your root pool, instead of being
limited to the subset also present in the ZFS implementation in ``grub``, or
having to create a separate small boot-pool (see: `Booting ZFS on root with grub
<https://github.com/zfsonlinux/zfs/wiki/Debian-Stretch-Root-on-ZFS>`_).
In setups with redundancy, all disks are partitioned with an ESP by the
installer. This ensures the system boots even if the first boot device fails
or if the BIOS can only boot from a particular disk.
The ESPs are not kept mounted during regular operation. This helps to prevent
filesystem corruption to the ``vfat`` formatted ESPs in case of a system crash,
and removes the need to manually adapt ``/etc/fstab`` in case the primary boot
device fails.
``proxmox-boot-tool`` handles the following tasks:
* formatting and setting up a new partition
* copying and configuring new kernel images and initrd images to all listed ESPs
* synchronizing the configuration on kernel upgrades and other maintenance tasks
* managing the list of kernel versions which are synchronized
* configuring the boot-loader to boot a particular kernel version (pinning)
You can view the currently configured ESPs and their state by running:
.. code-block:: console
# proxmox-boot-tool status
.. _systembooting-proxmox-boot-setup:
Setting up a new partition for use as synced ESP
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To format and initialize a partition as synced ESP, e.g., after replacing a
failed vdev in an rpool, ``proxmox-boot-tool`` from ``pve-kernel-helper`` can be used.
WARNING: the ``format`` command will format the ``<partition>``, make sure to pass
in the right device/partition!
For example, to format an empty partition ``/dev/sda2`` as ESP, run the following:
.. code-block:: console
# proxmox-boot-tool format /dev/sda2
To setup an existing, unmounted ESP located on ``/dev/sda2`` for inclusion in
`Proxmox Backup`_'s kernel update synchronization mechanism, use the following:
.. code-block:: console
# proxmox-boot-tool init /dev/sda2
Afterwards, ``/etc/kernel/proxmox-boot-uuids`` should contain a new line with the
UUID of the newly added partition. The ``init`` command will also automatically
trigger a refresh of all configured ESPs.
.. _systembooting-proxmox-boot-refresh:
Updating the configuration on all ESPs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To copy and configure all bootable kernels and keep all ESPs listed in
``/etc/kernel/proxmox-boot-uuids`` in sync you just need to run:
.. code-block:: console
# proxmox-boot-tool refresh
(The equivalent of running ``update-grub`` on systems with ``ext4`` or ``xfs`` on root.)
This is necessary should you make changes to the kernel commandline, or want to
sync all kernels and initrds.
.. NOTE:: Both ``update-initramfs`` and ``apt`` (when necessary) will automatically
trigger a refresh.
Kernel Versions considered by ``proxmox-boot-tool``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following kernel versions are configured by default:
* the currently running kernel
* the version being newly installed on package updates
* the two latest already installed kernels
* the latest version of the second-to-last kernel series (e.g. 5.0, 5.3), if applicable
* any manually selected kernels
Manually keeping a kernel bootable
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Should you wish to add a certain kernel and initrd image to the list of
bootable kernels use ``proxmox-boot-tool kernel add``.
For example run the following to add the kernel with ABI version ``5.0.15-1-pve``
to the list of kernels to keep installed and synced to all ESPs:
.. code-block:: console
# proxmox-boot-tool kernel add 5.0.15-1-pve
``proxmox-boot-tool kernel list`` will list all kernel versions currently selected
for booting:
.. code-block:: console
# proxmox-boot-tool kernel list
Manually selected kernels:
5.0.15-1-pve
Automatically selected kernels:
5.0.12-1-pve
4.15.18-18-pve
Run ``proxmox-boot-tool kernel remove`` to remove a kernel from the list of
manually selected kernels, for example:
.. code-block:: console
# proxmox-boot-tool kernel remove 5.0.15-1-pve
.. NOTE:: It's required to run ``proxmox-boot-tool refresh`` to update all EFI System
Partitions (ESPs) after a manual kernel addition or removal as described above.
.. _systembooting-determine-bootloader:
Determine which Bootloader is Used
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. image:: images/screenshots/boot-grub.png
:target: _images/boot-grub.png
:align: left
:alt: Grub boot screen
The simplest and most reliable way to determine which bootloader is used, is to
watch the boot process of the `Proxmox Backup`_ node.
You will either see the blue box of ``grub`` or the simple black on white
``systemd-boot``.
.. image:: images/screenshots/boot-systemdboot.png
:target: _images/boot-systemdboot.png
:align: right
:alt: systemd-boot screen
Determining the bootloader from a running system might not be 100% accurate. The
safest way is to run the following command:
.. code-block:: console
# efibootmgr -v
If it returns a message that EFI variables are not supported, ``grub`` is used in
BIOS/Legacy mode.
If the output contains a line that looks similar to the following, ``grub`` is
used in UEFI mode.
.. code-block:: console
Boot0005* proxmox [...] File(\EFI\proxmox\grubx64.efi)
If the output contains a line similar to the following, ``systemd-boot`` is used.
.. code-block:: console
Boot0006* Linux Boot Manager [...] File(\EFI\systemd\systemd-bootx64.efi)
By running:
.. code-block:: console
# proxmox-boot-tool status
you can find out if ``proxmox-boot-tool`` is configured, which is a good
indication of how the system is booted.
.. _systembooting-grub:
Grub
~~~~
``grub`` has been the de-facto standard for booting Linux systems for many years
and is quite well documented
(see the `Grub Manual
<https://www.gnu.org/software/grub/manual/grub/grub.html>`_).
Configuration
^^^^^^^^^^^^^
Changes to the ``grub`` configuration are done via the defaults file
``/etc/default/grub`` or config snippets in ``/etc/default/grub.d``. To regenerate
the configuration file after a change to the configuration run:
.. code-block:: console
# update-grub
.. NOTE:: Systems using ``proxmox-boot-tool`` will call
``proxmox-boot-tool refresh`` upon ``update-grub``
.. _systembooting-systemdboot:
Systemd-boot
~~~~~~~~~~~~
``systemd-boot`` is a lightweight EFI bootloader. It reads the kernel and initrd
images directly from the EFI System Partition (ESP) where it is installed.
The main advantage of directly loading the kernel from the ESP is that it does
not need to reimplement the drivers for accessing the storage. In `Proxmox
Backup`_ :ref:`proxmox-boot-tool <systembooting-proxmox-boot-tool>` is used to
keep the configuration on the ESPs synchronized.
.. _systembooting-systemd-boot-config:
Configuration
^^^^^^^^^^^^^
``systemd-boot`` is configured via the file ``loader/loader.conf`` in the root
directory of an EFI System Partition (ESP). See the ``loader.conf(5)`` manpage
for details.
Each bootloader entry is placed in a file of its own in the directory
``loader/entries/``
An example entry.conf looks like this (``/`` refers to the root of the ESP):
.. code-block:: console
title Proxmox
version 5.0.15-1-pve
options root=ZFS=rpool/ROOT/pve-1 boot=zfs
linux /EFI/proxmox/5.0.15-1-pve/vmlinuz-5.0.15-1-pve
initrd /EFI/proxmox/5.0.15-1-pve/initrd.img-5.0.15-1-pve
.. _systembooting-edit-kernel-cmdline:
Editing the Kernel Commandline
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can modify the kernel commandline in the following places, depending on the
bootloader used:
Grub
^^^^
The kernel commandline needs to be placed in the variable
``GRUB_CMDLINE_LINUX_DEFAULT`` in the file ``/etc/default/grub``. Running
``update-grub`` appends its content to all ``linux`` entries in
``/boot/grub/grub.cfg``.
Systemd-boot
^^^^^^^^^^^^
The kernel commandline needs to be placed as one line in ``/etc/kernel/cmdline``.
To apply your changes, run ``proxmox-boot-tool refresh``, which sets it as the
``option`` line for all config files in ``loader/entries/proxmox-*.conf``.
.. _systembooting-kernel-pin:
Override the Kernel-Version for next Boot
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To select a kernel that is not currently the default kernel, you can either:
* use the boot loader menu that is displayed at the beginning of the boot
process
* use the ``proxmox-boot-tool`` to ``pin`` the system to a kernel version either
once or permanently (until pin is reset).
This should help you work around incompatibilities between a newer kernel
version and the hardware.
.. NOTE:: Such a pin should be removed as soon as possible so that all current
security patches of the latest kernel are also applied to the system.
For example: To permanently select the version ``5.15.30-1-pve`` for booting you
would run:
.. code-block:: console
# proxmox-boot-tool kernel pin 5.15.30-1-pve
.. TIP:: The pinning functionality works for all `Proxmox Backup`_ systems, not only those using
``proxmox-boot-tool`` to synchronize the contents of the ESPs. If your system
does not use ``proxmox-boot-tool`` for synchronizing, you can also skip the
final ``proxmox-boot-tool refresh`` call.
You can also set a kernel version to be booted on the next system boot only.
This is for example useful to test if an updated kernel has resolved an issue,
which caused you to ``pin`` a version in the first place:
.. code-block:: console
# proxmox-boot-tool kernel pin 5.15.30-1-pve --next-boot
To remove any pinned version configuration use the ``unpin`` subcommand:
.. code-block:: console
# proxmox-boot-tool kernel unpin
While ``unpin`` has a ``--next-boot`` option as well, it is used to clear a pinned
version set with ``--next-boot``. As that happens already automatically on boot,
invoking it manually is of little use.
After setting, or clearing pinned versions you also need to synchronize the
content and configuration on the ESPs by running the ``refresh`` subcommand.
.. TIP:: You will be prompted to do this automatically for ``proxmox-boot-tool``-managed
systems if you call the tool interactively.
.. code-block:: console
# proxmox-boot-tool refresh

View File

@ -500,7 +500,7 @@ a single media pool, so a job only uses tapes from that pool.
is less space efficient, because the media from the last set is less space efficient, because the media from the last set
may not be fully written, leaving the remaining space unused. may not be fully written, leaving the remaining space unused.
The advantage is that this produces media sets of minimal The advantage is that this produces media sets of minimal
size. Small sets are easier to handle, can be moved more conveniently size. Small sets are easier to handle, can be moved more conveniently
to an off-site vault, and can be restored much faster. to an off-site vault, and can be restored much faster.
@ -519,9 +519,8 @@ a single media pool, so a job only uses tapes from that pool.
This balances between space efficiency and media count. This balances between space efficiency and media count.
.. NOTE:: Retention period starts on the creation time of the next .. NOTE:: Retention period starts when the calendar event
media-set or, if that does not exist, when the calendar event triggers.
triggers the next time after the current media-set start time.
Additionally, the following events may allocate a new media set: Additionally, the following events may allocate a new media set:
@ -565,6 +564,13 @@ a single media pool, so a job only uses tapes from that pool.
the password. Please make sure to remember the password, in case the password. Please make sure to remember the password, in case
you need to restore the key. you need to restore the key.
.. NOTE:: We use a global content namespace, meaning we do not store the
source datastore name. Because of this, it is impossible to distinguish
store1:/vm/100 from store2:/vm/100. Please use different media pools
if the sources are from different namespaces with conflicting names
(for example, if the sources are from different Proxmox VE clusters).
.. image:: images/screenshots/pbs-gui-tape-pools-add.png .. image:: images/screenshots/pbs-gui-tape-pools-add.png
:align: right :align: right
:alt: Tape Backup: Add a media pool :alt: Tape Backup: Add a media pool
@ -681,16 +687,6 @@ To remove a job, please use:
# proxmox-tape backup-job remove job2 # proxmox-tape backup-job remove job2
By default, all (recursive) namespaces of the datastore are included in a tape
backup. You can specify a single namespace with ``ns`` and a depth with
``max-depth``. For example:
.. code-block:: console
# proxmox-tape backup-job update job2 --ns mynamespace --max-depth 3
If no `max-depth` is given, it will include all recursive namespaces.
.. image:: images/screenshots/pbs-gui-tape-backup-jobs-add.png .. image:: images/screenshots/pbs-gui-tape-backup-jobs-add.png
:align: right :align: right
:alt: Tape Backup: Add a backup job :alt: Tape Backup: Add a backup job
@ -807,16 +803,6 @@ The following options are available:
media set into import-export slots. The operator can then pick up media set into import-export slots. The operator can then pick up
those tapes and move them to a media vault. those tapes and move them to a media vault.
--ns The namespace to backup.
Use this if you only want to back up a specific namespace. If omitted, the root
namespace is assumed.
--max-depth The depth to recurse namespaces.
``0`` means no recursion at all (only the given namespace). If omitted,
all namespaces are recursed (below the given one).
Restore from Tape Restore from Tape
~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~
@ -851,53 +837,6 @@ data disk (datastore):
# proxmox-tape restore 9da37a55-aac7-4deb-91c6-482b3b675f30 mystore # proxmox-tape restore 9da37a55-aac7-4deb-91c6-482b3b675f30 mystore
Single Snapshot Restore
^^^^^^^^^^^^^^^^^^^^^^^
Sometimes it is not necessary to restore a whole media-set, but only some
specific snapshots from the tape. This can be achieved with the ``snapshots``
parameter:
.. code-block:: console
// proxmox-tape restore <media-set-uuid> <datastore> [<snapshot>]
# proxmox-tape restore 9da37a55-aac7-4deb-91c6-482b3b675f30 mystore sourcestore:host/hostname/2022-01-01T00:01:00Z
This first restores the snapshot to a temporary location, then restores the relevant
chunk archives, and finally restores the snapshot data to the target datastore.
The ``snapshot`` parameter can be given multiple times, so one can restore
multiple snapshots with one restore action.
.. NOTE:: When using the single snapshot restore, the tape must be traversed
more than once, which, if you restore many snapshots at once, can take longer
than restoring the whole datastore.
Namespaces
^^^^^^^^^^
It is also possible to select and map specific namespaces from a media-set
during a restore. This is possible with the ``namespaces`` parameter.
The format of the parameter is
.. code-block:: console
store=<source-datastore>[,source=<source-ns>][,target=<target-ns>][,max-depth=<depth>]
If ``source`` or ``target`` is not given, the root namespace is assumed.
When no ``max-depth`` is given, the source namespace will be fully recursed.
An example restore command:
.. code-block:: console
# proxmox-tape restore 9da37a55-aac7-4deb-91c6-482b3b675f30 mystore --namespaces store=sourcedatastore,source=ns1,target=ns2,max-depth=2
The parameter can be given multiple times. It can also be combined with the
``snapshots`` parameter to only restore those snapshots and map them to different
namespaces.
Update Inventory Update Inventory
~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~
@ -1039,76 +978,3 @@ This command does the following:
- run drive cleaning operation - run drive cleaning operation
- unload the cleaning tape (to slot 3) - unload the cleaning tape (to slot 3)
Example Setups
--------------
Here are a few example setups for how to manage media pools and schedules.
This is not an exhaustive list, and there are many more possible combinations
of useful settings.
Single Continued Media Set
~~~~~~~~~~~~~~~~~~~~~~~~~~
The simplest setup: always continue the media-set and never expire.
Allocation policy:
continue
Retention policy:
keep
This setup has the advantage of being easy to manage and reuses the benefits
of deduplication as much as possible. But it's also vulnerable to the failure
of any single tape, which would render all backups referring to chunks from
that tape unusable.
If you want to start a new media-set manually, you can set the currently
writable media of the set either to 'full', or set the location to an
offsite vault.
Weekday Scheme
~~~~~~~~~~~~~~
A slightly more complex scheme, where the goal is to have an independent
tape or media set for each weekday, for example from Monday to Friday.
This can be solved by having a separate media pool for each day, so 'Monday',
'Tuesday', etc.
Allocation policy:
should be 'mon' for the 'Monday' pool, 'tue' for the Tuesday pool and so on.
Retention policy:
overwrite
There should be one (or more) tape-backup jobs for each pool, scheduled on the
corresponding weekday. This scheme is still very manageable with one media set
per weekday, and could easily be moved off-site.
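As a sketch, such a pool could be created from the command line, assuming
``proxmox-tape pool create`` accepts the allocation and retention policies as
options (the option names here are an assumption based on the media pool
configuration fields):

.. code-block:: console
# proxmox-tape pool create Monday --allocation mon --retention overwrite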
Multiple Pools with Different Policies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Complex setups are also possible with multiple media pools configured with
different allocation and retention policies.
An example would be to have two media pools. The first configured with weekly
allocation and a few weeks of retention:
Allocation policy:
mon
Retention policy:
3 weeks
The second pool is configured with yearly allocation and does not expire:
Allocation policy:
yearly
Retention policy:
keep
In combination with suitable prune settings and tape backup schedules, this
achieves long-term storage of some backups, while keeping the recent backups
on smaller media sets that expire every three weeks plus the current week
(~ 4 weeks).

View File

@ -61,15 +61,6 @@ The manifest contains a list of all backed up files, and their
sizes and checksums. It is used to verify the consistency of a sizes and checksums. It is used to verify the consistency of a
backup. backup.
Backup Namespace
----------------
Namespaces allow for the reuse of a single chunk store deduplication domain for
multiple sources, while avoiding naming conflicts and allowing more fine-grained
access control.
Essentially, they're implemented as a simple directory structure and need no
separate configuration.
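As a rough sketch of that layout, a backup group in a nested namespace might
live at a path like the following inside the datastore directory (the exact
``ns`` nesting shown here is an assumption; the names are made up):

.. code-block:: console
store1/ns/location_a/ns/department_x/vm/100/2022-01-01T00:01:00Z/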
Backup Type Backup Type
----------- -----------
@ -77,14 +68,13 @@ Backup Type
The backup server groups backups by *type*, where *type* is one of: The backup server groups backups by *type*, where *type* is one of:
``vm`` ``vm``
This type is used for :term:`virtual machine<Virtual machine>`\ s. It This type is used for :term:`virtual machine`\ s. It typically
typically consists of the virtual machine's configuration file and an image consists of the virtual machine's configuration file and an image archive
archive for each disk. for each disk.
``ct`` ``ct``
This type is used for :term:`container<Container>`\ s. It consists of the This type is used for :term:`container`\ s. It consists of the container's
container's configuration and a single file archive for the filesystem's configuration and a single file archive for the filesystem's contents.
contents.
``host`` ``host``
This type is used for file/directory backups created from within a machine. This type is used for file/directory backups created from within a machine.
@ -92,25 +82,25 @@ The backup server groups backups by *type*, where *type* is one of:
or container. Such backups may contain file and image archives; there are no or container. Such backups may contain file and image archives; there are no
restrictions in this regard. restrictions in this regard.
Backup ID Backup ID
--------- ---------
A unique ID for a specific Backup Type and Backup Namespace. Usually the A unique ID. Usually the virtual machine or container ID. ``host``
virtual machine or container ID. ``host`` type backups normally use the type backups normally use the hostname.
hostname.
Backup Time Backup Time
----------- -----------
The time when the backup was made with second resolution. The time when the backup was made.
Backup Group Backup Group
------------ ------------
The tuple ``<type>/<id>`` is called a backup group. Such a group may contain The tuple ``<type>/<ID>`` is called a backup group. Such a group
one or more backup snapshots. may contain one or more backup snapshots.
.. _term_backup_snapshot: .. _term_backup_snapshot:
@ -126,7 +116,7 @@ uniquely identifies a specific backup within a datastore.
vm/104/2019-10-09T08:01:06Z vm/104/2019-10-09T08:01:06Z
host/elsa/2019-11-08T09:48:14Z host/elsa/2019-11-08T09:48:14Z
As you can see, the time format is RFC3339_ with Coordinated As you can see, the time format is RFC3339_ with Coordinated
Universal Time (UTC_, identified by the trailing *Z*). Universal Time (UTC_, identified by the trailing *Z*).

View File

@ -21,7 +21,7 @@ You can manage the traffic controls either over the web-interface or using the
tool. tool.
.. note:: Sync jobs on the server are not affected by its rate-in limits. If .. note:: Sync jobs on the server are not affected by its rate-in limits. If
you want to limit the incoming traffic that a pull-based sync job you want to limit the incoming traffic that a pull-based sync job
generates, you need to set up a job-specific rate-in limit. See generates, you need to set up a job-specific rate-in limit. See
:ref:`syncjobs`. :ref:`syncjobs`.
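For example, a rule limiting traffic from a subnet might be created like this
(rule name, network and rates are made-up values; the exact option set is an
assumption):

.. code-block:: console
# proxmox-backup-manager traffic-control create rule0 --network 192.168.2.0/24 --rate-in 100MB --rate-out 100MB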

View File

@ -157,133 +157,34 @@ Access Control
-------------- --------------
By default, new users and API tokens do not have any permissions. Instead you By default, new users and API tokens do not have any permissions. Instead you
need to specify what is allowed and what is not. need to specify what is allowed and what is not. You can do this by assigning
roles to users/tokens on specific objects, like datastores or remotes. The
Proxmox Backup Server uses a role and path based permission management system. following roles exist:
An entry in the permissions table allows a user, group or token to take on a
specific role when accessing an 'object' or 'path'. This means that such an
access rule can be represented as a triple of '(path, user, role)', '(path,
group, role)' or '(path, token, role)', with the role containing a set of
allowed actions, and the path representing the target of these actions.
Privileges
~~~~~~~~~~
Privileges are the atoms that access roles are made of. They are internally
used to enforce the actual permission checks in the API.
We currently support the following privileges:
**Sys.Audit**
Sys.Audit allows one to know about the system and its status.
**Sys.Modify**
Sys.Modify allows one to modify system-level configuration and apply updates.
**Sys.PowerManagement**
Sys.PowerManagement allows one to power off or reboot the system.
**Datastore.Audit**
Datastore.Audit allows one to know about a datastore, including reading the
configuration entry and listing its contents.
**Datastore.Allocate**
Datastore.Allocate allows one to create or delete datastores.
**Datastore.Modify**
Datastore.Modify allows one to modify a datastore and its contents, and to
create or delete namespaces inside a datastore.
**Datastore.Read**
Datastore.Read allows one to read arbitrary backup contents, independent of
the backup group owner.
**Datastore.Verify**
Allows verifying the backup snapshots in a datastore.
**Datastore.Backup**
Datastore.Backup allows one to create new backup snapshots, and also grants the
privileges of Datastore.Read and Datastore.Verify, but only if the backup
group is owned by the user or one of their tokens.
**Datastore.Prune**
Datastore.Prune allows one to delete snapshots, but additionally requires
backup ownership.
**Permissions.Modify**
Permissions.Modify allows one to modify ACLs.
.. note:: One can always configure privileges for their own API tokens, as
they will be clamped by the user's privileges anyway.
**Remote.Audit**
Remote.Audit allows one to read the remote and the sync configuration entries
**Remote.Modify**
Remote.Modify allows one to modify the remote configuration
**Remote.Read**
Remote.Read allows one to read data from a configured `Remote`
**Sys.Console**
Sys.Console allows one access to the system's console; note that for all
but `root@pam` a valid system login is still required.
**Tape.Audit**
Tape.Audit allows one to read the configuration and status of tape drives,
changers and backups
**Tape.Modify**
Tape.Modify allows one to modify the configuration of tape drives, changers
and backups
**Tape.Write**
Tape.Write allows one to write to a tape media
**Tape.Read**
Tape.Read allows one to read tape backup configuration and contents from a
tape media
**Realm.Allocate**
Realm.Allocate allows one to view, create, modify and delete authentication
realms for users
Access Roles
~~~~~~~~~~~~
An access role combines one or more privileges into something that can be
assigned to a user or API token on an object path.
Currently, there are only built-in roles; that means you cannot create your
own custom roles.
The following roles exist:
**NoAccess** **NoAccess**
Disable Access - nothing is allowed. Disable Access - nothing is allowed.
**Admin** **Admin**
Can do anything, on the object path assigned. Can do anything.
**Audit** **Audit**
Can view the status and configuration of things, but is not allowed to change Can view things, but is not allowed to change settings.
settings.
**DatastoreAdmin** **DatastoreAdmin**
Can do anything on *existing* datastores. Can do anything on datastores.
**DatastoreAudit** **DatastoreAudit**
Can view datastore metrics, settings and list content. But is not allowed to Can view datastore settings and list content. But
read the actual data. is not allowed to read the actual data.
**DatastoreReader** **DatastoreReader**
Can inspect a datastore's or namespaces content and do restores. Can Inspect datastore content and do restores.
**DatastoreBackup** **DatastoreBackup**
Can backup and restore owned backups. Can backup and restore owned backups.
**DatastorePowerUser** **DatastorePowerUser**
Can backup, restore, and prune *owned* backups. Can backup, restore, and prune owned backups.
**RemoteAdmin** **RemoteAdmin**
Can do anything on remotes. Can do anything on remotes.
@ -294,62 +195,19 @@ The following roles exist:
**RemoteSyncOperator** **RemoteSyncOperator**
Is allowed to read data from a remote. Is allowed to read data from a remote.
**TapeAdmin** **TapeAudit**
Can do anything related to tape backup Can view tape related configuration and status
**TapeAudit** **TapeAdmin**
Can view tape related metrics, configuration and status Can do anything related to tape backup
**TapeOperator** **TapeOperator**
Can do tape backup and restore, but cannot change any configuration Can do tape backup and restore (but no configuration changes)
**TapeReader** **TapeReader**
Can read and inspect tape configuration and media content Can read and inspect tape configuration and media content
Objects and Paths .. image:: images/screenshots/pbs-gui-user-management-add-user.png
~~~~~~~~~~~~~~~~~
Access permissions are assigned to objects, such as a datastore, a namespace or
some system resources.
We use file system like paths to address these objects. These paths form a
natural tree, and permissions of higher levels (shorter paths) can optionally
be propagated down within this hierarchy.
Paths can be templated; that means they can refer to the actual id of a
configuration entry. When an API call requires permissions on a templated
path, the path may contain references to parameters of the API call. These
references are specified in curly braces.
Some examples are:
* `/datastore`: Access to *all* datastores on a Proxmox Backup server
* `/datastore/{store}`: Access to a specific datastore on a Proxmox Backup
server
* `/datastore/{store}/{ns}`: Access to a specific namespace on a specific
datastore
* `/remote`: Access to all remote entries
* `/system/network`: Access to configuring the host network
* `/tape/`: Access to tape devices, pools and jobs
* `/access/users`: User administration
* `/access/openid/{id}`: Administrative access to a specific OpenID Connect realm
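How such a templated path gets its parameters substituted can be sketched with a tiny helper (a hypothetical illustration, not the actual PBS routine)::

    fn expand_acl_path(template: &str, params: &[(&str, &str)]) -> String {
        let mut path = template.to_string();
        for (name, value) in params {
            // replace each `{name}` placeholder with the API call's parameter value
            path = path.replace(&format!("{{{}}}", name), value);
        }
        path
    }

    fn main() {
        let path = expand_acl_path("/datastore/{store}", &[("store", "store2")]);
        assert_eq!(path, "/datastore/store2");
    }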
Inheritance
^^^^^^^^^^^
As mentioned earlier, object paths form a file-system-like tree, and
permissions can be inherited by objects down that tree through the propagate
flag, which is set by default. We use the following inheritance rules:
* Permissions for API tokens are always clamped to those of the owning user.
* Permissions on deeper, more specific levels replace those inherited from an
upper level.
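A simplified sketch of how such a lookup can be resolved: walk every prefix of the object path from the root downwards, let deeper entries replace inherited ones, and only honor non-propagating entries on the exact path. This only illustrates the two rules above; it is not the actual PBS ACL tree implementation::

    use std::collections::HashMap;

    /// ACL entries per path: (privilege bitmask, propagate flag) -- simplified stand-in.
    fn resolve_privs(acl: &HashMap<&str, (u64, bool)>, object_path: &str) -> u64 {
        // collect "/", then every prefix of the object path
        let mut prefixes = vec![String::from("/")];
        let mut current = String::new();
        for comp in object_path.split('/').filter(|c| !c.is_empty()) {
            current.push('/');
            current.push_str(comp);
            prefixes.push(current.clone());
        }
        let last = prefixes.len() - 1;
        let mut privs = 0;
        for (i, prefix) in prefixes.iter().enumerate() {
            if let Some(&(p, propagate)) = acl.get(prefix.as_str()) {
                // deeper, more specific entries replace those inherited from above;
                // entries without the propagate flag only apply to the exact path
                if propagate || i == last {
                    privs = p;
                }
            }
        }
        privs
    }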
Configuration & Management
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. image:: images/screenshots/pbs-gui-permissions-add.png
:align: right :align: right
:alt: Add permissions for user :alt: Add permissions for user

View File

@ -3,6 +3,7 @@ use anyhow::Error;
// chacha20-poly1305 // chacha20-poly1305
fn rate_test(name: &str, bench: &dyn Fn() -> usize) { fn rate_test(name: &str, bench: &dyn Fn() -> usize) {
print!("{:<20} ", name); print!("{:<20} ", name);
let start = std::time::SystemTime::now(); let start = std::time::SystemTime::now();
@ -13,19 +14,20 @@ fn rate_test(name: &str, bench: &dyn Fn() -> usize) {
loop { loop {
bytes += bench(); bytes += bench();
let elapsed = start.elapsed().unwrap(); let elapsed = start.elapsed().unwrap();
if elapsed > duration { if elapsed > duration { break; }
break;
}
} }
let elapsed = start.elapsed().unwrap(); let elapsed = start.elapsed().unwrap();
let elapsed = (elapsed.as_secs() as f64) + (elapsed.subsec_millis() as f64) / 1000.0; let elapsed = (elapsed.as_secs() as f64) +
(elapsed.subsec_millis() as f64)/1000.0;
println!("{:>8.1} MB/s", (bytes as f64) / (elapsed * 1024.0 * 1024.0)); println!("{:>8.1} MB/s", (bytes as f64)/(elapsed*1024.0*1024.0));
} }
fn main() -> Result<(), Error> { fn main() -> Result<(), Error> {
let input = proxmox_sys::linux::random_data(1024 * 1024)?;
let input = proxmox::sys::linux::random_data(1024*1024)?;
rate_test("crc32", &|| { rate_test("crc32", &|| {
let mut crchasher = crc32fast::Hasher::new(); let mut crchasher = crc32fast::Hasher::new();
@ -44,23 +46,35 @@ fn main() -> Result<(), Error> {
input.len() input.len()
}); });
let key = proxmox_sys::linux::random_data(32)?; let key = proxmox::sys::linux::random_data(32)?;
let iv = proxmox_sys::linux::random_data(16)?; let iv = proxmox::sys::linux::random_data(16)?;
let cipher = openssl::symm::Cipher::aes_256_gcm(); let cipher = openssl::symm::Cipher::aes_256_gcm();
rate_test("aes-256-gcm", &|| { rate_test("aes-256-gcm", &|| {
let mut tag = [0u8; 16]; let mut tag = [0u8;16];
openssl::symm::encrypt_aead(cipher, &key, Some(&iv), b"", &input, &mut tag).unwrap(); openssl::symm::encrypt_aead(
cipher,
&key,
Some(&iv),
b"",
&input,
&mut tag).unwrap();
input.len() input.len()
}); });
let cipher = openssl::symm::Cipher::chacha20_poly1305(); let cipher = openssl::symm::Cipher::chacha20_poly1305();
rate_test("chacha20-poly1305", &|| { rate_test("chacha20-poly1305", &|| {
let mut tag = [0u8; 16]; let mut tag = [0u8;16];
openssl::symm::encrypt_aead(cipher, &key, Some(&iv[..12]), b"", &input, &mut tag).unwrap(); openssl::symm::encrypt_aead(
cipher,
&key,
Some(&iv[..12]),
b"",
&input,
&mut tag).unwrap();
input.len() input.len()
}); });
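A side note on the timing loop above: it measures with SystemTime, which is not monotonic; std::time::Instant is the safer clock for benchmarks, and Duration::as_secs_f64 removes the manual subsec_millis arithmetic. A minimal alternative sketch of the same loop:

    use std::time::Instant;

    fn rate_test(name: &str, bench: &dyn Fn() -> usize) {
        let start = Instant::now(); // monotonic, unlike SystemTime
        let mut bytes = 0;
        while start.elapsed().as_secs_f64() < 1.0 {
            bytes += bench();
        }
        let elapsed = start.elapsed().as_secs_f64();
        println!("{:<20} {:>8.1} MB/s", name, bytes as f64 / (elapsed * 1024.0 * 1024.0));
    }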

View File

@ -1,7 +1,7 @@
use anyhow::Error; use anyhow::{Error};
use proxmox_router::cli::*;
use proxmox_schema::*; use proxmox_schema::*;
use proxmox_router::cli::*;
#[api( #[api(
input: { input: {
@ -16,7 +16,9 @@ use proxmox_schema::*;
/// Echo command. Print the passed text. /// Echo command. Print the passed text.
/// ///
/// Returns: nothing /// Returns: nothing
fn echo_command(text: String) -> Result<(), Error> { fn echo_command(
text: String,
) -> Result<(), Error> {
println!("{}", text); println!("{}", text);
Ok(()) Ok(())
} }
@ -35,7 +37,9 @@ fn echo_command(text: String) -> Result<(), Error> {
/// Hello command. /// Hello command.
/// ///
/// Returns: nothing /// Returns: nothing
fn hello_command(verbose: Option<bool>) -> Result<(), Error> { fn hello_command(
verbose: Option<bool>,
) -> Result<(), Error> {
if verbose.unwrap_or(false) { if verbose.unwrap_or(false) {
println!("Hello, how are you!"); println!("Hello, how are you!");
} else { } else {
@ -50,6 +54,7 @@ fn hello_command(verbose: Option<bool>) -> Result<(), Error> {
/// ///
/// Returns: nothing /// Returns: nothing
fn quit_command() -> Result<(), Error> { fn quit_command() -> Result<(), Error> {
println!("Goodbye."); println!("Goodbye.");
std::process::exit(0); std::process::exit(0);
@ -59,9 +64,8 @@ fn cli_definition() -> CommandLineInterface {
let cmd_def = CliCommandMap::new() let cmd_def = CliCommandMap::new()
.insert("quit", CliCommand::new(&API_METHOD_QUIT_COMMAND)) .insert("quit", CliCommand::new(&API_METHOD_QUIT_COMMAND))
.insert("hello", CliCommand::new(&API_METHOD_HELLO_COMMAND)) .insert("hello", CliCommand::new(&API_METHOD_HELLO_COMMAND))
.insert( .insert("echo", CliCommand::new(&API_METHOD_ECHO_COMMAND)
"echo", .arg_param(&["text"])
CliCommand::new(&API_METHOD_ECHO_COMMAND).arg_param(&["text"]),
) )
.insert_help(); .insert_help();
@ -69,6 +73,7 @@ fn cli_definition() -> CommandLineInterface {
} }
fn main() -> Result<(), Error> { fn main() -> Result<(), Error> {
let helper = CliHelper::new(cli_definition()); let helper = CliHelper::new(cli_definition());
let mut rl = rustyline::Editor::<CliHelper>::new(); let mut rl = rustyline::Editor::<CliHelper>::new();

View File

@ -2,14 +2,15 @@ use std::io::Write;
use anyhow::Error; use anyhow::Error;
use pbs_api_types::{Authid, BackupNamespace, BackupType}; use pbs_api_types::Authid;
use pbs_client::{BackupReader, HttpClient, HttpClientOptions}; use pbs_client::{HttpClient, HttpClientOptions, BackupReader};
pub struct DummyWriter { pub struct DummyWriter {
bytes: usize, bytes: usize,
} }
impl Write for DummyWriter { impl Write for DummyWriter {
fn write(&mut self, data: &[u8]) -> Result<usize, std::io::Error> { fn write(&mut self, data: &[u8]) -> Result<usize, std::io::Error> {
self.bytes += data.len(); self.bytes += data.len();
Ok(data.len()) Ok(data.len())
@ -20,7 +21,9 @@ impl Write for DummyWriter {
} }
} }
async fn run() -> Result<(), Error> { async fn run() -> Result<(), Error> {
let host = "localhost"; let host = "localhost";
let auth_id = Authid::root_auth_id(); let auth_id = Authid::root_auth_id();
@ -33,15 +36,8 @@ async fn run() -> Result<(), Error> {
let backup_time = proxmox_time::parse_rfc3339("2019-06-28T10:49:48Z")?; let backup_time = proxmox_time::parse_rfc3339("2019-06-28T10:49:48Z")?;
let client = BackupReader::start( let client = BackupReader::start(client, None, "store2", "host", "elsa", backup_time, true)
client, .await?;
None,
"store2",
&BackupNamespace::root(),
&(BackupType::Host, "elsa".to_string(), backup_time).into(),
true,
)
.await?;
let start = std::time::SystemTime::now(); let start = std::time::SystemTime::now();
@ -54,13 +50,10 @@ async fn run() -> Result<(), Error> {
} }
let elapsed = start.elapsed().unwrap(); let elapsed = start.elapsed().unwrap();
let elapsed = (elapsed.as_secs() as f64) + (elapsed.subsec_millis() as f64) / 1000.0; let elapsed = (elapsed.as_secs() as f64) +
(elapsed.subsec_millis() as f64)/1000.0;
println!( println!("Downloaded {} bytes, {} MB/s", bytes, (bytes as f64)/(elapsed*1024.0*1024.0));
"Downloaded {} bytes, {} MB/s",
bytes,
(bytes as f64) / (elapsed * 1024.0 * 1024.0)
);
Ok(()) Ok(())
} }

View File

@ -1,6 +1,6 @@
use std::io::Write;
use std::path::PathBuf;
use std::thread; use std::thread;
use std::path::PathBuf;
use std::io::Write;
use anyhow::{bail, Error}; use anyhow::{bail, Error};
@ -19,15 +19,15 @@ use anyhow::{bail, Error};
// Error: detected shrunk file "./dyntest1/testfile0.dat" (22020096 < 12679380992) // Error: detected shrunk file "./dyntest1/testfile0.dat" (22020096 < 12679380992)
fn create_large_file(path: PathBuf) { fn create_large_file(path: PathBuf) {
println!("TEST {:?}", path); println!("TEST {:?}", path);
let mut file = std::fs::OpenOptions::new() let mut file = std::fs::OpenOptions::new()
.write(true) .write(true)
.create_new(true) .create_new(true)
.open(&path) .open(&path).unwrap();
.unwrap();
let buffer = vec![0u8; 64 * 1024]; let buffer = vec![0u8; 64*1024];
loop { loop {
for _ in 0..64 { for _ in 0..64 {
@ -40,6 +40,7 @@ fn create_large_file(path: PathBuf) {
} }
fn main() -> Result<(), Error> { fn main() -> Result<(), Error> {
let base = PathBuf::from("dyntest1"); let base = PathBuf::from("dyntest1");
let _ = std::fs::create_dir(&base); let _ = std::fs::create_dir(&base);

View File

@ -2,7 +2,7 @@ extern crate proxmox_backup;
// also see https://www.johndcook.com/blog/standard_deviation/ // also see https://www.johndcook.com/blog/standard_deviation/
use anyhow::Error; use anyhow::{Error};
use std::io::{Read, Write}; use std::io::{Read, Write};
use pbs_datastore::Chunker; use pbs_datastore::Chunker;
@ -21,6 +21,7 @@ struct ChunkWriter {
} }
impl ChunkWriter { impl ChunkWriter {
fn new(chunk_size: usize) -> Self { fn new(chunk_size: usize) -> Self {
ChunkWriter { ChunkWriter {
chunker: Chunker::new(chunk_size), chunker: Chunker::new(chunk_size),
@ -36,6 +37,7 @@ impl ChunkWriter {
} }
fn record_stat(&mut self, chunk_size: f64) { fn record_stat(&mut self, chunk_size: f64) {
self.chunk_count += 1; self.chunk_count += 1;
if self.chunk_count == 1 { if self.chunk_count == 1 {
@ -43,30 +45,28 @@ impl ChunkWriter {
self.m_new = chunk_size; self.m_new = chunk_size;
self.s_old = 0.0; self.s_old = 0.0;
} else { } else {
self.m_new = self.m_old + (chunk_size - self.m_old) / (self.chunk_count as f64); self.m_new = self.m_old + (chunk_size - self.m_old)/(self.chunk_count as f64);
self.s_new = self.s_old + (chunk_size - self.m_old) * (chunk_size - self.m_new); self.s_new = self.s_old +
(chunk_size - self.m_old)*(chunk_size - self.m_new);
// set up for next iteration // set up for next iteration
self.m_old = self.m_new; self.m_old = self.m_new;
self.s_old = self.s_new; self.s_old = self.s_new;
} }
let variance = if self.chunk_count > 1 { let variance = if self.chunk_count > 1 {
self.s_new / ((self.chunk_count - 1) as f64) self.s_new/((self.chunk_count -1)as f64)
} else { } else { 0.0 };
0.0
};
let std_deviation = variance.sqrt(); let std_deviation = variance.sqrt();
let deviation_per = (std_deviation * 100.0) / self.m_new; let deviation_per = (std_deviation*100.0)/self.m_new;
println!( println!("COUNT {:10} SIZE {:10} MEAN {:10} DEVIATION {:3}%", self.chunk_count, chunk_size, self.m_new as usize, deviation_per as usize);
"COUNT {:10} SIZE {:10} MEAN {:10} DEVIATION {:3}%",
self.chunk_count, chunk_size, self.m_new as usize, deviation_per as usize
);
} }
} }
impl Write for ChunkWriter { impl Write for ChunkWriter {
fn write(&mut self, data: &[u8]) -> std::result::Result<usize, std::io::Error> { fn write(&mut self, data: &[u8]) -> std::result::Result<usize, std::io::Error> {
let chunker = &mut self.chunker; let chunker = &mut self.chunker;
let pos = chunker.scan(data); let pos = chunker.scan(data);
@ -80,6 +80,7 @@ impl Write for ChunkWriter {
self.last_chunk = self.chunk_offset; self.last_chunk = self.chunk_offset;
Ok(pos) Ok(pos)
} else { } else {
self.chunk_offset += data.len(); self.chunk_offset += data.len();
Ok(data.len()) Ok(data.len())
@ -92,23 +93,23 @@ impl Write for ChunkWriter {
} }
fn main() -> Result<(), Error> { fn main() -> Result<(), Error> {
let mut file = std::fs::File::open("/dev/urandom")?; let mut file = std::fs::File::open("/dev/urandom")?;
let mut bytes = 0; let mut bytes = 0;
let mut buffer = [0u8; 64 * 1024]; let mut buffer = [0u8; 64*1024];
let mut writer = ChunkWriter::new(4096 * 1024); let mut writer = ChunkWriter::new(4096*1024);
loop { loop {
file.read_exact(&mut buffer)?; file.read_exact(&mut buffer)?;
bytes += buffer.len(); bytes += buffer.len();
writer.write_all(&buffer)?; writer.write_all(&buffer)?;
if bytes > 1024 * 1024 * 1024 { if bytes > 1024*1024*1024 { break; }
break;
}
} }
Ok(()) Ok(())
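The running mean/deviation bookkeeping in record_stat() above is Welford's online algorithm (see the johndcook.com link in this file's header comment). An equivalent, self-contained formulation, for reference:

    /// Welford's online mean/variance, the same technique record_stat() uses (sketch).
    #[derive(Default)]
    struct RunningStats {
        count: u64,
        mean: f64,
        m2: f64, // sum of squared distances from the current mean
    }

    impl RunningStats {
        fn push(&mut self, x: f64) {
            self.count += 1;
            let delta = x - self.mean;
            self.mean += delta / self.count as f64;
            self.m2 += delta * (x - self.mean); // note: uses the *updated* mean
        }

        fn variance(&self) -> f64 {
            if self.count > 1 {
                self.m2 / ((self.count - 1) as f64)
            } else {
                0.0
            }
        }
    }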

View File

@ -3,16 +3,17 @@ extern crate proxmox_backup;
use pbs_datastore::Chunker; use pbs_datastore::Chunker;
fn main() { fn main() {
let mut buffer = Vec::new(); let mut buffer = Vec::new();
for i in 0..20 * 1024 * 1024 { for i in 0..20*1024*1024 {
for j in 0..4 { for j in 0..4 {
let byte = ((i >> (j << 3)) & 0xff) as u8; let byte = ((i >> (j<<3))&0xff) as u8;
//println!("BYTE {}", byte); //println!("BYTE {}", byte);
buffer.push(byte); buffer.push(byte);
} }
} }
let mut chunker = Chunker::new(64 * 1024); let mut chunker = Chunker::new(64*1024);
let count = 5; let count = 5;
@ -38,14 +39,11 @@ fn main() {
} }
let elapsed = start.elapsed().unwrap(); let elapsed = start.elapsed().unwrap();
let elapsed = (elapsed.as_secs() as f64) + (elapsed.subsec_millis() as f64) / 1000.0; let elapsed = (elapsed.as_secs() as f64) +
(elapsed.subsec_millis() as f64)/1000.0;
let mbytecount = ((count * buffer.len()) as f64) / (1024.0 * 1024.0); let mbytecount = ((count*buffer.len()) as f64) / (1024.0*1024.0);
let avg_chunk_size = mbytecount / (chunk_count as f64); let avg_chunk_size = mbytecount/(chunk_count as f64);
let mbytes_per_sec = mbytecount / elapsed; let mbytes_per_sec = mbytecount/elapsed;
println!( println!("SPEED = {} MB/s, avg chunk size = {} KB", mbytes_per_sec, avg_chunk_size*1024.0);
"SPEED = {} MB/s, avg chunk size = {} KB",
mbytes_per_sec,
avg_chunk_size * 1024.0
);
} }

View File

@ -1,4 +1,4 @@
use anyhow::Error; use anyhow::{Error};
use futures::*; use futures::*;
extern crate proxmox_backup; extern crate proxmox_backup;
@ -19,6 +19,7 @@ fn main() {
} }
async fn run() -> Result<(), Error> { async fn run() -> Result<(), Error> {
let file = tokio::fs::File::open("random-test.dat").await?; let file = tokio::fs::File::open("random-test.dat").await?;
let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new()) let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
@ -33,7 +34,7 @@ async fn run() -> Result<(), Error> {
let mut repeat = 0; let mut repeat = 0;
let mut stream_len = 0; let mut stream_len = 0;
while let Some(chunk) = chunk_stream.try_next().await? { while let Some(chunk) = chunk_stream.try_next().await? {
if chunk.len() > 16 * 1024 * 1024 { if chunk.len() > 16*1024*1024 {
panic!("Chunk too large {}", chunk.len()); panic!("Chunk too large {}", chunk.len());
} }
@ -43,19 +44,10 @@ async fn run() -> Result<(), Error> {
println!("Got chunk {}", chunk.len()); println!("Got chunk {}", chunk.len());
} }
let speed = let speed = ((stream_len*1_000_000)/(1024*1024))/(start_time.elapsed().as_micros() as usize);
((stream_len * 1_000_000) / (1024 * 1024)) / (start_time.elapsed().as_micros() as usize); println!("Uploaded {} chunks in {} seconds ({} MB/s).", repeat, start_time.elapsed().as_secs(), speed);
println!( println!("Average chunk size was {} bytes.", stream_len/repeat);
"Uploaded {} chunks in {} seconds ({} MB/s).", println!("time per request: {} microseconds.", (start_time.elapsed().as_micros())/(repeat as u128));
repeat,
start_time.elapsed().as_secs(),
speed
);
println!("Average chunk size was {} bytes.", stream_len / repeat);
println!(
"time per request: {} microseconds.",
(start_time.elapsed().as_micros()) / (repeat as u128)
);
Ok(()) Ok(())
} }

View File

@ -1,9 +1,10 @@
use anyhow::Error; use anyhow::{Error};
use pbs_api_types::{Authid, BackupNamespace, BackupType}; use pbs_client::{HttpClient, HttpClientOptions, BackupWriter};
use pbs_client::{BackupWriter, HttpClient, HttpClientOptions}; use pbs_api_types::Authid;
async fn upload_speed() -> Result<f64, Error> { async fn upload_speed() -> Result<f64, Error> {
let host = "localhost"; let host = "localhost";
let datastore = "store2"; let datastore = "store2";
@ -17,16 +18,7 @@ async fn upload_speed() -> Result<f64, Error> {
let backup_time = proxmox_time::epoch_i64(); let backup_time = proxmox_time::epoch_i64();
let client = BackupWriter::start( let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false, true).await?;
client,
None,
datastore,
&BackupNamespace::root(),
&(BackupType::Host, "speedtest".to_string(), backup_time).into(),
false,
true,
)
.await?;
println!("start upload speed test"); println!("start upload speed test");
let res = client.upload_speedtest(true).await?; let res = client.upload_speedtest(true).await?;
@ -34,7 +26,7 @@ async fn upload_speed() -> Result<f64, Error> {
Ok(res) Ok(res)
} }
fn main() { fn main() {
match proxmox_async::runtime::main(upload_speed()) { match proxmox_async::runtime::main(upload_speed()) {
Ok(mbs) => { Ok(mbs) => {
println!("average upload speed: {} MB/s", mbs); println!("average upload speed: {} MB/s", mbs);

View File

@ -9,13 +9,14 @@ description = "general API type helpers for PBS"
anyhow = "1.0" anyhow = "1.0"
hex = "0.4.3" hex = "0.4.3"
lazy_static = "1.4" lazy_static = "1.4"
percent-encoding = "2.1" libc = "0.2"
regex = "1.5.5" nix = "0.19.1"
openssl = "0.10"
regex = "1.2"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_plain = "1"
proxmox = "0.15.3"
proxmox-lang = "1.0.0" proxmox-lang = "1.0.0"
proxmox-schema = { version = "1.2.1", features = [ "api-macro" ] } proxmox-schema = { version = "1.0.1", features = [ "api-macro" ] }
proxmox-serde = "0.1" proxmox-time = "1.1"
proxmox-time = "1.1.1"
proxmox-uuid = { version = "1.0.0", features = [ "serde" ] } proxmox-uuid = { version = "1.0.0", features = [ "serde" ] }

View File

@ -73,17 +73,6 @@ constnamedbitmap! {
} }
} }
pub fn privs_to_priv_names(privs: u64) -> Vec<&'static str> {
PRIVILEGES
.iter()
.fold(Vec::new(), |mut priv_names, (name, value)| {
if value & privs != 0 {
priv_names.push(name);
}
priv_names
})
}
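The privs_to_priv_names() helper added on the master side above just scans the (name, bit) table and collects the names whose bit is set. A self-contained sketch with an illustrative table, since the real PRIVILEGES table comes from the constnamedbitmap! macro:

    const PRIVILEGES: &[(&str, u64)] = &[
        ("Datastore.Audit", 1 << 0), // illustrative entries, not the real table
        ("Datastore.Backup", 1 << 1),
        ("Datastore.Read", 1 << 2),
    ];

    fn privs_to_priv_names(privs: u64) -> Vec<&'static str> {
        PRIVILEGES
            .iter()
            .filter(|(_, value)| value & privs != 0)
            .map(|(name, _)| *name)
            .collect()
    }

    fn main() {
        let names = privs_to_priv_names((1 << 0) | (1 << 2));
        assert_eq!(names, vec!["Datastore.Audit", "Datastore.Read"]);
    }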
/// Admin always has all privileges. It can do everything except a few actions /// Admin always has all privileges. It can do everything except a few actions
/// which are limited to the 'root@pam` superuser /// which are limited to the 'root@pam` superuser
pub const ROLE_ADMIN: u64 = u64::MAX; pub const ROLE_ADMIN: u64 = u64::MAX;

View File

@ -1,78 +0,0 @@
//! Predefined Regular Expressions
//!
//! This is a collection of useful regular expressions
use lazy_static::lazy_static;
use regex::Regex;
#[rustfmt::skip]
#[macro_export]
macro_rules! IPV4OCTET { () => (r"(?:25[0-5]|(?:2[0-4]|1[0-9]|[1-9])?[0-9])") }
#[rustfmt::skip]
#[macro_export]
macro_rules! IPV6H16 { () => (r"(?:[0-9a-fA-F]{1,4})") }
#[rustfmt::skip]
#[macro_export]
macro_rules! IPV6LS32 { () => (concat!(r"(?:(?:", IPV4RE!(), "|", IPV6H16!(), ":", IPV6H16!(), "))" )) }
/// Returns the regular expression string to match IPv4 addresses
#[rustfmt::skip]
#[macro_export]
macro_rules! IPV4RE { () => (concat!(r"(?:(?:", IPV4OCTET!(), r"\.){3}", IPV4OCTET!(), ")")) }
/// Returns the regular expression string to match IPv6 addresses
#[rustfmt::skip]
#[macro_export]
macro_rules! IPV6RE { () => (concat!(r"(?:",
r"(?:(?:", r"(?:", IPV6H16!(), r":){6})", IPV6LS32!(), r")|",
r"(?:(?:", r"::(?:", IPV6H16!(), r":){5})", IPV6LS32!(), r")|",
r"(?:(?:(?:", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){4})", IPV6LS32!(), r")|",
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,1}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){3})", IPV6LS32!(), r")|",
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,2}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){2})", IPV6LS32!(), r")|",
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,3}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){1})", IPV6LS32!(), r")|",
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,4}", IPV6H16!(), r")?::", ")", IPV6LS32!(), r")|",
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,5}", IPV6H16!(), r")?::", ")", IPV6H16!(), r")|",
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,6}", IPV6H16!(), r")?::", ")))"))
}
/// Returns the regular expression string to match IP addresses (v4 or v6)
#[rustfmt::skip]
#[macro_export]
macro_rules! IPRE { () => (concat!(r"(?:", IPV4RE!(), "|", IPV6RE!(), ")")) }
/// Regular expression string to match IP addresses where IPv6 addresses require brackets around
/// them, while for IPv4 they are forbidden.
#[rustfmt::skip]
#[macro_export]
macro_rules! IPRE_BRACKET { () => (
concat!(r"(?:",
IPV4RE!(),
r"|\[(?:",
IPV6RE!(),
r")\]",
r")"))
}
lazy_static! {
pub static ref IP_REGEX: Regex = Regex::new(concat!(r"^", IPRE!(), r"$")).unwrap();
pub static ref IP_BRACKET_REGEX: Regex =
Regex::new(concat!(r"^", IPRE_BRACKET!(), r"$")).unwrap();
pub static ref SHA256_HEX_REGEX: Regex = Regex::new(r"^[a-f0-9]{64}$").unwrap();
pub static ref SYSTEMD_DATETIME_REGEX: Regex =
Regex::new(r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$").unwrap();
}
#[test]
fn test_regexes() {
assert!(IP_REGEX.is_match("127.0.0.1"));
assert!(IP_REGEX.is_match("::1"));
assert!(IP_REGEX.is_match("2014:b3a::27"));
assert!(IP_REGEX.is_match("2014:b3a::192.168.0.1"));
assert!(IP_REGEX.is_match("2014:b3a:0102:adf1:1234:4321:4afA:BCDF"));
assert!(IP_BRACKET_REGEX.is_match("127.0.0.1"));
assert!(IP_BRACKET_REGEX.is_match("[::1]"));
assert!(IP_BRACKET_REGEX.is_match("[2014:b3a::27]"));
assert!(IP_BRACKET_REGEX.is_match("[2014:b3a::192.168.0.1]"));
assert!(IP_BRACKET_REGEX.is_match("[2014:b3a:0102:adf1:1234:4321:4afA:BCDF]"));
}
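For plain (unbracketed) addresses, the standard library's IpAddr parser performs the same validation these regexes encode, which can serve as a cross-check in tests (sketch):

    use std::net::IpAddr;

    fn is_ip(s: &str) -> bool {
        s.parse::<IpAddr>().is_ok()
    }

    fn main() {
        assert!(is_ip("127.0.0.1"));
        assert!(is_ip("2014:b3a::27"));
        assert!(!is_ip("[::1]")); // brackets are a URL-host convention, not part of the address
    }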

View File

@ -51,8 +51,7 @@ impl std::str::FromStr for Fingerprint {
fn from_str(s: &str) -> Result<Self, Error> { fn from_str(s: &str) -> Result<Self, Error> {
let mut tmp = s.to_string(); let mut tmp = s.to_string();
tmp.retain(|c| c != ':'); tmp.retain(|c| c != ':');
let mut bytes = [0u8; 32]; let bytes = proxmox::tools::hex_to_digest(&tmp)?;
hex::decode_to_slice(&tmp, &mut bytes)?;
Ok(Fingerprint::new(bytes)) Ok(Fingerprint::new(bytes))
} }
} }
@ -62,16 +61,18 @@ fn as_fingerprint(bytes: &[u8]) -> String {
.as_bytes() .as_bytes()
.chunks(2) .chunks(2)
.map(|v| unsafe { std::str::from_utf8_unchecked(v) }) // it's a hex string .map(|v| unsafe { std::str::from_utf8_unchecked(v) }) // it's a hex string
.collect::<Vec<&str>>() .collect::<Vec<&str>>().join(":")
.join(":")
} }
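For reference, a self-contained round-trip of this colon-separated fingerprint format, using the hex crate as the master side does (a sketch, not the module's exact code):

    fn to_fingerprint(bytes: &[u8; 32]) -> String {
        hex::encode(bytes)
            .as_bytes()
            .chunks(2)
            .map(|pair| std::str::from_utf8(pair).unwrap()) // always valid: hex output is ASCII
            .collect::<Vec<_>>()
            .join(":")
    }

    fn from_fingerprint(s: &str) -> Result<[u8; 32], hex::FromHexError> {
        let mut tmp = s.to_string();
        tmp.retain(|c| c != ':');
        let mut bytes = [0u8; 32];
        hex::decode_to_slice(&tmp, &mut bytes)?;
        Ok(bytes)
    }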
pub mod bytes_as_fingerprint { pub mod bytes_as_fingerprint {
use std::mem::MaybeUninit; use std::mem::MaybeUninit;
use serde::{Deserialize, Deserializer, Serializer}; use serde::{Deserialize, Serializer, Deserializer};
pub fn serialize<S>(bytes: &[u8; 32], serializer: S) -> Result<S::Ok, S::Error> pub fn serialize<S>(
bytes: &[u8; 32],
serializer: S,
) -> Result<S::Ok, S::Error>
where where
S: Serializer, S: Serializer,
{ {
@ -79,7 +80,9 @@ pub mod bytes_as_fingerprint {
serializer.serialize_str(&s) serializer.serialize_str(&s)
} }
pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; 32], D::Error> pub fn deserialize<'de, D>(
deserializer: D,
) -> Result<[u8; 32], D::Error>
where where
D: Deserializer<'de>, D: Deserializer<'de>,
{ {

File diff suppressed because it is too large

View File

@ -53,18 +53,20 @@ impl SizeUnit {
11..=20 => SizeUnit::Kibi, 11..=20 => SizeUnit::Kibi,
_ => SizeUnit::Byte, _ => SizeUnit::Byte,
} }
} else if size >= 1_000_000_000_000_000.0 {
SizeUnit::PByte
} else if size >= 1_000_000_000_000.0 {
SizeUnit::TByte
} else if size >= 1_000_000_000.0 {
SizeUnit::GByte
} else if size >= 1_000_000.0 {
SizeUnit::MByte
} else if size >= 1_000.0 {
SizeUnit::KByte
} else { } else {
SizeUnit::Byte if size >= 1_000_000_000_000_000.0 {
SizeUnit::PByte
} else if size >= 1_000_000_000_000.0 {
SizeUnit::TByte
} else if size >= 1_000_000_000.0 {
SizeUnit::GByte
} else if size >= 1_000_000.0 {
SizeUnit::MByte
} else if size >= 1_000.0 {
SizeUnit::KByte
} else {
SizeUnit::Byte
}
} }
} }
} }
@ -101,8 +103,7 @@ fn strip_unit(v: &str) -> (&str, SizeUnit) {
}; };
let mut unit = SizeUnit::Byte; let mut unit = SizeUnit::Byte;
#[rustfmt::skip] (v.strip_suffix(|c: char| match c {
let value = v.strip_suffix(|c: char| match c {
'k' | 'K' if !binary => { unit = SizeUnit::KByte; true } 'k' | 'K' if !binary => { unit = SizeUnit::KByte; true }
'm' | 'M' if !binary => { unit = SizeUnit::MByte; true } 'm' | 'M' if !binary => { unit = SizeUnit::MByte; true }
'g' | 'G' if !binary => { unit = SizeUnit::GByte; true } 'g' | 'G' if !binary => { unit = SizeUnit::GByte; true }
@ -115,9 +116,7 @@ fn strip_unit(v: &str) -> (&str, SizeUnit) {
't' | 'T' if binary => { unit = SizeUnit::Tebi; true } 't' | 'T' if binary => { unit = SizeUnit::Tebi; true }
'p' | 'P' if binary => { unit = SizeUnit::Pebi; true } 'p' | 'P' if binary => { unit = SizeUnit::Pebi; true }
_ => false _ => false
}).unwrap_or(v).trim_end(); }).unwrap_or(v).trim_end(), unit)
(value, unit)
} }
/// Byte size which can be displayed in a human friendly way /// Byte size which can be displayed in a human friendly way
@ -157,19 +156,13 @@ impl HumanByte {
/// Create a new instance with optimal binary unit computed /// Create a new instance with optimal binary unit computed
pub fn new_binary(size: f64) -> Self { pub fn new_binary(size: f64) -> Self {
let unit = SizeUnit::auto_scale(size, true); let unit = SizeUnit::auto_scale(size, true);
HumanByte { HumanByte { size: size / unit.factor(), unit }
size: size / unit.factor(),
unit,
}
} }
/// Create a new instance with optimal decimal unit computed /// Create a new instance with optimal decimal unit computed
pub fn new_decimal(size: f64) -> Self { pub fn new_decimal(size: f64) -> Self {
let unit = SizeUnit::auto_scale(size, false); let unit = SizeUnit::auto_scale(size, false);
HumanByte { HumanByte { size: size / unit.factor(), unit }
size: size / unit.factor(),
unit,
}
} }
/// Returns the size as u64 number of bytes /// Returns the size as u64 number of bytes
@ -223,8 +216,8 @@ impl std::str::FromStr for HumanByte {
} }
} }
proxmox_serde::forward_deserialize_to_from_str!(HumanByte); proxmox::forward_deserialize_to_from_str!(HumanByte);
proxmox_serde::forward_serialize_to_display!(HumanByte); proxmox::forward_serialize_to_display!(HumanByte);
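The two forward_* macros (moved from the proxmox crate into proxmox-serde on the master side) derive the serde impls from the existing FromStr and Display impls. The deserialize half expands to roughly the following (a sketch, not the exact macro output):

    impl<'de> serde::Deserialize<'de> for HumanByte {
        fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
        where
            D: serde::Deserializer<'de>,
        {
            let s: String = serde::Deserialize::deserialize(deserializer)?;
            s.parse().map_err(serde::de::Error::custom)
        }
    }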
#[test] #[test]
fn test_human_byte_parser() -> Result<(), Error> { fn test_human_byte_parser() -> Result<(), Error> {
@ -237,12 +230,7 @@ fn test_human_byte_parser() -> Result<(), Error> {
bail!("got unexpected size for '{}' ({} != {})", v, h.size, size); bail!("got unexpected size for '{}' ({} != {})", v, h.size, size);
} }
if h.unit != unit { if h.unit != unit {
bail!( bail!("got unexpected unit for '{}' ({:?} != {:?})", v, h.unit, unit);
"got unexpected unit for '{}' ({:?} != {:?})",
v,
h.unit,
unit
);
} }
let new = h.to_string(); let new = h.to_string();
@ -279,12 +267,7 @@ fn test_human_byte_parser() -> Result<(), Error> {
assert_eq!(&format!("{:.7}", h), "1.2345678 B"); assert_eq!(&format!("{:.7}", h), "1.2345678 B");
assert_eq!(&format!("{:.8}", h), "1.2345678 B"); assert_eq!(&format!("{:.8}", h), "1.2345678 B");
assert!(test( assert!(test("987654321", 987654321.0, SizeUnit::Byte, "987654321 B"));
"987654321",
987654321.0,
SizeUnit::Byte,
"987654321 B"
));
assert!(test("1300b", 1300.0, SizeUnit::Byte, "1300 B")); assert!(test("1300b", 1300.0, SizeUnit::Byte, "1300 B"));
assert!(test("1300B", 1300.0, SizeUnit::Byte, "1300 B")); assert!(test("1300B", 1300.0, SizeUnit::Byte, "1300 B"));

View File

@ -7,18 +7,18 @@ use serde::{Deserialize, Serialize};
use proxmox_schema::*; use proxmox_schema::*;
use crate::{ use crate::{
Authid, BackupNamespace, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA, Userid, Authid, RateLimitConfig,
BACKUP_NAMESPACE_SCHEMA, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, PROXMOX_SAFE_ID_FORMAT, DATASTORE_SCHEMA,
SINGLE_LINE_COMMENT_SCHEMA, BACKUP_GROUP_SCHEMA, BACKUP_TYPE_SCHEMA,
}; };
const_regex! { const_regex!{
/// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID' /// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):"); pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
/// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:(?:LOCAL_NS_ANCHOR:)ACTUAL_JOB_ID' /// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:ACTUAL_JOB_ID'
pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r")(?::(", BACKUP_NS_RE!(), r"))?:"); pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
} }
pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.") pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
@ -27,41 +27,34 @@ pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
.max_length(32) .max_length(32)
.schema(); .schema();
pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run sync job at specified schedule.") pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
.format(&ApiStringFormat::VerifyFn( "Run sync job at specified schedule.")
proxmox_time::verify_calendar_event, .format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
))
.type_text("<calendar-event>") .type_text("<calendar-event>")
.schema(); .schema();
pub const GC_SCHEDULE_SCHEMA: Schema = pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
StringSchema::new("Run garbage collection job at specified schedule.") "Run garbage collection job at specified schedule.")
.format(&ApiStringFormat::VerifyFn( .format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
proxmox_time::verify_calendar_event,
))
.type_text("<calendar-event>")
.schema();
pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run prune job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(
proxmox_time::verify_calendar_event,
))
.type_text("<calendar-event>") .type_text("<calendar-event>")
.schema(); .schema();
pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
StringSchema::new("Run verify job at specified schedule.") "Run prune job at specified schedule.")
.format(&ApiStringFormat::VerifyFn( .format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
proxmox_time::verify_calendar_event, .type_text("<calendar-event>")
)) .schema();
.type_text("<calendar-event>")
.schema(); pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new(
"Run verify job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
.type_text("<calendar-event>")
.schema();
pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new( pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
"Delete vanished backups. This remove the local copy if the remote backup was deleted.", "Delete vanished backups. This remove the local copy if the remote backup was deleted.")
) .default(false)
.default(false) .schema();
.schema();
#[api( #[api(
properties: { properties: {
@ -87,17 +80,17 @@ pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
}, },
} }
)] )]
#[derive(Serialize, Deserialize, Default)] #[derive(Serialize,Deserialize,Default)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all="kebab-case")]
/// Job Scheduling Status /// Job Scheduling Status
pub struct JobScheduleStatus { pub struct JobScheduleStatus {
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub next_run: Option<i64>, pub next_run: Option<i64>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub last_run_state: Option<String>, pub last_run_state: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub last_run_upid: Option<String>, pub last_run_upid: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub last_run_endtime: Option<i64>, pub last_run_endtime: Option<i64>,
} }
@ -141,23 +134,20 @@ pub struct DatastoreNotify {
pub sync: Option<Notify>, pub sync: Option<Notify>,
} }
pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
StringSchema::new("Datastore notification setting") "Datastore notification setting")
.format(&ApiStringFormat::PropertyString( .format(&ApiStringFormat::PropertyString(&DatastoreNotify::API_SCHEMA))
&DatastoreNotify::API_SCHEMA, .schema();
))
.schema();
pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new( pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
"Do not verify backups that are already verified if their verification is not outdated.", "Do not verify backups that are already verified if their verification is not outdated.")
) .default(true)
.default(true) .schema();
.schema();
pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new(
IntegerSchema::new("Days after that a verification becomes outdated. (0 is deprecated)'") "Days after that a verification becomes outdated")
.minimum(0) .minimum(1)
.schema(); .schema();
#[api( #[api(
properties: { properties: {
@ -183,53 +173,29 @@ pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema =
optional: true, optional: true,
schema: VERIFICATION_SCHEDULE_SCHEMA, schema: VERIFICATION_SCHEDULE_SCHEMA,
}, },
ns: {
optional: true,
schema: BACKUP_NAMESPACE_SCHEMA,
},
"max-depth": {
optional: true,
schema: crate::NS_MAX_DEPTH_SCHEMA,
},
} }
)] )]
#[derive(Serialize, Deserialize, Updater)] #[derive(Serialize,Deserialize,Updater)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all="kebab-case")]
/// Verification Job /// Verification Job
pub struct VerificationJobConfig { pub struct VerificationJobConfig {
/// unique ID to address this job /// unique ID to address this job
#[updater(skip)] #[updater(skip)]
pub id: String, pub id: String,
/// the datastore ID this verification job affects /// the datastore ID this verificaiton job affects
pub store: String, pub store: String,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
/// if not set to false, check the age of the last snapshot verification to filter /// if not set to false, check the age of the last snapshot verification to filter
/// out recent ones, depending on 'outdated_after' configuration. /// out recent ones, depending on 'outdated_after' configuration.
pub ignore_verified: Option<bool>, pub ignore_verified: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
/// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false. /// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false.
pub outdated_after: Option<i64>, pub outdated_after: Option<i64>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>, pub comment: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
/// when to schedule this job in calendar event notation /// when to schedule this job in calendar event notation
pub schedule: Option<String>, pub schedule: Option<String>,
#[serde(skip_serializing_if = "Option::is_none", default)]
/// on which backup namespace to run the verification recursively
pub ns: Option<BackupNamespace>,
#[serde(skip_serializing_if = "Option::is_none", default)]
/// how deep the verify should go from the `ns` level downwards. Passing 0 verifies only the
/// snapshots on the same level as the passed `ns`, or the datastore root if none.
pub max_depth: Option<usize>,
}
impl VerificationJobConfig {
pub fn acl_path(&self) -> Vec<&str> {
match self.ns.as_ref() {
Some(ns) => ns.acl_path(&self.store),
None => vec!["datastore", &self.store],
}
}
} }
#[api( #[api(
@ -242,8 +208,8 @@ impl VerificationJobConfig {
}, },
}, },
)] )]
#[derive(Serialize, Deserialize)] #[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all="kebab-case")]
/// Status of Verification Job /// Status of Verification Job
pub struct VerificationJobStatus { pub struct VerificationJobStatus {
#[serde(flatten)] #[serde(flatten)]
@ -286,38 +252,26 @@ pub struct VerificationJobStatus {
schema: GROUP_FILTER_LIST_SCHEMA, schema: GROUP_FILTER_LIST_SCHEMA,
optional: true, optional: true,
}, },
ns: {
type: BackupNamespace,
optional: true,
},
"max-depth": {
schema: crate::NS_MAX_DEPTH_SCHEMA,
optional: true,
},
} }
)] )]
#[derive(Serialize, Deserialize, Clone, Updater)] #[derive(Serialize,Deserialize,Clone,Updater)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all="kebab-case")]
/// Tape Backup Job Setup /// Tape Backup Job Setup
pub struct TapeBackupJobSetup { pub struct TapeBackupJobSetup {
pub store: String, pub store: String,
pub pool: String, pub pool: String,
pub drive: String, pub drive: String,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub eject_media: Option<bool>, pub eject_media: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub export_media_set: Option<bool>, pub export_media_set: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub latest_only: Option<bool>, pub latest_only: Option<bool>,
/// Send job email notification to this user /// Send job email notification to this user
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub notify_user: Option<Userid>, pub notify_user: Option<Userid>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub group_filter: Option<Vec<GroupFilter>>, pub group_filter: Option<Vec<GroupFilter>>,
#[serde(skip_serializing_if = "Option::is_none", default)]
pub ns: Option<BackupNamespace>,
#[serde(skip_serializing_if = "Option::is_none", default)]
pub max_depth: Option<usize>,
} }
#[api( #[api(
@ -338,17 +292,17 @@ pub struct TapeBackupJobSetup {
}, },
} }
)] )]
#[derive(Serialize, Deserialize, Clone, Updater)] #[derive(Serialize,Deserialize,Clone,Updater)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all="kebab-case")]
/// Tape Backup Job /// Tape Backup Job
pub struct TapeBackupJobConfig { pub struct TapeBackupJobConfig {
#[updater(skip)] #[updater(skip)]
pub id: String, pub id: String,
#[serde(flatten)] #[serde(flatten)]
pub setup: TapeBackupJobSetup, pub setup: TapeBackupJobSetup,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>, pub comment: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub schedule: Option<String>, pub schedule: Option<String>,
} }
@ -362,8 +316,8 @@ pub struct TapeBackupJobConfig {
}, },
}, },
)] )]
#[derive(Serialize, Deserialize)] #[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all="kebab-case")]
/// Status of Tape Backup Job /// Status of Tape Backup Job
pub struct TapeBackupJobStatus { pub struct TapeBackupJobStatus {
#[serde(flatten)] #[serde(flatten)]
@ -371,7 +325,7 @@ pub struct TapeBackupJobStatus {
#[serde(flatten)] #[serde(flatten)]
pub status: JobScheduleStatus, pub status: JobScheduleStatus,
/// Next tape used (best guess) /// Next tape used (best guess)
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub next_media_label: Option<String>, pub next_media_label: Option<String>,
} }
@ -379,7 +333,7 @@ pub struct TapeBackupJobStatus {
/// Filter for matching `BackupGroup`s, for use with `BackupGroup::filter`. /// Filter for matching `BackupGroup`s, for use with `BackupGroup::filter`.
pub enum GroupFilter { pub enum GroupFilter {
/// BackupGroup type - either `vm`, `ct`, or `host`. /// BackupGroup type - either `vm`, `ct`, or `host`.
BackupType(BackupType), BackupType(String),
/// Full identifier of BackupGroup, including type /// Full identifier of BackupGroup, including type
Group(String), Group(String),
/// A regular expression matched against the full identifier of the BackupGroup /// A regular expression matched against the full identifier of the BackupGroup
@ -390,9 +344,9 @@ impl std::str::FromStr for GroupFilter {
type Err = anyhow::Error; type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> { fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.split_once(':') { match s.split_once(":") {
Some(("group", value)) => BACKUP_GROUP_SCHEMA.parse_simple_value(value).map(|_| GroupFilter::Group(value.to_string())), Some(("group", value)) => parse_simple_value(value, &BACKUP_GROUP_SCHEMA).map(|_| GroupFilter::Group(value.to_string())),
Some(("type", value)) => Ok(GroupFilter::BackupType(value.parse()?)), Some(("type", value)) => parse_simple_value(value, &BACKUP_TYPE_SCHEMA).map(|_| GroupFilter::BackupType(value.to_string())),
Some(("regex", value)) => Ok(GroupFilter::Regex(Regex::new(value)?)), Some(("regex", value)) => Ok(GroupFilter::Regex(Regex::new(value)?)),
Some((ty, _value)) => Err(format_err!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty)), Some((ty, _value)) => Err(format_err!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty)),
None => Err(format_err!("input doesn't match expected format '<group:GROUP||type:<vm|ct|host>|regex:REGEX>'")), None => Err(format_err!("input doesn't match expected format '<group:GROUP||type:<vm|ct|host>|regex:REGEX>'")),
@ -411,8 +365,8 @@ impl std::fmt::Display for GroupFilter {
} }
} }
proxmox_serde::forward_deserialize_to_from_str!(GroupFilter); proxmox::forward_deserialize_to_from_str!(GroupFilter);
proxmox_serde::forward_serialize_to_display!(GroupFilter); proxmox::forward_serialize_to_display!(GroupFilter);
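Usage of the prefix syntax accepted by GroupFilter::from_str, assuming the types above are in scope (sketch):

    fn example() -> Result<(), anyhow::Error> {
        let _by_type: GroupFilter = "type:vm".parse()?;
        let _by_group: GroupFilter = "group:vm/100".parse()?;
        let _by_regex: GroupFilter = "regex:^vm/1\\d\\d$".parse()?;
        assert!("color:blue".parse::<GroupFilter>().is_err()); // unknown prefix is rejected
        Ok(())
    }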
fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> { fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> {
GroupFilter::from_str(input).map(|_| ()) GroupFilter::from_str(input).map(|_| ())
@ -424,8 +378,7 @@ pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new(
.type_text("<type:<vm|ct|host>|group:GROUP|regex:RE>") .type_text("<type:<vm|ct|host>|group:GROUP|regex:RE>")
.schema(); .schema();
pub const GROUP_FILTER_LIST_SCHEMA: Schema = pub const GROUP_FILTER_LIST_SCHEMA: Schema = ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema();
ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema();
#[api( #[api(
properties: { properties: {
@ -435,10 +388,6 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema =
store: { store: {
schema: DATASTORE_SCHEMA, schema: DATASTORE_SCHEMA,
}, },
ns: {
type: BackupNamespace,
optional: true,
},
"owner": { "owner": {
type: Authid, type: Authid,
optional: true, optional: true,
@ -449,18 +398,10 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema =
"remote-store": { "remote-store": {
schema: DATASTORE_SCHEMA, schema: DATASTORE_SCHEMA,
}, },
"remote-ns": {
type: BackupNamespace,
optional: true,
},
"remove-vanished": { "remove-vanished": {
schema: REMOVE_VANISHED_BACKUPS_SCHEMA, schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
optional: true, optional: true,
}, },
"max-depth": {
schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
optional: true,
},
comment: { comment: {
optional: true, optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA, schema: SINGLE_LINE_COMMENT_SCHEMA,
@ -478,44 +419,29 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema =
}, },
} }
)] )]
#[derive(Serialize, Deserialize, Clone, Updater)] #[derive(Serialize,Deserialize,Clone,Updater)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all="kebab-case")]
/// Sync Job /// Sync Job
pub struct SyncJobConfig { pub struct SyncJobConfig {
#[updater(skip)] #[updater(skip)]
pub id: String, pub id: String,
pub store: String, pub store: String,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub ns: Option<BackupNamespace>,
#[serde(skip_serializing_if = "Option::is_none")]
pub owner: Option<Authid>, pub owner: Option<Authid>,
pub remote: String, pub remote: String,
pub remote_store: String, pub remote_store: String,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub remote_ns: Option<BackupNamespace>,
#[serde(skip_serializing_if = "Option::is_none")]
pub remove_vanished: Option<bool>, pub remove_vanished: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub max_depth: Option<usize>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>, pub comment: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub schedule: Option<String>, pub schedule: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub group_filter: Option<Vec<GroupFilter>>, pub group_filter: Option<Vec<GroupFilter>>,
#[serde(flatten)] #[serde(flatten)]
pub limit: RateLimitConfig, pub limit: RateLimitConfig,
} }
impl SyncJobConfig {
pub fn acl_path(&self) -> Vec<&str> {
match self.ns.as_ref() {
Some(ns) => ns.acl_path(&self.store),
None => vec!["datastore", &self.store],
}
}
}
#[api( #[api(
properties: { properties: {
config: { config: {
@ -526,8 +452,9 @@ impl SyncJobConfig {
}, },
}, },
)] )]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")] #[derive(Serialize,Deserialize)]
#[serde(rename_all="kebab-case")]
/// Status of Sync Job /// Status of Sync Job
pub struct SyncJobStatus { pub struct SyncJobStatus {
#[serde(flatten)] #[serde(flatten)]
@ -535,186 +462,3 @@ pub struct SyncJobStatus {
#[serde(flatten)] #[serde(flatten)]
pub status: JobScheduleStatus, pub status: JobScheduleStatus,
} }
/// These are used separately without `ns`/`max-depth` sometimes in the API, specifically in the API
/// call to prune a specific group, where `max-depth` makes no sense.
#[api(
properties: {
"keep-last": {
schema: crate::PRUNE_SCHEMA_KEEP_LAST,
optional: true,
},
"keep-hourly": {
schema: crate::PRUNE_SCHEMA_KEEP_HOURLY,
optional: true,
},
"keep-daily": {
schema: crate::PRUNE_SCHEMA_KEEP_DAILY,
optional: true,
},
"keep-weekly": {
schema: crate::PRUNE_SCHEMA_KEEP_WEEKLY,
optional: true,
},
"keep-monthly": {
schema: crate::PRUNE_SCHEMA_KEEP_MONTHLY,
optional: true,
},
"keep-yearly": {
schema: crate::PRUNE_SCHEMA_KEEP_YEARLY,
optional: true,
},
}
)]
#[derive(Serialize, Deserialize, Default, Updater)]
#[serde(rename_all = "kebab-case")]
/// Common pruning options
pub struct KeepOptions {
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_last: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_hourly: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_daily: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_weekly: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_monthly: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_yearly: Option<u64>,
}
impl KeepOptions {
pub fn keeps_something(&self) -> bool {
self.keep_last.unwrap_or(0)
+ self.keep_hourly.unwrap_or(0)
+ self.keep_daily.unwrap_or(0)
+ self.keep_weekly.unwrap_or(0)
+ self.keep_monthly.unwrap_or(0)
+ self.keep_yearly.unwrap_or(0)
> 0
}
}
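The keeps_something() check above simply sums all options, with unset ones counting as 0. For example, assuming KeepOptions is in scope (sketch):

    fn example() {
        let nothing = KeepOptions::default();
        assert!(!nothing.keeps_something()); // every option unset: the sum is 0

        let some = KeepOptions {
            keep_last: Some(3),
            keep_daily: Some(7),
            ..Default::default()
        };
        assert!(some.keeps_something());
    }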
#[api(
properties: {
keep: {
type: KeepOptions,
},
ns: {
type: BackupNamespace,
optional: true,
},
"max-depth": {
schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
optional: true,
},
}
)]
#[derive(Serialize, Deserialize, Default, Updater)]
#[serde(rename_all = "kebab-case")]
/// Common pruning options
pub struct PruneJobOptions {
#[serde(flatten)]
pub keep: KeepOptions,
/// The (optional) recursion depth
#[serde(skip_serializing_if = "Option::is_none")]
pub max_depth: Option<usize>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ns: Option<BackupNamespace>,
}
impl PruneJobOptions {
pub fn keeps_something(&self) -> bool {
self.keep.keeps_something()
}
pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> {
match &self.ns {
Some(ns) => ns.acl_path(store),
None => vec!["datastore", store],
}
}
}
#[api(
properties: {
disable: {
type: Boolean,
optional: true,
default: false,
},
id: {
schema: JOB_ID_SCHEMA,
},
store: {
schema: DATASTORE_SCHEMA,
},
schedule: {
schema: PRUNE_SCHEDULE_SCHEMA,
optional: true,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
options: {
type: PruneJobOptions,
},
},
)]
#[derive(Deserialize, Serialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// Prune configuration.
pub struct PruneJobConfig {
/// unique ID to address this job
#[updater(skip)]
pub id: String,
pub store: String,
/// Disable this job.
#[serde(default, skip_serializing_if = "is_false")]
#[updater(serde(skip_serializing_if = "Option::is_none"))]
pub disable: bool,
pub schedule: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
#[serde(flatten)]
pub options: PruneJobOptions,
}
impl PruneJobConfig {
pub fn acl_path(&self) -> Vec<&str> {
self.options.acl_path(&self.store)
}
}
fn is_false(b: &bool) -> bool {
!b
}
#[api(
properties: {
config: {
type: PruneJobConfig,
},
status: {
type: JobScheduleStatus,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Status of prune job
pub struct PruneJobStatus {
#[serde(flatten)]
pub config: PruneJobConfig,
#[serde(flatten)]
pub status: JobScheduleStatus,
}

View File

@ -39,7 +39,7 @@ impl Default for Kdf {
/// Encryption Key Information /// Encryption Key Information
pub struct KeyInfo { pub struct KeyInfo {
/// Path to key (if stored in a file) /// Path to key (if stored in a file)
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub path: Option<String>, pub path: Option<String>,
pub kdf: Kdf, pub kdf: Kdf,
/// Key creation time /// Key creation time
@ -47,9 +47,10 @@ pub struct KeyInfo {
/// Key modification time /// Key modification time
pub modified: i64, pub modified: i64,
/// Key fingerprint /// Key fingerprint
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub fingerprint: Option<String>, pub fingerprint: Option<String>,
/// Password hint /// Password hint
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub hint: Option<String>, pub hint: Option<String>,
} }

View File

@ -1,13 +1,12 @@
//! Basic API types used by most of the PBS code. //! Basic API types used by most of the PBS code.
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use anyhow::bail;
pub mod common_regex;
pub mod percent_encoding;
use proxmox_schema::{ use proxmox_schema::{
api, const_regex, ApiStringFormat, ApiType, ArraySchema, ReturnType, Schema, StringSchema, api, const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, ReturnType,
}; };
use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE};
use proxmox_time::parse_daily_duration; use proxmox_time::parse_daily_duration;
#[rustfmt::skip] #[rustfmt::skip]
@ -26,44 +25,14 @@ macro_rules! BACKUP_TYPE_RE { () => (r"(?:host|vm|ct)") }
#[macro_export] #[macro_export]
macro_rules! BACKUP_TIME_RE { () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z") } macro_rules! BACKUP_TIME_RE { () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z") }
#[rustfmt::skip]
#[macro_export]
macro_rules! BACKUP_NS_RE {
() => (
concat!("(?:",
"(?:", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}", PROXMOX_SAFE_ID_REGEX_STR!(),
")?")
);
}
#[rustfmt::skip]
#[macro_export]
macro_rules! BACKUP_NS_PATH_RE {
() => (
concat!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/")
);
}
#[rustfmt::skip] #[rustfmt::skip]
#[macro_export] #[macro_export]
macro_rules! SNAPSHOT_PATH_REGEX_STR { macro_rules! SNAPSHOT_PATH_REGEX_STR {
() => ( () => (
concat!( concat!(r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")")
r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")",
)
); );
} }
#[rustfmt::skip]
#[macro_export]
macro_rules! GROUP_OR_SNAPSHOT_PATH_REGEX_STR {
() => {
concat!(
r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")(?:/(", BACKUP_TIME_RE!(), r"))?",
)
};
}
mod acl; mod acl;
pub use acl::*; pub use acl::*;
@ -79,9 +48,6 @@ pub use jobs::*;
mod key_derivation; mod key_derivation;
pub use key_derivation::{Kdf, KeyInfo}; pub use key_derivation::{Kdf, KeyInfo};
mod maintenance;
pub use maintenance::*;
mod network; mod network;
pub use network::*; pub use network::*;
@ -101,7 +67,7 @@ pub use user::*;
pub use proxmox_schema::upid::*; pub use proxmox_schema::upid::*;
mod crypto; mod crypto;
pub use crypto::{bytes_as_fingerprint, CryptMode, Fingerprint}; pub use crypto::{CryptMode, Fingerprint, bytes_as_fingerprint};
pub mod file_restore; pub mod file_restore;
@ -120,6 +86,7 @@ pub use traffic_control::*;
mod zfs; mod zfs;
pub use zfs::*; pub use zfs::*;
#[rustfmt::skip] #[rustfmt::skip]
#[macro_use] #[macro_use]
mod local_macros { mod local_macros {
@ -155,9 +122,6 @@ const_regex! {
pub FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$"; pub FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";
// just a rough check - dummy acceptor is used before persisting
pub OPENSSL_CIPHERS_REGEX = r"^[0-9A-Za-z_:, +!\-@=.]+$";
/// Regex for safe identifiers. /// Regex for safe identifiers.
/// ///
/// This /// This
@ -169,8 +133,6 @@ const_regex! {
pub SINGLE_LINE_COMMENT_REGEX = r"^[[:^cntrl:]]*$"; pub SINGLE_LINE_COMMENT_REGEX = r"^[[:^cntrl:]]*$";
pub MULTI_LINE_COMMENT_REGEX = r"(?m)^([[:^cntrl:]]*)$";
pub BACKUP_REPO_URL_REGEX = concat!( pub BACKUP_REPO_URL_REGEX = concat!(
r"^^(?:(?:(", r"^^(?:(?:(",
USER_ID_REGEX_STR!(), "|", APITOKEN_ID_REGEX_STR!(), USER_ID_REGEX_STR!(), "|", APITOKEN_ID_REGEX_STR!(),
@ -192,17 +154,13 @@ pub const CIDR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_REGEX);
pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
pub const PASSWORD_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PASSWORD_REGEX); pub const PASSWORD_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PASSWORD_REGEX);
pub const UUID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&UUID_REGEX); pub const UUID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&UUID_REGEX);
pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat = pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);
ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX); pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX);
pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat = pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX);
ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX);
pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX);
pub const HOSTNAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOSTNAME_REGEX); pub const HOSTNAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOSTNAME_REGEX);
pub const OPENSSL_CIPHERS_TLS_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&OPENSSL_CIPHERS_REGEX);
pub const DNS_ALIAS_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_ALIAS_REGEX); pub const DNS_ALIAS_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&DNS_ALIAS_REGEX);
pub const DAILY_DURATION_FORMAT: ApiStringFormat = pub const DAILY_DURATION_FORMAT: ApiStringFormat =
ApiStringFormat::VerifyFn(|s| parse_daily_duration(s).map(drop)); ApiStringFormat::VerifyFn(|s| parse_daily_duration(s).map(drop));
@ -210,15 +168,18 @@ pub const DAILY_DURATION_FORMAT: ApiStringFormat =
pub const SEARCH_DOMAIN_SCHEMA: Schema = pub const SEARCH_DOMAIN_SCHEMA: Schema =
StringSchema::new("Search domain for host-name lookup.").schema(); StringSchema::new("Search domain for host-name lookup.").schema();
pub const FIRST_DNS_SERVER_SCHEMA: Schema = StringSchema::new("First name server IP address.") pub const FIRST_DNS_SERVER_SCHEMA: Schema =
StringSchema::new("First name server IP address.")
.format(&IP_FORMAT) .format(&IP_FORMAT)
.schema(); .schema();
pub const SECOND_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Second name server IP address.") pub const SECOND_DNS_SERVER_SCHEMA: Schema =
StringSchema::new("Second name server IP address.")
.format(&IP_FORMAT) .format(&IP_FORMAT)
.schema(); .schema();
pub const THIRD_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Third name server IP address.") pub const THIRD_DNS_SERVER_SCHEMA: Schema =
StringSchema::new("Third name server IP address.")
.format(&IP_FORMAT) .format(&IP_FORMAT)
.schema(); .schema();
@ -226,47 +187,45 @@ pub const HOSTNAME_SCHEMA: Schema = StringSchema::new("Hostname (as defined in R
.format(&HOSTNAME_FORMAT) .format(&HOSTNAME_FORMAT)
.schema(); .schema();
pub const OPENSSL_CIPHERS_TLS_1_2_SCHEMA: Schema = pub const DNS_NAME_FORMAT: ApiStringFormat =
StringSchema::new("OpenSSL cipher list used by the proxy for TLS <= 1.2") ApiStringFormat::Pattern(&DNS_NAME_REGEX);
.format(&OPENSSL_CIPHERS_TLS_FORMAT)
.schema();
pub const OPENSSL_CIPHERS_TLS_1_3_SCHEMA: Schema = pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat =
StringSchema::new("OpenSSL ciphersuites list used by the proxy for TLS 1.3") ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX);
.format(&OPENSSL_CIPHERS_TLS_FORMAT)
.schema();
pub const DNS_NAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_NAME_REGEX);
pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX);
pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP address.") pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP address.")
.format(&DNS_NAME_OR_IP_FORMAT) .format(&DNS_NAME_OR_IP_FORMAT)
.schema(); .schema();
pub const NODE_SCHEMA: Schema = StringSchema::new("Node name (or 'localhost')") pub const NODE_SCHEMA: Schema = StringSchema::new("Node name (or 'localhost')")
.format(&HOSTNAME_FORMAT) .format(&ApiStringFormat::VerifyFn(|node| {
if node == "localhost" || node == proxmox::tools::nodename() {
Ok(())
} else {
bail!("no such node '{}'", node);
}
}))
.schema(); .schema();
pub const TIME_ZONE_SCHEMA: Schema = StringSchema::new( pub const TIME_ZONE_SCHEMA: Schema = StringSchema::new(
"Time zone. The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names.", "Time zone. The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names.")
) .format(&SINGLE_LINE_COMMENT_FORMAT)
.format(&SINGLE_LINE_COMMENT_FORMAT) .min_length(2)
.min_length(2) .max_length(64)
.max_length(64) .schema();
.schema();
pub const BLOCKDEVICE_NAME_SCHEMA: Schema = pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name (/sys/block/<name>).")
StringSchema::new("Block device name (/sys/block/<name>).") .format(&BLOCKDEVICE_NAME_FORMAT)
.format(&BLOCKDEVICE_NAME_FORMAT) .min_length(3)
.min_length(3) .max_length(64)
.max_length(64) .schema();
.schema();
pub const DISK_ARRAY_SCHEMA: Schema = pub const DISK_ARRAY_SCHEMA: Schema = ArraySchema::new(
ArraySchema::new("Disk name list.", &BLOCKDEVICE_NAME_SCHEMA).schema(); "Disk name list.", &BLOCKDEVICE_NAME_SCHEMA)
.schema();
pub const DISK_LIST_SCHEMA: Schema = StringSchema::new("A list of disk names, comma separated.") pub const DISK_LIST_SCHEMA: Schema = StringSchema::new(
"A list of disk names, comma separated.")
.format(&ApiStringFormat::PropertyString(&DISK_ARRAY_SCHEMA)) .format(&ApiStringFormat::PropertyString(&DISK_ARRAY_SCHEMA))
.schema(); .schema();
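
A PropertyString format means the outer string is itself parsed against the inner array schema, so a value like "sda,sdb,sdc" is split and each element checked against BLOCKDEVICE_NAME_SCHEMA. A minimal sketch of that two-step validation (illustrative code, not from the repository):

// Split the property string, then validate each element against the
// bounds BLOCKDEVICE_NAME_SCHEMA declares above (3..=64 chars).
let disks = "sda,sdb,sdc";
for name in disks.split(',') {
    assert!((3..=64).contains(&name.len()));
}
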
@ -306,21 +265,15 @@ pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (singl
.format(&SINGLE_LINE_COMMENT_FORMAT) .format(&SINGLE_LINE_COMMENT_FORMAT)
.schema(); .schema();
pub const MULTI_LINE_COMMENT_FORMAT: ApiStringFormat = pub const SUBSCRIPTION_KEY_SCHEMA: Schema = StringSchema::new("Proxmox Backup Server subscription key.")
ApiStringFormat::Pattern(&MULTI_LINE_COMMENT_REGEX); .format(&SUBSCRIPTION_KEY_FORMAT)
.min_length(15)
pub const MULTI_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (multiple lines).") .max_length(16)
.format(&MULTI_LINE_COMMENT_FORMAT)
.schema(); .schema();
pub const SUBSCRIPTION_KEY_SCHEMA: Schema = pub const SERVICE_ID_SCHEMA: Schema = StringSchema::new("Service ID.")
StringSchema::new("Proxmox Backup Server subscription key.") .max_length(256)
.format(&SUBSCRIPTION_KEY_FORMAT) .schema();
.min_length(15)
.max_length(16)
.schema();
pub const SERVICE_ID_SCHEMA: Schema = StringSchema::new("Service ID.").max_length(256).schema();
pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new( pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
"Prevent changes if current configuration file has different \ "Prevent changes if current configuration file has different \
@ -333,8 +286,10 @@ pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
/// API schema format definition for repository URLs /// API schema format definition for repository URLs
pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX); pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX);
// Complex type definitions // Complex type definitions
#[api()] #[api()]
#[derive(Default, Serialize, Deserialize)] #[derive(Default, Serialize, Deserialize)]
/// Storage space usage information. /// Storage space usage information.
@ -353,6 +308,39 @@ pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.")
.max_length(64) .max_length(64)
.schema(); .schema();
#[api]
#[derive(Deserialize, Serialize)]
/// RSA public key information
pub struct RsaPubKeyInfo {
/// Path to key (if stored in a file)
#[serde(skip_serializing_if="Option::is_none")]
pub path: Option<String>,
/// RSA exponent
pub exponent: String,
/// Hex-encoded RSA modulus
pub modulus: String,
/// Key (modulus) length in bits
pub length: usize,
}
impl std::convert::TryFrom<openssl::rsa::Rsa<openssl::pkey::Public>> for RsaPubKeyInfo {
type Error = anyhow::Error;
fn try_from(value: openssl::rsa::Rsa<openssl::pkey::Public>) -> Result<Self, Self::Error> {
let modulus = value.n().to_hex_str()?.to_string();
let exponent = value.e().to_dec_str()?.to_string();
let length = value.size() as usize * 8;
Ok(Self {
path: None,
exponent,
modulus,
length,
})
}
}
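
A hypothetical use of the TryFrom impl above, assuming the openssl crate and an illustrative key path:

use std::convert::TryFrom;

fn print_pubkey_info() -> Result<(), anyhow::Error> {
    let pem = std::fs::read("/path/to/key.pub")?; // illustrative path
    let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem)?;
    let info = RsaPubKeyInfo::try_from(rsa)?;
    println!("{}-bit RSA key, exponent {}", info.length, info.exponent);
    Ok(())
}
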
#[api()] #[api()]
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "PascalCase")] #[serde(rename_all = "PascalCase")]
@ -379,10 +367,11 @@ pub struct APTUpdateInfo {
/// URL under which the package's changelog can be retrieved /// URL under which the package's changelog can be retrieved
pub change_log_url: String, pub change_log_url: String,
/// Custom extra field for additional package information /// Custom extra field for additional package information
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub extra_info: Option<String>, pub extra_info: Option<String>,
} }
#[api()] #[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")] #[serde(rename_all = "lowercase")]
@ -394,6 +383,7 @@ pub enum NodePowerCommand {
Shutdown, Shutdown,
} }
#[api()] #[api()]
#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)] #[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")] #[serde(rename_all = "lowercase")]
@ -432,16 +422,19 @@ pub struct TaskListItem {
/// The authenticated entity who started the task /// The authenticated entity who started the task
pub user: String, pub user: String,
/// The task end time (Epoch) /// The task end time (Epoch)
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub endtime: Option<i64>, pub endtime: Option<i64>,
/// Task end status /// Task end status
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub status: Option<String>, pub status: Option<String>,
} }
pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType { pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType {
optional: false, optional: false,
schema: &ArraySchema::new("A list of tasks.", &TaskListItem::API_SCHEMA).schema(), schema: &ArraySchema::new(
"A list of tasks.",
&TaskListItem::API_SCHEMA,
).schema(),
}; };
#[api()] #[api()]
@ -473,44 +466,3 @@ pub enum RRDTimeFrame {
/// Decade (10 years) /// Decade (10 years)
Decade, Decade,
} }
#[api]
#[derive(Deserialize, Serialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
/// type of the realm
pub enum RealmType {
/// The PAM realm
Pam,
/// The PBS realm
Pbs,
/// An OpenID Connect realm
OpenId,
}
#[api(
properties: {
realm: {
schema: REALM_ID_SCHEMA,
},
"type": {
type: RealmType,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
},
)]
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
/// Basic Information about a realm
pub struct BasicRealmInfo {
pub realm: String,
#[serde(rename = "type")]
pub ty: RealmType,
/// True if it is the default realm
#[serde(skip_serializing_if = "Option::is_none")]
pub default: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
}
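
For illustration, the serde attributes above give this wire format (sketch assumes serde_json): ty is renamed to "type", RealmType variants serialize in lowercase, and None fields are omitted entirely:

let info = BasicRealmInfo {
    realm: "pam".to_string(),
    ty: RealmType::Pam,
    default: Some(true),
    comment: None,
};
// serde_json::to_string(&info) then yields:
// {"realm":"pam","type":"pam","default":true}
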

View File

@ -1,92 +0,0 @@
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};
const_regex! {
pub MAINTENANCE_MESSAGE_REGEX = r"^[[:^cntrl:]]*$";
}
pub const MAINTENANCE_MESSAGE_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&MAINTENANCE_MESSAGE_REGEX);
pub const MAINTENANCE_MESSAGE_SCHEMA: Schema =
StringSchema::new("Message describing the reason for the maintenance.")
.format(&MAINTENANCE_MESSAGE_FORMAT)
.max_length(64)
.schema();
#[derive(Clone, Copy, Debug)]
/// Operation requirements, used when checking for maintenance mode.
pub enum Operation {
/// for any read operation like backup restore or RRD metric collection
Read,
/// for any write/delete operation, like backup create or GC
Write,
/// for any purely logical operation on the in-memory state of the datastore, e.g., to check if
/// some mutex could be locked (e.g., GC already running?)
///
/// NOTE: one must *not* do any IO operations when only holding this Op state
Lookup,
// GarbageCollect or Delete?
}
#[api]
#[derive(Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Maintenance type.
pub enum MaintenanceType {
// TODO:
// - Add "unmounting" once we got pluggable datastores
// - Add "GarbageCollection" or "DeleteOnly" as type and track GC (or all deletes) as separate
// operation, so that one can enable a mode where nothing new can be added but stuff can be
// cleaned
/// Only read operations are allowed on the datastore.
ReadOnly,
/// Neither read nor write operations are allowed on the datastore.
Offline,
}
#[api(
properties: {
type: {
type: MaintenanceType,
},
message: {
optional: true,
schema: MAINTENANCE_MESSAGE_SCHEMA,
}
},
default_key: "type",
)]
#[derive(Deserialize, Serialize)]
/// Maintenance mode
pub struct MaintenanceMode {
/// Type of maintenance ("read-only" or "offline").
#[serde(rename = "type")]
ty: MaintenanceType,
/// Reason for maintenance.
#[serde(skip_serializing_if = "Option::is_none")]
message: Option<String>,
}
impl MaintenanceMode {
pub fn check(&self, operation: Option<Operation>) -> Result<(), Error> {
let message = percent_encoding::percent_decode_str(self.message.as_deref().unwrap_or(""))
.decode_utf8()
.unwrap_or(Cow::Borrowed(""));
if let Some(Operation::Lookup) = operation {
return Ok(());
} else if self.ty == MaintenanceType::Offline {
bail!("offline maintenance mode: {}", message);
} else if self.ty == MaintenanceType::ReadOnly {
if let Some(Operation::Write) = operation {
bail!("read-only maintenance mode: {}", message);
}
}
Ok(())
}
}
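
Hypothetical call sites for check(); this sketch only compiles inside the defining module, since both struct fields are private:

let mode = MaintenanceMode { ty: MaintenanceType::ReadOnly, message: None };
assert!(mode.check(Some(Operation::Read)).is_ok());   // reads still pass
assert!(mode.check(Some(Operation::Lookup)).is_ok()); // lookups always pass
assert!(mode.check(Some(Operation::Write)).is_err()); // writes are rejected
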

View File

@ -3,43 +3,49 @@ use serde::{Deserialize, Serialize};
use proxmox_schema::*; use proxmox_schema::*;
use crate::{ use crate::{
CIDR_FORMAT, CIDR_V4_FORMAT, CIDR_V6_FORMAT, IP_FORMAT, IP_V4_FORMAT, IP_V6_FORMAT,
PROXMOX_SAFE_ID_REGEX, PROXMOX_SAFE_ID_REGEX,
IP_V4_FORMAT, IP_V6_FORMAT, IP_FORMAT,
CIDR_V4_FORMAT, CIDR_V6_FORMAT, CIDR_FORMAT,
}; };
pub const NETWORK_INTERFACE_FORMAT: ApiStringFormat = pub const NETWORK_INTERFACE_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX); ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
pub const IP_V4_SCHEMA: Schema = StringSchema::new("IPv4 address.") pub const IP_V4_SCHEMA: Schema =
StringSchema::new("IPv4 address.")
.format(&IP_V4_FORMAT) .format(&IP_V4_FORMAT)
.max_length(15) .max_length(15)
.schema(); .schema();
pub const IP_V6_SCHEMA: Schema = StringSchema::new("IPv6 address.") pub const IP_V6_SCHEMA: Schema =
StringSchema::new("IPv6 address.")
.format(&IP_V6_FORMAT) .format(&IP_V6_FORMAT)
.max_length(39) .max_length(39)
.schema(); .schema();
pub const IP_SCHEMA: Schema = StringSchema::new("IP (IPv4 or IPv6) address.") pub const IP_SCHEMA: Schema =
StringSchema::new("IP (IPv4 or IPv6) address.")
.format(&IP_FORMAT) .format(&IP_FORMAT)
.max_length(39) .max_length(39)
.schema(); .schema();
pub const CIDR_V4_SCHEMA: Schema = StringSchema::new("IPv4 address with netmask (CIDR notation).") pub const CIDR_V4_SCHEMA: Schema =
StringSchema::new("IPv4 address with netmask (CIDR notation).")
.format(&CIDR_V4_FORMAT) .format(&CIDR_V4_FORMAT)
.max_length(18) .max_length(18)
.schema(); .schema();
pub const CIDR_V6_SCHEMA: Schema = StringSchema::new("IPv6 address with netmask (CIDR notation).") pub const CIDR_V6_SCHEMA: Schema =
StringSchema::new("IPv6 address with netmask (CIDR notation).")
.format(&CIDR_V6_FORMAT) .format(&CIDR_V6_FORMAT)
.max_length(43) .max_length(43)
.schema(); .schema();
pub const CIDR_SCHEMA: Schema = pub const CIDR_SCHEMA: Schema =
StringSchema::new("IP address (IPv4 or IPv6) with netmask (CIDR notation).") StringSchema::new("IP address (IPv4 or IPv6) with netmask (CIDR notation).")
.format(&CIDR_FORMAT) .format(&CIDR_FORMAT)
.max_length(43) .max_length(43)
.schema(); .schema();
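
The length bounds above follow directly from address syntax; as a sanity check (our arithmetic, not repository code):

// A maximal IPv6 address is 8 groups of 4 hex digits plus 7 colons = 39
// chars; the "/128" prefix suffix adds 4 more, giving max_length(43).
assert_eq!("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff".len(), 39);
assert_eq!("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/128".len(), 43);
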
#[api()] #[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
@ -121,18 +127,17 @@ pub enum NetworkInterfaceType {
pub const NETWORK_INTERFACE_NAME_SCHEMA: Schema = StringSchema::new("Network interface name.") pub const NETWORK_INTERFACE_NAME_SCHEMA: Schema = StringSchema::new("Network interface name.")
.format(&NETWORK_INTERFACE_FORMAT) .format(&NETWORK_INTERFACE_FORMAT)
.min_length(1) .min_length(1)
.max_length(15) // libc::IFNAMSIZ-1 .max_length(libc::IFNAMSIZ-1)
.schema(); .schema();
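
Both sides encode the same limit: on Linux, IFNAMSIZ is 16 and includes the trailing NUL, so an interface name holds at most 15 visible characters. A sanity check, assuming the libc crate on a Linux target:

assert_eq!(libc::IFNAMSIZ, 16);
assert_eq!(libc::IFNAMSIZ - 1, 15); // the literal used on the master side
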
pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema = pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema = ArraySchema::new(
ArraySchema::new("Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA).schema(); "Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA)
.schema();
pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema = pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema = StringSchema::new(
StringSchema::new("A list of network devices, comma separated.") "A list of network devices, comma separated.")
.format(&ApiStringFormat::PropertyString( .format(&ApiStringFormat::PropertyString(&NETWORK_INTERFACE_ARRAY_SCHEMA))
&NETWORK_INTERFACE_ARRAY_SCHEMA, .schema();
))
.schema();
#[api( #[api(
properties: { properties: {
@ -227,48 +232,48 @@ pub struct Interface {
/// Interface type /// Interface type
#[serde(rename = "type")] #[serde(rename = "type")]
pub interface_type: NetworkInterfaceType, pub interface_type: NetworkInterfaceType,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub method: Option<NetworkConfigMethod>, pub method: Option<NetworkConfigMethod>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub method6: Option<NetworkConfigMethod>, pub method6: Option<NetworkConfigMethod>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
/// IPv4 address with netmask /// IPv4 address with netmask
pub cidr: Option<String>, pub cidr: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
/// IPv4 gateway /// IPv4 gateway
pub gateway: Option<String>, pub gateway: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
/// IPv6 address with netmask /// IPv6 address with netmask
pub cidr6: Option<String>, pub cidr6: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
/// IPv6 gateway /// IPv6 gateway
pub gateway6: Option<String>, pub gateway6: Option<String>,
#[serde(skip_serializing_if = "Vec::is_empty")] #[serde(skip_serializing_if="Vec::is_empty")]
pub options: Vec<String>, pub options: Vec<String>,
#[serde(skip_serializing_if = "Vec::is_empty")] #[serde(skip_serializing_if="Vec::is_empty")]
pub options6: Vec<String>, pub options6: Vec<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub comments: Option<String>, pub comments: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub comments6: Option<String>, pub comments6: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
/// Maximum Transmission Unit /// Maximum Transmission Unit
pub mtu: Option<u64>, pub mtu: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub bridge_ports: Option<Vec<String>>, pub bridge_ports: Option<Vec<String>>,
/// Enable bridge vlan support. /// Enable bridge vlan support.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub bridge_vlan_aware: Option<bool>, pub bridge_vlan_aware: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub slaves: Option<Vec<String>>, pub slaves: Option<Vec<String>>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub bond_mode: Option<LinuxBondMode>, pub bond_mode: Option<LinuxBondMode>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
#[serde(rename = "bond-primary")] #[serde(rename = "bond-primary")]
pub bond_primary: Option<String>, pub bond_primary: Option<String>,
pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>, pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
@ -276,7 +281,7 @@ pub struct Interface {
impl Interface { impl Interface {
pub fn new(name: String) -> Self { pub fn new(name: String) -> Self {
Self { Self {
name, name,
interface_type: NetworkInterfaceType::Unknown, interface_type: NetworkInterfaceType::Unknown,
autostart: false, autostart: false,

View File

@ -1,38 +1,42 @@
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use proxmox_schema::{api, ApiStringFormat, ArraySchema, Schema, StringSchema, Updater}; use proxmox_schema::{
api, ApiStringFormat, ArraySchema, Schema, StringSchema, Updater,
use super::{
PROXMOX_SAFE_ID_FORMAT, PROXMOX_SAFE_ID_REGEX, REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
}; };
pub const OPENID_SCOPE_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX); use super::{
PROXMOX_SAFE_ID_REGEX, PROXMOX_SAFE_ID_FORMAT, REALM_ID_SCHEMA,
SINGLE_LINE_COMMENT_SCHEMA,
};
pub const OPENID_SCOPE_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
pub const OPENID_SCOPE_SCHEMA: Schema = StringSchema::new("OpenID Scope Name.") pub const OPENID_SCOPE_SCHEMA: Schema = StringSchema::new("OpenID Scope Name.")
.format(&OPENID_SCOPE_FORMAT) .format(&OPENID_SCOPE_FORMAT)
.schema(); .schema();
pub const OPENID_SCOPE_ARRAY_SCHEMA: Schema = pub const OPENID_SCOPE_ARRAY_SCHEMA: Schema = ArraySchema::new(
ArraySchema::new("Array of OpenId Scopes.", &OPENID_SCOPE_SCHEMA).schema(); "Array of OpenId Scopes.", &OPENID_SCOPE_SCHEMA).schema();
pub const OPENID_SCOPE_LIST_FORMAT: ApiStringFormat = pub const OPENID_SCOPE_LIST_FORMAT: ApiStringFormat =
ApiStringFormat::PropertyString(&OPENID_SCOPE_ARRAY_SCHEMA); ApiStringFormat::PropertyString(&OPENID_SCOPE_ARRAY_SCHEMA);
pub const OPENID_DEFAILT_SCOPE_LIST: &str = "email profile"; pub const OPENID_DEFAILT_SCOPE_LIST: &'static str = "email profile";
pub const OPENID_SCOPE_LIST_SCHEMA: Schema = StringSchema::new("OpenID Scope List") pub const OPENID_SCOPE_LIST_SCHEMA: Schema = StringSchema::new("OpenID Scope List")
.format(&OPENID_SCOPE_LIST_FORMAT) .format(&OPENID_SCOPE_LIST_FORMAT)
.default(OPENID_DEFAILT_SCOPE_LIST) .default(OPENID_DEFAILT_SCOPE_LIST)
.schema(); .schema();
pub const OPENID_ACR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX); pub const OPENID_ACR_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
pub const OPENID_ACR_SCHEMA: Schema = pub const OPENID_ACR_SCHEMA: Schema = StringSchema::new("OpenID Authentication Context Class Reference.")
StringSchema::new("OpenID Authentication Context Class Reference.") .format(&OPENID_SCOPE_FORMAT)
.format(&OPENID_SCOPE_FORMAT) .schema();
.schema();
pub const OPENID_ACR_ARRAY_SCHEMA: Schema = pub const OPENID_ACR_ARRAY_SCHEMA: Schema = ArraySchema::new(
ArraySchema::new("Array of OpenId ACRs.", &OPENID_ACR_SCHEMA).schema(); "Array of OpenId ACRs.", &OPENID_ACR_SCHEMA).schema();
pub const OPENID_ACR_LIST_FORMAT: ApiStringFormat = pub const OPENID_ACR_LIST_FORMAT: ApiStringFormat =
ApiStringFormat::PropertyString(&OPENID_ACR_ARRAY_SCHEMA); ApiStringFormat::PropertyString(&OPENID_ACR_ARRAY_SCHEMA);
@ -46,12 +50,10 @@ pub const OPENID_USERNAME_CLAIM_SCHEMA: Schema = StringSchema::new(
is up to the identity provider to guarantee the uniqueness. The \ is up to the identity provider to guarantee the uniqueness. The \
OpenID specification only guarantees that Subject ('sub') is \ OpenID specification only guarantees that Subject ('sub') is \
unique. Also make sure that the user is not allowed to change that \ unique. Also make sure that the user is not allowed to change that \
attribute by himself!", attribute by himself!")
) .max_length(64)
.max_length(64) .min_length(1)
.min_length(1) .format(&PROXMOX_SAFE_ID_FORMAT) .schema();
.format(&PROXMOX_SAFE_ID_FORMAT)
.schema();
#[api( #[api(
properties: { properties: {
@ -90,7 +92,7 @@ pub const OPENID_USERNAME_CLAIM_SCHEMA: Schema = StringSchema::new(
}, },
)] )]
#[derive(Serialize, Deserialize, Updater)] #[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all="kebab-case")]
/// OpenID configuration properties. /// OpenID configuration properties.
pub struct OpenIdRealmConfig { pub struct OpenIdRealmConfig {
#[updater(skip)] #[updater(skip)]
@ -99,21 +101,21 @@ pub struct OpenIdRealmConfig {
pub issuer_url: String, pub issuer_url: String,
/// OpenID Client ID /// OpenID Client ID
pub client_id: String, pub client_id: String,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub scopes: Option<String>, pub scopes: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub acr_values: Option<String>, pub acr_values: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub prompt: Option<String>, pub prompt: Option<String>,
/// OpenID Client Key /// OpenID Client Key
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub client_key: Option<String>, pub client_key: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>, pub comment: Option<String>,
/// Automatically create users if they do not exist. /// Automatically create users if they do not exist.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub autocreate: Option<bool>, pub autocreate: Option<bool>,
#[updater(skip)] #[updater(skip)]
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub username_claim: Option<String>, pub username_claim: Option<String>,
} }
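
A sketch of a matching kebab-case payload, assuming serde_json; the realm field sits just above the excerpted hunk and its exact definition is assumed here:

fn parse_realm() -> Result<(), anyhow::Error> {
    let cfg: OpenIdRealmConfig = serde_json::from_str(
        r#"{
            "realm": "oidc",
            "issuer-url": "https://accounts.example.com",
            "client-id": "pbs",
            "autocreate": true
        }"#,
    )?;
    assert_eq!(cfg.client_id, "pbs");
    Ok(())
}
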

View File

@ -3,19 +3,17 @@ use serde::{Deserialize, Serialize};
use super::*; use super::*;
use proxmox_schema::*; use proxmox_schema::*;
pub const REMOTE_PASSWORD_SCHEMA: Schema = pub const REMOTE_PASSWORD_SCHEMA: Schema = StringSchema::new("Password or auth token for remote host.")
StringSchema::new("Password or auth token for remote host.") .format(&PASSWORD_FORMAT)
.format(&PASSWORD_FORMAT) .min_length(1)
.min_length(1) .max_length(1024)
.max_length(1024) .schema();
.schema();
pub const REMOTE_PASSWORD_BASE64_SCHEMA: Schema = pub const REMOTE_PASSWORD_BASE64_SCHEMA: Schema = StringSchema::new("Password or auth token for remote host (stored as base64 string).")
StringSchema::new("Password or auth token for remote host (stored as base64 string).") .format(&PASSWORD_FORMAT)
.format(&PASSWORD_FORMAT) .min_length(1)
.min_length(1) .max_length(1024)
.max_length(1024) .schema();
.schema();
pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.") pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
.format(&PROXMOX_SAFE_ID_FORMAT) .format(&PROXMOX_SAFE_ID_FORMAT)
@ -23,6 +21,7 @@ pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
.max_length(32) .max_length(32)
.schema(); .schema();
#[api( #[api(
properties: { properties: {
comment: { comment: {
@ -46,17 +45,17 @@ pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
}, },
}, },
)] )]
#[derive(Serialize, Deserialize, Updater)] #[derive(Serialize,Deserialize,Updater)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
/// Remote configuration properties. /// Remote configuration properties.
pub struct RemoteConfig { pub struct RemoteConfig {
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>, pub comment: Option<String>,
pub host: String, pub host: String,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub port: Option<u16>, pub port: Option<u16>,
pub auth_id: Authid, pub auth_id: Authid,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub fingerprint: Option<String>, pub fingerprint: Option<String>,
} }
@ -73,34 +72,15 @@ pub struct RemoteConfig {
}, },
}, },
)] )]
#[derive(Serialize, Deserialize)] #[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
/// Remote properties. /// Remote properties.
pub struct Remote { pub struct Remote {
pub name: String, pub name: String,
// Note: The stored password is base64 encoded // Note: The stored password is base64 encoded
#[serde(skip_serializing_if = "String::is_empty")] #[serde(skip_serializing_if="String::is_empty")]
#[serde(with = "proxmox_serde::string_as_base64")] #[serde(with = "proxmox::tools::serde::string_as_base64")]
pub password: String, pub password: String,
#[serde(flatten)] #[serde(flatten)]
pub config: RemoteConfig, pub config: RemoteConfig,
} }
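
The string_as_base64 helper (proxmox_serde::string_as_base64 on master, proxmox::tools::serde::string_as_base64 on 2.1.1) base64-encodes the password field on serialization, so the stored config never contains the raw value. A sketch of the transformation it applies, assuming the base64 crate:

// "secret" round-trips through base64 as "c2VjcmV0":
assert_eq!(base64::encode("secret"), "c2VjcmV0");
assert_eq!(base64::decode("c2VjcmV0").unwrap(), b"secret");
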
#[api(
properties: {
name: {
schema: REMOTE_ID_SCHEMA,
},
config: {
type: RemoteConfig,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Remote properties.
pub struct RemoteWithoutPassword {
pub name: String,
#[serde(flatten)]
pub config: RemoteConfig,
}

View File

@ -3,23 +3,23 @@ use ::serde::{Deserialize, Serialize};
use proxmox_schema::api; use proxmox_schema::api;
#[api()] #[api()]
#[derive(Serialize, Deserialize)] #[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
/// Optional Device Identification Attributes /// Optional Device Identification Attributes
pub struct OptionalDeviceIdentification { pub struct OptionalDeviceIdentification {
/// Vendor (autodetected) /// Vendor (autodetected)
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub vendor: Option<String>, pub vendor: Option<String>,
/// Model (autodetected) /// Model (autodetected)
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub model: Option<String>, pub model: Option<String>,
/// Serial number (autodetected) /// Serial number (autodetected)
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub serial: Option<String>, pub serial: Option<String>,
} }
#[api()] #[api()]
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug,Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
/// Kind of device /// Kind of device
pub enum DeviceKind { pub enum DeviceKind {
@ -36,7 +36,7 @@ pub enum DeviceKind {
}, },
}, },
)] )]
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug,Serialize,Deserialize)]
/// Tape device information /// Tape device information
pub struct TapeDeviceInfo { pub struct TapeDeviceInfo {
pub kind: DeviceKind, pub kind: DeviceKind,

View File

@ -4,9 +4,13 @@ use std::convert::TryFrom;
use anyhow::{bail, Error}; use anyhow::{bail, Error};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use proxmox_schema::{api, IntegerSchema, Schema, StringSchema, Updater}; use proxmox_schema::{api, Schema, IntegerSchema, StringSchema, Updater};
use crate::{OptionalDeviceIdentification, CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT}; use crate::{
PROXMOX_SAFE_ID_FORMAT,
CHANGER_NAME_SCHEMA,
OptionalDeviceIdentification,
};
pub const DRIVE_NAME_SCHEMA: Schema = StringSchema::new("Drive Identifier.") pub const DRIVE_NAME_SCHEMA: Schema = StringSchema::new("Drive Identifier.")
.format(&PROXMOX_SAFE_ID_FORMAT) .format(&PROXMOX_SAFE_ID_FORMAT)
@ -14,15 +18,16 @@ pub const DRIVE_NAME_SCHEMA: Schema = StringSchema::new("Drive Identifier.")
.max_length(32) .max_length(32)
.schema(); .schema();
pub const LTO_DRIVE_PATH_SCHEMA: Schema = pub const LTO_DRIVE_PATH_SCHEMA: Schema = StringSchema::new(
StringSchema::new("The path to a LTO SCSI-generic tape device (i.e. '/dev/sg0')").schema(); "The path to a LTO SCSI-generic tape device (i.e. '/dev/sg0')")
.schema();
pub const CHANGER_DRIVENUM_SCHEMA: Schema = pub const CHANGER_DRIVENUM_SCHEMA: Schema = IntegerSchema::new(
IntegerSchema::new("Associated changer drive number (requires option changer)") "Associated changer drive number (requires option changer)")
.minimum(0) .minimum(0)
.maximum(255) .maximum(255)
.default(0) .default(0)
.schema(); .schema();
#[api( #[api(
properties: { properties: {
@ -31,7 +36,7 @@ pub const CHANGER_DRIVENUM_SCHEMA: Schema =
} }
} }
)] )]
#[derive(Serialize, Deserialize)] #[derive(Serialize,Deserialize)]
/// Simulate tape drives (only for test and debug) /// Simulate tape drives (only for test and debug)
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
pub struct VirtualTapeDrive { pub struct VirtualTapeDrive {
@ -39,7 +44,7 @@ pub struct VirtualTapeDrive {
/// Path to directory /// Path to directory
pub path: String, pub path: String,
/// Virtual tape size /// Virtual tape size
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub max_size: Option<usize>, pub max_size: Option<usize>,
} }
@ -61,16 +66,16 @@ pub struct VirtualTapeDrive {
}, },
} }
)] )]
#[derive(Serialize, Deserialize, Updater)] #[derive(Serialize,Deserialize,Updater)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
/// Lto SCSI tape driver /// Lto SCSI tape driver
pub struct LtoTapeDrive { pub struct LtoTapeDrive {
#[updater(skip)] #[updater(skip)]
pub name: String, pub name: String,
pub path: String, pub path: String,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub changer: Option<String>, pub changer: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub changer_drivenum: Option<u64>, pub changer_drivenum: Option<u64>,
} }
@ -84,7 +89,7 @@ pub struct LtoTapeDrive {
}, },
}, },
)] )]
#[derive(Serialize, Deserialize)] #[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
/// Drive list entry /// Drive list entry
pub struct DriveListEntry { pub struct DriveListEntry {
@ -93,12 +98,12 @@ pub struct DriveListEntry {
#[serde(flatten)] #[serde(flatten)]
pub info: OptionalDeviceIdentification, pub info: OptionalDeviceIdentification,
/// the state of the drive if locked /// the state of the drive if locked
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub state: Option<String>, pub state: Option<String>,
} }
#[api()] #[api()]
#[derive(Serialize, Deserialize)] #[derive(Serialize,Deserialize)]
/// Medium auxiliary memory attributes (MAM) /// Medium auxiliary memory attributes (MAM)
pub struct MamAttribute { pub struct MamAttribute {
/// Attribute id /// Attribute id
@ -110,7 +115,7 @@ pub struct MamAttribute {
} }
#[api()] #[api()]
#[derive(Serialize, Deserialize, Copy, Clone, Debug)] #[derive(Serialize,Deserialize,Copy,Clone,Debug)]
pub enum TapeDensity { pub enum TapeDensity {
/// Unknown (no media loaded) /// Unknown (no media loaded)
Unknown, Unknown,
@ -163,7 +168,7 @@ impl TryFrom<u8> for TapeDensity {
}, },
}, },
)] )]
#[derive(Serialize, Deserialize)] #[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
/// Drive/Media status for Lto SCSI drives. /// Drive/Media status for Lto SCSI drives.
/// ///
@ -185,35 +190,35 @@ pub struct LtoDriveAndMediaStatus {
/// Tape density /// Tape density
pub density: TapeDensity, pub density: TapeDensity,
/// Media is write protected /// Media is write protected
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub write_protect: Option<bool>, pub write_protect: Option<bool>,
/// Tape Alert Flags /// Tape Alert Flags
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub alert_flags: Option<String>, pub alert_flags: Option<String>,
/// Current file number /// Current file number
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub file_number: Option<u64>, pub file_number: Option<u64>,
/// Current block number /// Current block number
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub block_number: Option<u64>, pub block_number: Option<u64>,
/// Medium Manufacture Date (epoch) /// Medium Manufacture Date (epoch)
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub manufactured: Option<i64>, pub manufactured: Option<i64>,
/// Total Bytes Read in Medium Life /// Total Bytes Read in Medium Life
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub bytes_read: Option<u64>, pub bytes_read: Option<u64>,
/// Total Bytes Written in Medium Life /// Total Bytes Written in Medium Life
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub bytes_written: Option<u64>, pub bytes_written: Option<u64>,
/// Number of mounts for the current volume (i.e., Thread Count) /// Number of mounts for the current volume (i.e., Thread Count)
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub volume_mounts: Option<u64>, pub volume_mounts: Option<u64>,
/// Count of the total number of times the medium has passed over /// Count of the total number of times the medium has passed over
/// the head. /// the head.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub medium_passes: Option<u64>, pub medium_passes: Option<u64>,
/// Estimated tape wearout factor (assuming max. 16000 end-to-end passes) /// Estimated tape wearout factor (assuming max. 16000 end-to-end passes)
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub medium_wearout: Option<f64>, pub medium_wearout: Option<f64>,
} }
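
A sketch of how an estimated wearout factor like the field above can be derived: the 16000-pass ceiling comes from the doc comment, while the formula itself is our illustration:

fn wearout_factor(medium_passes: u64) -> f64 {
    medium_passes as f64 / 16000.0
}
assert_eq!(wearout_factor(4000), 0.25); // a quarter of the rated passes used
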

View File

@ -3,15 +3,19 @@ use ::serde::{Deserialize, Serialize};
use proxmox_schema::*; use proxmox_schema::*;
use proxmox_uuid::Uuid; use proxmox_uuid::Uuid;
use crate::{MediaLocation, MediaStatus, UUID_FORMAT}; use crate::{
UUID_FORMAT,
MediaStatus,
MediaLocation,
};
pub const MEDIA_SET_UUID_SCHEMA: Schema = StringSchema::new( pub const MEDIA_SET_UUID_SCHEMA: Schema =
"MediaSet Uuid (We use the all-zero Uuid to reseve an empty media for a specific pool).", StringSchema::new("MediaSet Uuid (We use the all-zero Uuid to reseve an empty media for a specific pool).")
) .format(&UUID_FORMAT)
.format(&UUID_FORMAT) .schema();
.schema();
pub const MEDIA_UUID_SCHEMA: Schema = StringSchema::new("Media Uuid.") pub const MEDIA_UUID_SCHEMA: Schema =
StringSchema::new("Media Uuid.")
.format(&UUID_FORMAT) .format(&UUID_FORMAT)
.schema(); .schema();
@ -22,7 +26,7 @@ pub const MEDIA_UUID_SCHEMA: Schema = StringSchema::new("Media Uuid.")
}, },
}, },
)] )]
#[derive(Serialize, Deserialize)] #[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
/// Media Set list entry /// Media Set list entry
pub struct MediaSetListEntry { pub struct MediaSetListEntry {
@ -52,7 +56,7 @@ pub struct MediaSetListEntry {
}, },
}, },
)] )]
#[derive(Serialize, Deserialize)] #[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
/// Media list entry /// Media list entry
pub struct MediaListEntry { pub struct MediaListEntry {
@ -68,18 +72,18 @@ pub struct MediaListEntry {
/// Catalog status OK /// Catalog status OK
pub catalog: bool, pub catalog: bool,
/// Media set name /// Media set name
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub media_set_name: Option<String>, pub media_set_name: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub media_set_uuid: Option<Uuid>, pub media_set_uuid: Option<Uuid>,
/// Media set seq_nr /// Media set seq_nr
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub seq_nr: Option<u64>, pub seq_nr: Option<u64>,
/// MediaSet creation time stamp /// MediaSet creation time stamp
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub media_set_ctime: Option<i64>, pub media_set_ctime: Option<i64>,
/// Media Pool /// Media Pool
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub pool: Option<String>, pub pool: Option<String>,
} }
@ -94,7 +98,7 @@ pub struct MediaListEntry {
}, },
}, },
)] )]
#[derive(Serialize, Deserialize)] #[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
/// Media label info /// Media label info
pub struct MediaIdFlat { pub struct MediaIdFlat {
@ -106,18 +110,18 @@ pub struct MediaIdFlat {
pub ctime: i64, pub ctime: i64,
// All MediaSet properties are optional here // All MediaSet properties are optional here
/// MediaSet Pool /// MediaSet Pool
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub pool: Option<String>, pub pool: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub media_set_uuid: Option<Uuid>, pub media_set_uuid: Option<Uuid>,
/// MediaSet media sequence number /// MediaSet media sequence number
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub seq_nr: Option<u64>, pub seq_nr: Option<u64>,
/// MediaSet Creation time stamp /// MediaSet Creation time stamp
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub media_set_ctime: Option<i64>, pub media_set_ctime: Option<i64>,
/// Encryption key fingerprint /// Encryption key fingerprint
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub encryption_key_fingerprint: Option<String>, pub encryption_key_fingerprint: Option<String>,
} }
@ -129,7 +133,7 @@ pub struct MediaIdFlat {
}, },
}, },
)] )]
#[derive(Serialize, Deserialize)] #[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
/// Label with optional Uuid /// Label with optional Uuid
pub struct LabelUuidMap { pub struct LabelUuidMap {
@ -149,7 +153,7 @@ pub struct LabelUuidMap {
}, },
}, },
)] )]
#[derive(Serialize, Deserialize)] #[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
/// Media content list entry /// Media content list entry
pub struct MediaContentEntry { pub struct MediaContentEntry {

View File

@ -1,6 +1,6 @@
use anyhow::{bail, Error}; use anyhow::{bail, Error};
use proxmox_schema::{ApiStringFormat, Schema, StringSchema}; use proxmox_schema::{parse_simple_value, ApiStringFormat, Schema, StringSchema};
use crate::{CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT}; use crate::{CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT};
@ -22,8 +22,8 @@ pub enum MediaLocation {
Vault(String), Vault(String),
} }
proxmox_serde::forward_deserialize_to_from_str!(MediaLocation); proxmox::forward_deserialize_to_from_str!(MediaLocation);
proxmox_serde::forward_serialize_to_display!(MediaLocation); proxmox::forward_serialize_to_display!(MediaLocation);
impl proxmox_schema::ApiType for MediaLocation { impl proxmox_schema::ApiType for MediaLocation {
const API_SCHEMA: Schema = StringSchema::new( const API_SCHEMA: Schema = StringSchema::new(
@ -33,10 +33,10 @@ impl proxmox_schema::ApiType for MediaLocation {
let location: MediaLocation = text.parse()?; let location: MediaLocation = text.parse()?;
match location { match location {
MediaLocation::Online(ref changer) => { MediaLocation::Online(ref changer) => {
CHANGER_NAME_SCHEMA.parse_simple_value(changer)?; parse_simple_value(changer, &CHANGER_NAME_SCHEMA)?;
} }
MediaLocation::Vault(ref vault) => { MediaLocation::Vault(ref vault) => {
VAULT_NAME_SCHEMA.parse_simple_value(vault)?; parse_simple_value(vault, &VAULT_NAME_SCHEMA)?;
} }
MediaLocation::Offline => { /* OK */ } MediaLocation::Offline => { /* OK */ }
} }

View File

@ -9,12 +9,14 @@ use std::str::FromStr;
use anyhow::Error; use anyhow::Error;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use proxmox_schema::{api, ApiStringFormat, Schema, StringSchema, Updater}; use proxmox_schema::{api, Schema, StringSchema, ApiStringFormat, Updater};
use proxmox_time::{CalendarEvent, TimeSpan}; use proxmox_time::{parse_calendar_event, parse_time_span, CalendarEvent, TimeSpan};
use crate::{ use crate::{
PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA, PROXMOX_SAFE_ID_FORMAT,
SINGLE_LINE_COMMENT_FORMAT,
SINGLE_LINE_COMMENT_SCHEMA,
TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA, TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
}; };
@ -25,22 +27,19 @@ pub const MEDIA_POOL_NAME_SCHEMA: Schema = StringSchema::new("Media pool name.")
.schema(); .schema();
pub const MEDIA_SET_NAMING_TEMPLATE_SCHEMA: Schema = StringSchema::new( pub const MEDIA_SET_NAMING_TEMPLATE_SCHEMA: Schema = StringSchema::new(
"Media set naming template (may contain strftime() time format specifications).", "Media set naming template (may contain strftime() time format specifications).")
) .format(&SINGLE_LINE_COMMENT_FORMAT)
.format(&SINGLE_LINE_COMMENT_FORMAT) .min_length(2)
.min_length(2) .max_length(64)
.max_length(64) .schema();
.schema();
pub const MEDIA_SET_ALLOCATION_POLICY_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| { pub const MEDIA_SET_ALLOCATION_POLICY_FORMAT: ApiStringFormat =
MediaSetPolicy::from_str(s)?; ApiStringFormat::VerifyFn(|s| { MediaSetPolicy::from_str(s)?; Ok(()) });
Ok(())
});
pub const MEDIA_SET_ALLOCATION_POLICY_SCHEMA: Schema = pub const MEDIA_SET_ALLOCATION_POLICY_SCHEMA: Schema = StringSchema::new(
StringSchema::new("Media set allocation policy ('continue', 'always', or a calendar event).") "Media set allocation policy ('continue', 'always', or a calendar event).")
.format(&MEDIA_SET_ALLOCATION_POLICY_FORMAT) .format(&MEDIA_SET_ALLOCATION_POLICY_FORMAT)
.schema(); .schema();
/// Media set allocation policy /// Media set allocation policy
pub enum MediaSetPolicy { pub enum MediaSetPolicy {
@ -63,21 +62,19 @@ impl std::str::FromStr for MediaSetPolicy {
return Ok(MediaSetPolicy::AlwaysCreate); return Ok(MediaSetPolicy::AlwaysCreate);
} }
let event = s.parse()?; let event = parse_calendar_event(s)?;
Ok(MediaSetPolicy::CreateAt(event)) Ok(MediaSetPolicy::CreateAt(event))
} }
} }
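
Illustrative parses matching the schema description above ('continue', 'always', or a calendar event); the calendar-event syntax is assumed from proxmox_time:

fn demo_allocation() -> Result<(), anyhow::Error> {
    let p: MediaSetPolicy = "always".parse()?;
    assert!(matches!(p, MediaSetPolicy::AlwaysCreate));
    let q: MediaSetPolicy = "sat 20:00".parse()?; // a calendar event
    assert!(matches!(q, MediaSetPolicy::CreateAt(_)));
    Ok(())
}
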
pub const MEDIA_RETENTION_POLICY_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| { pub const MEDIA_RETENTION_POLICY_FORMAT: ApiStringFormat =
RetentionPolicy::from_str(s)?; ApiStringFormat::VerifyFn(|s| { RetentionPolicy::from_str(s)?; Ok(()) });
Ok(())
});
pub const MEDIA_RETENTION_POLICY_SCHEMA: Schema = pub const MEDIA_RETENTION_POLICY_SCHEMA: Schema = StringSchema::new(
StringSchema::new("Media retention policy ('overwrite', 'keep', or time span).") "Media retention policy ('overwrite', 'keep', or time span).")
.format(&MEDIA_RETENTION_POLICY_FORMAT) .format(&MEDIA_RETENTION_POLICY_FORMAT)
.schema(); .schema();
/// Media retention Policy /// Media retention Policy
pub enum RetentionPolicy { pub enum RetentionPolicy {
@ -100,7 +97,7 @@ impl std::str::FromStr for RetentionPolicy {
return Ok(RetentionPolicy::KeepForever); return Ok(RetentionPolicy::KeepForever);
} }
let time_span = s.parse()?; let time_span = parse_time_span(s)?;
Ok(RetentionPolicy::ProtectFor(time_span)) Ok(RetentionPolicy::ProtectFor(time_span))
} }
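
The retention counterpart, again matching the description above ('overwrite', 'keep', or a time span); the "7d" time-span syntax is assumed from proxmox_time:

fn demo_retention() -> Result<(), anyhow::Error> {
    let keep: RetentionPolicy = "keep".parse()?;
    assert!(matches!(keep, RetentionPolicy::KeepForever));
    let span: RetentionPolicy = "7d".parse()?; // protect for seven days
    assert!(matches!(span, RetentionPolicy::ProtectFor(_)));
    Ok(())
}
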
@ -133,29 +130,29 @@ impl std::str::FromStr for RetentionPolicy {
}, },
}, },
)] )]
#[derive(Serialize, Deserialize, Updater)] #[derive(Serialize,Deserialize,Updater)]
/// Media pool configuration /// Media pool configuration
pub struct MediaPoolConfig { pub struct MediaPoolConfig {
/// The pool name /// The pool name
#[updater(skip)] #[updater(skip)]
pub name: String, pub name: String,
/// Media Set allocation policy /// Media Set allocation policy
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub allocation: Option<String>, pub allocation: Option<String>,
/// Media retention policy /// Media retention policy
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub retention: Option<String>, pub retention: Option<String>,
/// Media set naming template (default "%c") /// Media set naming template (default "%c")
/// ///
/// The template is UTF8 text, and can include strftime time /// The template is UTF8 text, and can include strftime time
/// format specifications. /// format specifications.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub template: Option<String>, pub template: Option<String>,
/// Encryption key fingerprint /// Encryption key fingerprint
/// ///
/// If set, encrypt all data using the specified key. /// If set, encrypt all data using the specified key.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub encrypt: Option<String>, pub encrypt: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>, pub comment: Option<String>,
} }

View File

@ -24,28 +24,31 @@ pub use media::*;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema}; use proxmox_schema::{api, const_regex, Schema, StringSchema, ApiStringFormat};
use proxmox_uuid::Uuid; use proxmox_uuid::Uuid;
use crate::{BackupType, BACKUP_ID_SCHEMA, FINGERPRINT_SHA256_FORMAT}; use crate::{
FINGERPRINT_SHA256_FORMAT, BACKUP_ID_SCHEMA, BACKUP_TYPE_SCHEMA,
};
const_regex! { const_regex!{
pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":(?:", BACKUP_NS_PATH_RE!(),")?", SNAPSHOT_PATH_REGEX_STR!(), r"$"); pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":", SNAPSHOT_PATH_REGEX_STR!(), r"$");
} }
pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat = pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&TAPE_RESTORE_SNAPSHOT_REGEX); ApiStringFormat::Pattern(&TAPE_RESTORE_SNAPSHOT_REGEX);
pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema = pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema = StringSchema::new(
StringSchema::new("Tape encryption key fingerprint (sha256).") "Tape encryption key fingerprint (sha256)."
.format(&FINGERPRINT_SHA256_FORMAT) )
.schema(); .format(&FINGERPRINT_SHA256_FORMAT)
.schema();
pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema = pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema = StringSchema::new(
StringSchema::new("A snapshot in the format: 'store:[ns/namespace/...]type/id/time") "A snapshot in the format: 'store:type/id/time")
.format(&TAPE_RESTORE_SNAPSHOT_FORMAT) .format(&TAPE_RESTORE_SNAPSHOT_FORMAT)
.type_text("store:[ns/namespace/...]type/id/time") .type_text("store:type/id/time")
.schema(); .schema();
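
Illustrative values for the two type_text variants above; the namespace component exists only on the master side:

// 2.1.1 syntax: store:type/id/time
let v1 = "store1:vm/100/2022-05-01T10:00:00Z";
// master syntax, with an optional namespace path prefix:
let v2 = "store1:ns/dev/vm/100/2022-05-01T10:00:00Z";
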
#[api( #[api(
properties: { properties: {
@ -66,7 +69,7 @@ pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema =
optional: true, optional: true,
}, },
"backup-type": { "backup-type": {
type: BackupType, schema: BACKUP_TYPE_SCHEMA,
optional: true, optional: true,
}, },
"backup-id": { "backup-id": {
@ -75,14 +78,14 @@ pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema =
}, },
}, },
)] )]
#[derive(Serialize, Deserialize)] #[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all="kebab-case")]
/// Content list filter parameters /// Content list filter parameters
pub struct MediaContentListFilter { pub struct MediaContentListFilter {
pub pool: Option<String>, pub pool: Option<String>,
pub label_text: Option<String>, pub label_text: Option<String>,
pub media: Option<Uuid>, pub media: Option<Uuid>,
pub media_set: Option<Uuid>, pub media_set: Option<Uuid>,
pub backup_type: Option<BackupType>, pub backup_type: Option<String>,
pub backup_id: Option<String>, pub backup_id: Option<String>,
} }

View File

@ -1,16 +1,16 @@
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use proxmox_schema::{api, IntegerSchema, Schema, StringSchema, Updater}; use proxmox_schema::{api, Schema, IntegerSchema, StringSchema, Updater};
use crate::{ use crate::{
HumanByte, CIDR_SCHEMA, DAILY_DURATION_FORMAT, PROXMOX_SAFE_ID_FORMAT, HumanByte, CIDR_SCHEMA, DAILY_DURATION_FORMAT,
SINGLE_LINE_COMMENT_SCHEMA, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
}; };
pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema = pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema = StringSchema::new(
StringSchema::new("Timeframe to specify when the rule is actice.") "Timeframe to specify when the rule is actice.")
.format(&DAILY_DURATION_FORMAT) .format(&DAILY_DURATION_FORMAT)
.schema(); .schema();
pub const TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.") pub const TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.")
.format(&PROXMOX_SAFE_ID_FORMAT) .format(&PROXMOX_SAFE_ID_FORMAT)
@ -18,15 +18,15 @@ pub const TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.")
.max_length(32) .max_length(32)
.schema(); .schema();
pub const TRAFFIC_CONTROL_RATE_SCHEMA: Schema = pub const TRAFFIC_CONTROL_RATE_SCHEMA: Schema = IntegerSchema::new(
IntegerSchema::new("Rate limit (for Token bucket filter) in bytes/second.") "Rate limit (for Token bucket filter) in bytes/second.")
.minimum(100_000) .minimum(100_000)
.schema(); .schema();
pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema = pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema = IntegerSchema::new(
IntegerSchema::new("Size of the token bucket (for Token bucket filter) in bytes.") "Size of the token bucket (for Token bucket filter) in bytes.")
.minimum(1000) .minimum(1000)
.schema(); .schema();
#[api( #[api(
properties: { properties: {
@ -48,17 +48,17 @@ pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema =
}, },
}, },
)] )]
#[derive(Serialize, Deserialize, Default, Clone, Updater)] #[derive(Serialize,Deserialize,Default,Clone,Updater)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
/// Rate Limit Configuration /// Rate Limit Configuration
pub struct RateLimitConfig { pub struct RateLimitConfig {
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub rate_in: Option<HumanByte>, pub rate_in: Option<HumanByte>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub burst_in: Option<HumanByte>, pub burst_in: Option<HumanByte>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub rate_out: Option<HumanByte>, pub rate_out: Option<HumanByte>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub burst_out: Option<HumanByte>, pub burst_out: Option<HumanByte>,
} }
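
A minimal token-bucket sketch to illustrate the rate/burst semantics of the fields above; this is the textbook algorithm written for illustration, not the repository's implementation:

use std::time::Instant;

struct TokenBucket {
    tokens: f64, // currently available bytes
    burst: f64,  // bucket capacity in bytes
    rate: f64,   // refill rate in bytes/second
    last: Instant,
}

impl TokenBucket {
    fn allow(&mut self, bytes: f64) -> bool {
        let now = Instant::now();
        let elapsed = now.duration_since(self.last).as_secs_f64();
        // Refill proportionally to elapsed time, capped at the burst size.
        self.tokens = (self.tokens + self.rate * elapsed).min(self.burst);
        self.last = now;
        if self.tokens >= bytes {
            self.tokens -= bytes;
            true
        } else {
            false // caller must wait for the bucket to refill
        }
    }
}
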
@ -100,13 +100,13 @@ impl RateLimitConfig {
}, },
}, },
)] )]
#[derive(Serialize, Deserialize, Updater)] #[derive(Serialize,Deserialize, Updater)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
/// Traffic control rule /// Traffic control rule
pub struct TrafficControlRule { pub struct TrafficControlRule {
#[updater(skip)] #[updater(skip)]
pub name: String, pub name: String,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>, pub comment: Option<String>,
/// Rule applies to Source IPs within this networks /// Rule applies to Source IPs within this networks
pub network: Vec<String>, pub network: Vec<String>,
@ -117,6 +117,6 @@ pub struct TrafficControlRule {
// #[serde(skip_serializing_if="Option::is_none")] // #[serde(skip_serializing_if="Option::is_none")]
// pub shared: Option<bool>, // pub shared: Option<bool>,
/// Enable the rule at specific times /// Enable the rule at specific times
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if="Option::is_none")]
pub timeframe: Option<Vec<String>>, pub timeframe: Option<Vec<String>>,
} }

View File

@@ -1,22 +1,22 @@
 use serde::{Deserialize, Serialize};

-use proxmox_schema::{api, BooleanSchema, IntegerSchema, Schema, StringSchema, Updater};
+use proxmox_schema::{
+    api, BooleanSchema, IntegerSchema, Schema, StringSchema, Updater,
+};

-use super::userid::{Authid, Userid, PROXMOX_TOKEN_ID_SCHEMA};
 use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA};
+use super::userid::{Authid, Userid, PROXMOX_TOKEN_ID_SCHEMA};

 pub const ENABLE_USER_SCHEMA: Schema = BooleanSchema::new(
-    "Enable the account (default). You can set this to '0' to disable the account.",
-)
-.default(true)
-.schema();
+    "Enable the account (default). You can set this to '0' to disable the account.")
+    .default(true)
+    .schema();

 pub const EXPIRE_USER_SCHEMA: Schema = IntegerSchema::new(
-    "Account expiration date (seconds since epoch). '0' means no expiration date.",
-)
-.default(0)
-.minimum(0)
-.schema();
+    "Account expiration date (seconds since epoch). '0' means no expiration date.")
+    .default(0)
+    .minimum(0)
+    .schema();

 pub const FIRST_NAME_SCHEMA: Schema = StringSchema::new("First name.")
     .format(&SINGLE_LINE_COMMENT_FORMAT)
@@ -75,23 +75,23 @@ pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
         },
     }
 )]
-#[derive(Serialize, Deserialize)]
+#[derive(Serialize,Deserialize)]
 /// User properties with added list of ApiTokens
 pub struct UserWithTokens {
     pub userid: Userid,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub comment: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub enable: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub expire: Option<i64>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub firstname: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub lastname: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub email: Option<String>,
-    #[serde(skip_serializing_if = "Vec::is_empty", default)]
+    #[serde(skip_serializing_if="Vec::is_empty", default)]
     pub tokens: Vec<ApiToken>,
 }

@@ -114,15 +114,15 @@ pub struct UserWithTokens
         },
     }
 )]
-#[derive(Serialize, Deserialize)]
+#[derive(Serialize,Deserialize)]
 /// ApiToken properties.
 pub struct ApiToken {
     pub tokenid: Authid,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub comment: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub enable: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub expire: Option<i64>,
 }

@@ -132,7 +132,7 @@ impl ApiToken
             return false;
         }
         if let Some(expire) = self.expire {
             let now = proxmox_time::epoch_i64();
             if expire > 0 && expire <= now {
                 return false;
             }

@@ -172,22 +172,22 @@ impl ApiToken
         },
     }
 )]
-#[derive(Serialize, Deserialize, Updater)]
+#[derive(Serialize,Deserialize,Updater)]
 /// User properties.
 pub struct User {
     #[updater(skip)]
     pub userid: Userid,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub comment: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub enable: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub expire: Option<i64>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub firstname: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub lastname: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if="Option::is_none")]
     pub email: Option<String>,
 }

@@ -197,7 +197,7 @@ impl User
             return false;
         }
         if let Some(expire) = self.expire {
             let now = proxmox_time::epoch_i64();
             if expire > 0 && expire <= now {
                 return false;
             }
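The `is_active`-style check that appears twice above treats `expire == 0` as "never expires", matching the EXPIRE_USER_SCHEMA documentation. A self-contained sketch of the same logic (the free-function form and name are mine, not the crate's):

    fn account_is_active(enable: Option<bool>, expire: Option<i64>, now: i64) -> bool {
        // a disabled account is never active; `None` defaults to enabled
        if enable == Some(false) {
            return false;
        }
        // expire == 0 (or None) means "no expiration date"
        match expire {
            Some(expire) if expire > 0 && expire <= now => false,
            _ => true,
        }
    }

    fn main() {
        assert!(account_is_active(None, Some(0), 1_700_000_000));
        assert!(!account_is_active(None, Some(1), 1_700_000_000));
        assert!(!account_is_active(Some(false), None, 1_700_000_000));
    }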

View File

@@ -39,35 +39,15 @@ use proxmox_schema::{
 // slash is not allowed because it is used as pve API delimiter
 // also see "man useradd"
 #[macro_export]
-macro_rules! USER_NAME_REGEX_STR {
-    () => {
-        r"(?:[^\s:/[:cntrl:]]+)"
-    };
-}
+macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }

 #[macro_export]
-macro_rules! GROUP_NAME_REGEX_STR {
-    () => {
-        USER_NAME_REGEX_STR!()
-    };
-}
+macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }

 #[macro_export]
-macro_rules! TOKEN_NAME_REGEX_STR {
-    () => {
-        PROXMOX_SAFE_ID_REGEX_STR!()
-    };
-}
+macro_rules! TOKEN_NAME_REGEX_STR { () => (PROXMOX_SAFE_ID_REGEX_STR!()) }

 #[macro_export]
-macro_rules! USER_ID_REGEX_STR {
-    () => {
-        concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())
-    };
-}
+macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) }

 #[macro_export]
-macro_rules! APITOKEN_ID_REGEX_STR {
-    () => {
-        concat!(USER_ID_REGEX_STR!(), r"!", TOKEN_NAME_REGEX_STR!())
-    };
-}
+macro_rules! APITOKEN_ID_REGEX_STR { () => (concat!(USER_ID_REGEX_STR!() , r"!", TOKEN_NAME_REGEX_STR!())) }

 const_regex! {
     pub PROXMOX_USER_NAME_REGEX = concat!(r"^", USER_NAME_REGEX_STR!(), r"$");
@@ -121,7 +101,6 @@ pub const PROXMOX_AUTH_REALM_SCHEMA: Schema = PROXMOX_AUTH_REALM_STRING_SCHEMA.s
 #[api(
     type: String,
     format: &PROXMOX_USER_NAME_FORMAT,
-    min_length: 1,
 )]
 /// The user name part of a user id.
 ///
@@ -258,8 +237,7 @@ impl TryFrom<String> for Realm
     type Error = Error;

     fn try_from(s: String) -> Result<Self, Error> {
-        PROXMOX_AUTH_REALM_STRING_SCHEMA
-            .check_constraints(&s)
+        PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(&s)
             .map_err(|_| format_err!("invalid realm"))?;

         Ok(Self(s))
@@ -270,8 +248,7 @@ impl<'a> TryFrom<&'a str> for &'a RealmRef
     type Error = Error;

     fn try_from(s: &'a str) -> Result<&'a RealmRef, Error> {
-        PROXMOX_AUTH_REALM_STRING_SCHEMA
-            .check_constraints(s)
+        PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(s)
             .map_err(|_| format_err!("invalid realm"))?;

         Ok(RealmRef::new(s))
@@ -327,7 +304,7 @@ impl PartialEq<Realm> for &RealmRef
 /// The token ID part of an API token authentication id.
 ///
 /// This alone does NOT uniquely identify the API token - use a full `Authid` for such use cases.
-#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq, Deserialize, Serialize)]
+#[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
 pub struct Tokenname(String);

 /// A reference to a token name part of an authentication id. This alone does NOT uniquely identify
@@ -420,7 +397,7 @@ impl<'a> TryFrom<&'a str> for &'a TokennameRef
 }

 /// A complete user id consisting of a user name and a realm
-#[derive(Clone, Debug, PartialEq, Eq, Hash, Ord, PartialOrd, UpdaterType)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash, UpdaterType)]
 pub struct Userid {
     data: String,
     name_len: usize,
@@ -504,8 +481,7 @@ impl std::str::FromStr for Userid
             bail!("invalid user name in user id");
         }

-        PROXMOX_AUTH_REALM_STRING_SCHEMA
-            .check_constraints(realm)
+        PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(realm)
             .map_err(|_| format_err!("invalid realm in user id"))?;

         Ok(Self::from((UsernameRef::new(name), RealmRef::new(realm))))
@@ -526,8 +502,7 @@ impl TryFrom<String> for Userid
             bail!("invalid user name in user id");
         }

-        PROXMOX_AUTH_REALM_STRING_SCHEMA
-            .check_constraints(&data[(name_len + 1)..])
+        PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(&data[(name_len + 1)..])
             .map_err(|_| format_err!("invalid realm in user id"))?;

         Ok(Self { data, name_len })
@@ -553,10 +528,10 @@ impl PartialEq<String> for Userid
 }

 /// A complete authentication id consisting of a user id and an optional token name.
-#[derive(Clone, Debug, Eq, PartialEq, Hash, UpdaterType, Ord, PartialOrd)]
+#[derive(Clone, Debug, Eq, PartialEq, Hash, UpdaterType)]
 pub struct Authid {
     user: Userid,
-    tokenname: Option<Tokenname>,
+    tokenname: Option<Tokenname>
 }

 impl ApiType for Authid {
@@ -581,7 +556,10 @@ impl Authid
     }

     pub fn tokenname(&self) -> Option<&TokennameRef> {
-        self.tokenname.as_deref()
+        match &self.tokenname {
+            Some(name) => Some(&name),
+            None => None,
+        }
     }

     /// Get the "root@pam" auth id.
@@ -676,7 +654,7 @@ impl TryFrom<String> for Authid
         data.truncate(realm_end);

-        let user: Userid = data.parse()?;
+        let user:Userid = data.parse()?;

         Ok(Self { user, tokenname })
     }
@@ -703,15 +681,12 @@ fn test_token_id
     let token_userid = auth_id.user();
     assert_eq!(&userid, token_userid);
     assert!(auth_id.is_token());
-    assert_eq!(
-        auth_id.tokenname().expect("Token has tokenname").as_str(),
-        TokennameRef::new("bar").as_str()
-    );
+    assert_eq!(auth_id.tokenname().expect("Token has tokenname").as_str(), TokennameRef::new("bar").as_str());
     assert_eq!(auth_id.to_string(), "test@pam!bar".to_string());
 }

-proxmox_serde::forward_deserialize_to_from_str!(Userid);
-proxmox_serde::forward_serialize_to_display!(Userid);
-proxmox_serde::forward_deserialize_to_from_str!(Authid);
-proxmox_serde::forward_serialize_to_display!(Authid);
+proxmox::forward_deserialize_to_from_str!(Userid);
+proxmox::forward_serialize_to_display!(Userid);
+proxmox::forward_deserialize_to_from_str!(Authid);
+proxmox::forward_serialize_to_display!(Authid);
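An auth id follows the grammar `user@realm[!tokenname]` visible in the regex macros above (the test asserts the round trip for "test@pam!bar"). A standalone sketch of the same split, using plain string handling rather than the crate's schema-validated parser:

    fn split_auth_id(s: &str) -> Option<(&str, &str, Option<&str>)> {
        // an optional "!token" suffix follows the realm
        let (userid, token) = match s.split_once('!') {
            Some((u, t)) => (u, Some(t)),
            None => (s, None),
        };
        // split on the *last* '@': the user name part may in principle
        // contain '@', while the realm may not
        let (user, realm) = userid.rsplit_once('@')?;
        Some((user, realm, token))
    }

    fn main() {
        assert_eq!(split_auth_id("test@pam!bar"), Some(("test", "pam", Some("bar"))));
        assert_eq!(split_auth_id("root@pam"), Some(("root", "pam", None)));
    }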

View File

@@ -6,7 +6,8 @@ const_regex! {
     pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
 }

-pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new("Pool sector size exponent.")
+pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new(
+    "Pool sector size exponent.")
     .minimum(9)
     .maximum(16)
     .default(12)
@@ -58,7 +59,7 @@ pub enum ZfsRaidLevel
 #[api()]
 #[derive(Debug, Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
+#[serde(rename_all="kebab-case")]
 /// zpool list item
 pub struct ZpoolListItem {
     /// zpool name
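The ashift bounds (9 through 16, default 12) are exponents: a pool's sector size is 2^ashift bytes, so the default corresponds to 4096-byte (4K) sectors. A one-line check (the closure is mine, for illustration):

    fn main() {
        // sector size in bytes for a given ashift exponent
        let sector = |ashift: u32| 1u64 << ashift;
        assert_eq!(sector(9), 512);    // minimum
        assert_eq!(sector(12), 4096);  // default, 4K sectors
        assert_eq!(sector(16), 65536); // maximum
    }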

View File

@@ -1,6 +1,6 @@
 [package]
 name = "pbs-buildcfg"
-version = "2.2.3"
+version = "2.1.2"
 authors = ["Proxmox Support Team <support@proxmox.com>"]
 edition = "2018"
 description = "macros used for pbs related paths such as configdir and rundir"

View File

@@ -5,12 +5,19 @@ use std::process::Command;
 fn main() {
     let repoid = match env::var("REPOID") {
         Ok(repoid) => repoid,
-        Err(_) => match Command::new("git").args(&["rev-parse", "HEAD"]).output() {
-            Ok(output) => String::from_utf8(output.stdout).unwrap(),
-            Err(err) => {
-                panic!("git rev-parse failed: {}", err);
+        Err(_) => {
+            match Command::new("git")
+                .args(&["rev-parse", "HEAD"])
+                .output()
+            {
+                Ok(output) => {
+                    String::from_utf8(output.stdout).unwrap()
+                }
+                Err(err) => {
+                    panic!("git rev-parse failed: {}", err);
+                }
             }
-        },
+        }
     };

     println!("cargo:rustc-env=REPOID={}", repoid);

View File

@@ -1,13 +1,15 @@
 //! Exports configuration data from the build system

-pub const PROXMOX_PKG_VERSION: &str = concat!(
-    env!("CARGO_PKG_VERSION_MAJOR"),
-    ".",
-    env!("CARGO_PKG_VERSION_MINOR"),
-);
+pub const PROXMOX_PKG_VERSION: &str =
+    concat!(
+        env!("CARGO_PKG_VERSION_MAJOR"),
+        ".",
+        env!("CARGO_PKG_VERSION_MINOR"),
+    );
 pub const PROXMOX_PKG_RELEASE: &str = env!("CARGO_PKG_VERSION_PATCH");
 pub const PROXMOX_PKG_REPOID: &str = env!("REPOID");

 /// The configured configuration directory
 pub const CONFIGDIR: &str = "/etc/proxmox-backup";
 pub const JS_DIR: &str = "/usr/share/javascript/proxmox-backup";
@@ -18,38 +20,20 @@ pub const BACKUP_USER_NAME: &str = "backup";
 pub const BACKUP_GROUP_NAME: &str = "backup";

 #[macro_export]
-macro_rules! PROXMOX_BACKUP_RUN_DIR_M {
-    () => {
-        "/run/proxmox-backup"
-    };
-}
+macro_rules! PROXMOX_BACKUP_RUN_DIR_M { () => ("/run/proxmox-backup") }

 #[macro_export]
-macro_rules! PROXMOX_BACKUP_STATE_DIR_M {
-    () => {
-        "/var/lib/proxmox-backup"
-    };
-}
+macro_rules! PROXMOX_BACKUP_STATE_DIR_M { () => ("/var/lib/proxmox-backup") }

 #[macro_export]
-macro_rules! PROXMOX_BACKUP_LOG_DIR_M {
-    () => {
-        "/var/log/proxmox-backup"
-    };
-}
+macro_rules! PROXMOX_BACKUP_LOG_DIR_M { () => ("/var/log/proxmox-backup") }

 #[macro_export]
-macro_rules! PROXMOX_BACKUP_CACHE_DIR_M {
-    () => {
-        "/var/cache/proxmox-backup"
-    };
-}
+macro_rules! PROXMOX_BACKUP_CACHE_DIR_M { () => ("/var/cache/proxmox-backup") }

 #[macro_export]
 macro_rules! PROXMOX_BACKUP_FILE_RESTORE_BIN_DIR_M {
-    () => {
-        "/usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore"
-    };
+    () => ("/usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore")
 }

 /// namespaced directory for in-memory (tmpfs) run state
@@ -81,10 +65,8 @@ pub const PROXMOX_BACKUP_INITRAMFS_FN: &str =
     concat!(PROXMOX_BACKUP_CACHE_DIR_M!(), "/file-restore-initramfs.img");

 /// filename of the cached initramfs to use for debugging single file restore
-pub const PROXMOX_BACKUP_INITRAMFS_DBG_FN: &str = concat!(
-    PROXMOX_BACKUP_CACHE_DIR_M!(),
-    "/file-restore-initramfs-debug.img"
-);
+pub const PROXMOX_BACKUP_INITRAMFS_DBG_FN: &str =
+    concat!(PROXMOX_BACKUP_CACHE_DIR_M!(), "/file-restore-initramfs-debug.img");

 /// filename of the kernel to use for booting single file restore VMs
 pub const PROXMOX_BACKUP_KERNEL_FN: &str =
@@ -100,9 +82,7 @@ pub const PROXMOX_BACKUP_KERNEL_FN: &str =
 /// ```
 #[macro_export]
 macro_rules! configdir {
-    ($subdir:expr) => {
-        concat!("/etc/proxmox-backup", $subdir)
-    };
+    ($subdir:expr) => (concat!("/etc/proxmox-backup", $subdir))
 }

 /// Prepend the run directory to a file name.
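Because both macro bodies expand to `concat!`, `configdir!` yields a `&'static str` assembled entirely at compile time, so the argument must be a string literal. A small usage sketch (the `/datastore.cfg` argument is illustrative):

    macro_rules! configdir {
        ($subdir:expr) => {
            concat!("/etc/proxmox-backup", $subdir)
        };
    }

    fn main() {
        // expands at compile time to "/etc/proxmox-backup/datastore.cfg"
        const DATASTORE_CFG: &str = configdir!("/datastore.cfg");
        println!("{}", DATASTORE_CFG);
    }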

View File

@@ -10,39 +10,33 @@ anyhow = "1.0"
 bitflags = "1.2.1"
 bytes = "1.0"
 futures = "0.3"
-hex = "0.4.3"
 h2 = { version = "0.3", features = [ "stream" ] }
 http = "0.2"
 hyper = { version = "0.14", features = [ "full" ] }
 lazy_static = "1.4"
 libc = "0.2"
-nix = "0.24"
+nix = "0.19.1"
 openssl = "0.10"
 percent-encoding = "2.1"
 pin-project-lite = "0.2"
-regex = "1.5"
+regex = "1.2"
-rustyline = "9"
+rustyline = "7"
-serde = "1.0"
 serde_json = "1.0"
 tokio = { version = "1.6", features = [ "fs", "signal" ] }
 tokio-stream = "0.1.0"
 tower-service = "0.3.0"
 xdg = "2.2"
-tar = "0.4"

 pathpatterns = "0.1.2"
+proxmox = "0.15.3"
-proxmox-async = "0.4"
+proxmox-async = "0.2"
-proxmox-compression = "0.1.1"
 proxmox-fuse = "0.1.1"
-proxmox-http = { version = "0.6", features = [ "client", "http-helpers", "websocket" ] }
+proxmox-http = { version = "0.5.4", features = [ "client", "http-helpers", "websocket" ] }
-proxmox-io = { version = "1.0.1", features = [ "tokio" ] }
+proxmox-io = { version = "1", features = [ "tokio" ] }
-proxmox-lang = "1.1"
+proxmox-lang = "1"
-proxmox-router = { version = "1.2", features = [ "cli" ] }
+proxmox-router = { version = "1.1", features = [ "cli" ] }
-proxmox-schema = "1.3.1"
+proxmox-schema = "1"
 proxmox-time = "1"
-proxmox-sys = "0.3"
 pxar = { version = "0.10.1", features = [ "tokio-io" ] }

 pbs-api-types = { path = "../pbs-api-types" }

View File

@@ -1,24 +1,25 @@
 use anyhow::{format_err, Error};
+use std::io::{Write, Seek, SeekFrom};
 use std::fs::File;
-use std::io::{Seek, SeekFrom, Write};
-use std::os::unix::fs::OpenOptionsExt;
 use std::sync::Arc;
+use std::os::unix::fs::OpenOptionsExt;

 use futures::future::AbortHandle;
 use serde_json::{json, Value};

-use pbs_api_types::{BackupDir, BackupNamespace};
+use proxmox::tools::digest_to_hex;
+use pbs_tools::crypt_config::CryptConfig;
+use pbs_tools::sha::sha256;
+use pbs_datastore::{PROXMOX_BACKUP_READER_PROTOCOL_ID_V1, BackupManifest};
 use pbs_datastore::data_blob::DataBlob;
 use pbs_datastore::data_blob_reader::DataBlobReader;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::MANIFEST_BLOB_NAME;
-use pbs_datastore::{BackupManifest, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};
-use pbs_tools::crypt_config::CryptConfig;
-use pbs_tools::sha::sha256;

-use super::{H2Client, HttpClient};
+use super::{HttpClient, H2Client};

 /// Backup Reader
 pub struct BackupReader {
@@ -28,18 +29,16 @@ pub struct BackupReader
 }

 impl Drop for BackupReader {
     fn drop(&mut self) {
         self.abort.abort();
     }
 }

 impl BackupReader {
     fn new(h2: H2Client, abort: AbortHandle, crypt_config: Option<Arc<CryptConfig>>) -> Arc<Self> {
-        Arc::new(Self {
-            h2,
-            abort,
-            crypt_config,
-        })
+        Arc::new(Self { h2, abort, crypt_config})
     }

     /// Create a new instance by upgrading the connection at '/api2/json/reader'
@@ -47,55 +46,59 @@ impl BackupReader
         client: HttpClient,
         crypt_config: Option<Arc<CryptConfig>>,
         datastore: &str,
-        ns: &BackupNamespace,
-        backup: &BackupDir,
+        backup_type: &str,
+        backup_id: &str,
+        backup_time: i64,
         debug: bool,
     ) -> Result<Arc<BackupReader>, Error> {
-        let mut param = json!({
-            "backup-type": backup.ty(),
-            "backup-id": backup.id(),
-            "backup-time": backup.time,
+        let param = json!({
+            "backup-type": backup_type,
+            "backup-id": backup_id,
+            "backup-time": backup_time,
             "store": datastore,
            "debug": debug,
         });

-        if !ns.is_root() {
-            param["ns"] = serde_json::to_value(ns)?;
-        }
-
-        let req = HttpClient::request_builder(
-            client.server(),
-            client.port(),
-            "GET",
-            "/api2/json/reader",
-            Some(param),
-        )
-        .unwrap();
-        let (h2, abort) = client
-            .start_h2_connection(req, String::from(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!()))
-            .await?;
+        let req = HttpClient::request_builder(client.server(), client.port(), "GET", "/api2/json/reader", Some(param)).unwrap();
+        let (h2, abort) = client.start_h2_connection(req, String::from(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!())).await?;

         Ok(BackupReader::new(h2, abort, crypt_config))
     }

     /// Execute a GET request
-    pub async fn get(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
+    pub async fn get(
+        &self,
+        path: &str,
+        param: Option<Value>,
+    ) -> Result<Value, Error> {
         self.h2.get(path, param).await
     }

     /// Execute a PUT request
-    pub async fn put(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
+    pub async fn put(
+        &self,
+        path: &str,
+        param: Option<Value>,
+    ) -> Result<Value, Error> {
         self.h2.put(path, param).await
     }

     /// Execute a POST request
-    pub async fn post(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
+    pub async fn post(
+        &self,
+        path: &str,
+        param: Option<Value>,
+    ) -> Result<Value, Error> {
         self.h2.post(path, param).await
     }

     /// Execute a GET request and send output to a writer
-    pub async fn download<W: Write + Send>(&self, file_name: &str, output: W) -> Result<(), Error> {
+    pub async fn download<W: Write + Send>(
+        &self,
+        file_name: &str,
+        output: W,
+    ) -> Result<(), Error> {
         let path = "download";
         let param = json!({ "file-name": file_name });
         self.h2.download(path, Some(param), output).await
@@ -104,7 +107,10 @@ impl BackupReader
     /// Execute a special GET request and send output to a writer
     ///
     /// This writes random data, and is only useful to test download speed.
-    pub async fn speedtest<W: Write + Send>(&self, output: W) -> Result<(), Error> {
+    pub async fn speedtest<W: Write + Send>(
+        &self,
+        output: W,
+    ) -> Result<(), Error> {
         self.h2.download("speedtest", None, output).await
     }

@@ -115,7 +121,7 @@ impl BackupReader
         output: W,
     ) -> Result<(), Error> {
         let path = "chunk";
-        let param = json!({ "digest": hex::encode(digest) });
+        let param = json!({ "digest": digest_to_hex(digest) });
         self.h2.download(path, Some(param), output).await
     }

@@ -127,14 +133,14 @@ impl BackupReader
     ///
     /// The manifest signature is verified if we have a crypt_config.
     pub async fn download_manifest(&self) -> Result<(BackupManifest, Vec<u8>), Error> {
         let mut raw_data = Vec::with_capacity(64 * 1024);
         self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
         let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
         // no expected digest available
         let data = blob.decode(None, None)?;

-        let manifest =
-            BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
+        let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;

         Ok((manifest, data))
     }

@@ -148,6 +154,7 @@ impl BackupReader
         manifest: &BackupManifest,
         name: &str,
     ) -> Result<DataBlobReader<'_, File>, Error> {
         let mut tmpfile = std::fs::OpenOptions::new()
             .write(true)
             .read(true)
@@ -174,6 +181,7 @@ impl BackupReader
         manifest: &BackupManifest,
         name: &str,
     ) -> Result<DynamicIndexReader, Error> {
         let mut tmpfile = std::fs::OpenOptions::new()
             .write(true)
             .read(true)
@@ -201,6 +209,7 @@ impl BackupReader
         manifest: &BackupManifest,
         name: &str,
     ) -> Result<FixedIndexReader, Error> {
         let mut tmpfile = std::fs::OpenOptions::new()
             .write(true)
             .read(true)
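On the master side, the optional backup namespace is spliced into the request parameters only when it is not the root namespace. The pattern of mutating a `serde_json::Value` built with `json!` is easy to reproduce in isolation (the values here are illustrative):

    use serde_json::json;

    fn main() -> Result<(), serde_json::Error> {
        let ns = "dev/team-a"; // stand-in for a non-root BackupNamespace
        let mut param = json!({
            "backup-type": "vm",
            "backup-id": "100",
            "store": "store1",
        });
        // indexing with a &str key inserts the field into the JSON object
        param["ns"] = serde_json::to_value(ns)?;
        println!("{}", param);
        Ok(())
    }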

View File

@@ -3,7 +3,7 @@ use std::fmt;

 use anyhow::{format_err, Error};

-use pbs_api_types::{Authid, Userid, BACKUP_REPO_URL_REGEX, IP_V6_REGEX};
+use pbs_api_types::{BACKUP_REPO_URL_REGEX, IP_V6_REGEX, Authid, Userid};

 /// Reference remote backup locations
 ///
@@ -21,22 +21,15 @@ pub struct BackupRepository
 }

 impl BackupRepository {
-    pub fn new(
-        auth_id: Option<Authid>,
-        host: Option<String>,
-        port: Option<u16>,
-        store: String,
-    ) -> Self {
+    pub fn new(auth_id: Option<Authid>, host: Option<String>, port: Option<u16>, store: String) -> Self {
         let host = match host {
-            Some(host) if (IP_V6_REGEX.regex_obj)().is_match(&host) => Some(format!("[{}]", host)),
+            Some(host) if (IP_V6_REGEX.regex_obj)().is_match(&host) => {
+                Some(format!("[{}]", host))
+            },
             other => other,
         };
-        Self {
-            auth_id,
-            host,
-            port,
-            store,
-        }
+        Self { auth_id, host, port, store }
     }

     pub fn auth_id(&self) -> &Authid {
@@ -44,7 +37,7 @@ impl BackupRepository
             return auth_id;
         }

-        Authid::root_auth_id()
+        &Authid::root_auth_id()
     }

     pub fn user(&self) -> &Userid {
@@ -77,14 +70,7 @@ impl BackupRepository
 impl fmt::Display for BackupRepository {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match (&self.auth_id, &self.host, self.port) {
-            (Some(auth_id), _, _) => write!(
-                f,
-                "{}@{}:{}:{}",
-                auth_id,
-                self.host(),
-                self.port(),
-                self.store
-            ),
+            (Some(auth_id), _, _) => write!(f, "{}@{}:{}:{}", auth_id, self.host(), self.port(), self.store),
             (None, Some(host), None) => write!(f, "{}:{}", host, self.store),
             (None, _, Some(port)) => write!(f, "{}:{}:{}", self.host(), port, self.store),
             (None, None, None) => write!(f, "{}", self.store),
@@ -101,15 +87,12 @@ impl std::str::FromStr for BackupRepository
     /// `host` parts are optional, where `host` defaults to the local
     /// host, and `user` defaults to `root@pam`.
     fn from_str(url: &str) -> Result<Self, Self::Err> {
-        let cap = (BACKUP_REPO_URL_REGEX.regex_obj)()
-            .captures(url)
+        let cap = (BACKUP_REPO_URL_REGEX.regex_obj)().captures(url)
            .ok_or_else(|| format_err!("unable to parse repository url '{}'", url))?;

         Ok(Self {
-            auth_id: cap
-                .get(1)
-                .map(|m| Authid::try_from(m.as_str().to_owned()))
-                .transpose()?,
+            auth_id: cap.get(1).map(|m| Authid::try_from(m.as_str().to_owned())).transpose()?,
             host: cap.get(2).map(|m| m.as_str().to_owned()),
             port: cap.get(3).map(|m| m.as_str().parse::<u16>()).transpose()?,
             store: cap[4].to_owned(),
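The Display impl above emits one of four shapes depending on which optional parts are set: `user@host:port:store`, `host:store`, `host:port:store`, or a bare `store`. A standalone sketch of the same cascade, with simplified types and the PBS default port 8007 hard-coded (my helper, not the crate's):

    fn format_repo(auth_id: Option<&str>, host: Option<&str>, port: Option<u16>, store: &str) -> String {
        let host_or_default = host.unwrap_or("localhost");
        match (auth_id, host, port) {
            (Some(auth_id), _, _) => {
                format!("{}@{}:{}:{}", auth_id, host_or_default, port.unwrap_or(8007), store)
            }
            (None, Some(host), None) => format!("{}:{}", host, store),
            (None, _, Some(port)) => format!("{}:{}:{}", host_or_default, port, store),
            (None, None, None) => store.to_string(),
        }
    }

    fn main() {
        assert_eq!(format_repo(None, None, None, "store1"), "store1");
        assert_eq!(
            format_repo(Some("root@pam"), Some("pbs.local"), None, "store1"),
            "root@pam@pbs.local:8007:store1"
        );
    }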

View File

@@ -6,29 +6,25 @@ const_regex! {
     BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(pxar|img|conf|log)):(.+)$";
 }

-pub const BACKUP_SOURCE_SCHEMA: Schema =
-    StringSchema::new("Backup source specification ([<label>:<path>]).")
-        .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
-        .schema();
+pub const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
+    "Backup source specification ([<label>:<path>]).")
+    .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
+    .schema();

-pub enum BackupSpecificationType {
-    PXAR,
-    IMAGE,
-    CONFIG,
-    LOGFILE,
-}
+pub enum BackupSpecificationType { PXAR, IMAGE, CONFIG, LOGFILE }

 pub struct BackupSpecification {
     pub archive_name: String,  // left part
     pub config_string: String, // right part
     pub spec_type: BackupSpecificationType,
 }

 pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Error> {
     if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
         let archive_name = caps.get(1).unwrap().as_str().into();
         let extension = caps.get(2).unwrap().as_str();
         let config_string = caps.get(3).unwrap().as_str().into();
         let spec_type = match extension {
             "pxar" => BackupSpecificationType::PXAR,
             "img" => BackupSpecificationType::IMAGE,
@@ -36,11 +32,7 @@ pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Er
             "log" => BackupSpecificationType::LOGFILE,
             _ => bail!("unknown backup source type '{}'", extension),
         };
-        return Ok(BackupSpecification {
-            archive_name,
-            config_string,
-            spec_type,
-        });
+        return Ok(BackupSpecification { archive_name, config_string, spec_type });
     }

     bail!("unable to parse backup source specification '{}'", value);

View File

@@ -12,14 +12,16 @@ use tokio::io::AsyncReadExt;
 use tokio::sync::{mpsc, oneshot};
 use tokio_stream::wrappers::ReceiverStream;

-use pbs_api_types::{BackupDir, BackupNamespace, HumanByte};
+use proxmox::tools::digest_to_hex;
+use pbs_api_types::HumanByte;
+use pbs_tools::crypt_config::CryptConfig;
+use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1};
 use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{ArchiveType, BackupManifest, MANIFEST_BLOB_NAME};
-use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1};
-use pbs_tools::crypt_config::CryptConfig;

 use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo};
@@ -86,24 +88,21 @@ impl BackupWriter
         client: HttpClient,
         crypt_config: Option<Arc<CryptConfig>>,
         datastore: &str,
-        ns: &BackupNamespace,
-        backup: &BackupDir,
+        backup_type: &str,
+        backup_id: &str,
+        backup_time: i64,
         debug: bool,
         benchmark: bool,
     ) -> Result<Arc<BackupWriter>, Error> {
-        let mut param = json!({
-            "backup-type": backup.ty(),
-            "backup-id": backup.id(),
-            "backup-time": backup.time,
+        let param = json!({
+            "backup-type": backup_type,
+            "backup-id": backup_id,
+            "backup-time": backup_time,
             "store": datastore,
             "debug": debug,
             "benchmark": benchmark
         });

-        if !ns.is_root() {
-            param["ns"] = serde_json::to_value(ns)?;
-        }
-
         let req = HttpClient::request_builder(
             client.server(),
             client.port(),
@@ -292,28 +291,22 @@ impl BackupWriter
             // try, but ignore errors
             match ArchiveType::from_path(archive_name) {
                 Ok(ArchiveType::FixedIndex) => {
-                    if let Err(err) = self
+                    let _ = self
                         .download_previous_fixed_index(
                             archive_name,
                             &manifest,
                             known_chunks.clone(),
                         )
-                        .await
-                    {
-                        eprintln!("Error downloading .fidx from previous manifest: {}", err);
-                    }
+                        .await;
                 }
                 Ok(ArchiveType::DynamicIndex) => {
-                    if let Err(err) = self
+                    let _ = self
                         .download_previous_dynamic_index(
                             archive_name,
                             &manifest,
                             known_chunks.clone(),
                         )
-                        .await
-                    {
-                        eprintln!("Error downloading .didx from previous manifest: {}", err);
-                    }
+                        .await;
                 }
                 _ => { /* do nothing */ }
             }
@@ -330,7 +323,7 @@ impl BackupWriter
             self.h2.clone(),
             wid,
             stream,
-            prefix,
+            &prefix,
             known_chunks.clone(),
             if options.encrypt {
                 self.crypt_config.clone()
@@ -396,7 +389,7 @@ impl BackupWriter
             "wid": wid ,
             "chunk-count": upload_stats.chunk_count,
             "size": upload_stats.size,
-            "csum": hex::encode(&upload_stats.csum),
+            "csum": proxmox::tools::digest_to_hex(&upload_stats.csum),
         });
         let _value = self.h2.post(&close_path, Some(param)).await?;
         Ok(BackupStats {
@@ -488,7 +481,7 @@ impl BackupWriter
         let mut digest_list = vec![];
         let mut offset_list = vec![];
         for (offset, digest) in chunk_list {
-            digest_list.push(hex::encode(&digest));
+            digest_list.push(digest_to_hex(&digest));
             offset_list.push(offset);
         }
         if verbose { println!("append chunks list len ({})", digest_list.len()); }
@@ -719,7 +712,7 @@ impl BackupWriter
             if let MergedChunkInfo::New(chunk_info) = merged_chunk_info {
                 let offset = chunk_info.offset;
                 let digest = chunk_info.digest;
-                let digest_str = hex::encode(&digest);
+                let digest_str = digest_to_hex(&digest);
                 /* too verbose, needs finer verbosity setting granularity
                 if verbose {
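Master replaces the proxmox crate's `digest_to_hex` helper with the stock `hex` crate throughout this file; for a 32-byte SHA-256 digest both produce the same lowercase hex string. A quick sketch with the `hex` crate (the digest value is fake):

    fn main() {
        // a fake 32-byte digest, the size SHA-256 produces
        let digest = [0xabu8; 32];
        let digest_str = hex::encode(&digest);
        assert_eq!(digest_str.len(), 64); // two hex chars per byte
        assert!(digest_str.starts_with("abab"));
    }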

View File

@@ -14,16 +14,16 @@ use nix::fcntl::OFlag;
 use nix::sys::stat::Mode;

 use pathpatterns::{MatchEntry, MatchList, MatchPattern, MatchType, PatternFlag};
+use proxmox::tools::fs::{create_path, CreateOptions};
 use proxmox_router::cli::{self, CliCommand, CliCommandMap, CliHelper, CommandLineInterface};
 use proxmox_schema::api;
-use proxmox_sys::fs::{create_path, CreateOptions};
 use pxar::{EntryKind, Metadata};

-use pbs_datastore::catalog::{self, DirEntryAttribute};
 use proxmox_async::runtime::block_in_place;
+use pbs_datastore::catalog::{self, DirEntryAttribute};

-use crate::pxar::fuse::{Accessor, FileEntry};
 use crate::pxar::Flags;
+use crate::pxar::fuse::{Accessor, FileEntry};

 type CatalogReader = pbs_datastore::catalog::CatalogReader<std::fs::File>;
@@ -91,7 +91,10 @@ pub fn catalog_shell_cli() -> CommandLineInterface
                 "find",
                 CliCommand::new(&API_METHOD_FIND_COMMAND).arg_param(&["pattern"]),
             )
-            .insert("exit", CliCommand::new(&API_METHOD_EXIT))
+            .insert(
+                "exit",
+                CliCommand::new(&API_METHOD_EXIT),
+            )
             .insert_help(),
     )
 }
@@ -526,7 +529,7 @@ impl Shell
         };

         let new_stack =
-            Self::lookup(stack, &mut *catalog, accessor, Some(path), follow_symlinks).await?;
+            Self::lookup(&stack, &mut *catalog, accessor, Some(path), follow_symlinks).await?;

         *stack = new_stack;
@@ -990,7 +993,7 @@ impl Shell
             &mut self.catalog,
             dir_stack,
             extractor,
-            match_list,
+            &match_list,
             &self.accessor,
         )?;
@@ -1067,8 +1070,7 @@ impl<'a> ExtractorState<'a>
             }

             self.path.extend(&entry.name);
-            self.extractor
-                .set_path(OsString::from_vec(self.path.clone()));
+            self.extractor.set_path(OsString::from_vec(self.path.clone()));
             self.handle_entry(entry).await?;
         }
@@ -1116,12 +1118,11 @@ impl<'a> ExtractorState<'a>
         self.path_len_stack.push(self.path_len);
         self.path_len = self.path.len();

-        Shell::walk_pxar_archive(self.accessor, &mut self.dir_stack).await?;
+        Shell::walk_pxar_archive(&self.accessor, &mut self.dir_stack).await?;
         let dir_pxar = self.dir_stack.last().unwrap().pxar.as_ref().unwrap();
         let dir_meta = dir_pxar.entry().metadata().clone();
         let create = self.matches && match_result != Some(MatchType::Exclude);
-        self.extractor
-            .enter_directory(dir_pxar.file_name().to_os_string(), dir_meta, create)?;
+        self.extractor.enter_directory(dir_pxar.file_name().to_os_string(), dir_meta, create)?;
         Ok(())
     }
@@ -1140,7 +1141,7 @@ impl<'a> ExtractorState<'a>
             }
             (true, DirEntryAttribute::File { .. }) => {
                 self.dir_stack.push(PathStackEntry::new(entry));
-                let file = Shell::walk_pxar_archive(self.accessor, &mut self.dir_stack).await?;
+                let file = Shell::walk_pxar_archive(&self.accessor, &mut self.dir_stack).await?;
                 self.extract_file(file).await?;
                 self.dir_stack.pop();
             }
@@ -1152,7 +1153,7 @@ impl<'a> ExtractorState<'a>
             | (true, DirEntryAttribute::Hardlink) => {
                 let attr = entry.attr.clone();
                 self.dir_stack.push(PathStackEntry::new(entry));
-                let file = Shell::walk_pxar_archive(self.accessor, &mut self.dir_stack).await?;
+                let file = Shell::walk_pxar_archive(&self.accessor, &mut self.dir_stack).await?;
                 self.extract_special(file, attr).await?;
                 self.dir_stack.pop();
             }
@@ -1171,9 +1172,13 @@ impl<'a> ExtractorState<'a>
             pxar::EntryKind::File { size, .. } => {
                 let file_name = CString::new(entry.file_name().as_bytes())?;
                 let mut contents = entry.contents().await?;
-                self.extractor
-                    .async_extract_file(&file_name, entry.metadata(), *size, &mut contents)
-                    .await
+                self.extractor.async_extract_file(
+                    &file_name,
+                    entry.metadata(),
+                    *size,
+                    &mut contents,
+                )
+                .await
             }
             _ => {
                 bail!(
@@ -1192,13 +1197,11 @@ impl<'a> ExtractorState<'a>
         let file_name = CString::new(entry.file_name().as_bytes())?;
         match (catalog_attr, entry.kind()) {
             (DirEntryAttribute::Symlink, pxar::EntryKind::Symlink(symlink)) => {
-                block_in_place(|| {
-                    self.extractor.extract_symlink(
-                        &file_name,
-                        entry.metadata(),
-                        symlink.as_os_str(),
-                    )
-                })
+                block_in_place(|| self.extractor.extract_symlink(
+                    &file_name,
+                    entry.metadata(),
+                    symlink.as_os_str(),
+                ))
             }
             (DirEntryAttribute::Symlink, _) => {
                 bail!(
@@ -1208,10 +1211,7 @@ impl<'a> ExtractorState<'a>
             }
             (DirEntryAttribute::Hardlink, pxar::EntryKind::Hardlink(hardlink)) => {
-                block_in_place(|| {
-                    self.extractor
-                        .extract_hardlink(&file_name, hardlink.as_os_str())
-                })
+                block_in_place(|| self.extractor.extract_hardlink(&file_name, hardlink.as_os_str()))
             }
             (DirEntryAttribute::Hardlink, _) => {
                 bail!(
@@ -1224,18 +1224,16 @@ impl<'a> ExtractorState<'a>
                 self.extract_device(attr.clone(), &file_name, device, entry.metadata())
             }

-            (DirEntryAttribute::Fifo, pxar::EntryKind::Fifo) => block_in_place(|| {
-                self.extractor
-                    .extract_special(&file_name, entry.metadata(), 0)
-            }),
+            (DirEntryAttribute::Fifo, pxar::EntryKind::Fifo) => {
+                block_in_place(|| self.extractor.extract_special(&file_name, entry.metadata(), 0))
+            }
             (DirEntryAttribute::Fifo, _) => {
                 bail!("catalog fifo {:?} not a fifo in the archive", self.path());
             }

-            (DirEntryAttribute::Socket, pxar::EntryKind::Socket) => block_in_place(|| {
-                self.extractor
-                    .extract_special(&file_name, entry.metadata(), 0)
-            }),
+            (DirEntryAttribute::Socket, pxar::EntryKind::Socket) => {
+                block_in_place(|| self.extractor.extract_special(&file_name, entry.metadata(), 0))
+            }
             (DirEntryAttribute::Socket, _) => {
                 bail!(
                     "catalog socket {:?} not a socket in the archive",
@@ -1279,9 +1277,6 @@ impl<'a> ExtractorState<'a>
             );
         }
     }

-        block_in_place(|| {
-            self.extractor
-                .extract_special(file_name, metadata, device.to_dev_t())
-        })
+        block_in_place(|| self.extractor.extract_special(file_name, metadata, device.to_dev_t()))
     }
 }
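`block_in_place` (here from proxmox-async) lets the extractor run its synchronous filesystem calls from async context without stalling other tasks on the runtime's worker threads. Tokio ships the same primitive; a minimal illustrative sketch with `tokio::task::block_in_place`, which requires a multi-thread runtime:

    // requires tokio with the "rt-multi-thread" feature
    fn main() {
        let rt = tokio::runtime::Runtime::new().unwrap();
        rt.block_on(async {
            // hand a blocking (synchronous) operation to the runtime safely
            let data = tokio::task::block_in_place(|| std::fs::read("/etc/hostname"));
            match data {
                Ok(bytes) => println!("read {} bytes", bytes.len()),
                Err(err) => eprintln!("read failed: {}", err),
            }
        });
    }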

View File

@@ -1,8 +1,8 @@
 use std::pin::Pin;
 use std::task::{Context, Poll};

-use anyhow::Error;
 use bytes::BytesMut;
+use anyhow::{Error};
 use futures::ready;
 use futures::stream::{Stream, TryStream};

@@ -18,12 +18,7 @@ pub struct ChunkStream<S: Unpin>
 impl<S: Unpin> ChunkStream<S> {
     pub fn new(input: S, chunk_size: Option<usize>) -> Self {
-        Self {
-            input,
-            chunker: Chunker::new(chunk_size.unwrap_or(4 * 1024 * 1024)),
-            buffer: BytesMut::new(),
-            scan_pos: 0,
-        }
+        Self { input, chunker: Chunker::new(chunk_size.unwrap_or(4*1024*1024)), buffer: BytesMut::new(), scan_pos: 0}
     }
 }

@@ -35,6 +30,7 @@
     S::Ok: AsRef<[u8]>,
     S::Error: Into<Error>,
 {
     type Item = Result<BytesMut, Error>;

     fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
@@ -86,11 +82,7 @@ pub struct FixedChunkStream<S: Unpin>
 impl<S: Unpin> FixedChunkStream<S> {
     pub fn new(input: S, chunk_size: usize) -> Self {
-        Self {
-            input,
-            chunk_size,
-            buffer: BytesMut::new(),
-        }
+        Self { input, chunk_size, buffer: BytesMut::new() }
     }
 }

@@ -103,10 +95,7 @@
 {
     type Item = Result<BytesMut, S::Error>;

-    fn poll_next(
-        self: Pin<&mut Self>,
-        cx: &mut Context,
-    ) -> Poll<Option<Result<BytesMut, S::Error>>> {
+    fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Result<BytesMut, S::Error>>> {
         let this = self.get_mut();
         loop {
             if this.buffer.len() >= this.chunk_size {
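The `FixedChunkStream` loop accumulates input and emits a chunk whenever the buffer reaches `chunk_size`, while `ChunkStream` does the same with a content-defined chunker and a 4 MiB default target size. The buffer handling can be shown synchronously with the `bytes` crate (a sketch of the idea, not the stream impl itself):

    use bytes::{BufMut, BytesMut};

    fn main() {
        let chunk_size = 4;
        let mut buffer = BytesMut::new();
        let mut chunks = Vec::new();

        for part in [&b"ab"[..], b"cdef", b"gh"] {
            buffer.put_slice(part);
            // emit full chunks as soon as enough bytes have accumulated
            while buffer.len() >= chunk_size {
                chunks.push(buffer.split_to(chunk_size));
            }
        }
        // yields "abcd" and "efgh"; 2 trailing bytes stay buffered for the next poll
        assert_eq!(chunks.len(), 2);
        assert_eq!(buffer.len(), 2);
    }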

View File

@@ -4,31 +4,30 @@ use std::time::Duration;

 use anyhow::{bail, format_err, Error};
 use futures::*;
-use http::header::HeaderValue;
 use http::Uri;
+use http::header::HeaderValue;
 use http::{Request, Response};
-use hyper::client::{Client, HttpConnector};
 use hyper::Body;
-use openssl::{
-    ssl::{SslConnector, SslMethod},
-    x509::X509StoreContextRef,
-};
-use percent_encoding::percent_encode;
+use hyper::client::{Client, HttpConnector};
+use openssl::{ssl::{SslConnector, SslMethod}, x509::X509StoreContextRef};
 use serde_json::{json, Value};
+use percent_encoding::percent_encode;
 use xdg::BaseDirectories;

+use proxmox::{
+    sys::linux::tty,
+    tools::fs::{file_get_json, replace_file, CreateOptions},
+};
 use proxmox_router::HttpError;
-use proxmox_sys::fs::{file_get_json, replace_file, CreateOptions};
-use proxmox_sys::linux::tty;

-use proxmox_async::broadcast_future::BroadcastFuture;
 use proxmox_http::client::{HttpsConnector, RateLimiter};
 use proxmox_http::uri::build_authority;
+use proxmox_async::broadcast_future::BroadcastFuture;

-use pbs_api_types::percent_encoding::DEFAULT_ENCODE_SET;
-use pbs_api_types::{Authid, RateLimitConfig, Userid};
+use pbs_api_types::{Authid, Userid, RateLimitConfig};
 use pbs_tools::json::json_object_to_query;
 use pbs_tools::ticket;
+use pbs_tools::percent_encoding::DEFAULT_ENCODE_SET;

 use super::pipe_to_stream::PipeToSendStream;
 use super::PROXMOX_BACKUP_TCP_KEEPALIVE_TIME;
@@ -56,6 +55,7 @@ pub struct HttpClientOptions
 }

 impl HttpClientOptions {
     pub fn new_interactive(password: Option<String>, fingerprint: Option<String>) -> Self {
         Self {
             password,
@@ -146,6 +146,7 @@ pub struct HttpClient

 /// Delete stored ticket data (logout)
 pub fn delete_ticket_info(prefix: &str, server: &str, username: &Userid) -> Result<(), Error> {
     let base = BaseDirectories::with_prefix(prefix)?;

     // usually /run/user/<uid>/...
@@ -159,17 +160,13 @@ pub fn delete_ticket_info(prefix: &str, server: &str, username: &Userid) -> Resu
         map.remove(username.as_str());
     }

-    replace_file(
-        path,
-        data.to_string().as_bytes(),
-        CreateOptions::new().perm(mode),
-        false,
-    )?;
+    replace_file(path, data.to_string().as_bytes(), CreateOptions::new().perm(mode), false)?;

     Ok(())
 }

 fn store_fingerprint(prefix: &str, server: &str, fingerprint: &str) -> Result<(), Error> {
     let base = BaseDirectories::with_prefix(prefix)?;

     // usually ~/.config/<prefix>/fingerprints
@@ -211,6 +208,7 @@ fn store_fingerprint(prefix: &str, server: &str, fingerprint: &str) -> Result<()
 }

 fn load_fingerprint(prefix: &str, server: &str) -> Option<String> {
     let base = BaseDirectories::with_prefix(prefix).ok()?;

     // usually ~/.config/<prefix>/fingerprints
@@ -228,13 +226,8 @@ fn load_fingerprint(prefix: &str, server: &str) -> Option<String> {
     None
 }

-fn store_ticket_info(
-    prefix: &str,
-    server: &str,
-    username: &str,
-    ticket: &str,
-    token: &str,
-) -> Result<(), Error> {
+fn store_ticket_info(prefix: &str, server: &str, username: &str, ticket: &str, token: &str) -> Result<(), Error> {
     let base = BaseDirectories::with_prefix(prefix)?;

     // usually /run/user/<uid>/...
@@ -264,12 +257,7 @@ fn store_ticket_info(
         }
     }

-    replace_file(
-        path,
-        new_data.to_string().as_bytes(),
-        CreateOptions::new().perm(mode),
-        false,
-    )?;
+    replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new().perm(mode), false)?;

     Ok(())
 }
@@ -314,6 +302,7 @@ impl HttpClient
         auth_id: &Authid,
         mut options: HttpClientOptions,
     ) -> Result<Self, Error> {
         let verified_fingerprint = Arc::new(Mutex::new(None));

         let mut expected_fingerprint = options.fingerprint.take();
@@ -333,32 +322,25 @@ impl HttpClient
             let interactive = options.interactive;
             let fingerprint_cache = options.fingerprint_cache;
             let prefix = options.prefix.clone();
-            ssl_connector_builder.set_verify_callback(
-                openssl::ssl::SslVerifyMode::PEER,
-                move |valid, ctx| match Self::verify_callback(
-                    valid,
-                    ctx,
-                    expected_fingerprint.as_ref(),
-                    interactive,
-                ) {
+            ssl_connector_builder.set_verify_callback(openssl::ssl::SslVerifyMode::PEER, move |valid, ctx| {
+                match Self::verify_callback(valid, ctx, expected_fingerprint.as_ref(), interactive) {
                     Ok(None) => true,
                     Ok(Some(fingerprint)) => {
                         if fingerprint_cache && prefix.is_some() {
-                            if let Err(err) =
-                                store_fingerprint(prefix.as_ref().unwrap(), &server, &fingerprint)
-                            {
+                            if let Err(err) = store_fingerprint(
+                                prefix.as_ref().unwrap(), &server, &fingerprint) {
                                 eprintln!("{}", err);
                             }
                         }
                         *verified_fingerprint.lock().unwrap() = Some(fingerprint);
                         true
-                    }
+                    },
                     Err(err) => {
                         eprintln!("certificate validation failed - {}", err);
                         false
-                    }
-                },
-            );
+                    },
+                }
+            });
         } else {
             ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE);
         }
@@ -368,31 +350,25 @@ impl HttpClient
         httpc.enforce_http(false); // we want https...
         httpc.set_connect_timeout(Some(std::time::Duration::new(10, 0)));
-        let mut https = HttpsConnector::with_connector(
-            httpc,
-            ssl_connector_builder.build(),
-            PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
-        );
+        let mut https = HttpsConnector::with_connector(httpc, ssl_connector_builder.build(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);

         if let Some(rate_in) = options.limit.rate_in {
-            let burst_in = options.limit.burst_in.unwrap_or(rate_in).as_u64();
-            https.set_read_limiter(Some(Arc::new(Mutex::new(RateLimiter::new(
-                rate_in.as_u64(),
-                burst_in,
-            )))));
+            let burst_in = options.limit.burst_in.unwrap_or_else(|| rate_in).as_u64();
+            https.set_read_limiter(Some(Arc::new(Mutex::new(
+                RateLimiter::new(rate_in.as_u64(), burst_in)
+            ))));
         }

         if let Some(rate_out) = options.limit.rate_out {
-            let burst_out = options.limit.burst_out.unwrap_or(rate_out).as_u64();
-            https.set_write_limiter(Some(Arc::new(Mutex::new(RateLimiter::new(
-                rate_out.as_u64(),
-                burst_out,
-            )))));
+            let burst_out = options.limit.burst_out.unwrap_or_else(|| rate_out).as_u64();
+            https.set_write_limiter(Some(Arc::new(Mutex::new(
+                RateLimiter::new(rate_out.as_u64(), burst_out)
+            ))));
        }

         let client = Client::builder()
             //.http2_initial_stream_window_size( (1 << 31) - 2)
             //.http2_initial_connection_window_size( (1 << 31) - 2)
             .build::<_, Body>(https);

         let password = options.password.take();
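The `RateLimiter::new(rate, burst)` calls wire a token-bucket style limiter into the connector's read and write paths; when `burst_in`/`burst_out` are unset they default to the configured rate itself. As a rough mental model of such a limiter (my own sketch, not the proxmox-http implementation):

    /// Minimal token-bucket sketch: `rate` tokens (bytes) refill per second,
    /// capped at `burst` capacity.
    struct TokenBucket {
        rate: u64,
        burst: u64,
        tokens: f64,
        last: std::time::Instant,
    }

    impl TokenBucket {
        fn new(rate: u64, burst: u64) -> Self {
            Self { rate, burst, tokens: burst as f64, last: std::time::Instant::now() }
        }

        /// Returns how many bytes may pass right now, consuming the tokens.
        fn take(&mut self, wanted: u64) -> u64 {
            let now = std::time::Instant::now();
            self.tokens = (self.tokens + self.last.elapsed().as_secs_f64() * self.rate as f64)
                .min(self.burst as f64);
            self.last = now;
            let granted = (wanted as f64).min(self.tokens) as u64;
            self.tokens -= granted as f64;
            granted
        }
    }

    fn main() {
        let mut bucket = TokenBucket::new(1024 * 1024, 2 * 1024 * 1024);
        // a full bucket lets an initial burst of up to 2 MiB through
        assert_eq!(bucket.take(4 * 1024 * 1024), 2 * 1024 * 1024);
    }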
@ -430,32 +406,18 @@ impl HttpClient {
let renewal_future = async move { let renewal_future = async move {
loop { loop {
tokio::time::sleep(Duration::new(60 * 15, 0)).await; // 15 minutes tokio::time::sleep(Duration::new(60*15, 0)).await; // 15 minutes
let (auth_id, ticket) = { let (auth_id, ticket) = {
let authinfo = auth2.read().unwrap().clone(); let authinfo = auth2.read().unwrap().clone();
(authinfo.auth_id, authinfo.ticket) (authinfo.auth_id, authinfo.ticket)
}; };
match Self::credentials( match Self::credentials(client2.clone(), server2.clone(), port, auth_id.user().clone(), ticket).await {
client2.clone(),
server2.clone(),
port,
auth_id.user().clone(),
ticket,
)
.await
{
Ok(auth) => { Ok(auth) => {
if use_ticket_cache && prefix2.is_some() { if use_ticket_cache && prefix2.is_some() {
let _ = store_ticket_info( let _ = store_ticket_info(prefix2.as_ref().unwrap(), &server2, &auth.auth_id.to_string(), &auth.ticket, &auth.token);
prefix2.as_ref().unwrap(),
&server2,
&auth.auth_id.to_string(),
&auth.ticket,
&auth.token,
);
} }
*auth2.write().unwrap() = auth; *auth2.write().unwrap() = auth;
} },
Err(err) => { Err(err) => {
eprintln!("re-authentication failed: {}", err); eprintln!("re-authentication failed: {}", err);
return; return;
@ -472,21 +434,14 @@ impl HttpClient {
port, port,
auth_id.user().clone(), auth_id.user().clone(),
password, password,
) ).map_ok({
.map_ok({
let server = server.to_string(); let server = server.to_string();
let prefix = options.prefix.clone(); let prefix = options.prefix.clone();
let authinfo = auth.clone(); let authinfo = auth.clone();
move |auth| { move |auth| {
if use_ticket_cache && prefix.is_some() { if use_ticket_cache && prefix.is_some() {
let _ = store_ticket_info( let _ = store_ticket_info(prefix.as_ref().unwrap(), &server, &auth.auth_id.to_string(), &auth.ticket, &auth.token);
prefix.as_ref().unwrap(),
&server,
&auth.auth_id.to_string(),
&auth.ticket,
&auth.token,
);
} }
*authinfo.write().unwrap() = auth; *authinfo.write().unwrap() = auth;
tokio::spawn(renewal_future); tokio::spawn(renewal_future);
@ -549,6 +504,7 @@ impl HttpClient {
expected_fingerprint: Option<&String>, expected_fingerprint: Option<&String>,
interactive: bool, interactive: bool,
) -> Result<Option<String>, Error> { ) -> Result<Option<String>, Error> {
if openssl_valid { if openssl_valid {
return Ok(None); return Ok(None);
} }
@@ -559,21 +515,15 @@ impl HttpClient {
         };
         let depth = ctx.error_depth();
-        if depth != 0 {
-            bail!("context depth != 0")
-        }
+        if depth != 0 { bail!("context depth != 0") }
         let fp = match cert.digest(openssl::hash::MessageDigest::sha256()) {
             Ok(fp) => fp,
             Err(err) => bail!("failed to calculate certificate FP - {}", err), // should not happen
         };
-        let fp_string = hex::encode(&fp);
-        let fp_string = fp_string
-            .as_bytes()
-            .chunks(2)
-            .map(|v| std::str::from_utf8(v).unwrap())
-            .collect::<Vec<&str>>()
-            .join(":");
+        let fp_string = proxmox::tools::digest_to_hex(&fp);
+        let fp_string = fp_string.as_bytes().chunks(2).map(|v| std::str::from_utf8(v).unwrap())
+            .collect::<Vec<&str>>().join(":");
         if let Some(expected_fingerprint) = expected_fingerprint {
             let expected_fingerprint = expected_fingerprint.to_lowercase();
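Both sides build the same colon-separated fingerprint (hex-encode the digest, then join the hex digits in pairs with ':'). An equivalent, self-contained formulation that renders each digest byte directly:

    // Render each digest byte as two hex digits and join the pairs with ':'.
    fn format_fingerprint(digest: &[u8]) -> String {
        digest
            .iter()
            .map(|b| format!("{:02x}", b))
            .collect::<Vec<_>>()
            .join(":")
    }

    fn main() {
        assert_eq!(format_fingerprint(&[0xde, 0xad, 0xbe, 0xef]), "de:ad:be:ef");
    }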
@@ -613,70 +563,76 @@ impl HttpClient {
     }

     pub async fn request(&self, mut req: Request<Body>) -> Result<Value, Error> {
         let client = self.client.clone();

         let auth = self.login().await?;
         if auth.auth_id.is_token() {
-            let enc_api_token = format!(
-                "PBSAPIToken {}:{}",
-                auth.auth_id,
-                percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET)
-            );
-            req.headers_mut().insert(
-                "Authorization",
-                HeaderValue::from_str(&enc_api_token).unwrap(),
-            );
+            let enc_api_token = format!("PBSAPIToken {}:{}", auth.auth_id, percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
+            req.headers_mut().insert("Authorization", HeaderValue::from_str(&enc_api_token).unwrap());
         } else {
-            let enc_ticket = format!(
-                "PBSAuthCookie={}",
-                percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET)
-            );
-            req.headers_mut()
-                .insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
-            req.headers_mut().insert(
-                "CSRFPreventionToken",
-                HeaderValue::from_str(&auth.token).unwrap(),
-            );
+            let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
+            req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
+            req.headers_mut().insert("CSRFPreventionToken", HeaderValue::from_str(&auth.token).unwrap());
         }

         Self::api_request(client, req).await
     }

-    pub async fn get(&self, path: &str, data: Option<Value>) -> Result<Value, Error> {
+    pub async fn get(
+        &self,
+        path: &str,
+        data: Option<Value>,
+    ) -> Result<Value, Error> {
         let req = Self::request_builder(&self.server, self.port, "GET", path, data)?;
         self.request(req).await
     }

-    pub async fn delete(&self, path: &str, data: Option<Value>) -> Result<Value, Error> {
+    pub async fn delete(
+        &mut self,
+        path: &str,
+        data: Option<Value>,
+    ) -> Result<Value, Error> {
         let req = Self::request_builder(&self.server, self.port, "DELETE", path, data)?;
         self.request(req).await
     }

-    pub async fn post(&self, path: &str, data: Option<Value>) -> Result<Value, Error> {
+    pub async fn post(
+        &mut self,
+        path: &str,
+        data: Option<Value>,
+    ) -> Result<Value, Error> {
         let req = Self::request_builder(&self.server, self.port, "POST", path, data)?;
         self.request(req).await
     }

-    pub async fn put(&self, path: &str, data: Option<Value>) -> Result<Value, Error> {
+    pub async fn put(
+        &mut self,
+        path: &str,
+        data: Option<Value>,
+    ) -> Result<Value, Error> {
         let req = Self::request_builder(&self.server, self.port, "PUT", path, data)?;
         self.request(req).await
     }

-    pub async fn download(&self, path: &str, output: &mut (dyn Write + Send)) -> Result<(), Error> {
+    pub async fn download(
+        &mut self,
+        path: &str,
+        output: &mut (dyn Write + Send),
+    ) -> Result<(), Error> {
         let mut req = Self::request_builder(&self.server, self.port, "GET", path, None)?;

         let client = self.client.clone();

         let auth = self.login().await?;

-        let enc_ticket = format!(
-            "PBSAuthCookie={}",
-            percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET)
-        );
-        req.headers_mut()
-            .insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
+        let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
+        req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());

-        let resp = tokio::time::timeout(HTTP_TIMEOUT, client.request(req))
+        let resp = tokio::time::timeout(
+            HTTP_TIMEOUT,
+            client.request(req)
+        )
             .await
             .map_err(|_| format_err!("http download request timed out"))??;
         let status = resp.status();
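Both the request and download paths build the cookie header the same way: percent-encode the ticket and wrap it as PBSAuthCookie. A sketch, with the percent-encoding crate's NON_ALPHANUMERIC set standing in for the crate's own DEFAULT_ENCODE_SET:

    use hyper::header::HeaderValue;
    use percent_encoding::{percent_encode, NON_ALPHANUMERIC};

    // NON_ALPHANUMERIC is a stand-in for DEFAULT_ENCODE_SET.
    fn auth_cookie(ticket: &str) -> HeaderValue {
        let enc_ticket = format!(
            "PBSAuthCookie={}",
            percent_encode(ticket.as_bytes(), NON_ALPHANUMERIC)
        );
        HeaderValue::from_str(&enc_ticket).unwrap()
    }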
@@ -697,12 +653,13 @@ impl HttpClient {
     }

     pub async fn upload(
-        &self,
+        &mut self,
         content_type: &str,
         body: Body,
         path: &str,
         data: Option<Value>,
     ) -> Result<Value, Error> {
+
         let query = match data {
             Some(data) => Some(json_object_to_query(data)?),
             None => None,
@@ -714,8 +671,7 @@ impl HttpClient {
             .uri(url)
             .header("User-Agent", "proxmox-backup-client/1.0")
             .header("Content-Type", content_type)
-            .body(body)
-            .unwrap();
+            .body(body).unwrap();

         self.request(req).await
     }
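upload() relies on json_object_to_query to turn the optional JSON parameters into a query string. A rough stand-in for that helper (assumed behavior: flatten an object of scalars via url's form_urlencoded):

    use serde_json::{json, Value};
    use url::form_urlencoded;

    // Rough equivalent of json_object_to_query (assumed behavior).
    fn object_to_query(data: &Value) -> Option<String> {
        let map = data.as_object()?;
        let mut ser = form_urlencoded::Serializer::new(String::new());
        for (k, v) in map {
            match v {
                Value::String(s) => { ser.append_pair(k, s); }
                other => { ser.append_pair(k, &other.to_string()); }
            }
        }
        Some(ser.finish())
    }

    fn main() {
        let query = object_to_query(&json!({ "backup-type": "vm", "backup-id": "100" }));
        println!("{:?}", query); // e.g. Some("backup-id=100&backup-type=vm")
    }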
@@ -725,36 +681,25 @@ impl HttpClient {
         mut req: Request<Body>,
         protocol_name: String,
     ) -> Result<(H2Client, futures::future::AbortHandle), Error> {
         let client = self.client.clone();

         let auth = self.login().await?;

         if auth.auth_id.is_token() {
-            let enc_api_token = format!(
-                "PBSAPIToken {}:{}",
-                auth.auth_id,
-                percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET)
-            );
-            req.headers_mut().insert(
-                "Authorization",
-                HeaderValue::from_str(&enc_api_token).unwrap(),
-            );
+            let enc_api_token = format!("PBSAPIToken {}:{}", auth.auth_id, percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
+            req.headers_mut().insert("Authorization", HeaderValue::from_str(&enc_api_token).unwrap());
         } else {
-            let enc_ticket = format!(
-                "PBSAuthCookie={}",
-                percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET)
-            );
-            req.headers_mut()
-                .insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
-            req.headers_mut().insert(
-                "CSRFPreventionToken",
-                HeaderValue::from_str(&auth.token).unwrap(),
-            );
+            let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
+            req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
+            req.headers_mut().insert("CSRFPreventionToken", HeaderValue::from_str(&auth.token).unwrap());
         }

-        req.headers_mut()
-            .insert("UPGRADE", HeaderValue::from_str(&protocol_name).unwrap());
+        req.headers_mut().insert("UPGRADE", HeaderValue::from_str(&protocol_name).unwrap());

-        let resp = tokio::time::timeout(HTTP_TIMEOUT, client.request(req))
+        let resp = tokio::time::timeout(
+            HTTP_TIMEOUT,
+            client.request(req)
+        )
             .await
             .map_err(|_| format_err!("http upgrade request timed out"))??;
         let status = resp.status();
@@ -771,11 +716,12 @@ impl HttpClient {
         let (h2, connection) = h2::client::Builder::new()
             .initial_connection_window_size(max_window_size)
             .initial_window_size(max_window_size)
-            .max_frame_size(4 * 1024 * 1024)
+            .max_frame_size(4*1024*1024)
             .handshake(upgraded)
             .await?;

-        let connection = connection.map_err(|_| eprintln!("HTTP/2.0 connection failed"));
+        let connection = connection
+            .map_err(|_| eprintln!("HTTP/2.0 connection failed"));

         let (connection, abort) = futures::future::abortable(connection);
         // A cancellable future returns an Option which is None when cancelled and
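The handshake above configures h2 directly on the upgraded connection; the connection future must then be driven for requests to make any progress. A sketch of the same settings, with the window size assumed to match the commented-out hyper values earlier in the file:

    use h2::client;
    use tokio::io::{AsyncRead, AsyncWrite};

    async fn start_h2<T>(io: T) -> Result<client::SendRequest<bytes::Bytes>, h2::Error>
    where
        T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
    {
        // Assumed value; matches the commented-out hyper settings above.
        let max_window_size = (1 << 31) - 2;
        let (h2, connection) = client::Builder::new()
            .initial_connection_window_size(max_window_size)
            .initial_window_size(max_window_size)
            .max_frame_size(4 * 1024 * 1024)
            .handshake(io)
            .await?;
        // The connection future must be polled for streams to make progress.
        tokio::spawn(async move {
            if let Err(err) = connection.await {
                eprintln!("HTTP/2.0 connection failed: {}", err);
            }
        });
        Ok(h2)
    }

The effectively unbounded window sizes disable HTTP/2 flow-control backpressure, and the 4 MiB frame size suits the bulk chunk transfers this client performs.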
@@ -799,21 +745,12 @@ impl HttpClient {
         password: String,
     ) -> Result<AuthInfo, Error> {
         let data = json!({ "username": username, "password": password });
-        let req = Self::request_builder(
-            &server,
-            port,
-            "POST",
-            "/api2/json/access/ticket",
-            Some(data),
-        )?;
+        let req = Self::request_builder(&server, port, "POST", "/api2/json/access/ticket", Some(data))?;
         let cred = Self::api_request(client, req).await?;
         let auth = AuthInfo {
             auth_id: cred["data"]["username"].as_str().unwrap().parse()?,
             ticket: cred["data"]["ticket"].as_str().unwrap().to_owned(),
-            token: cred["data"]["CSRFPreventionToken"]
-                .as_str()
-                .unwrap()
-                .to_owned(),
+            token: cred["data"]["CSRFPreventionToken"].as_str().unwrap().to_owned(),
         };

         Ok(auth)
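The field extraction above unwraps, so a malformed ticket response would panic. A sketch of the same unpacking that reports a proper error instead (field names as in the response above):

    use anyhow::{format_err, Error};
    use serde_json::Value;

    fn parse_auth(cred: &Value) -> Result<(String, String, String), Error> {
        let field = |name: &str| -> Result<String, Error> {
            cred["data"][name]
                .as_str()
                .map(str::to_owned)
                .ok_or_else(|| format_err!("ticket response is missing '{}'", name))
        };
        Ok((
            field("username")?,
            field("ticket")?,
            field("CSRFPreventionToken")?,
        ))
    }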
@@ -838,14 +775,17 @@ impl HttpClient {
     async fn api_request(
         client: Client<HttpsConnector>,
-        req: Request<Body>,
+        req: Request<Body>
     ) -> Result<Value, Error> {
         Self::api_response(
-            tokio::time::timeout(HTTP_TIMEOUT, client.request(req))
+            tokio::time::timeout(
+                HTTP_TIMEOUT,
+                client.request(req)
+            )
             .await
-            .map_err(|_| format_err!("http request timed out"))??,
-        )
-        .await
+            .map_err(|_| format_err!("http request timed out"))??
+        ).await
     }

     // Read-only access to server property
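Every plain HTTP call in this client goes through the same timeout-then-double-? shape: the outer Result is the deadline, the inner one the transport error. A generic sketch (the 20-second value is an assumption standing in for the crate's HTTP_TIMEOUT constant):

    use std::future::Future;
    use std::time::Duration;
    use anyhow::{format_err, Error};

    // Assumed value; stands in for the crate's HTTP_TIMEOUT.
    const HTTP_TIMEOUT: Duration = Duration::from_secs(20);

    async fn with_timeout<T>(
        fut: impl Future<Output = Result<T, hyper::Error>>,
    ) -> Result<T, Error> {
        tokio::time::timeout(HTTP_TIMEOUT, fut)
            .await
            .map_err(|_| format_err!("http request timed out"))? // outer Err: deadline hit
            .map_err(Error::from) // inner Err: transport failure
    }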
@@ -857,13 +797,7 @@ impl HttpClient {
         self.port
     }

-    pub fn request_builder(
-        server: &str,
-        port: u16,
-        method: &str,
-        path: &str,
-        data: Option<Value>,
-    ) -> Result<Request<Body>, Error> {
+    pub fn request_builder(server: &str, port: u16, method: &str, path: &str, data: Option<Value>) -> Result<Request<Body>, Error> {
         if let Some(data) = data {
             if method == "POST" {
                 let url = build_uri(server, port, path, None)?;
@@ -881,10 +815,7 @@ impl HttpClient {
                     .method(method)
                     .uri(url)
                     .header("User-Agent", "proxmox-backup-client/1.0")
-                    .header(
-                        hyper::header::CONTENT_TYPE,
-                        "application/x-www-form-urlencoded",
-                    )
+                    .header(hyper::header::CONTENT_TYPE, "application/x-www-form-urlencoded")
                     .body(Body::empty())?;
                 Ok(request)
             }
@@ -894,10 +825,7 @@ impl HttpClient {
             .method(method)
             .uri(url)
             .header("User-Agent", "proxmox-backup-client/1.0")
-            .header(
-                hyper::header::CONTENT_TYPE,
-                "application/x-www-form-urlencoded",
-            )
+            .header(hyper::header::CONTENT_TYPE, "application/x-www-form-urlencoded")
             .body(Body::empty())?;

         Ok(request)
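In the branches above that send an empty body, the encoded parameters travel in the URI's query string. A condensed, hypothetical helper showing that path, assembling the URL inline instead of via build_uri:

    use hyper::{Body, Request};

    // Hypothetical helper; the real code goes through build_uri and
    // handles the POST/body case separately.
    fn get_request(
        server: &str,
        port: u16,
        path: &str,
        query: Option<&str>,
    ) -> Result<Request<Body>, hyper::http::Error> {
        let uri = match query {
            Some(q) => format!("https://{}:{}{}?{}", server, port, path, q),
            None => format!("https://{}:{}{}", server, port, path),
        };
        Request::builder()
            .method("GET")
            .uri(uri)
            .header("User-Agent", "proxmox-backup-client/1.0")
            .header(hyper::header::CONTENT_TYPE, "application/x-www-form-urlencoded")
            .body(Body::empty())
    }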
@@ -911,27 +839,41 @@ impl Drop for HttpClient {
     }
 }

 #[derive(Clone)]
 pub struct H2Client {
     h2: h2::client::SendRequest<bytes::Bytes>,
 }

 impl H2Client {
     pub fn new(h2: h2::client::SendRequest<bytes::Bytes>) -> Self {
         Self { h2 }
     }

-    pub async fn get(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
+    pub async fn get(
+        &self,
+        path: &str,
+        param: Option<Value>
+    ) -> Result<Value, Error> {
         let req = Self::request_builder("localhost", "GET", path, param, None).unwrap();
         self.request(req).await
     }

-    pub async fn put(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
+    pub async fn put(
+        &self,
+        path: &str,
+        param: Option<Value>
+    ) -> Result<Value, Error> {
         let req = Self::request_builder("localhost", "PUT", path, param, None).unwrap();
         self.request(req).await
     }

-    pub async fn post(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
+    pub async fn post(
+        &self,
+        path: &str,
+        param: Option<Value>
+    ) -> Result<Value, Error> {
         let req = Self::request_builder("localhost", "POST", path, param, None).unwrap();
         self.request(req).await
     }
@@ -972,8 +914,7 @@ impl H2Client {
         content_type: &str,
         data: Vec<u8>,
     ) -> Result<Value, Error> {
-        let request =
-            Self::request_builder("localhost", method, path, param, Some(content_type)).unwrap();
+        let request = Self::request_builder("localhost", method, path, param, Some(content_type)).unwrap();

         let mut send_request = self.h2.clone().ready().await?;
@@ -987,9 +928,17 @@ impl H2Client {
             .await
     }

-    async fn request(&self, request: Request<()>) -> Result<Value, Error> {
+    async fn request(
+        &self,
+        request: Request<()>,
+    ) -> Result<Value, Error> {
         self.send_request(request, None)
-            .and_then(move |response| response.map_err(Error::from).and_then(Self::h2api_response))
+            .and_then(move |response| {
+                response
+                    .map_err(Error::from)
+                    .and_then(Self::h2api_response)
+            })
             .await
     }
@@ -998,8 +947,8 @@ impl H2Client {
         request: Request<()>,
         data: Option<bytes::Bytes>,
     ) -> impl Future<Output = Result<h2::client::ResponseFuture, Error>> {
-        self.h2
-            .clone()
+
+        self.h2.clone()
             .ready()
             .map_err(Error::from)
             .and_then(move |mut send_request| async move {
@@ -1014,7 +963,9 @@ impl H2Client {
         })
     }

-    pub async fn h2api_response(response: Response<h2::RecvStream>) -> Result<Value, Error> {
+    pub async fn h2api_response(
+        response: Response<h2::RecvStream>,
+    ) -> Result<Value, Error> {
         let status = response.status();

         let (_head, mut body) = response.into_parts();
@@ -1064,10 +1015,7 @@ impl H2Client {
                 let query = json_object_to_query(param)?;
                 // We detected problem with hyper around 6000 characters - so we try to keep on the safe side
                 if query.len() > 4096 {
-                    bail!(
-                        "h2 query data too large ({} bytes) - please encode data inside body",
-                        query.len()
-                    );
+                    bail!("h2 query data too large ({} bytes) - please encode data inside body", query.len());
                 }
                 Some(query)
             }
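The guard above caps query strings well below the ~6000-character region where problems with hyper were observed. As a free-standing check:

    use anyhow::{bail, Error};

    fn check_query_size(query: &str) -> Result<(), Error> {
        if query.len() > 4096 {
            bail!(
                "h2 query data too large ({} bytes) - please encode data inside body",
                query.len()
            );
        }
        Ok(())
    }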

Some files were not shown because too many files have changed in this diff.